Columns:
    repo_name    string (length 8-130)
    hexsha       sequence
    file_path    sequence
    code         sequence
    apis         sequence
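Each row pairs one repository name with parallel sequences of commit hashes, file paths, source code strings, and the APIs each file calls. As a rough illustration of how such records could be consumed, here is a minimal Python sketch; it assumes the rows are stored as JSON Lines in a file named rows.jsonl (both the filename and the storage format are assumptions, not part of the original data).

import json

# Minimal sketch, assuming the records are stored as JSON Lines in
# "rows.jsonl" (the filename and the JSONL format are assumptions).
# Each record has: repo_name (string), plus parallel sequences
# hexsha, file_path, code and apis -- one entry per captured source file.
with open("rows.jsonl", "r", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        for sha, path, source, api_calls in zip(
            row["hexsha"], row["file_path"], row["code"], row["apis"]
        ):
            # Print a short summary of each captured file.
            print(row["repo_name"], sha[:8], path, len(source), api_calls)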
gnafit/gna
[ "c1a58dac11783342c97a2da1b19c97b85bce0394" ]
[ "pylib/gna/bundles/root_histograms_v03.py" ]
[ "\nfrom load import ROOT as R\nimport numpy as N\nfrom gna.env import env, namespace\nfrom collections import OrderedDict\nfrom mpl_tools.root2numpy import get_buffer_hist1, get_bin_edges_axis\nfrom gna.constructors import Histogram\nfrom gna.configurator import NestedDict\nfrom gna.grouping import Categories\n\nfrom gna.bundle import TransformationBundle\n\nclass root_histograms_v03(TransformationBundle):\n def __init__(self, *args, **kwargs):\n TransformationBundle.__init__(self, *args, **kwargs)\n self.check_nidx_dim(0, 1, 'major')\n self.check_nidx_dim(0, 0, 'minor')\n\n self.groups = Categories( self.cfg.get('groups', {}), recursive=True )\n\n @staticmethod\n def _provides(cfg):\n return (), (cfg.name,)\n\n def build(self):\n file = R.TFile( self.cfg.filename, 'READ' )\n if file.IsZombie():\n raise Exception('Can not read ROOT file '+file.GetName())\n\n print( 'Read input file {}:'.format(file.GetName()) )\n\n for it in self.nidx.iterate():\n if it.ndim()>0:\n subst, = it.current_values()\n else:\n subst = ''\n hname = self.groups.format(subst, self.cfg.format)\n h = file.Get( hname )\n if not h:\n raise Exception('Can not read {hist} from {file}'.format( hist=hname, file=file.GetName() ))\n\n print( ' read{}: {}'.format(' '+subst, hname), end='' )\n edges = get_bin_edges_axis( h.GetXaxis() )\n data = get_buffer_hist1( h )\n if self.cfg.get( 'normalize', False ):\n print( ' [normalized]' )\n data=N.ascontiguousarray(data, dtype='d')\n data=data/data.sum()\n else:\n print()\n\n fmt = self.cfg.get('label', 'hist {name}\\n{autoindex}')\n hist=Histogram(edges, data, labels=it.current_format(fmt, name=self.cfg.name))\n self.set_output(self.cfg.name, it, hist.single())\n\n self.context.objects[('hist', subst)] = hist\n\n file.Close()\n" ]
[ [ "numpy.ascontiguousarray" ] ]
nwlambert/matsubara
[ "859596d2112e8be540618164d067db79fc5f4e47" ]
[ "matsubara/tests/test_pure_dephasing.py" ]
[ "\"\"\"\nTests for the pure dephasing analytical computations.\n\"\"\"\n\nimport numpy as np\nfrom numpy.testing import (run_module_suite, assert_,\n assert_array_almost_equal, assert_raises)\nfrom matsubara.pure_dephasing import (pure_dephasing_integrand,\n pure_dephasing_evolution,\n pure_dephasing_evolution_analytical)\nfrom matsubara.correlation import (biexp_fit, nonmatsubara_exponents, matsubara_exponents,\n matsubara_zero_analytical)\n\n\ndef test_pure_dephasing():\n \"\"\"\n pure_dephasing: Tests the pure_dephasing integrand.\n \"\"\"\n coup_strength, cav_broad, cav_freq = .08, .4, 1.\n\n # Test only at short times\n tlist = np.linspace(0, 20, 100)\n\n # Set qubit frequency to 0 to see only the dynamics due to the interaction.\n wq = 0\n # Zero temperature case\n beta = np.inf\n ck1, vk1 = nonmatsubara_exponents(coup_strength, cav_broad, cav_freq, beta)\n # ck2, vk2 = matsubara_exponents(lam, gamma, w0, beta, N_exp)\n\n mats_data_zero = matsubara_zero_analytical(coup_strength, cav_broad, cav_freq, tlist)\n ck20, vk20 = biexp_fit(tlist, mats_data_zero)\n\n ck = np.concatenate([ck1, ck20])\n vk = np.concatenate([vk1, vk20])\n\n pd_analytical = pure_dephasing_evolution(tlist, coup_strength, cav_broad, cav_freq, beta, wq)\n pd_numerical_fitting = pure_dephasing_evolution_analytical(tlist, wq, ck, vk)\n residue = np.abs(pd_analytical - pd_numerical_fitting)\n assert_(np.max(residue) < 1e-3)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n" ]
[ [ "numpy.testing.run_module_suite", "numpy.abs", "numpy.max", "numpy.concatenate", "numpy.linspace" ] ]
wl1447215262/hyper
[ "93cc5877e4a20f0bb6f5788510a590e658fc22b7" ]
[ "HyperLprGUI.py" ]
[ "\"\"\"\nAuthor: youngorsu\nEmail : [email protected]\nLast edited: 2018.1.29\n\"\"\"\n# coding=utf-8\n\n\nimport sys\nimport os\nfrom PyQt5.QtWidgets import (\n QMainWindow,\n QLabel,\n QLineEdit,\n QPushButton,\n QHBoxLayout,\n QVBoxLayout,\n QGridLayout,\n QTableWidget,\n QWidget,\n QAbstractItemView,\n QHeaderView,\n QGraphicsView,\n QGraphicsScene,\n QGraphicsPixmapItem,\n QSplitter,\n QFileDialog,\n QTableWidgetItem,\n QGraphicsRectItem,\n QCheckBox,\n QMessageBox,\n QGroupBox,\n QGraphicsSimpleTextItem,\n qApp,\n QAction,\n QApplication)\nfrom PyQt5.QtGui import QIcon, QColor, QPainter, QImage, QPixmap, QPen, QBrush, QFont, QPalette, QKeySequence\nfrom PyQt5.QtCore import Qt, QDir, QSize, QEventLoop, QThread, pyqtSignal\n\nfrom hyperlpr_py3 import pipline as pp\n\nimport cv2\n\nimport numpy as np\n\nimport time\n\nimport shutil\n\ndraw_plate_in_image_enable = 1\n\nplateTypeName = [\"蓝\", \"黄\", \"绿\", \"白\", \"黑 \"]\n\n\ndef SimpleRecognizePlateWithGui(image):\n t0 = time.time()\n\n images = pp.detect.detectPlateRough(\n image, image.shape[0], top_bottom_padding_rate=0.1)\n\n res_set = []\n y_offset = 32\n for j, plate in enumerate(images):\n plate, rect, origin_plate = plate\n\n plate = cv2.resize(plate, (136, 36 * 2))\n t1 = time.time()\n\n plate_type = pp.td.SimplePredict(plate)\n plate_color = plateTypeName[plate_type]\n\n if (plate_type > 0) and (plate_type < 5):\n plate = cv2.bitwise_not(plate)\n\n if draw_plate_in_image_enable == 1:\n image[y_offset:y_offset + plate.shape[0], 0:plate.shape[1]] = plate\n y_offset = y_offset + plate.shape[0] + 4\n\n image_rgb = pp.fm.findContoursAndDrawBoundingBox(plate)\n\n if draw_plate_in_image_enable == 1:\n image[y_offset:y_offset + image_rgb.shape[0],\n 0:image_rgb.shape[1]] = image_rgb\n y_offset = y_offset + image_rgb.shape[0] + 4\n\n image_rgb = pp.fv.finemappingVertical(image_rgb)\n\n if draw_plate_in_image_enable == 1:\n image[y_offset:y_offset + image_rgb.shape[0],\n 0:image_rgb.shape[1]] = image_rgb\n y_offset = y_offset + image_rgb.shape[0] + 4\n\n pp.cache.verticalMappingToFolder(image_rgb)\n\n if draw_plate_in_image_enable == 1:\n image[y_offset:y_offset + image_rgb.shape[0],\n 0:image_rgb.shape[1]] = image_rgb\n y_offset = y_offset + image_rgb.shape[0] + 4\n\n e2e_plate, e2e_confidence = pp.e2e.recognizeOne(image_rgb)\n print(\"e2e:\", e2e_plate, e2e_confidence)\n\n image_gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)\n\n #print(\"校正\", time.time() - t1, \"s\")\n\n t2 = time.time()\n val = pp.segmentation.slidingWindowsEval(image_gray)\n # print val\n #print(\"分割和识别\", time.time() - t2, \"s\")\n\n res=\"\"\n confidence = 0\n if len(val) == 3:\n blocks, res, confidence = val\n if confidence / 7 > 0.7:\n\n if draw_plate_in_image_enable == 1:\n image = pp.drawRectBox(image, rect, res)\n for i, block in enumerate(blocks):\n block_ = cv2.resize(block, (24, 24))\n block_ = cv2.cvtColor(block_, cv2.COLOR_GRAY2BGR)\n image[j * 24:(j * 24) + 24, i *\n 24:(i * 24) + 24] = block_\n if image[j * 24:(j * 24) + 24,\n i * 24:(i * 24) + 24].shape == block_.shape:\n pass\n\n res_set.append([res,\n confidence / 7,\n rect,\n plate_color,\n e2e_plate,\n e2e_confidence,\n len(blocks)])\n print(\"seg:\",res,confidence/7)\n #print(time.time() - t0, \"s\")\n\n print(\"---------------------------------\")\n return image, res_set\n\n\nclass LicenseRecognizationThread(QThread):\n\n recognization_done_signal = pyqtSignal(list)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.hyperlpr_dir_path = \"\"\n self.filenames = []\n\n 
def set_parameter(self, filename_list, path):\n self.hyperlpr_dir_path = path\n self.filenames = filename_list\n\n def run(self):\n while True:\n time.sleep(1)\n if len(self.hyperlpr_dir_path) > 0:\n for i in range(0, len(self.filenames)):\n path = os.path.join(\n self.hyperlpr_dir_path, self.filenames[i])\n image = cv2.imdecode(np.fromfile(path, dtype=np.uint8), -1)\n image, res_set = SimpleRecognizePlateWithGui(image)\n self.recognization_done_signal.emit([i, res_set])\n\n self.hyperlpr_dir_path = \"\"\n\n\nclass HyperLprImageView(QGraphicsView):\n\n def __init__(self):\n\n super().__init__()\n\n self.init_ui()\n\n def init_ui(self):\n\n scene = QGraphicsScene()\n scene.setBackgroundBrush(QColor(100, 100, 100))\n scene.setItemIndexMethod(QGraphicsScene.BspTreeIndex)\n\n scene.setSceneRect(scene.itemsBoundingRect())\n\n self.setDragMode(QGraphicsView.RubberBandDrag)\n self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)\n self.setRenderHints(QPainter.Antialiasing | QPainter.TextAntialiasing)\n\n self.frame_item = QGraphicsPixmapItem()\n\n self.text_item_offset = 0\n self.rect_item_array = []\n self.text_item_array = []\n for i in range(0, 5):\n rect_item = QGraphicsRectItem()\n rect_item.setVisible(False)\n rect_item.setZValue(20.0)\n rect_item.setPen(QPen(Qt.red, 5))\n rect_item.setRect(20, 20, 20, 20)\n scene.addItem(rect_item)\n self.rect_item_array.append(rect_item)\n text_item = QGraphicsSimpleTextItem(\"\")\n text_item.setBrush(QBrush(Qt.red))\n text_item.setZValue(20.0)\n text_item.setPos(10, 50)\n text_item.setFont(QFont(\"黑体\", 24))\n text_item.setVisible(False)\n scene.addItem(text_item)\n self.text_item_array.append(text_item)\n\n scene.addItem(self.frame_item)\n\n self.curr_factor = 1.0\n\n self.setScene(scene)\n\n def resetRectText(self, res_set):\n max_no = len(res_set)\n\n if max_no > 5:\n max_no = 5\n\n for i in range(0, 5):\n if i < max_no:\n curr_rect = res_set[i][2]\n self.rect_item_array[i].setRect(int(curr_rect[0]), int(\n curr_rect[1]), int(curr_rect[2]), int(curr_rect[3]))\n self.rect_item_array[i].setVisible(True)\n\n self.text_item_array[i].setText(\n res_set[i][4] + \" \" + res_set[i][3])\n self.text_item_array[i].setPos(\n int(curr_rect[0]), int(curr_rect[1]) - 48)\n self.text_item_array[i].setVisible(True)\n else:\n self.text_item_array[i].setVisible(False)\n self.rect_item_array[i].setVisible(False)\n\n def wheelEvent(self, event):\n factor = event.angleDelta().y() / 120.0\n if event.angleDelta().y() / 120.0 > 0:\n factor = 1.08\n else:\n factor = 0.92\n\n if self.curr_factor > 0.1 and self.curr_factor < 10:\n self.curr_factor = self.curr_factor * factor\n self.scale(factor, factor)\n\n def resetPixmap(self, image):\n\n self.frame_item.setPixmap(QPixmap.fromImage(image))\n\n\nclass HyperLprWindow(QMainWindow):\n\n start_init_signal = pyqtSignal()\n\n def __init__(self):\n\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n\n self.statusBar().showMessage('Ready')\n\n self.left_action = QAction('上一个', self)\n self.left_action.setShortcut(QKeySequence.MoveToPreviousChar)\n self.left_action.triggered.connect(self.analyze_last_one_image)\n\n self.right_action = QAction('下一个', self)\n self.right_action.setShortcut(QKeySequence.MoveToNextChar)\n self.right_action.triggered.connect(self.analyze_next_one_image)\n\n self.rename_image_action = QAction('保存e2e文件名', self)\n self.rename_image_action.setShortcut(QKeySequence.MoveToPreviousLine)\n self.rename_image_action.triggered.connect(self.rename_current_image_with_info)\n\n self.statusBar()\n\n menubar 
= self.menuBar()\n fileMenu = menubar.addMenu('&Function')\n fileMenu.addAction(self.left_action)\n fileMenu.addAction(self.right_action)\n fileMenu.addAction(self.rename_image_action)\n\n self.image_window_view = HyperLprImageView()\n\n table_widget_header_labels = [\n \"文件名\",\n \"分割识别\",\n \"置信度\",\n \"颜色\",\n \"E2E识别\",\n \"E2E置信度\"]\n\n self.hyperlpr_tableview = QTableWidget(\n 0, len(table_widget_header_labels))\n self.hyperlpr_tableview.setHorizontalHeaderLabels(\n table_widget_header_labels)\n\n self.hyperlpr_tableview.setSelectionBehavior(\n QAbstractItemView.SelectItems)\n self.hyperlpr_tableview.setSelectionMode(\n QAbstractItemView.SingleSelection)\n self.hyperlpr_tableview.setEditTriggers(\n QAbstractItemView.NoEditTriggers)\n self.hyperlpr_tableview.horizontalHeader().setSectionResizeMode(\n QHeaderView.ResizeToContents)\n self.hyperlpr_tableview.setEditTriggers(\n QAbstractItemView.NoEditTriggers)\n\n self.hyperlpr_tableview.cellClicked.connect(\n self.recognize_one_license_plate)\n\n self.left_button = QPushButton(\"<\")\n self.left_button.setFixedWidth(60)\n self.right_button = QPushButton(\">\")\n self.right_button.setFixedWidth(60)\n self.left_button.setEnabled(False)\n self.right_button.setEnabled(False)\n self.left_button.clicked.connect(self.analyze_last_one_image)\n self.right_button.clicked.connect(self.analyze_next_one_image)\n left_right_layout = QHBoxLayout()\n left_right_layout.addStretch()\n left_right_layout.addWidget(self.left_button)\n left_right_layout.addStretch()\n left_right_layout.addWidget(self.right_button)\n left_right_layout.addStretch()\n\n self.location_label = QLabel(\"车牌目录\", self)\n self.location_text = QLineEdit(self)\n self.location_text.setEnabled(False)\n #self.location_text.setFixedWidth(300)\n self.location_button = QPushButton(\"...\")\n self.location_button.clicked.connect(self.select_new_dir)\n\n self.location_layout = QHBoxLayout()\n self.location_layout.addWidget(self.location_label)\n self.location_layout.addWidget(self.location_text)\n self.location_layout.addWidget(self.location_button)\n self.location_layout.addStretch()\n\n self.check_box = QCheckBox(\"与文件名比较车牌\")\n self.check_box.setChecked(True)\n\n self.update_file_path_button = QPushButton('批量识别')\n self.update_file_path_button.clicked.connect(\n self.batch_recognize_all_images)\n\n self.update_file_path_layout = QHBoxLayout()\n self.update_file_path_layout.addWidget(self.check_box)\n self.update_file_path_layout.addWidget(self.update_file_path_button)\n self.update_file_path_layout.addStretch()\n\n self.save_as_e2e_filename_button = QPushButton(\"保存e2e文件名\")\n self.save_as_e2e_filename_button.setEnabled(False)\n self.save_as_e2e_filename_button.clicked.connect(self.rename_current_image_with_info)\n self.save_layout = QHBoxLayout()\n self.save_layout.addWidget(self.save_as_e2e_filename_button)\n self.save_layout.addStretch()\n\n self.top_layout = QVBoxLayout()\n self.top_layout.addLayout(left_right_layout)\n self.top_layout.addLayout(self.location_layout)\n self.top_layout.addLayout(self.update_file_path_layout)\n self.top_layout.addLayout(self.save_layout)\n\n function_groupbox = QGroupBox(\"功能区\")\n function_groupbox.setLayout(self.top_layout)\n\n license_plate_image_label = QLabel(\"车牌图\")\n self.license_plate_widget = QLabel(\"\")\n\n block_image_label = QLabel(\"分割图\")\n self.block_plate_widget = QLabel(\"\")\n\n filename_label = QLabel(\"文件名:\")\n self.filename_edit = QLineEdit()\n\n segmentation_recognition_label = QLabel(\"分割识别:\")\n 
self.segmentation_recognition_edit = QLineEdit()\n self.segmentation_recognition_edit.setFont(QFont(\"黑体\", 24, QFont.Bold))\n # self.segmentation_recognition_edit.setStyleSheet(\"color:red\")\n\n confidence_label = QLabel(\"分割识别\\n置信度\")\n self.confidence_edit = QLineEdit()\n #self.confidence_edit.setFont(QFont(\"黑体\", 24, QFont.Bold))\n # self.confidence_edit.setStyleSheet(\"color:red\")\n\n plate_color_label = QLabel(\"车牌颜色\")\n self.plate_color_edit = QLineEdit()\n self.plate_color_edit.setFont(QFont(\"黑体\", 24, QFont.Bold))\n # self.plate_color_edit.setStyleSheet(\"color:red\")\n\n e2e_recognization_label = QLabel(\"e2e识别:\")\n self.e2e_recognization_edit = QLineEdit()\n self.e2e_recognization_edit.setFont(QFont(\"黑体\", 24, QFont.Bold))\n # self.e2e_recognization_edit.setStyleSheet(\"color:red\")\n\n e2e_confidence_label = QLabel(\"e2e置信度\")\n self.e2e_confidence_edit = QLineEdit()\n #self.e2e_confidence_edit.setFont(QFont(\"黑体\", 24, QFont.Bold))\n # self.e2e_confidence_edit.setStyleSheet(\"color:red\")\n\n info_gridlayout = QGridLayout()\n line_index = 0\n info_gridlayout.addWidget(filename_label, line_index, 0)\n info_gridlayout.addWidget(self.filename_edit, line_index, 1)\n line_index += 1\n info_gridlayout.addWidget(license_plate_image_label, line_index, 0)\n info_gridlayout.addWidget(self.license_plate_widget, line_index, 1)\n line_index += 1\n info_gridlayout.addWidget(e2e_recognization_label, line_index, 0)\n info_gridlayout.addWidget(self.e2e_recognization_edit, line_index, 1)\n line_index += 1\n info_gridlayout.addWidget(\n segmentation_recognition_label, line_index, 0)\n info_gridlayout.addWidget(\n self.segmentation_recognition_edit, line_index, 1)\n line_index += 1\n info_gridlayout.addWidget(plate_color_label, line_index, 0)\n info_gridlayout.addWidget(self.plate_color_edit, line_index, 1)\n line_index += 1\n info_gridlayout.addWidget(block_image_label, line_index, 0)\n info_gridlayout.addWidget(self.block_plate_widget, line_index, 1)\n line_index += 1\n info_gridlayout.addWidget(confidence_label, line_index, 0)\n info_gridlayout.addWidget(self.confidence_edit, line_index, 1)\n line_index += 1\n info_gridlayout.addWidget(e2e_confidence_label, line_index, 0)\n info_gridlayout.addWidget(self.e2e_confidence_edit, line_index, 1)\n\n info_widget = QGroupBox(\"分割识别&e2e\")\n\n info_widget.setLayout(info_gridlayout)\n\n right_splitter = QSplitter(Qt.Vertical)\n right_splitter.addWidget(self.hyperlpr_tableview)\n right_splitter.addWidget(function_groupbox)\n right_splitter.addWidget(info_widget)\n right_splitter.setStretchFactor(0, 2)\n right_splitter.setStretchFactor(2, 1)\n\n main_splitter = QSplitter(Qt.Horizontal)\n main_splitter.addWidget(self.image_window_view)\n main_splitter.addWidget(right_splitter)\n main_splitter.setStretchFactor(0, 1)\n\n self.image_filename_list = []\n self.hyperlpr_dir_path = \"\"\n self.segmentation_recognition_correct_number = 0\n self.color_correct_number = 0\n self.e2e_recognization_correct_number = 0\n self.current_row = 0\n\n self.batch_recognization_thread = LicenseRecognizationThread()\n self.batch_recognization_thread.recognization_done_signal.connect(\n self.recognization_done_slot)\n self.batch_recognization_thread.start()\n\n self.start_init_signal.connect(self.read_path_and_show_one_image)\n\n self.setCentralWidget(main_splitter)\n\n self.setWindowTitle(\"HyperLPR车牌识别软件v1.0\")\n\n self.start_init_signal.emit()\n\n def read_path_and_show_one_image(self):\n\n hyperlpr_dir_info_filepath = QDir.homePath() + \"/hyperlpr_dir_file\"\n if 
os.path.exists(hyperlpr_dir_info_filepath):\n with open(hyperlpr_dir_info_filepath, 'r') as f:\n self.hyperlpr_dir_path = f.read()\n\n if len(self.hyperlpr_dir_path) > 0:\n self.reset_info_gui()\n\n if len(self.image_filename_list) > 0:\n self.recognize_and_show_one_image(self.image_filename_list[0], 0)\n\n def select_new_dir(self):\n\n self.hyperlpr_dir_path = QFileDialog.getExistingDirectory(\n self, \"读取文件夹\", QDir.currentPath())\n\n if len(self.hyperlpr_dir_path) > 0:\n hyperlpr_dir_info_filepath = QDir.homePath() + \"/hyperlpr_dir_file\"\n with open(hyperlpr_dir_info_filepath, 'w') as f:\n f.write(self.hyperlpr_dir_path)\n self.reset_info_gui()\n\n def rename_current_image_with_info(self):\n if len(self.hyperlpr_dir_path) > 0:\n target_dir_path = self.hyperlpr_dir_path + \"/result\"\n if not os.path.exists(target_dir_path):\n os.makedirs(target_dir_path)\n if len(self.plate_color_edit.text())>0 and len(self.e2e_recognization_edit.text())>0:\n orign_path = os.path.join(self.hyperlpr_dir_path, self.filename_edit.text())\n target_path = os.path.join(target_dir_path,self.plate_color_edit.text()+\"-\"+self.e2e_recognization_edit.text()+\".jpg\")\n shutil.copyfile(orign_path, target_path)\n\n def reset_info_gui(self):\n\n self.location_text.setText(self.hyperlpr_dir_path)\n self.scan_files_with_new_dir(self.hyperlpr_dir_path)\n self.fill_table_with_new_info()\n\n def scan_files_with_new_dir(self, path):\n\n name_list = os.listdir(path) # 列出文件夹下所有的目录与文件\n self.image_filename_list.clear()\n for i in range(0, len(name_list)):\n if name_list[i].endswith(\n \".jpg\") or name_list[i].endswith(\".png\"):\n self.image_filename_list.append(name_list[i])\n\n def fill_table_with_new_info(self):\n self.hyperlpr_tableview.clearContents()\n row_count = self.hyperlpr_tableview.rowCount()\n for i in range(row_count, -1, -1):\n self.hyperlpr_tableview.removeRow(i)\n\n for i in range(0, len(self.image_filename_list)):\n row = self.hyperlpr_tableview.rowCount()\n self.hyperlpr_tableview.insertRow(row)\n\n item0 = QTableWidgetItem()\n item0.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(row, 0, item0)\n self.hyperlpr_tableview.item(\n row, 0).setText(\n self.image_filename_list[i])\n\n item1 = QTableWidgetItem()\n item1.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(row, 1, item1)\n\n item2 = QTableWidgetItem()\n item2.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(row, 2, item2)\n\n item3 = QTableWidgetItem()\n item3.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(row, 3, item3)\n\n item4 = QTableWidgetItem()\n item4.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(row, 4, item4)\n\n item5 = QTableWidgetItem()\n item5.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(row, 5, item5)\n\n if len(self.image_filename_list) > 0:\n self.left_button.setEnabled(True)\n self.right_button.setEnabled(True)\n self.save_as_e2e_filename_button.setEnabled(True)\n\n def analyze_last_one_image(self):\n if self.current_row > 0:\n self.recognize_one_license_plate(self.current_row-1, 0)\n\n def analyze_next_one_image(self):\n if self.current_row < (len(self.image_filename_list)-1):\n self.recognize_one_license_plate(self.current_row + 1, 0)\n\n def recognize_one_license_plate(self, row, col):\n if col == 0 and row < len(self.image_filename_list):\n self.current_row = row\n self.recognize_and_show_one_image(\n self.image_filename_list[row], row)\n\n def recognize_and_show_one_image(self, image_filename_text, row):\n\n 
if image_filename_text.endswith(\".jpg\"):\n\n print(image_filename_text)\n path = os.path.join(self.hyperlpr_dir_path, image_filename_text)\n image = cv2.imdecode(np.fromfile(path, dtype=np.uint8), -1)\n image, res_set = SimpleRecognizePlateWithGui(image)\n img = QImage(\n image.data,\n image.shape[1],\n image.shape[0],\n image.shape[1] * image.shape[2],\n QImage.Format_RGB888)\n self.image_window_view.resetPixmap(img.rgbSwapped())\n self.image_window_view.resetRectText(res_set)\n\n if len(res_set) > 0:\n curr_rect = res_set[0][2]\n image_crop = image[int(curr_rect[1]):int(\n curr_rect[1] + curr_rect[3]), int(curr_rect[0]):int(curr_rect[0] + curr_rect[2])]\n curr_plate = cv2.resize(image_crop, (204, 108))\n plate_img = QImage(\n curr_plate.data,\n curr_plate.shape[1],\n curr_plate.shape[0],\n curr_plate.shape[1] *\n curr_plate.shape[2],\n QImage.Format_RGB888)\n self.license_plate_widget.setPixmap(\n QPixmap.fromImage(plate_img.rgbSwapped()))\n\n # print(res_set[0][6])\n block_crop = image[0:24, 0:(24 * int(res_set[0][6]))]\n curr_block = cv2.resize(\n block_crop, (24 * int(res_set[0][6]), 24))\n block_image = QImage(\n curr_block.data,\n curr_block.shape[1],\n curr_block.shape[0],\n curr_block.shape[1] *\n curr_block.shape[2],\n QImage.Format_RGB888)\n self.block_plate_widget.setPixmap(\n QPixmap.fromImage(block_image.rgbSwapped()))\n\n self.segmentation_recognition_edit.setText(res_set[0][0])\n if res_set[0][0] in image_filename_text:\n self.segmentation_recognition_edit.setStyleSheet(\"color:black\")\n else:\n self.segmentation_recognition_edit.setStyleSheet(\"color:red\")\n\n\n self.filename_edit.setText(image_filename_text)\n self.confidence_edit.setText(\"%.3f\" % (float(res_set[0][1])))\n\n self.plate_color_edit.setText(res_set[0][3])\n if res_set[0][3] in image_filename_text:\n self.plate_color_edit.setStyleSheet(\"color:black\")\n else:\n self.plate_color_edit.setStyleSheet(\"color:red\")\n\n self.e2e_recognization_edit.setText(res_set[0][4])\n if res_set[0][4] in image_filename_text:\n self.e2e_recognization_edit.setStyleSheet(\"color:black\")\n else:\n self.e2e_recognization_edit.setStyleSheet(\"color:red\")\n\n self.e2e_confidence_edit.setText(\n \"%.3f\" % (float(res_set[0][5])))\n else:\n self.license_plate_widget.clear()\n self.block_plate_widget.clear()\n self.segmentation_recognition_edit.setText(\"\")\n self.filename_edit.setText(image_filename_text)\n self.confidence_edit.setText(\"\")\n self.plate_color_edit.setText(\"\")\n self.e2e_recognization_edit.setText(\"\")\n self.e2e_confidence_edit.setText(\"\")\n\n self.fill_table_widget_with_res_info(res_set, row)\n\n def batch_recognize_all_images(self):\n self.segmentation_recognition_correct_number = 0\n self.color_correct_number = 0\n self.e2e_recognization_correct_number = 0\n self.batch_recognization_thread.set_parameter(\n self.image_filename_list, self.hyperlpr_dir_path)\n\n def recognization_done_slot(self, result_list):\n row = result_list[0]\n res_set = result_list[1]\n self.fill_table_widget_with_res_info(res_set, row)\n\n if row == len(self.image_filename_list) - 1:\n total_number = len(self.image_filename_list)\n\n row_count = self.hyperlpr_tableview.rowCount()\n if row_count > total_number:\n self.hyperlpr_tableview.removeRow(total_number)\n\n self.hyperlpr_tableview.insertRow(total_number)\n\n item0 = QTableWidgetItem()\n item0.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(total_number, 0, item0)\n self.hyperlpr_tableview.item(\n total_number, 0).setText(\n \"统计结果\")\n\n item1 = 
QTableWidgetItem()\n item1.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(total_number, 1, item1)\n self.hyperlpr_tableview.item(\n total_number,\n 1).setText(\n \"{0} / {1} = {2: .3f}\".format(\n self.segmentation_recognition_correct_number,\n total_number,\n self.segmentation_recognition_correct_number /\n total_number))\n\n item2 = QTableWidgetItem()\n item2.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(total_number, 2, item2)\n\n item3 = QTableWidgetItem()\n item3.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(total_number, 3, item3)\n self.hyperlpr_tableview.item(\n total_number, 3).setText(\n \"{0} / {1} = {2: .3f}\".format(self.e2e_recognization_correct_number, total_number,\n self.e2e_recognization_correct_number / total_number))\n\n item4 = QTableWidgetItem()\n item4.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(total_number, 4, item4)\n self.hyperlpr_tableview.item(\n total_number, 4).setText(\n \"{0} / {1} = {2: .3f}\".format(self.color_correct_number, total_number,\n self.color_correct_number / total_number))\n\n item5 = QTableWidgetItem()\n item5.setTextAlignment(Qt.AlignCenter)\n self.hyperlpr_tableview.setItem(total_number, 5, item5)\n\n def fill_table_widget_with_res_info(self, res_set, row):\n image_filename_text = self.image_filename_list[row]\n if len(res_set) > 0:\n\n self.hyperlpr_tableview.item(row, 1).setText(res_set[0][0])\n if res_set[0][0] in image_filename_text:\n self.hyperlpr_tableview.item(\n row, 1).setForeground(\n QBrush(\n QColor(\n 0, 0, 255)))\n self.segmentation_recognition_correct_number += 1\n else:\n self.hyperlpr_tableview.item(\n row, 1).setForeground(\n QBrush(\n QColor(\n 255, 0, 0)))\n\n self.hyperlpr_tableview.item(\n row, 2).setText(\n \"%.3f\" %\n (float(\n res_set[0][1])))\n\n self.hyperlpr_tableview.item(row, 3).setText(res_set[0][3])\n if res_set[0][3] in image_filename_text:\n self.hyperlpr_tableview.item(\n row, 3).setForeground(\n QBrush(\n QColor(\n 0, 0, 255)))\n self.color_correct_number += 1\n else:\n self.hyperlpr_tableview.item(\n row, 3).setForeground(\n QBrush(\n QColor(\n 255, 0, 0)))\n\n self.hyperlpr_tableview.item(row, 4).setText(res_set[0][4])\n if res_set[0][4] in image_filename_text:\n self.hyperlpr_tableview.item(\n row, 4).setForeground(\n QBrush(\n QColor(\n 0, 0, 255)))\n self.e2e_recognization_correct_number += 1\n else:\n self.hyperlpr_tableview.item(\n row, 4).setForeground(\n QBrush(\n QColor(\n 255, 0, 0)))\n\n self.hyperlpr_tableview.item(\n row, 5).setText(\n \"%.3f\" %\n (float(\n res_set[0][5])))\n\n\nif __name__ == '__main__':\n\n app = QApplication(sys.argv)\n\n hyper_lpr_widow = HyperLprWindow()\n\n hyper_lpr_widow.showMaximized()\n\n sys.exit(app.exec_())\n" ]
[ [ "numpy.fromfile" ] ]
monshri/adversarial-robustness-toolbox
[ "6465240cb6a71bc376dae52459a7133e403df8d2", "6465240cb6a71bc376dae52459a7133e403df8d2" ]
[ "art/attacks/evasion/auto_projected_gradient_descent.py", "tests/defences/test_feature_squeezing.py" ]
[ "# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the `Auto Projected Gradient Descent` attack.\n\n| Paper link: https://arxiv.org/abs/2003.01690\n\"\"\"\nimport logging\nimport math\nfrom typing import Optional, Union, TYPE_CHECKING\n\nimport numpy as np\nfrom tqdm.auto import trange\n\nfrom art.config import ART_NUMPY_DTYPE\nfrom art.attacks.attack import EvasionAttack\nfrom art.estimators.estimator import BaseEstimator, LossGradientsMixin\nfrom art.estimators.classification.classifier import ClassifierMixin\nfrom art.utils import check_and_transform_label_format, projection, random_sphere, is_probability, get_labels_np_array\n\nif TYPE_CHECKING:\n from art.utils import CLASSIFIER_LOSS_GRADIENTS_TYPE\n\nlogger = logging.getLogger(__name__)\n\n\nclass AutoProjectedGradientDescent(EvasionAttack):\n \"\"\"\n Implementation of the `Auto Projected Gradient Descent` attack.\n\n | Paper link: https://arxiv.org/abs/2003.01690\n \"\"\"\n\n attack_params = EvasionAttack.attack_params + [\n \"norm\",\n \"eps\",\n \"eps_step\",\n \"max_iter\",\n \"targeted\",\n \"nb_random_init\",\n \"batch_size\",\n \"loss_type\",\n \"verbose\",\n ]\n _estimator_requirements = (BaseEstimator, LossGradientsMixin, ClassifierMixin)\n _predefined_losses = [None, \"cross_entropy\", \"difference_logits_ratio\"]\n\n def __init__(\n self,\n estimator: \"CLASSIFIER_LOSS_GRADIENTS_TYPE\",\n norm: Union[int, float, str] = np.inf,\n eps: float = 0.3,\n eps_step: float = 0.1,\n max_iter: int = 100,\n targeted: bool = False,\n nb_random_init: int = 5,\n batch_size: int = 32,\n loss_type: Optional[str] = None,\n verbose: bool = True,\n ):\n \"\"\"\n Create a :class:`.AutoProjectedGradientDescent` instance.\n\n :param estimator: An trained estimator.\n :param norm: The norm of the adversarial perturbation. Possible values: \"inf\", np.inf, 1 or 2.\n :param eps: Maximum perturbation that the attacker can introduce.\n :param eps_step: Attack step size (input variation) at each iteration.\n :param max_iter: The maximum number of iterations.\n :param targeted: Indicates whether the attack is targeted (True) or untargeted (False).\n :param nb_random_init: Number of random initialisations within the epsilon ball. For num_random_init=0\n starting at the original input.\n :param batch_size: Size of the batch on which adversarial samples are generated.\n :param loss_type: Defines the loss to attack. 
Available options: None (Use loss defined by estimator),\n \"cross_entropy\", or \"difference_logits_ratio\"\n :param verbose: Show progress bars.\n \"\"\"\n from art.estimators.classification import TensorFlowClassifier, TensorFlowV2Classifier, PyTorchClassifier\n\n if loss_type not in self._predefined_losses:\n raise ValueError(\n \"The argument loss_type has an invalid value. The following options for `loss_type` are currently \"\n \"supported: {}\".format(self._predefined_losses)\n )\n\n if loss_type is None:\n if hasattr(estimator, \"predict\") and is_probability(\n estimator.predict(x=np.ones(shape=(1, *estimator.input_shape), dtype=np.float32))\n ):\n raise ValueError( # pragma: no cover\n \"AutoProjectedGradientDescent is expecting logits as estimator output, the provided \"\n \"estimator seems to predict probabilities.\"\n )\n\n estimator_apgd = estimator\n else:\n if isinstance(estimator, TensorFlowClassifier):\n import tensorflow as tf\n\n if loss_type == \"cross_entropy\":\n if is_probability(estimator.predict(x=np.ones(shape=(1, *estimator.input_shape)))):\n raise NotImplementedError(\"Cross-entropy loss is not implemented for probability output.\")\n\n self._loss_object = tf.reduce_mean(\n tf.keras.losses.categorical_crossentropy(\n y_pred=estimator._output, y_true=estimator._labels_ph, from_logits=True\n )\n )\n\n elif loss_type == \"difference_logits_ratio\":\n if is_probability(estimator.predict(x=np.ones(shape=(1, *estimator.input_shape)))):\n raise ValueError( # pragma: no cover\n \"The provided estimator seems to predict probabilities. \"\n \"If loss_type='difference_logits_ratio' the estimator has to to predict logits.\"\n )\n\n raise ValueError(\n \"The loss `difference_logits_ratio` has not been validated completely. It seems that the \"\n \"commented implemented below is failing to selected the second largest logit for cases \"\n \"where the largest logit is the true logit. 
For future work `difference_logits_ratio` and \"\n \"loss_fn should return the same loss value.\"\n )\n\n # def difference_logits_ratio(y_true, y_pred):\n # i_y_true = tf.cast(tf.math.argmax(tf.cast(y_true, tf.int32), axis=1), tf.int32)\n # i_y_pred_arg = tf.argsort(y_pred, axis=1)\n # # Not completely sure if the following line is correct.\n # # `i_y_pred_arg[:, -2], i_y_pred_arg[:, -1]` seems closer to the output of `loss_fn` than\n # # `i_y_pred_arg[:, -1], i_y_pred_arg[:, -2]`\n # i_z_i = tf.where(i_y_pred_arg[:, -1] != i_y_true[:], i_y_pred_arg[:, -2],\n # i_y_pred_arg[:, -1])\n #\n # z_1 = tf.gather(y_pred, i_y_pred_arg[:, -1], axis=1, batch_dims=0)\n # z_3 = tf.gather(y_pred, i_y_pred_arg[:, -3], axis=1, batch_dims=0)\n # z_i = tf.gather(y_pred, i_z_i, axis=1, batch_dims=0)\n # z_y = tf.gather(y_pred, i_y_true, axis=1, batch_dims=0)\n #\n # z_1 = tf.linalg.diag_part(z_1)\n # z_3 = tf.linalg.diag_part(z_3)\n # z_i = tf.linalg.diag_part(z_i)\n # z_y = tf.linalg.diag_part(z_y)\n #\n # dlr = -(z_y - z_i) / (z_1 - z_3)\n #\n # return tf.reduce_mean(dlr)\n #\n # def loss_fn(y_true, y_pred):\n # i_y_true = np.argmax(y_true, axis=1)\n # i_y_pred_arg = np.argsort(y_pred, axis=1)\n # i_z_i = np.where(i_y_pred_arg[:, -1] != i_y_true[:], i_y_pred_arg[:, -1],\n # i_y_pred_arg[:, -2])\n #\n # z_1 = y_pred[:, i_y_pred_arg[:, -1]]\n # z_3 = y_pred[:, i_y_pred_arg[:, -3]]\n # z_i = y_pred[:, i_z_i]\n # z_y = y_pred[:, i_y_true]\n #\n # z_1 = np.diag(z_1)\n # z_3 = np.diag(z_3)\n # z_i = np.diag(z_i)\n # z_y = np.diag(z_y)\n #\n # dlr = -(z_y - z_i) / (z_1 - z_3)\n #\n # return np.mean(dlr)\n #\n # self._loss_fn = loss_fn\n # self._loss_object = difference_logits_ratio(y_true=estimator._labels_ph,\n # y_pred=estimator._output)\n\n estimator_apgd = TensorFlowClassifier(\n input_ph=estimator._input_ph,\n output=estimator._output,\n labels_ph=estimator._labels_ph,\n train=estimator._train,\n loss=self._loss_object,\n learning=estimator._learning,\n sess=estimator._sess,\n channels_first=estimator.channels_first,\n clip_values=estimator.clip_values,\n preprocessing_defences=estimator.preprocessing_defences,\n postprocessing_defences=estimator.postprocessing_defences,\n preprocessing=estimator.preprocessing,\n feed_dict=estimator._feed_dict,\n )\n\n elif isinstance(estimator, TensorFlowV2Classifier):\n import tensorflow as tf\n\n if loss_type == \"cross_entropy\":\n if is_probability(estimator.predict(x=np.ones(shape=(1, *estimator.input_shape)))):\n self._loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=False)\n else:\n self._loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)\n elif loss_type == \"difference_logits_ratio\":\n if is_probability(estimator.predict(x=np.ones(shape=(1, *estimator.input_shape)))):\n raise ValueError( # pragma: no cover\n \"The provided estimator seems to predict probabilities. 
\"\n \"If loss_type='difference_logits_ratio' the estimator has to to predict logits.\"\n )\n\n class DifferenceLogitsRatioTensorFlowV2:\n \"\"\"\n Callable class for Difference Logits Ratio loss in TensorFlow v2.\n \"\"\"\n\n def __init__(self):\n self.reduction = \"mean\"\n\n def __call__(self, y_true, y_pred):\n i_y_true = tf.cast(tf.math.argmax(tf.cast(y_true, tf.int32), axis=1), tf.int32)\n i_y_pred_arg = tf.argsort(y_pred, axis=1)\n i_z_i_list = list()\n\n for i in range(y_true.shape[0]):\n if i_y_pred_arg[i, -1] != i_y_true[i]:\n i_z_i_list.append(i_y_pred_arg[i, -1])\n else:\n i_z_i_list.append(i_y_pred_arg[i, -2])\n\n i_z_i = tf.stack(i_z_i_list)\n\n z_1 = tf.gather(y_pred, i_y_pred_arg[:, -1], axis=1, batch_dims=0)\n z_3 = tf.gather(y_pred, i_y_pred_arg[:, -3], axis=1, batch_dims=0)\n z_i = tf.gather(y_pred, i_z_i, axis=1, batch_dims=0)\n z_y = tf.gather(y_pred, i_y_true, axis=1, batch_dims=0)\n\n z_1 = tf.linalg.diag_part(z_1)\n z_3 = tf.linalg.diag_part(z_3)\n z_i = tf.linalg.diag_part(z_i)\n z_y = tf.linalg.diag_part(z_y)\n\n dlr = -(z_y - z_i) / (z_1 - z_3)\n\n return tf.reduce_mean(dlr)\n\n self._loss_fn = DifferenceLogitsRatioTensorFlowV2()\n self._loss_object = DifferenceLogitsRatioTensorFlowV2()\n\n estimator_apgd = TensorFlowV2Classifier(\n model=estimator.model,\n nb_classes=estimator.nb_classes,\n input_shape=estimator.input_shape,\n loss_object=self._loss_object,\n train_step=estimator._train_step,\n channels_first=estimator.channels_first,\n clip_values=estimator.clip_values,\n preprocessing_defences=estimator.preprocessing_defences,\n postprocessing_defences=estimator.postprocessing_defences,\n preprocessing=estimator.preprocessing,\n )\n elif isinstance(estimator, PyTorchClassifier):\n import torch\n\n if loss_type == \"cross_entropy\":\n if is_probability(\n estimator.predict(x=np.ones(shape=(1, *estimator.input_shape), dtype=np.float32))\n ):\n raise ValueError( # pragma: no cover\n \"The provided estimator seems to predict probabilities. If loss_type='cross_entropy' \"\n \"the estimator has to to predict logits.\"\n )\n\n self._loss_object = torch.nn.CrossEntropyLoss(reduction=\"mean\")\n elif loss_type == \"difference_logits_ratio\":\n if is_probability(\n estimator.predict(x=np.ones(shape=(1, *estimator.input_shape), dtype=ART_NUMPY_DTYPE))\n ):\n raise ValueError( # pragma: no cover\n \"The provided estimator seems to predict probabilities. 
\"\n \"If loss_type='difference_logits_ratio' the estimator has to to predict logits.\"\n )\n\n class DifferenceLogitsRatioPyTorch:\n \"\"\"\n Callable class for Difference Logits Ratio loss in PyTorch.\n \"\"\"\n\n def __init__(self):\n self.reduction = \"mean\"\n\n def __call__(self, y_pred, y_true): # type: ignore\n if isinstance(y_true, np.ndarray):\n y_true = torch.from_numpy(y_true)\n if isinstance(y_pred, np.ndarray):\n y_pred = torch.from_numpy(y_pred)\n\n y_true = y_true.float()\n\n i_y_true = torch.argmax(y_true, axis=1)\n i_y_pred_arg = torch.argsort(y_pred, axis=1)\n i_z_i_list = list()\n\n for i in range(y_true.shape[0]):\n if i_y_pred_arg[i, -1] != i_y_true[i]:\n i_z_i_list.append(i_y_pred_arg[i, -1])\n else:\n i_z_i_list.append(i_y_pred_arg[i, -2])\n\n i_z_i = torch.stack(i_z_i_list)\n\n z_1 = y_pred[:, i_y_pred_arg[:, -1]]\n z_3 = y_pred[:, i_y_pred_arg[:, -3]]\n z_i = y_pred[:, i_z_i]\n z_y = y_pred[:, i_y_true]\n\n z_1 = torch.diagonal(z_1)\n z_3 = torch.diagonal(z_3)\n z_i = torch.diagonal(z_i)\n z_y = torch.diagonal(z_y)\n\n dlr = -(z_y - z_i) / (z_1 - z_3)\n\n return torch.mean(dlr.float())\n\n self._loss_object = DifferenceLogitsRatioPyTorch()\n\n estimator_apgd = PyTorchClassifier(\n model=estimator.model,\n loss=self._loss_object,\n input_shape=estimator.input_shape,\n nb_classes=estimator.nb_classes,\n optimizer=None,\n channels_first=estimator.channels_first,\n clip_values=estimator.clip_values,\n preprocessing_defences=estimator.preprocessing_defences,\n postprocessing_defences=estimator.postprocessing_defences,\n preprocessing=estimator.preprocessing,\n device_type=str(estimator._device),\n )\n\n else: # pragma: no cover\n raise ValueError(\"The loss type {} is not supported for the provided estimator.\".format(loss_type))\n\n super().__init__(estimator=estimator_apgd)\n self.norm = norm\n self.eps = eps\n self.eps_step = eps_step\n self.max_iter = max_iter\n self.targeted = targeted\n self.nb_random_init = nb_random_init\n self.batch_size = batch_size\n self.loss_type = loss_type\n self.verbose = verbose\n self._check_params()\n\n def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> np.ndarray:\n \"\"\"\n Generate adversarial samples and return them in an array.\n\n :param x: An array with the original inputs.\n :param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices of shape\n (nb_samples,). Only provide this parameter if you'd like to use true labels when crafting adversarial\n samples. Otherwise, model predictions are used as labels to avoid the \"label leaking\" effect\n (explained in this paper: https://arxiv.org/abs/1611.01236). Default is `None`.\n :param mask: An array with a mask broadcastable to input `x` defining where to apply adversarial perturbations.\n Shape needs to be broadcastable to the shape of x and can also be of the same shape as `x`. 
Any\n features for which the mask is zero will not be adversarially perturbed.\n :type mask: `np.ndarray`\n :return: An array holding the adversarial examples.\n \"\"\"\n mask = kwargs.get(\"mask\")\n\n y = check_and_transform_label_format(y, self.estimator.nb_classes)\n\n if y is None:\n if self.targeted:\n raise ValueError(\"Target labels `y` need to be provided for a targeted attack.\")\n y = get_labels_np_array(self.estimator.predict(x, batch_size=self.batch_size)).astype(int)\n\n if self.estimator.nb_classes == 2 and y.shape[1] == 1:\n raise ValueError(\n \"This attack has not yet been tested for binary classification with a single output classifier.\"\n )\n\n x_adv = x.astype(ART_NUMPY_DTYPE)\n\n for _ in trange(max(1, self.nb_random_init), desc=\"AutoPGD - restart\", disable=not self.verbose):\n # Determine correctly predicted samples\n y_pred = self.estimator.predict(x_adv)\n if self.targeted:\n sample_is_robust = np.argmax(y_pred, axis=1) != np.argmax(y, axis=1)\n elif not self.targeted:\n sample_is_robust = np.argmax(y_pred, axis=1) == np.argmax(y, axis=1)\n\n if np.sum(sample_is_robust) == 0:\n break\n\n x_robust = x_adv[sample_is_robust]\n y_robust = y[sample_is_robust]\n x_init = x[sample_is_robust]\n\n n = x_robust.shape[0]\n m = np.prod(x_robust.shape[1:]).item()\n random_perturbation = (\n random_sphere(n, m, self.eps, self.norm).reshape(x_robust.shape).astype(ART_NUMPY_DTYPE)\n )\n\n x_robust = x_robust + random_perturbation\n\n if self.estimator.clip_values is not None:\n clip_min, clip_max = self.estimator.clip_values\n x_robust = np.clip(x_robust, clip_min, clip_max)\n\n perturbation = projection(x_robust - x_init, self.eps, self.norm)\n x_robust = x_init + perturbation\n\n # Compute perturbation with implicit batching\n for batch_id in trange(\n int(np.ceil(x_robust.shape[0] / float(self.batch_size))),\n desc=\"AutoPGD - batch\",\n leave=False,\n disable=not self.verbose,\n ):\n self.eta = 2 * self.eps_step\n batch_index_1, batch_index_2 = batch_id * self.batch_size, (batch_id + 1) * self.batch_size\n x_k = x_robust[batch_index_1:batch_index_2].astype(ART_NUMPY_DTYPE)\n x_init_batch = x_init[batch_index_1:batch_index_2].astype(ART_NUMPY_DTYPE)\n y_batch = y_robust[batch_index_1:batch_index_2]\n\n p_0 = 0\n p_1 = 0.22\n var_w = [p_0, p_1]\n\n while True:\n p_j_p_1 = var_w[-1] + max(var_w[-1] - var_w[-2] - 0.03, 0.06)\n if p_j_p_1 > 1:\n break\n var_w.append(p_j_p_1)\n\n var_w = [math.ceil(p * self.max_iter) for p in var_w]\n\n eta = self.eps_step\n self.count_condition_1 = 0\n\n for k_iter in trange(self.max_iter, desc=\"AutoPGD - iteration\", leave=False, disable=not self.verbose):\n\n # Get perturbation, use small scalar to avoid division by 0\n tol = 10e-8\n\n # Get gradient wrt loss; invert it if attack is targeted\n grad = self.estimator.loss_gradient(x_k, y_batch) * (1 - 2 * int(self.targeted))\n\n # Apply norm bound\n if self.norm in [np.inf, \"inf\"]:\n grad = np.sign(grad)\n elif self.norm == 1:\n ind = tuple(range(1, len(x_k.shape)))\n grad = grad / (np.sum(np.abs(grad), axis=ind, keepdims=True) + tol)\n elif self.norm == 2:\n ind = tuple(range(1, len(x_k.shape)))\n grad = grad / (np.sqrt(np.sum(np.square(grad), axis=ind, keepdims=True)) + tol)\n assert x_k.shape == grad.shape\n\n perturbation = grad\n\n if mask is not None:\n perturbation = perturbation * (mask.astype(ART_NUMPY_DTYPE))\n\n # Apply perturbation and clip\n z_k_p_1 = x_k + eta * perturbation\n\n if self.estimator.clip_values is not None:\n clip_min, clip_max = self.estimator.clip_values\n 
z_k_p_1 = np.clip(z_k_p_1, clip_min, clip_max)\n\n if k_iter == 0:\n x_1 = z_k_p_1\n perturbation = projection(x_1 - x_init_batch, self.eps, self.norm)\n x_1 = x_init_batch + perturbation\n\n f_0 = self.estimator.compute_loss(x=x_k, y=y_batch, reduction=\"mean\")\n f_1 = self.estimator.compute_loss(x=x_1, y=y_batch, reduction=\"mean\")\n\n self.eta_w_j_m_1 = eta\n self.f_max_w_j_m_1 = f_0\n\n if f_1 >= f_0:\n self.f_max = f_1\n self.x_max = x_1\n self.x_max_m_1 = x_init_batch\n self.count_condition_1 += 1\n else:\n self.f_max = f_0\n self.x_max = x_k.copy()\n self.x_max_m_1 = x_init_batch\n\n # Settings for next iteration k\n x_k_m_1 = x_k.copy()\n x_k = x_1\n\n else:\n perturbation = projection(z_k_p_1 - x_init_batch, self.eps, self.norm)\n z_k_p_1 = x_init_batch + perturbation\n\n alpha = 0.75\n\n x_k_p_1 = x_k + alpha * (z_k_p_1 - x_k) + (1 - alpha) * (x_k - x_k_m_1)\n\n if self.estimator.clip_values is not None:\n clip_min, clip_max = self.estimator.clip_values\n x_k_p_1 = np.clip(x_k_p_1, clip_min, clip_max)\n\n perturbation = projection(x_k_p_1 - x_init_batch, self.eps, self.norm)\n x_k_p_1 = x_init_batch + perturbation\n\n f_k_p_1 = self.estimator.compute_loss(x=x_k_p_1, y=y_batch, reduction=\"mean\")\n\n if f_k_p_1 == 0.0:\n x_k = x_k_p_1.copy()\n break\n\n if (not self.targeted and f_k_p_1 > self.f_max) or (self.targeted and f_k_p_1 < self.f_max):\n self.count_condition_1 += 1\n self.x_max = x_k_p_1\n self.x_max_m_1 = x_k\n self.f_max = f_k_p_1\n\n if k_iter in var_w:\n\n rho = 0.75\n\n condition_1 = self.count_condition_1 < rho * (k_iter - var_w[var_w.index(k_iter) - 1])\n condition_2 = self.eta_w_j_m_1 == eta and self.f_max_w_j_m_1 == self.f_max\n\n if condition_1 or condition_2:\n eta = eta / 2\n x_k_m_1 = self.x_max_m_1\n x_k = self.x_max\n else:\n x_k_m_1 = x_k\n x_k = x_k_p_1.copy()\n\n self.count_condition_1 = 0\n self.eta_w_j_m_1 = eta\n self.f_max_w_j_m_1 = self.f_max\n\n else:\n x_k_m_1 = x_k\n x_k = x_k_p_1.copy()\n\n y_pred_adv_k = self.estimator.predict(x_k)\n if self.targeted:\n sample_is_not_robust_k = np.invert(np.argmax(y_pred_adv_k, axis=1) != np.argmax(y_batch, axis=1))\n elif not self.targeted:\n sample_is_not_robust_k = np.invert(np.argmax(y_pred_adv_k, axis=1) == np.argmax(y_batch, axis=1))\n\n x_robust[batch_index_1:batch_index_2][sample_is_not_robust_k] = x_k[sample_is_not_robust_k]\n\n x_adv[sample_is_robust] = x_robust\n\n return x_adv\n\n def _check_params(self) -> None:\n if self.norm not in [1, 2, np.inf, \"inf\"]:\n raise ValueError('The argument norm has to be either 1, 2, np.inf, or \"inf\".')\n\n if not isinstance(self.eps, (int, float)) or self.eps <= 0.0:\n raise ValueError(\"The argument eps has to be either of type int or float and larger than zero.\")\n\n if not isinstance(self.eps_step, (int, float)) or self.eps_step <= 0.0:\n raise ValueError(\"The argument eps_step has to be either of type int or float and larger than zero.\")\n\n if not isinstance(self.max_iter, int) or self.max_iter <= 0:\n raise ValueError(\"The argument max_iter has to be of type int and larger than zero.\")\n\n if not isinstance(self.targeted, bool):\n raise ValueError(\"The argument targeted has to be of bool.\")\n\n if not isinstance(self.nb_random_init, int) or self.nb_random_init <= 0:\n raise ValueError(\"The argument nb_random_init has to be of type int and larger than zero.\")\n\n if not isinstance(self.batch_size, int) or self.batch_size <= 0:\n raise ValueError(\"The argument batch_size has to be of type int and larger than zero.\")\n\n # if self.loss_type not 
in self._predefined_losses:\n # raise ValueError(\"The argument loss_type has to be either {}.\".format(self._predefined_losses))\n\n if not isinstance(self.verbose, bool):\n raise ValueError(\"The argument `verbose` has to be of type bool.\")\n", "# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport unittest\n\nimport numpy as np\n\nfrom art.defences.preprocessor import FeatureSqueezing\n\nfrom tests.utils import master_seed\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestFeatureSqueezing(unittest.TestCase):\n def setUp(self):\n master_seed(seed=1234)\n\n def test_ones(self):\n m, n = 10, 2\n x = np.ones((m, n))\n\n for depth in range(1, 50):\n preproc = FeatureSqueezing(clip_values=(0, 1), bit_depth=depth)\n x_squeezed, _ = preproc(x)\n self.assertTrue((x_squeezed == 1).all())\n\n def test_random(self):\n m, n = 1000, 20\n x = np.random.rand(m, n)\n x_original = x.copy()\n x_zero = np.where(x < 0.5)\n x_one = np.where(x >= 0.5)\n\n preproc = FeatureSqueezing(clip_values=(0, 1), bit_depth=1)\n x_squeezed, _ = preproc(x)\n self.assertTrue((x_squeezed[x_zero] == 0.0).all())\n self.assertTrue((x_squeezed[x_one] == 1.0).all())\n\n preproc = FeatureSqueezing(clip_values=(0, 1), bit_depth=2)\n x_squeezed, _ = preproc(x)\n self.assertFalse(np.logical_and(0.0 < x_squeezed, x_squeezed < 0.33).any())\n self.assertFalse(np.logical_and(0.34 < x_squeezed, x_squeezed < 0.66).any())\n self.assertFalse(np.logical_and(0.67 < x_squeezed, x_squeezed < 1.0).any())\n # Check that x has not been modified by attack and classifier\n self.assertAlmostEqual(float(np.max(np.abs(x_original - x))), 0.0, delta=0.00001)\n\n def test_data_range(self):\n x = np.arange(5)\n preproc = FeatureSqueezing(clip_values=(0, 4), bit_depth=2)\n x_squeezed, _ = preproc(x)\n self.assertTrue(np.array_equal(x, np.arange(5)))\n self.assertTrue(np.allclose(x_squeezed, [0, 1.33, 2.67, 2.67, 4], atol=1e-1))\n\n def test_check_params(self):\n with self.assertRaises(ValueError):\n _ = FeatureSqueezing(clip_values=(0, 4), bit_depth=-1)\n\n with self.assertRaises(ValueError):\n _ = FeatureSqueezing(clip_values=(0, 4, 8))\n\n with self.assertRaises(ValueError):\n _ = FeatureSqueezing(clip_values=(4, 0))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.sum", "numpy.ones", "tensorflow.argsort", "torch.stack", "torch.argsort", "torch.diagonal", "numpy.abs", "torch.from_numpy", "tensorflow.stack", "torch.argmax", "numpy.argmax", "tensorflow.cast", "numpy.prod", "numpy.square", "numpy.sign", "tensorflow.reduce_mean", "tensorflow.keras.losses.CategoricalCrossentropy", "torch.nn.CrossEntropyLoss", "numpy.clip", "tensorflow.keras.losses.categorical_crossentropy", "tensorflow.linalg.diag_part", "tensorflow.gather" ], [ "numpy.ones", "numpy.allclose", "numpy.logical_and", "numpy.abs", "numpy.arange", "numpy.random.rand", "numpy.where" ] ]
sugyan/image-dataset
[ "2d0190714048cbc3750e3bb3609294f42170bcb9" ]
[ "python/detect.py" ]
[ "import argparse\nimport logging\nimport math\nimport cv2\nimport dlib\nimport numpy as np\n\n\nclass Detector():\n def __init__(self, datafile='shape_predictor_68_face_landmarks.dat', verbose=False):\n logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')\n self.logger = logging.getLogger(__name__)\n if verbose:\n self.logger.setLevel(logging.INFO)\n self.detector = dlib.get_frontal_face_detector()\n self.predictor = dlib.shape_predictor(datafile)\n self.angles = [-48, -36, -24, -12, 0, 12, 24, 36, 48]\n\n def detect(self, img):\n # Create a large image that does not protrude by rotation\n h, w, c = img.shape\n hypot = math.ceil(math.hypot(h, w))\n hoffset = round((hypot-h)/2)\n woffset = round((hypot-w)/2)\n padded = np.zeros((hypot, hypot, c), np.uint8)\n padded[hoffset:hoffset+h, woffset:woffset+w, :] = img\n\n # Attempt detection by rotating at multiple angles\n results = []\n for angle in self.angles:\n rotated = self._rotate(padded, angle)\n dets, scores, indices = self.detector.run(rotated, 0, 0.0)\n self.logger.info(f'{angle:3d}: {dets}, {scores}, {indices}')\n if len(dets) == 1:\n results.append([dets[0], scores[0], angle, rotated])\n if len(results) == 0:\n self.logger.info('there are no detected faces')\n return\n\n # Choose the best angle by scores, and then adjust the angle using the eyes coordinates\n results.sort(key=lambda x: x[1], reverse=True)\n det, _, angle, rotated = results[0]\n shape = self.predictor(rotated, det)\n eyel, eyer = self._eye_center(shape)\n d = eyer - eyel\n angle += math.degrees(math.atan2(d[1], d[0]))\n self.logger.info(f'angle: {angle:.5f}')\n\n # Detect face and shapes from adjusted angle\n adjusted = self._rotate(padded, angle)\n dets = self.detector(adjusted)\n if len(dets) != 1:\n self.logger.info('faces are not detected in the rotated image')\n return\n shape = self.predictor(adjusted, dets[0])\n\n # Create a large mirrored image to rotate and crop\n margin = math.ceil(hypot * (math.sqrt(2) - 1.0) / 2)\n mirrored = np.pad(\n img,\n ((hoffset + margin, hypot - h - hoffset + margin),\n (woffset + margin, hypot - w - woffset + margin),\n (0, 0)), mode='symmetric')\n rotated = self._rotate(mirrored, angle)[margin:margin+hypot, margin:margin+hypot, :]\n\n # Calculate the center position and cropping size\n # https://arxiv.org/pdf/1710.10196v3.pdf\n e0, e1 = self._eye_center(shape)\n m0 = np.array([shape.part(48).x, shape.part(48).y])\n m1 = np.array([shape.part(54).x, shape.part(54).y])\n x = e1 - e0\n y = (e0 + e1) / 2 - (m0 + m1) / 2\n c = (e0 + e1) / 2 + y * 0.1\n s = max(np.linalg.norm(x) * 4.0, np.linalg.norm(y) * 3.6)\n\n xoffset = int(np.rint(c[0] - s/2))\n yoffset = int(np.rint(c[1] - s/2))\n if xoffset < 0 or yoffset < 0 or xoffset + s >= hypot or yoffset + s >= hypot:\n self.logger.info('cropping area has exceeded the image area')\n return\n size = int(np.rint(s))\n cropped = rotated[yoffset:yoffset+size, xoffset:xoffset+size, :]\n\n # Attempt detection on the cropped image\n dets = self.detector(cropped)\n if len(dets) != 1:\n self.logger.info('faces are not detected in the cropped image')\n return\n shape = self.predictor(cropped, dets[0])\n\n return {\n 'image': cropped,\n 'parts': [(point.x, point.y) for point in shape.parts()],\n 'angle': angle,\n 'size': size,\n }\n\n def _rotate(self, img, angle):\n h, w, _ = img.shape\n mat = cv2.getRotationMatrix2D((w/2, h/2), angle, 1.0)\n return cv2.warpAffine(img, mat, (w, h), cv2.INTER_LANCZOS4)\n\n def _eye_center(self, shape):\n eyel, eyer = np.array([0, 0]), np.array([0, 
0])\n for i in range(36, 42):\n eyel[0] += shape.part(i).x\n eyel[1] += shape.part(i).y\n for i in range(42, 48):\n eyer[0] += shape.part(i).x\n eyer[1] += shape.part(i).y\n return eyel / 6, eyer / 6\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('image_file')\n parser.add_argument('-v', '--verbose', action='store_true')\n args = parser.parse_args()\n\n result = Detector(verbose=args.verbose).detect(cv2.imread(args.image_file))\n if result is None:\n print('detection failed.')\n exit(0)\n\n img = result['image']\n for part in result['parts']:\n cv2.drawMarker(img, part, (255, 255, 0))\n cv2.imshow('image', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n" ]
[ [ "numpy.rint", "numpy.zeros", "numpy.array", "numpy.pad", "numpy.linalg.norm" ] ]
louispotok/great_expectations
[ "b91a3ce10f771742f49ccad9c403bda03f318515" ]
[ "tests/datasource/test_pandas_datasource.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport pytest\n\nimport os\nfrom ruamel.yaml import YAML\n\nimport pandas as pd\nfrom six import PY3\nimport shutil\n\n\nfrom great_expectations.exceptions import BatchKwargsError\nfrom great_expectations.datasource import PandasDatasource\nfrom great_expectations.datasource.types.batch_kwargs import (\n PathBatchKwargs,\n BatchId,\n BatchFingerprint\n)\nfrom great_expectations.dataset import PandasDataset\n\nyaml = YAML(typ='safe')\n\n\[email protected](scope=\"module\")\ndef test_folder_connection_path(tmp_path_factory):\n df1 = pd.DataFrame(\n {'col_1': [1, 2, 3, 4, 5], 'col_2': ['a', 'b', 'c', 'd', 'e']})\n path = str(tmp_path_factory.mktemp(\"test_folder_connection_path\"))\n df1.to_csv(os.path.join(path, \"test.csv\"))\n\n return str(path)\n\n\ndef test_standalone_pandas_datasource(test_folder_connection_path):\n datasource = PandasDatasource('PandasCSV', base_directory=test_folder_connection_path)\n\n assert datasource.get_available_data_asset_names() == {\"default\": [\"test\"]}\n manual_batch_kwargs = PathBatchKwargs(path=os.path.join(str(test_folder_connection_path), \"test.csv\"))\n\n # Get the default (subdir_path) generator\n generator = datasource.get_generator()\n auto_batch_kwargs = generator.yield_batch_kwargs(\"test\")\n\n assert manual_batch_kwargs[\"path\"] == auto_batch_kwargs[\"path\"]\n\n # Include some extra kwargs...\n # Note that we are using get_data_asset NOT get_batch here, since we are standalone (no batch concept)\n dataset = datasource.get_data_asset(\"test\",\n generator_name=\"default\", batch_kwargs=auto_batch_kwargs,\n sep=\",\", header=0, index_col=0)\n assert isinstance(dataset, PandasDataset)\n assert (dataset[\"col_1\"] == [1, 2, 3, 4, 5]).all()\n\n ## A datasource should always return an object with a typed batch_id\n assert isinstance(dataset.batch_kwargs, PathBatchKwargs)\n assert isinstance(dataset.batch_id, BatchId)\n assert isinstance(dataset.batch_fingerprint, BatchFingerprint)\n\n\ndef test_create_pandas_datasource(data_context, tmp_path_factory):\n basedir = tmp_path_factory.mktemp('test_create_pandas_datasource')\n name = \"test_pandas_datasource\"\n class_name = \"PandasDatasource\"\n # OLD STYLE: Remove even from record later...\n # type_ = \"pandas\"\n # data_context.add_datasource(name, type_, base_directory=str(basedir))\n data_context.add_datasource(name, class_name=class_name, base_directory=str(basedir))\n data_context_config = data_context.get_config()\n\n assert name in data_context_config[\"datasources\"]\n assert data_context_config[\"datasources\"][name][\"class_name\"] == class_name\n # assert data_context_config[\"datasources\"][name][\"type\"] == type_\n\n # We should now see updated configs\n # Finally, we should be able to confirm that the folder structure is as expected\n with open(os.path.join(data_context.root_directory, \"great_expectations.yml\"), \"r\") as data_context_config_file:\n data_context_file_config = yaml.load(data_context_config_file)\n\n assert data_context_file_config[\"datasources\"][name] == data_context_config[\"datasources\"][name]\n\n # We should have added a default generator built from the default config\n assert data_context_file_config[\"datasources\"][name][\"generators\"][\"default\"][\"class_name\"] == \\\n \"SubdirReaderGenerator\"\n\n\ndef test_pandas_datasource_custom_data_asset(data_context, test_folder_connection_path):\n name = \"test_pandas_datasource\"\n # type_ = \"pandas\"\n class_name = \"PandasDatasource\"\n\n data_asset_type_config = {\n 
\"module_name\": \"custom_pandas_dataset\",\n \"class_name\": \"CustomPandasDataset\"\n }\n data_context.add_datasource(name,\n class_name=class_name,\n base_directory=test_folder_connection_path,\n data_asset_type=data_asset_type_config)\n\n # We should now see updated configs\n with open(os.path.join(data_context.root_directory, \"great_expectations.yml\"), \"r\") as data_context_config_file:\n data_context_file_config = yaml.load(data_context_config_file)\n\n assert data_context_file_config[\"datasources\"][name][\"data_asset_type\"][\"module_name\"] == \"custom_pandas_dataset\"\n assert data_context_file_config[\"datasources\"][name][\"data_asset_type\"][\"class_name\"] == \"CustomPandasDataset\"\n\n # We should be able to get a dataset of the correct type from the datasource.\n data_asset_name = \"test_pandas_datasource/default/test\"\n data_context.create_expectation_suite(data_asset_name=data_asset_name, expectation_suite_name=\"default\")\n batch = data_context.get_batch(data_asset_name=data_asset_name,\n expectation_suite_name=\"default\",\n batch_kwargs=data_context.yield_batch_kwargs(data_asset_name=data_asset_name)\n )\n assert type(batch).__name__ == \"CustomPandasDataset\"\n res = batch.expect_column_values_to_have_odd_lengths(\"col_2\")\n assert res[\"success\"] is True\n\n\ndef test_pandas_source_readcsv(data_context, tmp_path_factory):\n if not PY3:\n # We don't specifically test py2 unicode reading since this test is about our handling of kwargs *to* read_csv\n pytest.skip()\n basedir = tmp_path_factory.mktemp('test_create_pandas_datasource')\n shutil.copy(\"./tests/test_sets/unicode.csv\", basedir)\n data_context.add_datasource(\"mysource\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n reader_options={\"encoding\": \"utf-8\"},\n base_directory=str(basedir))\n\n data_context.create_expectation_suite(data_asset_name=\"mysource/unicode\", expectation_suite_name=\"default\")\n batch = data_context.get_batch(\"mysource/unicode\",\n \"default\",\n data_context.yield_batch_kwargs(\"mysource/unicode\"))\n assert len(batch[\"Μ\"] == 1)\n assert \"😁\" in list(batch[\"Μ\"])\n\n data_context.add_datasource(\"mysource2\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n base_directory=str(basedir))\n\n data_context.create_expectation_suite(data_asset_name=\"mysource2/unicode\", expectation_suite_name=\"default\")\n batch = data_context.get_batch(\"mysource2/unicode\",\n \"default\",\n data_context.yield_batch_kwargs(\"mysource2/unicode\")\n )\n assert \"😁\" in list(batch[\"Μ\"])\n\n data_context.add_datasource(\"mysource3\",\n module_name=\"great_expectations.datasource\",\n class_name=\"PandasDatasource\",\n reader_options={\"encoding\": \"utf-16\"},\n base_directory=str(basedir))\n with pytest.raises(UnicodeError, match=\"UTF-16 stream does not start with BOM\"):\n data_context.create_expectation_suite(data_asset_name=\"mysource3/unicode\", expectation_suite_name=\"default\")\n batch = data_context.get_batch(\"mysource3/unicode\",\n \"default\",\n data_context.yield_batch_kwargs(\"mysource3/unicode\")\n )\n\n with pytest.raises(LookupError, match=\"unknown encoding: blarg\"):\n batch = data_context.get_batch(\"mysource/unicode\",\n \"default\",\n batch_kwargs=data_context.yield_batch_kwargs(\"mysource/unicode\"),\n encoding='blarg')\n\n batch = data_context.get_batch(\"mysource2/unicode\",\n \"default\",\n batch_kwargs=data_context.yield_batch_kwargs(\"mysource2/unicode\"),\n encoding='utf-8'\n )\n assert 
\"😁\" in list(batch[\"Μ\"])\n\n\ndef test_invalid_reader_pandas_datasource(tmp_path_factory):\n basepath = str(tmp_path_factory.mktemp(\"test_invalid_reader_pandas_datasource\"))\n datasource = PandasDatasource('mypandassource', base_directory=basepath)\n\n with open(os.path.join(basepath, \"idonotlooklikeacsvbutiam.notrecognized\"), \"w\") as newfile:\n newfile.write(\"a,b\\n1,2\\n3,4\\n\")\n\n with pytest.raises(BatchKwargsError) as exc:\n datasource.get_data_asset(\"idonotlooklikeacsvbutiam.notrecognized\", batch_kwargs={\n \"path\": os.path.join(basepath, \"idonotlooklikeacsvbutiam.notrecognized\")\n })\n assert \"Unable to determine reader for path\" in exc.message\n\n with pytest.raises(BatchKwargsError) as exc:\n datasource.get_data_asset(\"idonotlooklikeacsvbutiam.notrecognized\", batch_kwargs={\n \"path\": os.path.join(basepath, \"idonotlooklikeacsvbutiam.notrecognized\")\n }, reader_method=\"blarg\")\n assert \"Unknown reader method: blarg\" in exc.message\n\n dataset = datasource.get_data_asset(\"idonotlooklikeacsvbutiam.notrecognized\", batch_kwargs={\n \"path\": os.path.join(basepath, \"idonotlooklikeacsvbutiam.notrecognized\")\n }, reader_method=\"csv\", header=0)\n assert dataset[\"a\"][0] == 1\n" ]
[ [ "pandas.DataFrame" ] ]
raymondEhlers/pachyderm
[ "c9a554d8c4e904315171a5aafa4569259e280fa4" ]
[ "pachyderm/binned_data.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\" Functionality related to binned data.\n\n.. codeauthor:: Ramyond Ehlers <[email protected]>, ORNL\n\"\"\"\n\nimport collections\nimport itertools\nimport logging\nimport operator\nimport uuid\nfrom functools import reduce\nfrom typing import TYPE_CHECKING, Any, Dict, List, Mapping, Sequence, Tuple, Type, Union, cast\n\nimport attr\nimport numpy as np\nimport numpy.typing as npt\nimport ruamel.yaml\n\n\nlogger = logging.getLogger(__name__)\n\n# Work around typing issues in python 3.6\n# If only supporting 3.7+, we can add `from __future__ import annotations` and just use the more detailed definition\nif TYPE_CHECKING:\n AxesTupleAttribute = attr.Attribute[\"AxesTuple\"]\n NumpyAttribute = attr.Attribute[npt.NDArray[Any]]\nelse:\n AxesTupleAttribute = attr.Attribute\n NumpyAttribute = attr.Attribute\n\n\ndef _axis_bin_edges_converter(value: Any) -> npt.NDArray[Any]:\n \"\"\"Convert the bin edges input to a numpy array.\n\n If an `Axis` is passed, we grab its bin edges.\n\n Args:\n value: Value to be converted to a numpy array.\n Returns:\n The converted numpy array.\n \"\"\"\n # Check for self\n if isinstance(value, Axis):\n value = value.bin_edges\n # Ravel to ensure that we have a standard 1D array.\n # We specify the dtype here just to be safe.\n return np.ravel(np.array(value, dtype=np.float64))\n\n\ndef find_bin(bin_edges: npt.NDArray[Any], value: float) -> int:\n \"\"\"Determine the index position where the value should be inserted.\n\n This is basically ``ROOT.TH1.FindBin(value)``, but it can used for any set of bin_edges.\n\n Note:\n Bins are 0-indexed here, while in ROOT they are 1-indexed.\n\n Args:\n bin_edges: Bin edges of the histogram.\n value: Value to find within those bin edges.\n Returns:\n Index of the bin where that value would reside in the histogram.\n \"\"\"\n # This will return the index position where the value should be inserted.\n # This means that if we have the bin edges [0, 1, 2], and we pass value 1.5, it will return\n # index 2, but we want to return bin 1, so we subtract one from the result.\n # NOTE: By specifying that ``side = \"right\"``, it find values as arr[i] <= value < arr[i - 1],\n # which matches the ROOT convention.\n return cast(int, np.searchsorted(bin_edges, value, side=\"right\") - 1)\n\n\[email protected](eq=False)\nclass Axis:\n bin_edges: npt.NDArray[Any] = attr.ib(converter=_axis_bin_edges_converter)\n\n def __len__(self) -> int:\n \"\"\"The number of bins.\"\"\"\n return len(self.bin_edges) - 1\n\n @property\n def bin_widths(self) -> npt.NDArray[Any]:\n \"\"\"Bin widths calculated from the bin edges.\n\n Returns:\n Array of the bin widths.\n \"\"\"\n res: npt.NDArray[Any] = self.bin_edges[1:] - self.bin_edges[:-1]\n return res\n\n @property\n def bin_centers(self) -> npt.NDArray[Any]:\n \"\"\"The axis bin centers (``x`` for 1D).\n\n This property caches the values so we don't have to calculate it every time.\n\n Args:\n None\n Returns:\n Array of center of bins.\n \"\"\"\n try:\n return self._bin_centers\n except AttributeError:\n half_bin_widths = self.bin_widths / 2\n bin_centers = self.bin_edges[:-1] + half_bin_widths\n self._bin_centers: npt.NDArray[Any] = bin_centers\n\n return self._bin_centers\n\n def find_bin(self, value: float) -> int:\n \"\"\"Find the bin corresponding to the specified value.\n\n For further information, see ``find_bin(...)`` in this module.\n\n Note:\n Bins are 0-indexed here, while in ROOT they are 1-indexed.\n\n Args:\n value: Value for which we want want the corresponding 
bin.\n Returns:\n Bin corresponding to the value.\n \"\"\"\n return find_bin(self.bin_edges, value)\n\n def copy(self: \"Axis\") -> \"Axis\":\n \"\"\"Copies the object.\n\n In principle, this should be the same as ``copy.deepcopy(...)``, at least when this was written in\n Feb 2020. But ``deepcopy(...)`` often seems to have very bad performance (and perhaps does additional\n implicit copying), so we copy these numpy arrays by hand.\n \"\"\"\n return type(self)(bin_edges=np.array(self.bin_edges, copy=True))\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Check for equality.\"\"\"\n return np.allclose(self.bin_edges, other.bin_edges)\n\n # TODO: Serialize more carefully...\n\n\nclass AxesTuple(Tuple[Axis, ...]):\n @property\n def bin_edges(self) -> Tuple[npt.NDArray[Any], ...]:\n return tuple(a.bin_edges for a in self)\n\n @property\n def bin_widths(self) -> Tuple[npt.NDArray[Any], ...]:\n return tuple(a.bin_widths for a in self)\n\n @property\n def bin_centers(self) -> Tuple[npt.NDArray[Any], ...]:\n return tuple(a.bin_centers for a in self)\n\n @property\n def shape(self) -> Tuple[int, ...]:\n return tuple(len(a) for a in self)\n\n @classmethod\n def from_axes(\n cls: Type[\"AxesTuple\"], axes: Union[Axis, Sequence[Axis], npt.NDArray[Any], Sequence[npt.NDArray[Any]]]\n ) -> \"AxesTuple\":\n values = axes\n # Convert to a list if necessary\n # Ideally, we want to check for anything that isn't a collection, and convert it to one if it's not.\n # However, this is not entirely straightforward because a numpy array is a collection. So in the case of\n # a numpy array, we we to wrap it in a list if it's one dimensional. This check is as general as possible,\n # but if it becomes problematic, we can instead use the more specific:\n # if isinstance(axes, (Axis, np.ndarray)):\n if not isinstance(values, collections.abc.Iterable) or (isinstance(values, np.ndarray) and values.ndim == 1):\n values = [axes] # type: ignore\n # Help out mypy\n assert isinstance(values, collections.abc.Iterable)\n return cls([Axis(a) for a in values])\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Check for equality.\"\"\"\n if other:\n return all(a == b for a, b in itertools.zip_longest(self, other))\n return False\n\n @classmethod\n def to_yaml(\n cls: Type[\"AxesTuple\"], representer: ruamel.yaml.representer.BaseRepresenter, obj: \"AxesTuple\"\n ) -> ruamel.yaml.nodes.SequenceNode:\n \"\"\"Encode YAML representation.\n\n For some reason, YAML doesn't encode this object properly, so we have to tell it how to do so.\n\n Args:\n representer: Representation from YAML.\n data: AxesTuple to be converted to YAML.\n Returns:\n YAML representation of the AxesTuple object.\n \"\"\"\n representation = representer.represent_sequence(f\"!{cls.__name__}\", obj)\n\n # Finally, return the represented object.\n return cast(\n ruamel.yaml.nodes.SequenceNode,\n representation,\n )\n\n @classmethod\n def from_yaml(\n cls: Type[\"AxesTuple\"],\n constructor: ruamel.yaml.constructor.BaseConstructor,\n data: ruamel.yaml.nodes.SequenceNode,\n ) -> \"AxesTuple\":\n \"\"\"Decode YAML representation.\n\n For some reason, YAML doesn't encode this object properly, so we have to tell it how to do so.\n\n Args:\n constructor: Constructor from the YAML object.\n node: YAML sequence node representing the AxesTuple object.\n Returns:\n The AxesTuple object constructed from the YAML specified values.\n \"\"\"\n values = [constructor.construct_object(n) for n in data.value]\n return cls(values)\n\n\ndef _axes_tuple_from_axes_sequence(\n axes: 
Union[Axis, Sequence[Axis], npt.NDArray[Any], Sequence[npt.NDArray[Any]]]\n) -> AxesTuple:\n \"\"\"Workaround for mypy issue in creating an AxesTuple from axes.\n\n Converter class methods are currently not supported by mypy, so we ignore the typing here.\n See: https://github.com/python/mypy/issues/7912. So instead we wrap the call here.\n\n Args:\n axes: Axes to be stored in the AxesTuple.\n Returns:\n AxesTuple containing the axes.\n \"\"\"\n return AxesTuple.from_axes(axes)\n\n\ndef _array_length_from_axes(axes: AxesTuple) -> int:\n return reduce(operator.mul, (len(a) for a in axes))\n\n\ndef _validate_axes(instance: \"BinnedData\", attribute: AxesTupleAttribute, value: AxesTuple) -> None:\n array_length = _array_length_from_axes(value)\n for other_name, other_value in [(\"values\", instance.values), (\"variances\", instance.variances)]:\n if array_length != other_value.size:\n raise ValueError(\n f\"Length of {attribute.name} does not match expected length of the {other_name}.\"\n f\" len({attribute.name}) = {array_length}, expected length from '{other_name}': {len(other_value)}.\"\n )\n\n\ndef _validate_arrays(instance: \"BinnedData\", attribute: NumpyAttribute, value: npt.NDArray[Any]) -> None:\n expected_length = _array_length_from_axes(instance.axes)\n if value.size != expected_length:\n raise ValueError(\n f\"Length of {attribute} does not match expected length.\"\n f\" len({attribute}) = {len(value)}, expected length: {expected_length}.\"\n )\n\n\ndef _shared_memory_check(instance: \"BinnedData\", attribute: NumpyAttribute, value: npt.NDArray[Any]) -> None:\n # TODO: This trivially fails for axes.\n # Define this array for convenience in accessing the members. This way, we're less likely to miss\n # newly added members.\n arrays = {\n k: v for k, v in vars(instance).items() if not k.startswith(\"_\") and k != \"metadata\" and k != attribute.name\n }\n # Ensure the members don't point to one another (which can cause issues when performing operations in place).\n # Check the other values.\n for other_name, other_value in arrays.items():\n # logger.debug(f\"{attribute.name}: Checking {other_name} for shared memory.\")\n if np.may_share_memory(value, other_value): # type: ignore\n logger.warning(\n f\"Object '{other_name}' shares memory with object '{attribute.name}'. Copying '{attribute}'!\"\n )\n setattr(instance, attribute.name, value.copy())\n\n\ndef _shape_array_check(instance: \"BinnedData\", attribute: NumpyAttribute, value: npt.NDArray[Any]) -> None:\n \"\"\"Ensure that the arrays are shaped the same as the shape expected from the axes.\"\"\"\n # If we're passed a flattened array, reshape it to follow the shape of the axes.\n # NOTE: One must be a bit careful with this to ensure that the it is formatted as expected.\n # Especially when converting between ROOT and numpy.\n if value.ndim == 1:\n setattr(instance, attribute.name, value.reshape(instance.axes.shape))\n if instance.axes.shape != value.shape:\n # Protection for if the shapes are reversed.\n if instance.axes.shape == tuple(reversed(value.shape)):\n logger.info(f\"Shape of {attribute.name} appears to be reversed. Transposing the array.\")\n setattr(instance, attribute.name, value.T)\n else:\n # Otherwise, something is entirely wrong. Just let the user know.\n raise ValueError(\n f\"Shape of {attribute.name} mismatches axes. 
{attribute.name:}.shape: {value.shape}, axes.shape: {instance.axes.shape}\"\n )\n\n\[email protected](eq=False)\nclass BinnedData:\n axes: AxesTuple = attr.ib(\n converter=_axes_tuple_from_axes_sequence, validator=[_shared_memory_check, _validate_axes] # type: ignore\n )\n values: npt.NDArray[Any] = attr.ib(\n converter=np.asarray, validator=[_shared_memory_check, _shape_array_check, _validate_arrays]\n )\n variances: npt.NDArray[Any] = attr.ib(\n converter=np.asarray, validator=[_shared_memory_check, _shape_array_check, _validate_arrays]\n )\n metadata: Dict[str, Any] = attr.ib(factory=dict)\n\n @property\n def axis(self) -> Axis:\n \"\"\"Returns the single axis when the binned data is 1D.\n\n This is just a helper function, but can be nice for one dimensional data.\n\n Returns:\n The axis.\n \"\"\"\n if len(self.axes) != 1:\n raise ValueError(f\"Calling axis is only valid for one axis. There are {len(self.axes)} axes.\")\n return self.axes[0]\n\n @property\n def errors(self) -> npt.NDArray[Any]:\n res: npt.NDArray[Any] = np.sqrt(self.variances)\n return res\n\n def copy(self: \"BinnedData\") -> \"BinnedData\":\n \"\"\"Copies the object.\n\n In principle, this should be the same as ``copy.deepcopy(...)``, at least when this was written in\n Feb 2020. But ``deepcopy(...)`` often seems to have very bad performance (and perhaps does additional\n implicit copying), so we copy these numpy arrays by hand.\n \"\"\"\n return type(self)(\n axes=AxesTuple(axis.copy() for axis in self.axes),\n values=np.array(self.values, copy=True),\n variances=np.array(self.variances, copy=True),\n metadata=self.metadata.copy(),\n )\n\n # TODO: Add integral: Need to devise how best to pass axis limits.\n # TODO: Stats\n\n def __add__(self: \"BinnedData\", other: \"BinnedData\") -> \"BinnedData\":\n \"\"\"Handles ``a = b + c.``\"\"\"\n new = self.copy()\n new += other\n return new\n\n def __radd__(self: \"BinnedData\", other: \"BinnedData\") -> \"BinnedData\":\n \"\"\"For use with sum(...).\"\"\"\n if other == 0:\n return self\n else:\n return self + other\n\n def __iadd__(self: \"BinnedData\", other: \"BinnedData\") -> \"BinnedData\":\n \"\"\"Handles ``a += b``.\"\"\"\n if self.axes != other.axes:\n raise TypeError(\n f\"Binning is different for given binned data, so cannot be added!\"\n f\" len(self.axes): {len(self.axes)}, len(other.axes): {len(other.axes)}.\"\n f\" axes: {self.axes}, other axes: {other.axes}.\"\n )\n self.values += other.values\n self.variances += other.variances\n return self\n\n def __sub__(self: \"BinnedData\", other: \"BinnedData\") -> \"BinnedData\":\n \"\"\"Handles ``a = b - c``.\"\"\"\n new = self.copy()\n new -= other\n return new\n\n def __isub__(self: \"BinnedData\", other: \"BinnedData\") -> \"BinnedData\":\n \"\"\"Handles ``a -= b``.\"\"\"\n if self.axes != other.axes:\n raise TypeError(\n f\"Binning is different for given binned data, so cannot be subtracted!\"\n f\" len(self.axes): {len(self.axes)}, len(other.axes): {len(other.axes)}.\"\n f\" axes: {self.axes}, other axes: {other.axes}.\"\n )\n self.values -= other.values\n self.variances += other.variances\n return self\n\n def __mul__(self: \"BinnedData\", other: Union[\"BinnedData\", npt.NDArray[Any], float]) -> \"BinnedData\":\n \"\"\"Handles ``a = b * c``.\"\"\"\n new = self.copy()\n new *= other\n return new\n\n def __imul__(self: \"BinnedData\", other: Union[\"BinnedData\", npt.NDArray[Any], float]) -> \"BinnedData\":\n \"\"\"Handles ``a *= b``.\"\"\"\n if np.isscalar(other) or isinstance(other, np.ndarray):\n # Help out 
mypy...\n assert isinstance(other, (float, int, np.number, np.ndarray))\n # Scale data by a scalar\n self.values *= other\n self.variances *= other ** 2\n else:\n # Help out mypy...\n assert isinstance(other, type(self))\n # Validation\n if self.axes != other.axes:\n raise TypeError(\n f\"Binning is different for given binned data, so cannot be multiplied!\"\n f\" len(self.axes): {len(self.axes)}, len(other.axes): {len(other.axes)}.\"\n f\" axes: {self.axes}, other axes: {other.axes}.\"\n )\n # NOTE: We need to calculate the errors_squared first because the depend on the existing y values\n # Errors are from ROOT::TH1::Multiply(const TH1 *h1)\n # NOTE: This is just error propagation, simplified with a = b * c!\n self.variances = self.variances * other.values ** 2 + other.variances * self.values ** 2\n self.values *= other.values\n return self\n\n def __truediv__(self: \"BinnedData\", other: Union[\"BinnedData\", npt.NDArray[Any], float]) -> \"BinnedData\":\n \"\"\"Handles ``a = b / c``.\"\"\"\n new = self.copy()\n new /= other\n return new\n\n def __itruediv__(self: \"BinnedData\", other: Union[\"BinnedData\", npt.NDArray[Any], float]) -> \"BinnedData\":\n \"\"\"Handles ``a /= b``.\"\"\"\n if np.isscalar(other) or isinstance(other, np.ndarray):\n # Help out mypy...\n assert isinstance(other, (float, int, np.number, np.ndarray))\n # Scale data by a scalar\n self *= 1.0 / other\n else:\n # Help out mypy...\n assert isinstance(other, type(self))\n # Validation\n if self.axes != other.axes:\n raise TypeError(\n f\"Binning is different for given binned data, so cannot be divided!\"\n f\" len(self.axes): {len(self.axes)}, len(other.axes): {len(other.axes)}.\"\n f\" axes: {self.axes}, other axes: {other.axes}.\"\n )\n # Errors are from ROOT::TH1::Divide(const TH1 *h1)\n # NOTE: This is just error propagation, simplified with a = b / c!\n # NOTE: We need to calculate the variances first before setting values because the variances depend on\n # the existing values\n variances_numerator = self.variances * other.values ** 2 + other.variances * self.values ** 2\n variances_denominator = other.values ** 4\n # NOTE: We have to be a bit clever when we divide to avoid dividing by bins with 0 entries. 
The\n # approach taken here basically replaces any divide by 0s with a 0 in the output hist.\n # For more info, see: https://stackoverflow.com/a/37977222\n self.variances = np.divide(\n variances_numerator,\n variances_denominator,\n out=np.zeros_like(variances_numerator),\n where=variances_denominator != 0,\n )\n self.values = np.divide(self.values, other.values, out=np.zeros_like(self.values), where=other.values != 0)\n return self\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Check for equality.\"\"\"\n attributes = [k for k in vars(self) if not k.startswith(\"_\")]\n other_attributes = [k for k in vars(other) if not k.startswith(\"_\")]\n\n # As a beginning check, they must have the same attributes available.\n if attributes != other_attributes:\n return False\n\n # The values and variances are numpy arrays, so we compare the arrays using ``np.allclose``\n # NOTE: allclose can't handle the axes or the metadata dictionary, so we skip it here\n # and check it explicitly below.\n keys_to_exclude = [\"axes\", \"metadata\"]\n agreement = [np.allclose(getattr(self, a), getattr(other, a)) for a in attributes if a not in keys_to_exclude]\n # Check axes\n axes_agree = self.axes == other.axes\n # Check metadata\n metadata_agree = self.metadata == other.metadata\n # All arrays and the metadata must agree.\n return all(agreement) and axes_agree and metadata_agree\n\n @classmethod\n def from_hepdata(cls: Type[\"BinnedData\"], hist: Mapping[str, Any]) -> List[\"BinnedData\"]:\n \"\"\"Convert (a set) of HEPdata histogram(s) to BinnedData objects.\n\n Will include any information that the extraction function extracts and returns.\n\n Note:\n This is not included in the ``from_existing_hist(...)`` function because HEPdata files are oriented\n towards potentially containing multiple histograms in a single object. So we just return all of them\n and let the user sort it out.\n\n Note:\n It only grabs the first independent variable to determining the x axis.\n\n Args:\n hist: HEPdata input histogram(s).\n extraction_function: Extract values from HEPdata dict to be used to construct a histogram. Default:\n Retrieves y values, symmetric statical errors. 
Symmetric systematic errors are stored in the metadata.\n Returns:\n List of Histogram1D constructed from the input HEPdata.\n \"\"\"\n ...\n raise NotImplementedError(\"Not yet implemented.\")\n\n @classmethod\n def _from_uproot3(cls: Type[\"BinnedData\"], hist: Any) -> \"BinnedData\":\n \"\"\"Convert from uproot read histogram to BinnedData.\"\"\"\n # All of these methods should excludes underflow and overflow bins\n bin_edges = hist.edges\n values = hist.values\n variances = hist.variances\n\n metadata: Dict[str, Any] = {}\n\n return cls(axes=bin_edges, values=values, variances=variances, metadata=metadata)\n\n @classmethod\n def _from_uproot4(cls: Type[\"BinnedData\"], hist: Any) -> \"BinnedData\":\n \"\"\"Convert from uproot4 to BinnedData.\n\n Cannot just use the boost_histogram conversion because it includes flow bins.\n\n \"\"\"\n # We explicitly decide to exclude flow bins.\n values = hist.values(flow=False)\n variances = hist.variances(flow=False)\n bin_edges = [axis.edges(flow=False) for axis in hist.axes]\n\n metadata: Dict[str, Any] = {}\n\n return cls(\n axes=bin_edges,\n values=values,\n variances=variances,\n metadata=metadata,\n )\n\n @classmethod\n def _from_tgraph(cls: Type[\"BinnedData\"], hist: Any) -> \"BinnedData\":\n \"\"\"Convert from uproot4 TGraphAsymmetricErrors to BinnedData.\n\n We have to make a number of assumptions here, but it seems that it should work\n for well behaved cases.\n \"\"\"\n bin_centers, values = hist.values(axis=\"both\")\n x_errors_low, y_errors_low = hist.errors(which=\"low\", axis=\"both\")\n x_errors_high, y_errors_high = hist.errors(which=\"high\", axis=\"both\")\n\n # Aim to reconstruct the bin widths from the x_errors.\n possible_low_bin_edges = bin_centers - x_errors_low\n possible_high_bin_edges = bin_centers + x_errors_high\n if not np.allclose(possible_low_bin_edges[1:], possible_high_bin_edges[:-1]):\n raise ValueError(\n \"Bin edges in graph are inconsistent. 
Please fix this and try again.\"\n f\"\\n\\tLow: {possible_low_bin_edges}\"\n f\"\\n\\tHigh: {possible_high_bin_edges}\"\n f\"\\n\\tValues: {values}\"\n )\n # x errors are consistent, so we can create bin edges from them.\n bin_edges = np.append(possible_low_bin_edges, possible_high_bin_edges[-1]) # type: ignore\n\n # If the errors agree, we can just store them in a standard binned data.\n # Otherwise, we have to use the metadata.\n metadata = {}\n if np.allclose(y_errors_low, y_errors_high):\n variances = y_errors_low ** 2\n else:\n variances = np.ones_like(y_errors_low)\n metadata[\"y_errors\"] = {\"low\": y_errors_low, \"high\": y_errors_high}\n\n return cls(\n axes=bin_edges,\n values=values,\n variances=variances,\n metadata=metadata,\n )\n\n @classmethod\n def _from_boost_histogram(cls: Type[\"BinnedData\"], hist: Any) -> \"BinnedData\":\n \"\"\"Convert from boost histogram to BinnedData.\"\"\"\n view = hist.view()\n metadata: Dict[str, Any] = {}\n\n return cls(\n axes=hist.axes.edges,\n values=view.value,\n variances=np.copy(view.variance), # type: ignore\n metadata=metadata,\n )\n\n @classmethod\n def _from_ROOT(cls: Type[\"BinnedData\"], hist: Any) -> \"BinnedData\":\n \"\"\"Convert TH1, TH2, or TH3 histogram to BinnedData.\n\n Note:\n Under/Overflow bins are excluded.\n\n \"\"\"\n # Setup\n # Enable sumw2 if it's not already calculated\n if hist.GetSumw2N() == 0:\n hist.Sumw2(True)\n class_name = hist.ClassName()\n # Determine the number of dimensions\n # TProfile\n if \"TProfile\" in class_name:\n if \"TProfile\" == class_name:\n n_dim = 1\n else:\n n_dim = int(class_name[-1])\n else:\n # TH*D\n n_dim = int(class_name[2])\n # If it doesn't match these, then let it throw a ValueError so we know what's going on.\n\n # Then determine the axes based on the dimensions\n axis_methods = [hist.GetXaxis, hist.GetYaxis, hist.GetZaxis]\n root_axes = axis_methods[:n_dim]\n\n def get_bin_edges_from_axis(axis: Any) -> npt.NDArray[Any]:\n \"\"\"Get bin edges from a ROOT hist axis.\n\n Note:\n Doesn't include over- or underflow bins!\n\n Args:\n axis (ROOT.TAxis): Axis from which the bin edges should be extracted.\n Returns:\n Array containing the bin edges.\n \"\"\"\n # Don't include over- or underflow bins\n bins = range(1, axis.GetNbins() + 1)\n # Bin edges\n bin_edges = np.empty(len(bins) + 1)\n bin_edges[:-1] = [axis.GetBinLowEdge(i) for i in bins]\n bin_edges[-1] = axis.GetBinUpEdge(axis.GetNbins())\n\n return bin_edges\n\n # Determine the main values\n # Exclude overflow\n # Axes\n axes = [Axis(get_bin_edges_from_axis(axis())) for axis in root_axes]\n # Values and variances\n # ROOT stores the values in a flat array including underflow and overflow bins,\n # so we need to remove the flow bins, and then appropriately shape the arrays.\n # Specifically, to get the appropriate shape for the arrays, we need to reshape in the opposite\n # order of the axes, and then transpose.\n # NOTE: These operations _do not_ commute.\n shape = tuple((len(a) for a in reversed(axes)))\n bins_without_flow_mask = np.array(\n [not (hist.IsBinUnderflow(i) or hist.IsBinOverflow(i)) for i in range(hist.GetNcells())]\n )\n values = np.array([hist.GetBinContent(i) for i in range(hist.GetNcells())])\n values = values[bins_without_flow_mask].reshape(shape).T\n variances = np.array(hist.GetSumw2())\n variances = variances[bins_without_flow_mask].reshape(shape).T\n\n # Check for a TProfile.\n # In that case we need to retrieve the errors manually because the Sumw2() errors are\n # not the anticipated errors.\n if 
hasattr(hist, \"BuildOptions\"):\n errors = np.array([hist.GetBinError(i) for i in range(1, hist.GetXaxis().GetNbins() + 1)])\n # We expected variances (errors squared)\n variances = errors ** 2\n else:\n # Cross check. If they don't match, something odd has almost certainly occurred.\n # We use lambdas so we don't go beyond the length of the axis unless we're certain\n # that we have that many dimensions.\n first_non_overflow_bin_map = {\n # Using 10 ((by 10) by 10) as an example, to derive the specific values below, and then generalizing.\n # 1\n 1: lambda axes: 1,\n # 12 + 1 = 13\n 2: lambda axes: (len(axes[0]) + 2) + 1,\n # 12 * 12 + 12 + 1 = 157\n 3: lambda axes: (len(axes[0]) + 2) * (len(axes[1]) + 2) + (len(axes[0]) + 2) + 1,\n }\n first_non_overflow_bin = first_non_overflow_bin_map[len(axes)](axes) # type: ignore\n if not np.isclose(variances.flatten()[0], hist.GetBinError(first_non_overflow_bin) ** 2):\n raise ValueError(\"Sumw2 errors don't seem to represent bin errors!\")\n\n metadata: Dict[str, Any] = {}\n\n return cls(\n axes=axes,\n values=values,\n variances=variances,\n metadata=metadata,\n )\n\n @classmethod\n def from_existing_data(\n cls: Type[\"BinnedData\"], binned_data: Any, return_copy_if_already_converted: bool = True\n ) -> \"BinnedData\":\n \"\"\"Convert an existing histogram.\n\n Note:\n Underflow and overflow bins are excluded!\n\n Args:\n hist (uproot.rootio.TH1* or ROOT.TH1): Histogram to be converted.\n Returns:\n Histogram: Dataclass with x, y, and errors\n \"\"\"\n # If it's already BinnedData, just return it\n if isinstance(binned_data, cls):\n if return_copy_if_already_converted:\n logger.debug(f\"Passed binned data is already a {cls.__name__}. Returning a copy of the object.\")\n return binned_data.copy()\n else:\n logger.warning(f\"Passed binned data is already a {cls.__name__}. Returning the existing object.\")\n return binned_data\n\n # Now actually deal with conversion from other types.\n # Need to deal with boost histogram first because it now (Feb 2021) has values and variances.\n if hasattr(binned_data, \"view\"):\n return cls._from_boost_histogram(binned_data)\n # Uproot4: has \"_values_variances\"\n if hasattr(binned_data, \"_values_variances\"):\n return cls._from_uproot4(binned_data)\n # Uproot3: \"values\" and \"variances\" is a proxy for an uproot3 hist. uproot4 hists also have these,\n # so we need to check for uproot4 first\n if hasattr(binned_data, \"values\") and hasattr(binned_data, \"variances\"):\n return cls._from_uproot3(binned_data)\n # Next, look for TGraphs\n if hasattr(binned_data, \"values\") and hasattr(binned_data, \"errors\"):\n return cls._from_tgraph(binned_data)\n\n # Fall back to handling a traditional ROOT hist.\n return cls._from_ROOT(binned_data)\n\n # Convert to other formats.\n def to_ROOT(self, copy: bool = True) -> Any:\n \"\"\"Convert into a ROOT histogram.\n\n NOTE:\n This is a lossy operation because there is nowhere to store metadata is in the ROOT hist.\n\n Args:\n copy: Copy the arrays before assigning them. The ROOT hist may be able to view the array memory,\n such that modifications in one would affect the other. Be extremely careful, as that can have\n unexpected side effects! So only disable with a very good reason. Default: True.\n Returns:\n ROOT histogram containing the data.\n \"\"\"\n try:\n import ROOT\n except ImportError:\n raise RuntimeError(\"Unable to import ROOT. 
Please ensure that ROOT is installed and in your $PYTHONPATH.\")\n\n # Setup\n # We usually want to be entirely certain that the ROOT arrays are not pointing at the same memory\n # as the current hist, so we make a copy. We basically always want to copy.\n if copy:\n h = self.from_existing_data(self)\n else:\n h = self\n\n unique_name = str(uuid.uuid4())\n name = h.metadata.get(\"name\", unique_name)\n title = h.metadata.get(\"title\", unique_name)\n # Axes need to be of the form: n_bins, bin_edges\n axes = list(itertools.chain.from_iterable((len(axis), axis.bin_edges) for axis in h.axes))\n\n args = [name, title, *axes]\n if len(h.axes) <= 3:\n h_ROOT = getattr(ROOT, f\"TH{len(h.axes)}D\")(*args)\n else:\n raise RuntimeError(f\"Asking to create hist with {len(h.axes)} > 3 dimensions.\")\n\n # We have to keep track on the bin index by hand, because ROOT.\n # NOTE: The transpose is extremely import! Without it, the arrays aren't in the order\n # that ROOT expects! ROOT expects for the arrays to increment first through x bins,\n # then increment the y bin, and iterate over x again, etc. We cast the arrays this via\n # via a transpose.\n i = 1\n for value, error in zip(h.values.T.flatten(), h.errors.T.flatten()):\n # Sanity check.\n if i >= h_ROOT.GetNcells():\n raise ValueError(\"Indexing is wrong...\")\n\n # Need to advance to the next bin that we care about.\n # We don't want to naively increment and continue because then we should histogram values.\n while h_ROOT.IsBinUnderflow(i) or h_ROOT.IsBinOverflow(i):\n h_ROOT.SetBinContent(i, 0)\n h_ROOT.SetBinError(i, 0)\n i += 1\n\n # Set the content\n h_ROOT.SetBinContent(i, value)\n h_ROOT.SetBinError(i, error)\n i += 1\n\n return h_ROOT\n\n def to_boost_histogram(self) -> Any:\n \"\"\"Convert into a boost-histogram.\n\n NOTE:\n This is a lossy operation. The metadata is not preserved.\n\n Returns:\n Boost histogram containing the data.\n \"\"\"\n try:\n import boost_histogram as bh\n except ImportError:\n raise RuntimeError(\"Unable to import boost histogram. Please install it to export to a boost histogram.\")\n\n # It seems to copy by default, so we don't need to do it ourselves.\n\n axes = []\n for axis in self.axes:\n # NOTE: We use Variable instead of Regular even if the bin edges are Regular because it allows us to\n # construct the axes just from the bin edges.\n axes.append(bh.axis.Variable(axis.bin_edges, underflow=False, overflow=False))\n h = bh.Histogram(*axes, storage=bh.storage.Weight())\n # Need to shape the array properly so that it will actually be able to assign to the boost histogram.\n arr = np.zeros(shape=h.view().shape, dtype=h.view().dtype)\n arr[\"value\"] = self.values\n arr[\"variance\"] = self.variances\n h[...] = arr\n\n return h\n\n def to_histogram1D(self) -> Any:\n \"\"\"Convert to a Histogram 1D.\n\n This is entirely a convenience function. Generally, it's best to stay with BinnedData, but\n a Histogram1D is required in some cases, such as for fitting.\n\n Returns:\n Histogram1D containing the data.\n \"\"\"\n # Validation\n if len(self.axes) > 1:\n raise ValueError(f\"Can only convert to 1D histogram. 
Given {len(self.axes)} axes\")\n\n from pachyderm import histogram\n\n return histogram.Histogram1D(\n bin_edges=self.axes[0].bin_edges,\n y=self.values,\n errors_squared=self.variances,\n )\n\n def to_numpy(self) -> Tuple[npt.NDArray[Any], ...]:\n \"\"\"Convert to a numpy histogram.\n\n Returns:\n Tuple of values, and then axes bin edges.\n \"\"\"\n # TODO: Check that the values don't need to be transposed or similar.\n return (self.values, *self.axes.bin_edges)\n" ]
[ [ "numpy.sqrt", "numpy.allclose", "numpy.zeros_like", "numpy.append", "numpy.searchsorted", "numpy.ones_like", "numpy.copy", "numpy.may_share_memory", "numpy.array", "numpy.isscalar" ] ]
vikrant-github/python-data-science
[ "8f1d7631e302e7e751008aa16caa4d044411de43" ]
[ "src/models/machine_learning_api.py" ]
[ "\r\nfrom flask import Flask, request\r\nimport pandas as pd\r\nimport numpy as np\r\nimport json\r\nimport pickle\r\nimport os\r\n\r\napp = Flask(__name__)\r\n\r\n# Load Model and Scaler Files\r\nmodel_path = os.path.join(os.path.pardir,os.path.pardir,'models')\r\nmodel_filepath = os.path.join(model_path, 'lr_model.pkl')\r\nscaler_filepath = os.path.join(model_path, 'lr_scaler.pkl')\r\n\r\nscaler = pickle.load(open(scaler_filepath, 'rb'))\r\nmodel = pickle.load(open(model_filepath, 'rb'))\r\n\r\n# columns\r\ncolumns = [ u'Age', u'Fare', u'FamilySize', \\\r\n u'IsMother', u'IsMale', u'Deck_A', u'Deck_B', u'Deck_C', u'Deck_D', \\\r\n u'Deck_E', u'Deck_F', u'Deck_G', u'Deck_z', u'Pclass_1', u'Pclass_2', \\\r\n u'Pclass_3', u'Title_Lady', u'Title_Master', u'Title_Miss', u'Title_Mr', \\\r\n u'Title_Mrs', u'Title_Officer', u'Title_Sir', u'Fare_Bin_very_low', \\\r\n u'Fare_Bin_low', u'Fare_Bin_high', u'Fare_Bin_very_high', u'Embarked_C', \\\r\n u'Embarked_Q', u'Embarked_S', u'AgeState_Adult', u'AgeState_Child'] \r\n\r\n\r\[email protected]('/api', methods=['POST'])\r\ndef make_prediction():\r\n # read json object and conver to json string\r\n data = json.dumps(request.get_json(force=True))\r\n # create pandas dataframe using json string\r\n df = pd.read_json(data)\r\n # extract passengerIds\r\n passenger_ids = df['PassengerId'].ravel()\r\n # actual survived values\r\n actuals = df['Survived'].ravel()\r\n # extract required columns based and convert to matrix\r\n X = df[columns].values.astype('float')\r\n # transform the input\r\n X_scaled = scaler.transform(X)\r\n # make predictions\r\n predictions = model.predict(X_scaled)\r\n # create response dataframe\r\n df_response = pd.DataFrame({'PassengerId': passenger_ids, 'Predicted' : predictions, 'Actual' : actuals})\r\n # return json \r\n return df_response.to_json()\r\n \r\n\r\nif __name__ == '__main__':\r\n # host flask app at port 10001\r\n app.run(port=10001, debug=True)\r\n" ]
[ [ "pandas.DataFrame", "pandas.read_json" ] ]
SamLubbers/rainforest_land_use_classification
[ "8eb27b8cf4cacfacd53f066b05d5ea13196d1ba3" ]
[ "helpers/evaluation.py" ]
[ "import os\nimport sys\nfrom collections import OrderedDict\n\nimport pandas as pd\n\nfrom .data import extract_label_values\n\nsys.path.append(\"../\")\nfrom config import DATASETS_PATH\n\nfrom sklearn.metrics import fbeta_score\n\n\ndef evaluate_performance_validation(predictions, beta=2):\n labels = pd.read_csv(os.path.join(DATASETS_PATH, 'validation_labels.csv'))\n label_names = list(labels.columns[2:])\n labels = extract_label_values(labels)\n mean_f2 = fbeta_score(labels, predictions, beta, average='samples')\n per_class_f2 = fbeta_score(labels, predictions, beta, average=None)\n per_class_f2 = OrderedDict({l: v for l, v in zip(label_names, per_class_f2)})\n return mean_f2, per_class_f2\n" ]
[ [ "sklearn.metrics.fbeta_score" ] ]
TVM-for-armgpu/tvm
[ "4c4392306f5b4d21e85d633231bae71e935f39a2" ]
[ "python/tvm/autotvm/tuner/tuner.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n# pylint: disable=unused-argument, no-self-use, invalid-name\n\"\"\"Base class of tuner\"\"\"\nimport logging\nimport tempfile\nimport threading\n\nimport numpy as np\n\nfrom ..measure import MeasureInput, create_measure_batch\nfrom ..utils import format_si_prefix\n\nfrom ..env import GLOBAL_SCOPE\n\nlogger = logging.getLogger(\"autotvm\")\n\n\nclass SharedLock:\n '''\n A counter object that can be shared by multiple threads.\n '''\n _instance_lock = threading.Lock()\n\n def __init__(self):\n pass\n\n def __new__(cls, *args, **kwargs):\n if not hasattr(SharedLock, \"_instance\"):\n with SharedLock._instance_lock:\n if not hasattr(SharedLock, \"_instance\"):\n SharedLock._instance = super(SharedLock, cls).__new__(\n cls, *args, **kwargs)\n cls.value_lock = threading.Lock()\n return SharedLock._instance\n\n\nclass Tuner(object):\n \"\"\"Base class for tuners\n\n Parameters\n ----------\n task: autotvm.task.Task\n Tuning Task\n \"\"\"\n\n def __init__(self, task, **kwargs):\n self.param = kwargs\n self.recorder = None\n\n self.task = task\n\n # keep the current best\n self.best_config = None\n self.best_flops = 0\n self.best_measure_pair = None\n self.best_iter = 0\n\n # time to leave\n self.ttl = None\n self.n_trial = None\n self.early_stopping = None\n self.remote_lock = SharedLock()\n\n def has_next(self):\n \"\"\"Whether has next untried config in the space\n\n Returns\n -------\n has_next: bool\n \"\"\"\n raise NotImplementedError()\n\n def next_batch(self, batch_size):\n \"\"\"get the next batch of configs to be measure on real hardware\n\n Parameters\n ----------\n batch_size: int\n The size of the batch\n\n Returns\n -------\n a batch of configs\n \"\"\"\n raise NotImplementedError()\n\n def update(self, inputs, results):\n \"\"\"Update parameters of the tuner according to measurement results\n\n Parameters\n ----------\n inputs: Array of autotvm.measure.MeasureInput\n The input for measurement\n results: Array of autotvm.measure.MeasureResult\n result for measurement\n \"\"\"\n\n def tune(self, n_trial, measure_option, early_stopping=None, callbacks=(), si_prefix=\"G\"):\n \"\"\"Begin tuning\n\n Parameters\n ----------\n n_trial: int\n Maximum number of configs to try (measure on real hardware)\n measure_option: dict\n The options for how to measure generated code.\n You should use the return value ot autotvm.measure_option for this argument.\n early_stopping: int, optional\n Early stop the tuning when not finding better configs in this number of trials\n callbacks: List of callable\n A list of callback functions. The signature of callback function is\n (Tuner, List of MeasureInput, List of MeasureResult)\n with no return value. These callback functions will be called on\n every measurement pair. 
See autotvm/tuner/callback.py for some examples.\n si_prefix: str\n One of tvm.autotvm.utils.SI_PREFIXES. The SI prefix to use when reporting FLOPS.\n \"\"\"\n measure_batch = create_measure_batch(self.task, measure_option)\n n_parallel = getattr(measure_batch, \"n_parallel\", 1)\n early_stopping = early_stopping or 1e9\n self.n_trial = n_trial\n self.early_stopping = early_stopping\n\n # Validate si_prefix arg\n format_si_prefix(0, si_prefix)\n\n old_level = logger.level\n\n GLOBAL_SCOPE.in_tuning = True\n i = error_ct = 0\n errors = []\n while i < n_trial:\n if not self.has_next():\n break\n\n configs = self.next_batch(min(n_parallel, n_trial - i))\n\n inputs = [MeasureInput(self.task.target, self.task, config) for config in configs]\n #with self.remote_lock.value_lock:\n results = measure_batch(inputs)\n\n # keep best config\n for k, (inp, res) in enumerate(zip(inputs, results)):\n config = inp.config\n if res.error_no == 0:\n flops = inp.task.flop / np.mean(res.costs)\n error_ct = 0\n else:\n flops = 0\n error_ct += 1\n error = res.costs[0]\n if isinstance(error, str):\n errors.append(error)\n else:\n errors.append(str(error))\n\n if flops > self.best_flops:\n self.best_flops = flops\n self.best_config = config\n self.best_measure_pair = (inp, res)\n self.best_iter = i + k\n\n logger.debug(\n \"No: %d\\t%sFLOPS: %.2f/%.2f\\tresult: %s\\t%s\",\n i + k + 1,\n si_prefix,\n format_si_prefix(flops, si_prefix),\n format_si_prefix(self.best_flops, si_prefix),\n res,\n config,\n )\n\n i += len(results)\n self.ttl = min(early_stopping + self.best_iter, n_trial) - i\n\n self.update(inputs, results)\n for callback in callbacks:\n callback(self, inputs, results)\n\n if i >= self.best_iter + early_stopping and self.best_flops > 1:\n logger.debug(\"Early stopped. Best iter: %d.\", self.best_iter)\n break\n\n if error_ct > 150:\n logging.basicConfig()\n logger.warning(\"Too many errors happen in the tuning. Switching to debug mode.\")\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(old_level)\n\n if error_ct == i:\n _, f = tempfile.mkstemp(prefix=\"tvm_tuning_errors_\", suffix=\".log\", text=True)\n with open(f, \"w\") as file:\n file.write(\"\\n\".join(errors))\n logging.warning(\n \"Could not find any valid schedule for task %s. \"\n \"A file containing the errors has been written to %s.\",\n self.task,\n f,\n )\n GLOBAL_SCOPE.in_tuning = False\n del measure_batch\n\n def reset(self):\n \"\"\"reset the status of tuner\"\"\"\n self.best_config = None\n self.best_flops = 0\n self.best_measure_pair = None\n\n def load_history(self, data_set):\n \"\"\"load history data for transfer learning\n\n Parameters\n ----------\n data_set: Array of (autotvm.measure.MeasureInput, autotvm.measure.MeasureResult) pair\n Previous tuning records\n \"\"\"\n raise NotImplementedError()\n" ]
[ [ "numpy.mean" ] ]
ManianVSS/Shani
[ "2c9e5c0f8a0f073d891a84c38131da02deff0330" ]
[ "ucm_drf/api/ipte_util.py" ]
[ "from scipy.stats.distributions import chi2\n\n\n#\n# def ipte_formula(iterations_or_ipte, number_of_incidents, confidence):\n# chi_square_inverse_right_tailed = chi2.ppf(confidence, df=2 * (number_of_incidents + 1))\n# return chi_square_inverse_right_tailed * (1000 / (iterations_or_ipte * 2))\n#\n\ndef calculate_iterations_required(required_ipte, number_of_incidents, confidence=0.9):\n chi_square_inverse_right_tailed = chi2.ppf(confidence, df=2 * (number_of_incidents + 1))\n return chi_square_inverse_right_tailed * (1000 / (required_ipte * 2))\n # return ipte_formula(required_ipte, number_of_incidents, 0.9)\n\n\ndef calculate_ipte(number_of_iterations, number_of_incidents, confidence=0.9):\n chi_square_inverse_right_tailed = chi2.ppf(confidence, df=2 * (number_of_incidents + 1))\n return chi_square_inverse_right_tailed * (1000 / (number_of_iterations * 2))\n\n# Test\n# def test_util(number_of_incidents=0, number_of_iterations=576, confidence_interval=0.9, required_ipte=4.0):\n# print(\"Number of incidents=\", number_of_incidents)\n# print(\"Total iterations=\", number_of_iterations)\n# print(\"Confidence Interval(2 sided)=\", confidence_interval)\n# ipti = calculate_ipte(number_of_iterations, number_of_incidents)\n# number_of_iterations_required = calculate_iterations_required(required_ipte, number_of_incidents)\n# print(\"IPTI: \", ipti, \" \\nNumber of iterations required to get this IPTI: \", number_of_iterations_required)\n# test_util()\n" ]
[ [ "scipy.stats.distributions.chi2.ppf" ] ]
ethall/LED-Timeline
[ "f8d25c112c944db7d698eb7b9b31bc2d6b9c12e0" ]
[ "p03_gray.py" ]
[ "import cv2 as _cv\nimport numpy as _np\n\nfrom _util import get_frames as _get_frames, matrix_to_video as _matrix_to_video\n\n\ndef to_grayscale(source: str, destination: str) -> None:\n vid = _cv.VideoCapture(source)\n\n # (t,y,x,chan)\n denoised_color_movie = _get_frames(vid)\n print(\"Converting to grayscale...\")\n # (t,y,x)\n gray_movie = _np.zeros(denoised_color_movie.shape[0:3], _np.uint8) # type: ignore\n for index in range(denoised_color_movie.shape[0]):\n print(f\" {index + 1: 5}/{denoised_color_movie.shape[0]}\\u000D\", end=\"\")\n gray_movie[index] = _cv.cvtColor(\n denoised_color_movie[index], _cv.COLOR_BGR2GRAY\n )\n else:\n print(\"\")\n\n _matrix_to_video(\n gray_movie,\n destination,\n \"mp4v\",\n vid.get(_cv.CAP_PROP_FPS),\n int(vid.get(_cv.CAP_PROP_FRAME_WIDTH)),\n int(vid.get(_cv.CAP_PROP_FRAME_HEIGHT)),\n is_color=False,\n )\n\n vid.release()\n\n\nif __name__ == \"__main__\":\n to_grayscale(\"target_p02_denoised.mp4\", \"target_p03_gray.mp4\")\n" ]
[ [ "numpy.zeros" ] ]
danilobr94/dynamic-aif-framework
[ "39710501c4d3383c595b058d963719a0e9858c0b" ]
[ "ltf/state_plot.py" ]
[ "\"\"\"2D Scatter plot for data points of one iteration.\"\"\"\nimport numpy as np\n\n\nclass StatePlot:\n \"\"\"Plot data of one time step.\"\"\"\n\n def __init__(self, x_lim=None, y_lim=None, pos_lbl=1, neg_lbl=0, pos_cls=1, neg_cls=0):\n \"\"\"\n\n Args:\n x_lim [int, int]: Min and max value for x-axis of plot.\n y_lim [int, int]: Min and max value for y-axis of plot.\n pos_cls (int): The positive class of the protected attribute.\n neg_cls (int): The negative class of the protected attribute.\n pos_lbl (int): The positive label.\n neg_lbl (int): The negative label.\n \"\"\"\n\n self._pos_label = pos_lbl\n self._neg_label = neg_lbl\n\n self._pos_class = pos_cls\n self._neg_class = neg_cls\n\n self._x_lim = x_lim\n self._y_lim = y_lim\n\n def get_decision_boundary(self, X_t, predict_func, num_points=500):\n \"\"\"Return triple representing the decision boundary generated by predict_func.\n\n Args:\n X_t (optional): Dataset, used to estimate min and max values if _x_lim and _y_lim are None.\n predict_func (function): The classifiers prediction function, {X, X_sense} -> R.\n num_points (int): Number of points used for mesh grid.\n\n Returns:\n xx, yy, Z (numpy.ndarray, numpy.ndarray, numpy.ndarray): The decision boundary.\n 2D arrays of shape [num_points x num_points]. xx and yy is the meshgrid and\n Z predictions for the meshgrid reshaped to shape of xx.\n \"\"\"\n\n if self._x_lim is not None and self._y_lim is not None:\n x1_min, x1_max = self._x_lim\n x2_min, x2_max = self._y_lim\n else:\n x1_min, x1_max = X_t[:, 0].min() - 3, X_t[:, 0].max() + 3\n x2_min, x2_max = X_t[:, 1].min() - 3, X_t[:, 1].max() + 3\n\n x1_step = (x1_max - x1_min) / num_points\n x2_step = (x2_max - x2_min) / num_points\n\n xx, yy = np.meshgrid(np.arange(x1_min, x1_max, x1_step),\n np.arange(x2_min, x2_max, x2_step))\n\n mesh = np.c_[xx.ravel(), yy.ravel()]\n\n # TODO: generate protected attribute in plausible way\n X_sens_dummy = np.zeros(np.shape(mesh)[0])\n\n Z = predict_func(mesh, X_sens_dummy)\n Z = Z.reshape(xx.shape)\n\n return xx, yy, Z\n\n @staticmethod\n def add_decision_boundary(ax, D, cmap=None, label=\"\"):\n \"\"\"Add the decision boundary to the plot axis ax.\n\n Args:\n ax (matplotlib.pyplot.axes): Axis to add boundary to.\n D (numpy.ndarray, numpy.ndarray, numpy.ndarray): The decision boundary as returned above.\n cmap (str): Colormap, https://matplotlib.org/tutorials/colors/colormaps.html.\n label (str): Label for the boundary.\n \"\"\"\n\n xx, yy, Z = D\n\n if cmap is not None:\n ax.contourf(xx, yy, Z, cmap=cmap, alpha=.5)\n else:\n CS = ax.contour(xx, yy, Z)\n CS.collections[0].set_label(label)\n\n def scatter_data_points(self, ax, X_t, y_hat_t, X_sensitive_t, title=\"\", print_stats=False):\n \"\"\"Scatter plot points (X_t, y_hat_t, X_sensitive_t) at axis ax.\n\n Args:\n ax (matplotlib.pyplot.axes): Axis to scatter points.\n X_t (array_like): 2D, features at time step t.\n y_hat_t (array_like): 1D, predictions at time step t.\n X_sensitive_t (array_like): 1D. 
sensitive features at time step t.\n title (str): Label for the axis.\n print_stats (bool): If true, stats are printed in each call.\n \"\"\"\n\n pos_lbl_mask = y_hat_t == self._pos_label\n neg_lbl_mask = y_hat_t == self._neg_label\n\n pos_cls_mask = X_sensitive_t == self._pos_class\n neg_cls_mask = X_sensitive_t == self._neg_class\n\n pos_lbl_pos_cls = np.logical_and(pos_lbl_mask, pos_cls_mask)\n ax.scatter(X_t[pos_lbl_pos_cls, 0],\n X_t[pos_lbl_pos_cls, 1],\n label=\"pos label and class \" + str(np.sum(pos_lbl_pos_cls)),\n marker=\"x\",\n c=\"green\")\n\n pos_lbl_neg_cls = np.logical_and(pos_lbl_mask, neg_cls_mask)\n ax.scatter(X_t[pos_lbl_neg_cls, 0],\n X_t[pos_lbl_neg_cls, 1],\n label=\"pos label and neg class \" + str(np.sum(pos_lbl_neg_cls)),\n marker=\"o\",\n c=\"darkgreen\")\n\n neg_lbl_pos_cls = np.logical_and(neg_lbl_mask, pos_cls_mask)\n ax.scatter(X_t[neg_lbl_pos_cls, 0],\n X_t[neg_lbl_pos_cls, 1],\n label=\"neg label and pos class \" + str(np.sum(neg_lbl_pos_cls)),\n marker=\"x\",\n c=\"darkred\")\n\n neg_lbl_neg_cls = np.logical_and(neg_lbl_mask, neg_cls_mask)\n ax.scatter(X_t[neg_lbl_neg_cls, 0],\n X_t[neg_lbl_neg_cls, 1],\n label=\"neg label and class \" + str(np.sum(neg_lbl_neg_cls)),\n marker=\"o\",\n c=\"red\")\n\n if print_stats:\n txt = \"number of positive labels:\" + str(np.sum(pos_lbl_mask)) + \\\n \"\\nnumber of negative labels: \" + str(np.sum(neg_lbl_mask)) + \"\\n\" + \\\n \"\\npositive label positive class: \" + str(np.sum(pos_lbl_pos_cls)) + \\\n \"\\npositive label negative class: \" + str(np.sum(pos_lbl_neg_cls)) + \\\n \"\\nnegative label positive class: \" + str(np.sum(neg_lbl_pos_cls)) + \\\n \"\\nnegative label negative class: \" + str(np.sum(neg_lbl_neg_cls))\n\n print(txt)\n\n txt = \"num positive labels :\" + str(np.sum(pos_lbl_mask)) + \\\n \"\\nnum negative labels: \" + str(np.sum(neg_lbl_mask)) + \"\\n\"\n\n if self._x_lim is not None and self._y_lim is not None:\n ax.set_xlim(self._x_lim)\n ax.set_ylim(self._y_lim)\n\n ax.set_title(title + \"\\n\" + txt)\n" ]
[ [ "numpy.arange", "numpy.sum", "numpy.logical_and", "numpy.shape" ] ]
xiabo0816/ChemNer
[ "d89f1913a5510d21171bee6809a08bbce7d3d4cc" ]
[ "BiLSTM-CRF-baseline-model/run_lstm_crf.py" ]
[ "import json\r\nimport torch\r\nimport argparse\r\nimport torch.nn as nn\r\nfrom torch import optim\r\nimport config\r\nfrom model import NERModel\r\nfrom dataset_loader import DatasetLoader\r\nfrom progressbar import ProgressBar\r\nfrom ner_metrics import SeqEntityScore\r\nfrom data_processor import CluenerProcessor\r\nfrom lr_scheduler import ReduceLROnPlateau\r\nfrom utils_ner import get_entities\r\nfrom common import (init_logger,\r\n logger,\r\n json_to_text,\r\n load_model,\r\n AverageMeter,\r\n seed_everything)\r\n\r\ndef train(args,model,processor):\r\n train_dataset = load_and_cache_examples(args, processor, data_type='train')\r\n train_loader = DatasetLoader(data=train_dataset, batch_size=args.batch_size,\r\n shuffle=False, seed=args.seed, sort=True,\r\n vocab = processor.vocab,label2id = args.label2id)\r\n parameters = [p for p in model.parameters() if p.requires_grad]\r\n optimizer = optim.Adam(parameters, lr=args.learning_rate)\r\n scheduler = ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=3,\r\n verbose=1, epsilon=1e-4, cooldown=0, min_lr=0, eps=1e-8)\r\n best_f1 = 0\r\n for epoch in range(1, 1 + args.epochs):\r\n print(f\"Epoch {epoch}/{args.epochs}\")\r\n pbar = ProgressBar(n_total=len(train_loader), desc='Training')\r\n train_loss = AverageMeter()\r\n model.train()\r\n assert model.training\r\n for step, batch in enumerate(train_loader):\r\n input_ids, input_mask, input_tags, input_lens = batch\r\n input_ids = input_ids.to(args.device)\r\n input_mask = input_mask.to(args.device)\r\n input_tags = input_tags.to(args.device)\r\n features, loss = model.forward_loss(input_ids, input_mask, input_lens, input_tags)\r\n loss.backward()\r\n torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm)\r\n optimizer.step()\r\n optimizer.zero_grad()\r\n pbar(step=step, info={'loss': loss.item()})\r\n train_loss.update(loss.item(), n=1)\r\n print(\" \")\r\n train_log = {'loss': train_loss.avg}\r\n if 'cuda' in str(args.device):\r\n torch.cuda.empty_cache()\r\n eval_log, class_info = evaluate(args,model,processor)\r\n logs = dict(train_log, **eval_log)\r\n show_info = f'\\nEpoch: {epoch} - ' + \"-\".join([f' {key}: {value:.4f} ' for key, value in logs.items()])\r\n logger.info(show_info)\r\n scheduler.epoch_step(logs['eval_f1'], epoch)\r\n if logs['eval_f1'] > best_f1:\r\n logger.info(f\"\\nEpoch {epoch}: eval_f1 improved from {best_f1} to {logs['eval_f1']}\")\r\n logger.info(\"save model to disk.\")\r\n best_f1 = logs['eval_f1']\r\n if isinstance(model, nn.DataParallel):\r\n model_stat_dict = model.module.state_dict()\r\n else:\r\n model_stat_dict = model.state_dict()\r\n state = {'epoch': epoch, 'arch': args.arch, 'state_dict': model_stat_dict}\r\n model_path = args.output_dir / 'best-model.bin'\r\n torch.save(state, str(model_path))\r\n print(\"Eval Entity Score: \")\r\n for key, value in class_info.items():\r\n info = f\"Subject: {key} - Acc: {value['acc']} - Recall: {value['recall']} - F1: {value['f1']}\"\r\n logger.info(info)\r\n\r\ndef evaluate(args,model,processor):\r\n eval_dataset = load_and_cache_examples(args,processor, data_type='dev')\r\n eval_dataloader = DatasetLoader(data=eval_dataset, batch_size=args.batch_size,\r\n shuffle=False, seed=args.seed, sort=False,\r\n vocab=processor.vocab, label2id=args.label2id)\r\n pbar = ProgressBar(n_total=len(eval_dataloader), desc=\"Evaluating\")\r\n metric = SeqEntityScore(args.id2label,markup=args.markup)\r\n eval_loss = AverageMeter()\r\n model.eval()\r\n fout = open(args.output_dir / 
'bilstm+crf.result.txt','w')\r\n with torch.no_grad():\r\n for step, batch in enumerate(eval_dataloader):\r\n input_chars, input_ids, input_mask, input_tags, input_lens = batch\r\n input_ids = input_ids.to(args.device)\r\n input_mask = input_mask.to(args.device)\r\n input_tags = input_tags.to(args.device)\r\n features, loss = model.forward_loss(input_ids, input_mask, input_lens, input_tags)\r\n eval_loss.update(val=loss.item(), n=input_ids.size(0))\r\n tags, _ = model.crf._obtain_labels(features, args.id2label, input_lens)\r\n input_tags = input_tags.cpu().numpy()\r\n target = [input_[:len_] for input_, len_ in zip(input_tags, input_lens)]\r\n # 从这里可以看出,这个输出只适用于batch=1\r\n assert(len(tags[0])==len(input_tags[0]))\r\n for i in range(len(tags[0])):\r\n fout.write(input_chars[i] + ' ' + args.id2label[input_tags[0][i]] + ' ' + tags[0][i] + '\\n')\r\n print(input_chars[i], tags[0][i], args.id2label[input_tags[0][i]])\r\n # print(processor.vocab.to_word(input_chars[0][i]), tags[0][i], args.id2label[input_tags[0][i]])\r\n fout.write(\"\\n\")\r\n metric.update(pred_paths=tags, label_paths=target)\r\n pbar(step=step)\r\n print(\" \")\r\n fout.close()\r\n eval_info, class_info = metric.result()\r\n eval_info = {f'eval_{key}': value for key, value in eval_info.items()}\r\n result = {'eval_loss': eval_loss.avg}\r\n result = dict(result, **eval_info)\r\n return result, class_info\r\n\r\ndef predict(args,model,processor):\r\n model_path = args.output_dir / 'best-model.bin'\r\n model = load_model(model, model_path=str(model_path))\r\n test_data = []\r\n with open(str(args.data_dir / \"test.txt\"), 'r') as f:\r\n idx = 0\r\n for line in f:\r\n json_d = {}\r\n line = json.loads(line.strip())\r\n text = line['text']\r\n words = list(text)\r\n labels = ['O'] * len(words)\r\n json_d['id'] = idx\r\n json_d['context'] = \" \".join(words)\r\n json_d['tag'] = \" \".join(labels)\r\n json_d['raw_context'] = \"\".join(words)\r\n idx += 1\r\n test_data.append(json_d)\r\n pbar = ProgressBar(n_total=len(test_data))\r\n results = []\r\n for step, line in enumerate(test_data):\r\n token_a = line['context'].split(\" \")\r\n input_ids = [processor.vocab.to_index(w) for w in token_a]\r\n input_mask = [1] * len(token_a)\r\n input_lens = [len(token_a)]\r\n model.eval()\r\n with torch.no_grad():\r\n input_ids = torch.tensor([input_ids], dtype=torch.long)\r\n input_mask = torch.tensor([input_mask], dtype=torch.long)\r\n input_lens = torch.tensor([input_lens], dtype=torch.long)\r\n input_ids = input_ids.to(args.device)\r\n input_mask = input_mask.to(args.device)\r\n features = model.forward_loss(input_ids, input_mask, input_lens, input_tags=None)\r\n tags, _ = model.crf._obtain_labels(features, args.id2label, input_lens)\r\n label_entities = get_entities(tags[0], args.id2label)\r\n json_d = {}\r\n json_d['id'] = step\r\n json_d['tag_seq'] = \" \".join(tags[0])\r\n json_d['entities'] = label_entities\r\n results.append(json_d)\r\n pbar(step=step)\r\n print(\" \")\r\n output_predic_file = str(args.output_dir / \"test_prediction.json\")\r\n output_submit_file = str(args.output_dir / \"test_submit.json\")\r\n with open(output_predic_file, \"w\") as writer:\r\n for record in results:\r\n writer.write(json.dumps(record) + '\\n')\r\n test_text = []\r\n with open(str(args.data_dir / 'test.json'), 'r') as fr:\r\n for line in fr:\r\n test_text.append(json.loads(line))\r\n test_submit = []\r\n for x, y in zip(test_text, results):\r\n json_d = {}\r\n json_d['id'] = x['id']\r\n json_d['label'] = {}\r\n entities = y['entities']\r\n words = 
list(x['text'])\r\n if len(entities) != 0:\r\n for subject in entities:\r\n tag = subject[0]\r\n start = subject[1]\r\n end = subject[2]\r\n word = \"\".join(words[start:end + 1])\r\n if tag in json_d['label']:\r\n if word in json_d['label'][tag]:\r\n json_d['label'][tag][word].append([start, end])\r\n else:\r\n json_d['label'][tag][word] = [[start, end]]\r\n else:\r\n json_d['label'][tag] = {}\r\n json_d['label'][tag][word] = [[start, end]]\r\n test_submit.append(json_d)\r\n json_to_text(output_submit_file, test_submit)\r\n\r\ndef load_and_cache_examples(args,processor, data_type='train'):\r\n # Load data features from cache or dataset file\r\n cached_examples_file = args.data_dir / 'cached_crf-{}_{}_{}'.format(\r\n data_type,\r\n args.arch,\r\n str(args.task_name))\r\n if cached_examples_file.exists():\r\n logger.info(\"Loading features from cached file %s\", cached_examples_file)\r\n examples = torch.load(cached_examples_file)\r\n else:\r\n logger.info(\"Creating features from dataset file at %s\", args.data_dir)\r\n if data_type == 'train':\r\n examples = processor.get_train_examples()\r\n elif data_type == 'dev':\r\n examples = processor.get_dev_examples()\r\n logger.info(\"Saving features into cached file %s\", cached_examples_file)\r\n torch.save(examples, str(cached_examples_file))\r\n return examples\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n # Required parameters\r\n parser.add_argument(\"--do_train\", default=False, action='store_true')\r\n parser.add_argument('--do_eval', default=False, action='store_true')\r\n parser.add_argument(\"--do_predict\", default=False, action='store_true')\r\n\r\n parser.add_argument('--markup', default='bios', type=str, choices=['bios', 'bio'])\r\n parser.add_argument(\"--arch\",default='bilstm_crf',type=str)\r\n parser.add_argument('--learning_rate',default=0.001,type=float)\r\n parser.add_argument('--seed',default=1234,type=int)\r\n parser.add_argument('--gpu',default='0',type=str)\r\n parser.add_argument('--epochs',default=50,type=int)\r\n parser.add_argument('--batch_size',default=1,type=int)\r\n parser.add_argument('--embedding_size',default=128,type=int)\r\n parser.add_argument('--hidden_size',default=384,type=int)\r\n parser.add_argument(\"--grad_norm\", default=5.0, type=float, help=\"Max gradient norm.\")\r\n parser.add_argument(\"--task_name\", type=str, default='ner')\r\n args = parser.parse_args()\r\n args.data_dir = config.data_dir\r\n if not config.output_dir.exists():\r\n args.output_dir.mkdir()\r\n args.output_dir = config.output_dir / '{}'.format(args.arch)\r\n if not args.output_dir.exists():\r\n args.output_dir.mkdir()\r\n init_logger(log_file=str(args.output_dir / '{}-{}.log'.format(args.arch, args.task_name)))\r\n seed_everything(args.seed)\r\n if args.gpu!='':\r\n args.device = torch.device(f\"cuda:{args.gpu}\")\r\n else:\r\n args.device = torch.device(\"cpu\")\r\n args.id2label = {i: label for i, label in enumerate(config.label2id)}\r\n args.label2id = config.label2id\r\n processor = CluenerProcessor(data_dir=config.data_dir)\r\n processor.get_vocab()\r\n model = NERModel(vocab_size=len(processor.vocab), embedding_size=args.embedding_size,\r\n hidden_size=args.hidden_size,device=args.device,label2id=args.label2id)\r\n model.to(args.device)\r\n if args.do_train:\r\n train(args,model,processor)\r\n if args.do_eval:\r\n model_path = args.output_dir / 'best-model.bin'\r\n model = load_model(model, model_path=str(model_path))\r\n evaluate(args,model,processor)\r\n if args.do_predict:\r\n 
predict(args,model,processor)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n" ]
[ [ "torch.cuda.empty_cache", "torch.load", "torch.no_grad", "torch.tensor", "torch.optim.Adam", "torch.device" ] ]
saurabhya/kornia
[ "f2b4fe9fb32d99795783f25b5a4c561001783ebf" ]
[ "test/feature/test_laf.py" ]
[ "import pytest\nimport torch\nfrom torch.autograd import gradcheck\n\nimport kornia\nimport kornia.geometry.transform.imgwarp\nimport kornia.testing as utils # test utils\nfrom kornia.testing import assert_close\n\n\nclass TestAngleToRotationMatrix:\n def test_shape(self, device):\n inp = torch.ones(1, 3, 4, 4).to(device)\n rotmat = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix(inp)\n assert rotmat.shape == (1, 3, 4, 4, 2, 2)\n\n def test_angles(self, device):\n ang_deg = torch.tensor([0, 90.0], device=device)\n expected = torch.tensor([[[1.0, 0.0], [0.0, 1.0]], [[0, 1.0], [-1.0, 0]]], device=device)\n rotmat = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix(ang_deg)\n assert_close(rotmat, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 2, 5, 4\n img = torch.rand(batch_size, channels, height, width, device=device)\n img = utils.tensor_to_gradcheck_var(img) # to var\n assert gradcheck(kornia.geometry.transform.imgwarp.angle_to_rotation_matrix, (img,), raise_exception=True)\n\n @pytest.mark.jit\n @pytest.mark.skip(\"Problems with kornia.pi\")\n def test_jit(self, device, dtype):\n B, C, H, W = 2, 1, 32, 32\n patches = torch.rand(B, C, H, W, device=device, dtype=dtype)\n model = kornia.geometry.transform.imgwarp.angle_to_rotation_matrix\n model_jit = torch.jit.script(kornia.geometry.transform.imgwarp.angle_to_rotation_matrix)\n assert_close(model(patches), model_jit(patches))\n\n\nclass TestGetLAFScale:\n def test_shape(self, device):\n inp = torch.ones(1, 3, 2, 3, device=device)\n rotmat = kornia.feature.get_laf_scale(inp)\n assert rotmat.shape == (1, 3, 1, 1)\n\n def test_scale(self, device):\n inp = torch.tensor([[5.0, 1, 0], [1, 1, 0]], device=device).float()\n inp = inp.view(1, 1, 2, 3)\n expected = torch.tensor([[[[2]]]], device=device).float()\n rotmat = kornia.feature.get_laf_scale(inp)\n assert_close(rotmat, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 2, 2, 3\n img = torch.rand(batch_size, channels, height, width, device=device)\n img = utils.tensor_to_gradcheck_var(img) # to var\n assert gradcheck(kornia.feature.get_laf_scale, (img,), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 1, 2, 2, 3\n img = torch.rand(batch_size, channels, height, width, device=device)\n model = kornia.feature.get_laf_scale\n model_jit = torch.jit.script(kornia.feature.get_laf_scale)\n assert_close(model(img), model_jit(img))\n\n\nclass TestGetLAFCenter:\n def test_shape(self, device):\n inp = torch.ones(1, 3, 2, 3, device=device)\n xy = kornia.feature.get_laf_center(inp)\n assert xy.shape == (1, 3, 2)\n\n def test_center(self, device):\n inp = torch.tensor([[5.0, 1, 2], [1, 1, 3]], device=device).float()\n inp = inp.view(1, 1, 2, 3)\n expected = torch.tensor([[[2, 3]]], device=device).float()\n xy = kornia.feature.get_laf_center(inp)\n assert_close(xy, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 2, 2, 3\n img = torch.rand(batch_size, channels, height, width)\n img = utils.tensor_to_gradcheck_var(img) # to var\n assert gradcheck(kornia.feature.get_laf_center, (img,), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 1, 2, 2, 3\n img = torch.rand(batch_size, channels, height, width, device=device)\n model = kornia.feature.get_laf_center\n model_jit = torch.jit.script(kornia.feature.get_laf_center)\n 
assert_close(model(img), model_jit(img))\n\n\nclass TestGetLAFOri:\n def test_shape(self, device):\n inp = torch.ones(1, 3, 2, 3, device=device)\n ori = kornia.feature.get_laf_orientation(inp)\n assert ori.shape == (1, 3, 1)\n\n def test_ori(self, device):\n inp = torch.tensor([[1, 1, 2], [1, 1, 3]], device=device).float()\n inp = inp.view(1, 1, 2, 3)\n expected = torch.tensor([[[45.0]]], device=device).float()\n angle = kornia.feature.get_laf_orientation(inp)\n assert_close(angle, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 2, 2, 3\n img = torch.rand(batch_size, channels, height, width, device=device)\n img = utils.tensor_to_gradcheck_var(img) # to var\n assert gradcheck(kornia.feature.get_laf_orientation, (img,), raise_exception=True)\n\n @pytest.mark.jit\n @pytest.mark.skip(\"Union\")\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 1, 2, 2, 3\n img = torch.rand(batch_size, channels, height, width, device=device)\n model = kornia.feature.get_laf_orientation\n model_jit = torch.jit.script(kornia.feature.get_laf_orientation)\n assert_close(model(img), model_jit(img))\n\n\nclass TestScaleLAF:\n def test_shape_float(self, device):\n inp = torch.ones(7, 3, 2, 3, device=device).float()\n scale = 23.0\n assert kornia.feature.scale_laf(inp, scale).shape == inp.shape\n\n def test_shape_tensor(self, device):\n inp = torch.ones(7, 3, 2, 3, device=device).float()\n scale = torch.zeros(7, 1, 1, 1, device=device).float()\n assert kornia.feature.scale_laf(inp, scale).shape == inp.shape\n\n def test_scale(self, device):\n inp = torch.tensor([[5.0, 1, 0.8], [1, 1, -4.0]], device=device).float()\n inp = inp.view(1, 1, 2, 3)\n scale = torch.tensor([[[[2.0]]]], device=device).float()\n out = kornia.feature.scale_laf(inp, scale)\n expected = torch.tensor([[[[10.0, 2, 0.8], [2, 2, -4.0]]]], device=device).float()\n assert_close(out, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 2, 2, 3\n laf = torch.rand(batch_size, channels, height, width, device=device)\n scale = torch.rand(batch_size, device=device)\n scale = utils.tensor_to_gradcheck_var(scale) # to var\n laf = utils.tensor_to_gradcheck_var(laf) # to var\n assert gradcheck(kornia.feature.scale_laf, (laf, scale), raise_exception=True, atol=1e-4)\n\n @pytest.mark.jit\n @pytest.mark.skip(\"Union\")\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 1, 2, 2, 3\n laf = torch.rand(batch_size, channels, height, width, device=device)\n scale = torch.rand(batch_size, device=device)\n model = kornia.feature.scale_laf\n model_jit = torch.jit.script(kornia.feature.scale_laf)\n assert_close(model(laf, scale), model_jit(laf, scale))\n\n\nclass TestSetLAFOri:\n def test_shape_tensor(self, device):\n inp = torch.ones(7, 3, 2, 3, device=device).float()\n ori = torch.ones(7, 3, 1, 1, device=device).float()\n assert kornia.feature.set_laf_orientation(inp, ori).shape == inp.shape\n\n def test_ori(self, device):\n inp = torch.tensor([[0.0, 5.0, 0.8], [-5.0, 0, -4.0]], device=device).float()\n inp = inp.view(1, 1, 2, 3)\n ori = torch.zeros(1, 1, 1, 1, device=device).float()\n out = kornia.feature.set_laf_orientation(inp, ori)\n expected = torch.tensor([[[[5.0, 0.0, 0.8], [0.0, 5.0, -4.0]]]], device=device).float()\n assert_close(out, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 2, 2, 3\n laf = torch.rand(batch_size, channels, height, width, device=device)\n ori = torch.rand(batch_size, 
channels, 1, 1, device=device)\n ori = utils.tensor_to_gradcheck_var(ori) # to var\n laf = utils.tensor_to_gradcheck_var(laf) # to var\n assert gradcheck(kornia.feature.set_laf_orientation, (laf, ori), raise_exception=True, atol=1e-4)\n\n @pytest.mark.jit\n @pytest.mark.skip(\"Union\")\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 1, 2, 2, 3\n laf = torch.rand(batch_size, channels, height, width, device=device)\n ori = torch.rand(batch_size, channels, 1, 1, device=device)\n model = kornia.feature.set_laf_orientation\n model_jit = torch.jit.script(kornia.feature.set_laf_orientation)\n assert_close(model(laf, ori), model_jit(laf, ori))\n\n\nclass TestMakeUpright:\n def test_shape(self, device):\n inp = torch.ones(5, 3, 2, 3, device=device)\n rotmat = kornia.feature.make_upright(inp)\n assert rotmat.shape == (5, 3, 2, 3)\n\n def test_do_nothing(self, device):\n inp = torch.tensor([[1, 0, 0], [0, 1, 0]], device=device).float()\n inp = inp.view(1, 1, 2, 3)\n expected = torch.tensor([[[[1, 0, 0], [0, 1, 0]]]], device=device).float()\n laf = kornia.feature.make_upright(inp)\n assert_close(laf, expected)\n\n def test_do_nothing_with_scalea(self, device):\n inp = torch.tensor([[2, 0, 0], [0, 2, 0]], device=device).float()\n inp = inp.view(1, 1, 2, 3)\n expected = torch.tensor([[[[2, 0, 0], [0, 2, 0]]]], device=device).float()\n laf = kornia.feature.make_upright(inp)\n assert_close(laf, expected)\n\n def test_check_zeros(self, device):\n inp = torch.rand(4, 5, 2, 3, device=device)\n laf = kornia.feature.make_upright(inp)\n must_be_zeros = laf[:, :, 0, 1]\n assert_close(must_be_zeros, torch.zeros_like(must_be_zeros))\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 14, 2, 2, 3\n img = torch.rand(batch_size, channels, height, width, device=device)\n img = utils.tensor_to_gradcheck_var(img) # to var\n assert gradcheck(kornia.feature.make_upright, (img,), raise_exception=True)\n\n @pytest.mark.jit\n @pytest.mark.skip(\"Union\")\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 1, 2, 2, 3\n img = torch.rand(batch_size, channels, height, width, device=device)\n model = kornia.feature.make_upright\n model_jit = torch.jit.script(kornia.feature.make_upright)\n assert_close(model(img), model_jit(img))\n\n\nclass TestELL2LAF:\n def test_shape(self, device):\n inp = torch.ones(5, 3, 5, device=device)\n inp[:, :, 3] = 0\n rotmat = kornia.feature.ellipse_to_laf(inp)\n assert rotmat.shape == (5, 3, 2, 3)\n\n def test_conversion(self, device):\n inp = torch.tensor([[10, -20, 0.01, 0, 0.01]], device=device).float()\n inp = inp.view(1, 1, 5)\n expected = torch.tensor([[10, 0, 10.0], [0, 10, -20]], device=device).float()\n expected = expected.view(1, 1, 2, 3)\n laf = kornia.feature.ellipse_to_laf(inp)\n assert_close(laf, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height = 1, 2, 5\n img = torch.rand(batch_size, channels, height, device=device).abs()\n img[:, :, 2] = img[:, :, 3].abs() + 0.3\n img[:, :, 4] += 1.0\n # assure it is positive definite\n img = utils.tensor_to_gradcheck_var(img) # to var\n assert gradcheck(kornia.feature.ellipse_to_laf, (img,), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n batch_size, channels, height = 1, 2, 5\n img = torch.rand(batch_size, channels, height, device=device).abs()\n img[:, :, 2] = img[:, :, 3].abs() + 0.3\n img[:, :, 4] += 1.0\n model = kornia.feature.ellipse_to_laf\n model_jit = 
torch.jit.script(kornia.feature.ellipse_to_laf)\n assert_close(model(img), model_jit(img))\n\n\nclass TestNormalizeLAF:\n def test_shape(self, device):\n inp = torch.rand(5, 3, 2, 3)\n img = torch.rand(5, 3, 10, 10)\n assert inp.shape == kornia.feature.normalize_laf(inp, img).shape\n\n def test_conversion(self, device):\n w, h = 10, 5\n laf = torch.tensor([[1, 0, 1], [0, 1, 1]]).float()\n laf = laf.view(1, 1, 2, 3)\n img = torch.rand(1, 3, h, w)\n expected = torch.tensor([[[[0.2, 0, 0.1], [0, 0.2, 0.2]]]]).float()\n lafn = kornia.feature.normalize_laf(laf, img)\n assert_close(lafn, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 2, 2, 3\n\n laf = torch.rand(batch_size, channels, height, width)\n img = torch.rand(batch_size, 3, 10, 32)\n img = utils.tensor_to_gradcheck_var(img) # to var\n laf = utils.tensor_to_gradcheck_var(laf) # to var\n assert gradcheck(kornia.feature.normalize_laf, (laf, img), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 1, 2, 2, 3\n\n laf = torch.rand(batch_size, channels, height, width)\n img = torch.rand(batch_size, 3, 10, 32)\n model = kornia.feature.normalize_laf\n model_jit = torch.jit.script(kornia.feature.normalize_laf)\n assert_close(model(laf, img), model_jit(laf, img))\n\n\nclass TestLAF2pts:\n def test_shape(self, device):\n inp = torch.rand(5, 3, 2, 3, device=device)\n n_pts = 13\n assert kornia.feature.laf_to_boundary_points(inp, n_pts).shape == (5, 3, n_pts, 2)\n\n def test_conversion(self, device):\n laf = torch.tensor([[1, 0, 1], [0, 1, 1]], device=device).float()\n laf = laf.view(1, 1, 2, 3)\n n_pts = 6\n expected = torch.tensor([[[[1, 1], [1, 2], [2, 1], [1, 0], [0, 1], [1, 2]]]], device=device).float()\n pts = kornia.feature.laf_to_boundary_points(laf, n_pts)\n assert_close(pts, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 3, 2, 2, 3\n laf = torch.rand(batch_size, channels, height, width, device=device)\n laf = utils.tensor_to_gradcheck_var(laf) # to var\n assert gradcheck(kornia.feature.laf_to_boundary_points, (laf), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 3, 2, 2, 3\n laf = torch.rand(batch_size, channels, height, width, device=device)\n model = kornia.feature.laf_to_boundary_points\n model_jit = torch.jit.script(kornia.feature.laf_to_boundary_points)\n assert_close(model(laf), model_jit(laf))\n\n\nclass TestDenormalizeLAF:\n def test_shape(self, device):\n inp = torch.rand(5, 3, 2, 3, device=device)\n img = torch.rand(5, 3, 10, 10, device=device)\n assert inp.shape == kornia.feature.denormalize_laf(inp, img).shape\n\n def test_conversion(self, device):\n w, h = 10, 5\n expected = torch.tensor([[1, 0, 1], [0, 1, 1]], device=device).float()\n expected = expected.view(1, 1, 2, 3)\n img = torch.rand(1, 3, h, w, device=device)\n lafn = torch.tensor([[0.2, 0, 0.1], [0, 0.2, 0.2]], device=device).float()\n laf = kornia.feature.denormalize_laf(lafn.view(1, 1, 2, 3), img)\n assert_close(laf, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 1, 2, 2, 3\n\n laf = torch.rand(batch_size, channels, height, width, device=device)\n img = torch.rand(batch_size, 3, 10, 32, device=device)\n img = utils.tensor_to_gradcheck_var(img) # to var\n laf = utils.tensor_to_gradcheck_var(laf) # to var\n assert gradcheck(kornia.feature.denormalize_laf, (laf, img), raise_exception=True)\n\n 
@pytest.mark.jit\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 1, 2, 2, 3\n\n laf = torch.rand(batch_size, channels, height, width)\n img = torch.rand(batch_size, 3, 10, 32)\n model = kornia.feature.denormalize_laf\n model_jit = torch.jit.script(kornia.feature.denormalize_laf)\n assert_close(model(laf, img), model_jit(laf, img))\n\n\nclass TestGenPatchGrid:\n def test_shape(self, device):\n laf = torch.rand(5, 3, 2, 3, device=device)\n img = torch.rand(5, 3, 10, 10, device=device)\n PS = 3\n from kornia.feature.laf import generate_patch_grid_from_normalized_LAF\n\n grid = generate_patch_grid_from_normalized_LAF(img, laf, PS)\n assert grid.shape == (15, 3, 3, 2)\n\n def test_gradcheck(self, device):\n laf = torch.rand(5, 3, 2, 3, device=device)\n img = torch.rand(5, 3, 10, 10, device=device)\n PS = 3\n from kornia.feature.laf import generate_patch_grid_from_normalized_LAF\n\n img = utils.tensor_to_gradcheck_var(img) # to var\n laf = utils.tensor_to_gradcheck_var(laf) # to var\n assert gradcheck(generate_patch_grid_from_normalized_LAF, (img, laf, PS), raise_exception=True)\n\n\nclass TestExtractPatchesSimple:\n def test_shape(self, device):\n laf = torch.rand(5, 4, 2, 3, device=device)\n img = torch.rand(5, 3, 100, 30, device=device)\n PS = 10\n patches = kornia.feature.extract_patches_simple(img, laf, PS)\n assert patches.shape == (5, 4, 3, PS, PS)\n\n # TODO: check what to do to improve timing\n # @pytest.mark.skip(\"The test takes too long to finish.\")\n def test_gradcheck(self, device):\n nlaf = torch.tensor([[0.1, 0.001, 0.5], [0, 0.1, 0.5]], device=device).float()\n nlaf = nlaf.view(1, 1, 2, 3)\n img = torch.rand(1, 3, 20, 30, device=device)\n PS = 11\n img = utils.tensor_to_gradcheck_var(img) # to var\n nlaf = utils.tensor_to_gradcheck_var(nlaf) # to var\n assert gradcheck(kornia.feature.extract_patches_simple, (img, nlaf, PS, False), raise_exception=True)\n\n\nclass TestExtractPatchesPyr:\n def test_shape(self, device):\n laf = torch.rand(5, 4, 2, 3, device=device)\n img = torch.rand(5, 3, 100, 30, device=device)\n PS = 10\n patches = kornia.feature.extract_patches_from_pyramid(img, laf, PS)\n assert patches.shape == (5, 4, 3, PS, PS)\n\n # TODO: check what to do to improve timing\n # @pytest.mark.skip(\"The test takes too long to finish.\")\n def test_gradcheck(self, device):\n nlaf = torch.tensor([[0.1, 0.001, 0.5], [0, 0.1, 0.5]], device=device).float()\n nlaf = nlaf.view(1, 1, 2, 3)\n img = torch.rand(1, 3, 20, 30, device=device)\n PS = 11\n img = utils.tensor_to_gradcheck_var(img) # to var\n nlaf = utils.tensor_to_gradcheck_var(nlaf) # to var\n assert gradcheck(kornia.feature.extract_patches_from_pyramid, (img, nlaf, PS, False), raise_exception=True)\n\n\nclass TestLAFIsTouchingBoundary:\n def test_shape(self, device):\n inp = torch.rand(5, 3, 2, 3, device=device)\n img = torch.rand(5, 3, 10, 10, device=device)\n assert (5, 3) == kornia.feature.laf_is_inside_image(inp, img).shape\n\n def test_touch(self, device):\n w, h = 10, 5\n img = torch.rand(1, 3, h, w, device=device)\n laf = torch.tensor([[[[10, 0, 3], [0, 10, 3]], [[1, 0, 5], [0, 1, 2]]]], device=device).float()\n expected = torch.tensor([[False, True]], device=device)\n assert torch.all(kornia.feature.laf_is_inside_image(laf, img) == expected).item()\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n w, h = 10, 5\n img = torch.rand(1, 3, h, w, device=device)\n laf = torch.tensor([[[[10, 0, 3], [0, 10, 3]], [[1, 0, 5], [0, 1, 2]]]], device=device).float()\n model = 
kornia.feature.laf_is_inside_image\n model_jit = torch.jit.script(kornia.feature.laf_is_inside_image)\n assert_close(model(laf, img), model_jit(laf, img))\n\n\nclass TestGetCreateLAF:\n def test_shape(self, device):\n xy = torch.ones(1, 3, 2, device=device)\n ori = torch.ones(1, 3, 1, device=device)\n scale = torch.ones(1, 3, 1, 1, device=device)\n laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)\n assert laf.shape == (1, 3, 2, 3)\n\n def test_laf(self, device):\n xy = torch.ones(1, 1, 2, device=device)\n ori = torch.zeros(1, 1, 1, device=device)\n scale = 5 * torch.ones(1, 1, 1, 1, device=device)\n expected = torch.tensor([[[[5, 0, 1], [0, 5, 1]]]], device=device).float()\n laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)\n assert_close(laf, expected)\n\n def test_laf_def(self, device):\n xy = torch.ones(1, 1, 2, device=device)\n expected = torch.tensor([[[[1, 0, 1], [0, 1, 1]]]], device=device).float()\n laf = kornia.feature.laf_from_center_scale_ori(xy)\n assert_close(laf, expected)\n\n def test_cross_consistency(self, device):\n batch_size, channels = 3, 2\n xy = torch.rand(batch_size, channels, 2, device=device)\n ori = torch.rand(batch_size, channels, 1, device=device)\n scale = torch.abs(torch.rand(batch_size, channels, 1, 1, device=device))\n laf = kornia.feature.laf_from_center_scale_ori(xy, scale, ori)\n scale2 = kornia.feature.get_laf_scale(laf)\n assert_close(scale, scale2)\n xy2 = kornia.feature.get_laf_center(laf)\n assert_close(xy2, xy)\n ori2 = kornia.feature.get_laf_orientation(laf)\n assert_close(ori2, ori)\n\n def test_gradcheck(self, device):\n batch_size, channels = 3, 2\n xy = utils.tensor_to_gradcheck_var(torch.rand(batch_size, channels, 2, device=device))\n ori = utils.tensor_to_gradcheck_var(torch.rand(batch_size, channels, 1, device=device))\n scale = utils.tensor_to_gradcheck_var(torch.abs(torch.rand(batch_size, channels, 1, 1, device=device)))\n assert gradcheck(kornia.feature.laf_from_center_scale_ori, (xy, scale, ori), raise_exception=True)\n\n @pytest.mark.skip(\"Depends on angle-to-rotation-matric\")\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n batch_size, channels = 3, 2\n xy = torch.rand(batch_size, channels, 2, device=device)\n ori = torch.rand(batch_size, channels, 1, device=device)\n scale = torch.abs(torch.rand(batch_size, channels, 1, 1, device=device))\n model = kornia.feature.laf_from_center_scale_ori\n model_jit = torch.jit.script(kornia.feature.laf_from_center_scale_ori)\n assert_close(model(xy, scale, ori), model_jit(xy, scale, ori))\n\n\nclass TestGetLAF3pts:\n def test_shape(self, device):\n inp = torch.ones(1, 3, 2, 3, device=device)\n out = kornia.feature.laf_to_three_points(inp)\n assert out.shape == inp.shape\n\n def test_batch_shape(self, device):\n inp = torch.ones(5, 3, 2, 3, device=device)\n out = kornia.feature.laf_to_three_points(inp)\n assert out.shape == inp.shape\n\n def test_conversion(self, device):\n inp = torch.tensor([[1, 0, 2], [0, 1, 3]], device=device).float().view(1, 1, 2, 3)\n expected = torch.tensor([[3, 2, 2], [3, 4, 3]], device=device).float().view(1, 1, 2, 3)\n threepts = kornia.feature.laf_to_three_points(inp)\n assert_close(threepts, expected)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 3, 2, 2, 3\n inp = torch.rand(batch_size, channels, height, width, device=device)\n inp = utils.tensor_to_gradcheck_var(inp) # to var\n assert gradcheck(kornia.feature.laf_to_three_points, (inp,), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, 
device, dtype):\n batch_size, channels, height, width = 3, 2, 2, 3\n inp = torch.rand(batch_size, channels, height, width, device=device)\n model = kornia.feature.laf_to_three_points\n model_jit = torch.jit.script(kornia.feature.laf_to_three_points)\n assert_close(model(inp), model_jit(inp))\n\n\nclass TestGetLAFFrom3pts:\n def test_shape(self, device):\n inp = torch.ones(1, 3, 2, 3, device=device)\n out = kornia.feature.laf_from_three_points(inp)\n assert out.shape == inp.shape\n\n def test_batch_shape(self, device):\n inp = torch.ones(5, 3, 2, 3, device=device)\n out = kornia.feature.laf_from_three_points(inp)\n assert out.shape == inp.shape\n\n def test_conversion(self, device):\n expected = torch.tensor([[1, 0, 2], [0, 1, 3]], device=device).float().view(1, 1, 2, 3)\n inp = torch.tensor([[3, 2, 2], [3, 4, 3]], device=device).float().view(1, 1, 2, 3)\n threepts = kornia.feature.laf_from_three_points(inp)\n assert_close(threepts, expected)\n\n def test_cross_consistency(self, device):\n batch_size, channels, height, width = 3, 2, 2, 3\n inp = torch.rand(batch_size, channels, height, width, device=device)\n inp_2 = kornia.feature.laf_from_three_points(inp)\n inp_2 = kornia.feature.laf_to_three_points(inp_2)\n assert_close(inp_2, inp)\n\n def test_gradcheck(self, device):\n batch_size, channels, height, width = 3, 2, 2, 3\n inp = torch.rand(batch_size, channels, height, width, device=device)\n inp = utils.tensor_to_gradcheck_var(inp) # to var\n assert gradcheck(kornia.feature.laf_from_three_points, (inp,), raise_exception=True)\n\n @pytest.mark.jit\n def test_jit(self, device, dtype):\n batch_size, channels, height, width = 3, 2, 2, 3\n inp = torch.rand(batch_size, channels, height, width, device=device)\n model = kornia.feature.laf_from_three_points\n model_jit = torch.jit.script(kornia.feature.laf_from_three_points)\n assert_close(model(inp), model_jit(inp))\n\n\nclass TestTransformLAFs:\n @pytest.mark.parametrize(\"batch_size\", [1, 2, 5])\n @pytest.mark.parametrize(\"num_points\", [2, 3, 5])\n def test_transform_points(self, batch_size, num_points, device, dtype):\n # generate input data\n eye_size = 3\n lafs_src = torch.rand(batch_size, num_points, 2, 3, device=device, dtype=dtype)\n\n dst_homo_src = utils.create_random_homography(batch_size, eye_size).to(device=device, dtype=dtype)\n\n # transform the points from dst to ref\n lafs_dst = kornia.feature.perspective_transform_lafs(dst_homo_src, lafs_src)\n\n # transform the points from ref to dst\n src_homo_dst = torch.inverse(dst_homo_src)\n lafs_dst_to_src = kornia.feature.perspective_transform_lafs(src_homo_dst, lafs_dst)\n\n # projected should be equal as initial\n assert_close(lafs_src, lafs_dst_to_src)\n\n def test_gradcheck(self, device, dtype):\n # generate input data\n batch_size, num_points = 2, 3\n eye_size = 3\n points_src = torch.rand(batch_size, num_points, 2, 3, device=device, dtype=dtype)\n dst_homo_src = utils.create_random_homography(batch_size, eye_size).to(device=device, dtype=dtype)\n # evaluate function gradient\n points_src = utils.tensor_to_gradcheck_var(points_src) # to var\n dst_homo_src = utils.tensor_to_gradcheck_var(dst_homo_src) # to var\n assert gradcheck(kornia.feature.perspective_transform_lafs, (dst_homo_src, points_src), raise_exception=True)\n" ]
[ [ "torch.jit.script", "torch.ones", "torch.inverse", "torch.rand", "torch.zeros_like", "torch.tensor", "torch.zeros", "torch.autograd.gradcheck" ] ]
Jwuthri/GtfsTools
[ "d0db0c89588f936f02d4e6cccb70034ec1e4b9b1" ]
[ "mixer/gtfs/generater/shapes.py" ]
[ "\"\"\"Generate the file shapes.txt.\"\"\"\r\nimport networkx as nx\r\nimport multiprocessing as mp\r\nfrom hashlib import sha1\r\nfrom tqdm import tqdm\r\n\r\nimport pandas as pd\r\n\r\nfrom mixer.gtfs.reader.controller import Controller\r\n\r\n\r\nclass EstimateShapes(object):\r\n \"\"\"Gen or fill shapes.\"\"\"\r\n\r\n def __init__(self, gtfs_path):\r\n \"\"\"Constructor.\r\n\r\n :params gtfs_path: path of the gtfs zip\r\n \"\"\"\r\n self.gtfs_path = gtfs_path\r\n self.dict_gtfs = Controller(gtfs_path).main()\r\n self.routes = self.dict_gtfs[\"routes.txt\"]\r\n self.stoptimes = self.dict_gtfs[\"stop_times.txt\"]\r\n self.stoptimes[\"trip_id\"] = self.stoptimes[\"trip_id\"].str.lower()\r\n self.stops = self.dict_gtfs[\"stops.txt\"]\r\n\r\n def explore_digraph_from(self, G, source, path):\r\n \"\"\"Depth graph.\"\"\"\r\n if len(path) > 1:\r\n for np in path[:-1]:\r\n if source in G.successors(np):\r\n G.remove_edge(np, source)\r\n path.append(source)\r\n for ns in G.successors(source):\r\n if ns not in path:\r\n self.explore_digraph_from(G, ns, path[:])\r\n\r\n def simplify_digraph(self, G):\r\n \"\"\"Remove shortpath.\"\"\"\r\n starts = []\r\n for node in G.nodes():\r\n if not G.predecessors(node):\r\n starts.append(node)\r\n if not starts:\r\n for node in G.nodes():\r\n if 1 == len(G.predecessors(node)):\r\n starts.append(node)\r\n for s in starts:\r\n self.explore_digraph_from(G, s, [])\r\n\r\n def select_trip(self, route_id, direction):\r\n \"\"\"Select trips for a route/direction.\"\"\"\r\n query = \"route_id == '{}' & direction_id == {}\"\r\n cquery = query.format(route_id, direction)\r\n\r\n return self.trips.query(cquery)\r\n\r\n def select_stimes(self, trip):\r\n \"\"\"Select stimes for a trip.\"\"\"\r\n stoptimes = self.stoptimes[\r\n self.stoptimes[\"trip_id\"] == trip[\"trip_id\"]]\r\n\r\n return stoptimes.sort_values(by=\"stop_sequence\")\r\n\r\n def build_digraph(self, route_id, direction):\r\n \"\"\"Gen the Graph of a route/direction.\"\"\"\r\n G = nx.DiGraph()\r\n trips = self.select_trip(route_id, direction)\r\n for idx, trip in trips.iterrows():\r\n previous = None\r\n stoptimes = self.select_stimes(trip)\r\n for idx, stoptime in stoptimes.iterrows():\r\n current = stoptime[\"stop_id\"]\r\n if current not in G.nodes():\r\n G.add_node(current)\r\n if previous:\r\n if (previous, current) not in G.edges():\r\n G.add_edge(previous, current)\r\n previous = current\r\n\r\n return G\r\n\r\n def get_shortest_path(self, path, G, stop):\r\n \"\"\"Gen the shortest path btw 2 pts.\"\"\"\r\n try:\r\n if len(path) == 1:\r\n shortest_path = nx.shortest_path(G, path[0], stop)\r\n else:\r\n shortest_path = nx.shortest_path(G, path[-1][-1], stop)\r\n except:\r\n shortest_path = [path[-1][-1], stop]\r\n\r\n return shortest_path\r\n\r\n def gen_path(self, dshapes, seq, G, key):\r\n \"\"\"Gen the path for a trip.\"\"\"\r\n path = list()\r\n path.append([seq[0]])\r\n for stop in seq[1:]:\r\n shortest_path = self.get_shortest_path(path, G, stop)\r\n path.append(shortest_path)\r\n dshapes[key] = path\r\n\r\n return dshapes\r\n\r\n def gen_trip_shape(self, G, dshapes, dtrips, trip, stoptimes):\r\n \"\"\"Gen a shapes for a trip.\"\"\"\r\n stoptimes = stoptimes[stoptimes[\"trip_id\"] == trip]\r\n seq = list(stoptimes[\"stop_id\"])\r\n key = sha1(str(seq).encode(\"utf8\")).hexdigest()\r\n dtrips[trip] = key\r\n if key not in dshapes.keys():\r\n dshapes = self.gen_path(dshapes, seq, G, key)\r\n\r\n return dshapes, dtrips\r\n\r\n def gen_trips_shapes(self, G, lst_trips, stoptimes):\r\n \"\"\"Gen 
all shapes for all trips.\"\"\"\r\n dict_shapes = dict()\r\n dict_trips = dict()\r\n for trip in lst_trips:\r\n dict_shapes, dict_trips = self.gen_trip_shape(\r\n G, dict_shapes, dict_trips, trip, stoptimes)\r\n\r\n return dict_shapes, dict_trips\r\n\r\n def gen_shapes_df(self, key, val):\r\n \"\"\"Gen the df from value of dict.\"\"\"\r\n shapes = sum(val, [])\r\n shapes = pd.DataFrame({\"stop_id\": shapes})\r\n cols = [\"stop_lat\", \"stop_lon\"]\r\n shapes = pd.merge(shapes, self.stops, on=\"stop_id\")[cols]\r\n shapes = shapes.rename(columns={\r\n \"stop_lat\": \"shape_pt_lat\", \"stop_lon\": \"shape_pt_lon\"\r\n })\r\n shapes[\"shape_id\"] = key\r\n shapes[\"shape_pt_sequence\"] = shapes.index\r\n\r\n return shapes\r\n\r\n def prepare_shapes(self, dict_shapes, dict_trips):\r\n \"\"\"Transform dict to df.\"\"\"\r\n shapes_df = pd.DataFrame()\r\n for key, val in dict_shapes.items():\r\n shapes = self.gen_shapes_df(key, val)\r\n shapes_df = pd.concat([shapes_df, shapes])\r\n trips_df = pd.DataFrame.from_dict([dict_trips]).T\r\n trips_df[\"trip_id\"] = trips_df.index\r\n trips_df.columns = [\"shape_id\", \"trip_id\"]\r\n trips_df = trips_df.reset_index(drop=True)\r\n\r\n return shapes_df, trips_df\r\n\r\n def gen_route_dir(self, route_id, direction):\r\n \"\"\"Gen the shapes for a route/direction.\"\"\"\r\n sub_col = [\"route_id\", \"trip_id\", \"direction_id\"]\r\n stoptimes = pd.merge(self.stoptimes, self.trips[sub_col], on=\"trip_id\")\r\n G = self.build_digraph(route_id, direction)\r\n self.simplify_digraph(G)\r\n query = \"route_id == '{}' & direction_id == {}\"\r\n cquery = query.format(route_id, direction)\r\n trip_route_dir = stoptimes.query(cquery)\r\n lst_trips = list(trip_route_dir['trip_id'].unique())\r\n dict_shapes, dict_trips = self.gen_trips_shapes(\r\n G, lst_trips, stoptimes)\r\n\r\n return self.prepare_shapes(dict_shapes, dict_trips)\r\n\r\n def mp_r_d(self, args):\r\n \"\"\"Multiprocess the gen_route_dir.\"\"\"\r\n return self.gen_route_dir(*args)\r\n\r\n def gen_data(self):\r\n \"\"\"Gen the l_args for mp.\"\"\"\r\n lst_routes = list(self.trips[\"route_id\"].unique())\r\n lst_dir = [0, 1]\r\n\r\n return [\r\n (route, direction)\r\n for route in lst_routes\r\n for direction in lst_dir\r\n ]\r\n\r\n def remove_shape_id(self, df):\r\n \"\"\"Remove the shape_id in trips if doesn t exist.\"\"\"\r\n try:\r\n df = df.drop(\"shape_id\", 1)\r\n except:\r\n pass\r\n\r\n return df\r\n\r\n def export_shapes(self, res):\r\n \"\"\"Create the shapes.txt and add the new shapes id to trips.\"\"\"\r\n shapes_df = pd.DataFrame()\r\n trips_df = pd.DataFrame()\r\n for df in res:\r\n shapes_df = pd.concat([shapes_df, df[0]])\r\n trips_df = pd.concat([trips_df, df[1]])\r\n shapes_df[\"shape_dist_traveled\"] = None\r\n trips_df = trips_df[[\"trip_id\", \"shape_id\"]]\r\n trips = self.remove_shape_id(self.trips)\r\n trips = pd.merge(trips, trips_df, on=\"trip_id\")\r\n\r\n return shapes_df, trips\r\n\r\n def main(self, trips):\r\n \"\"\"Build all digraph.\"\"\"\r\n self.trips = trips\r\n l_args = self.gen_data()\r\n res = []\r\n pool = mp.Pool()\r\n ln = len(l_args)\r\n # res = pool.map(self.mp_r_d, l_args)\r\n for shape in tqdm(pool.imap_unordered(self.mp_r_d, l_args), total=ln):\r\n res.append(shape)\r\n pool.close()\r\n\r\n return self.export_shapes(res)\r\n" ]
[ [ "pandas.DataFrame", "pandas.merge", "pandas.concat", "pandas.DataFrame.from_dict" ] ]
huangwenwenlili/imgclsmob
[ "1505fd61acbed429773f5c7ce286c858fc2278b8" ]
[ "gluon/gluoncv2/models/densenet.py" ]
[ "\"\"\"\n DenseNet, implemented in Gluon.\n Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.\n\"\"\"\n\n__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201']\n\nimport os\nfrom mxnet import cpu\nfrom mxnet.gluon import nn, HybridBlock\nfrom .common import pre_conv1x1_block, pre_conv3x3_block\nfrom .preresnet import PreResInitBlock, PreResActivation\n\n\nclass DenseUnit(HybridBlock):\n \"\"\"\n DenseNet unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n bn_use_global_stats : bool\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n dropout_rate : bool\n Parameter of Dropout layer. Faction of the input units to drop.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n bn_use_global_stats,\n dropout_rate,\n **kwargs):\n super(DenseUnit, self).__init__(**kwargs)\n self.use_dropout = (dropout_rate != 0.0)\n bn_size = 4\n inc_channels = out_channels - in_channels\n mid_channels = inc_channels * bn_size\n\n with self.name_scope():\n self.conv1 = pre_conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels,\n bn_use_global_stats=bn_use_global_stats)\n self.conv2 = pre_conv3x3_block(\n in_channels=mid_channels,\n out_channels=inc_channels,\n bn_use_global_stats=bn_use_global_stats)\n if self.use_dropout:\n self.dropout = nn.Dropout(rate=dropout_rate)\n\n def hybrid_forward(self, F, x):\n identity = x\n x = self.conv1(x)\n x = self.conv2(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = F.concat(identity, x, dim=1)\n return x\n\n\nclass TransitionBlock(HybridBlock):\n \"\"\"\n DenseNet's auxiliary block, which can be treated as the initial part of the DenseNet unit, triggered only in the\n first unit of each stage.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n bn_use_global_stats : bool\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n bn_use_global_stats,\n **kwargs):\n super(TransitionBlock, self).__init__(**kwargs)\n with self.name_scope():\n self.conv = pre_conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels,\n bn_use_global_stats=bn_use_global_stats)\n self.pool = nn.AvgPool2D(\n pool_size=2,\n strides=2,\n padding=0)\n\n def hybrid_forward(self, F, x):\n x = self.conv(x)\n x = self.pool(x)\n return x\n\n\nclass DenseNet(HybridBlock):\n \"\"\"\n DenseNet model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n bn_use_global_stats : bool, default False\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n Useful for fine-tuning.\n dropout_rate : float, default 0.0\n Parameter of Dropout layer. 
Faction of the input units to drop.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n bn_use_global_stats=False,\n dropout_rate=0.0,\n in_channels=3,\n in_size=(224, 224),\n classes=1000,\n **kwargs):\n super(DenseNet, self).__init__(**kwargs)\n self.in_size = in_size\n self.classes = classes\n\n with self.name_scope():\n self.features = nn.HybridSequential(prefix='')\n self.features.add(PreResInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels,\n bn_use_global_stats=bn_use_global_stats))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = nn.HybridSequential(prefix=\"stage{}_\".format(i + 1))\n with stage.name_scope():\n if i != 0:\n stage.add(TransitionBlock(\n in_channels=in_channels,\n out_channels=(in_channels // 2),\n bn_use_global_stats=bn_use_global_stats))\n in_channels = in_channels // 2\n for j, out_channels in enumerate(channels_per_stage):\n stage.add(DenseUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n bn_use_global_stats=bn_use_global_stats,\n dropout_rate=dropout_rate))\n in_channels = out_channels\n self.features.add(stage)\n self.features.add(PreResActivation(\n in_channels=in_channels,\n bn_use_global_stats=bn_use_global_stats))\n self.features.add(nn.AvgPool2D(\n pool_size=7,\n strides=1))\n\n self.output = nn.HybridSequential(prefix='')\n self.output.add(nn.Flatten())\n self.output.add(nn.Dense(\n units=classes,\n in_units=in_channels))\n\n def hybrid_forward(self, F, x):\n x = self.features(x)\n x = self.output(x)\n return x\n\n\ndef get_densenet(num_layers,\n model_name=None,\n pretrained=False,\n ctx=cpu(),\n root=os.path.join('~', '.mxnet', 'models'),\n **kwargs):\n \"\"\"\n Create DenseNet model with specific parameters.\n\n Parameters:\n ----------\n num_layers : int\n Number of layers.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n\n if num_layers == 121:\n init_block_channels = 64\n growth_rate = 32\n layers = [6, 12, 24, 16]\n elif num_layers == 161:\n init_block_channels = 96\n growth_rate = 48\n layers = [6, 12, 36, 24]\n elif num_layers == 169:\n init_block_channels = 64\n growth_rate = 32\n layers = [6, 12, 32, 32]\n elif num_layers == 201:\n init_block_channels = 64\n growth_rate = 32\n layers = [6, 12, 48, 32]\n else:\n raise ValueError(\"Unsupported DenseNet version with number of layers {}\".format(num_layers))\n\n from functools import reduce\n channels = reduce(lambda xi, yi:\n xi + [reduce(lambda xj, yj:\n xj + [xj[-1] + yj],\n [growth_rate] * yi,\n [xi[-1][-1] // 2])[1:]],\n layers,\n [[init_block_channels * 2]])[1:]\n\n net = DenseNet(\n channels=channels,\n init_block_channels=init_block_channels,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n net.load_parameters(\n filename=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n 
ctx=ctx)\n\n return net\n\n\ndef densenet121(**kwargs):\n \"\"\"\n DenseNet-121 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet(num_layers=121, model_name=\"densenet121\", **kwargs)\n\n\ndef densenet161(**kwargs):\n \"\"\"\n DenseNet-161 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet(num_layers=161, model_name=\"densenet161\", **kwargs)\n\n\ndef densenet169(**kwargs):\n \"\"\"\n DenseNet-169 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet(num_layers=169, model_name=\"densenet169\", **kwargs)\n\n\ndef densenet201(**kwargs):\n \"\"\"\n DenseNet-201 model from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet(num_layers=201, model_name=\"densenet201\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import mxnet as mx\n\n pretrained = False\n\n models = [\n densenet121,\n densenet161,\n densenet169,\n densenet201,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n ctx = mx.cpu()\n if not pretrained:\n net.initialize(ctx=ctx)\n\n # net.hybridize()\n net_params = net.collect_params()\n weight_count = 0\n for param in net_params.values():\n if (param.shape is None) or (not param._differentiable):\n continue\n weight_count += np.prod(param.shape)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != densenet121 or weight_count == 7978856)\n assert (model != densenet161 or weight_count == 28681000)\n assert (model != densenet169 or weight_count == 14149480)\n assert (model != densenet201 or weight_count == 20013928)\n\n x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)\n y = net(x)\n assert (y.shape == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n" ]
[ [ "numpy.prod" ] ]
spencer-hong/QSARBO
[ "a9fa8cbf058abea715fe2c721564f662ed8b1135" ]
[ "tutorial/lstm.py" ]
[ "from keras.callbacks import ModelCheckpoint\nfrom keras import backend as K\nfrom keras import optimizers\nfrom keras.layers import Dense\nfrom keras.layers import Dense, Dropout\nfrom keras.models import Sequential\nfrom keras.wrappers.scikit_learn import KerasClassifier\nfrom pandas import ExcelFile\nfrom pandas import ExcelWriter\nfrom PIL import Image\nfrom scipy import ndimage\nfrom scipy.stats import randint as sp_randint\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn import pipeline\nfrom sklearn.metrics import roc_auc_score, roc_curve\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import PredefinedSplit\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.utils import resample\nfrom tensorflow.python.framework import ops\n\nfrom keras.preprocessing.text import one_hot\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers.embeddings import Embedding\nfrom keras.optimizers import adam\n\nimport sys\n\nimport h5py\nimport keras\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy\nimport tensorflow as tf\n\nimport numpy as np\nfrom keras import layers\nfrom keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D\nfrom keras.models import Model, load_model\nfrom keras.preprocessing import image\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.applications.imagenet_utils import preprocess_input\nfrom IPython.display import SVG\nfrom keras.utils.vis_utils import model_to_dot\nfrom keras.utils import plot_model\n\nfrom keras.initializers import glorot_uniform\nimport scipy.misc\nfrom matplotlib.pyplot import imshow\nimport keras.backend as K\n#from __future__ import print_function\nimport rdkit\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nimport pandas as pd\nimport numpy as np\n \nfrom matplotlib import pyplot as plt\n\nimport keras\nfrom sklearn.utils import shuffle\nfrom keras.models import Sequential, Model\nfrom keras.layers import Conv2D, MaxPooling2D, Input, GlobalMaxPooling2D\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.optimizers import Adam\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.callbacks import ReduceLROnPlateau\n\nprint('--------------------')\nprint(' Reading Data ')\nprint('--------------------')\ndata = pd.read_csv(str(sys.argv[1]), error_bad_lines=False)\nX_train_smiles = np.array(list(data[str(sys.argv[3])][data[\"SPLIT\"]==1]))\nX_test_smiles = np.array(list(data[str(sys.argv[3])][data[\"SPLIT\"]==0]))\n\ntrainsize = 
X_train_smiles.shape[0]\ntestsize = X_test_smiles.shape[0]\n\n\nprint('--------------------')\nprint(' Dataset Details ')\nprint(f\"Training size: {trainsize}\")\nprint(f\"Testing size: {testsize}\")\nprint('--------------------')\n\nassay = str(sys.argv[2]) \n\nY_train = data[assay][data[\"SPLIT\"]==1].values.reshape(-1,1)\nY_test = data[assay][data[\"SPLIT\"]==0].values.reshape(-1,1)\n\ncharset = set(\"\".join(list(data.SMILES))+\"!E\")\nchar_to_int = dict((c,i) for i,c in enumerate(charset))\nint_to_char = dict((i,c) for i,c in enumerate(charset))\nembed = max([len(smile) for smile in data.SMILES]) + 5\n\nprint('--------------------')\nprint(' Character to Integer List ')\nprint(char_to_int)\nprint('--------------------')\n\ndef vectorize(smiles):\n\tone_hot = np.zeros((smiles.shape[0], embed , len(charset)),dtype=np.int8)\n\tfor i,smile in enumerate(smiles):\n\t\t#encode the startchar\n\t\tone_hot[i,0,char_to_int[\"!\"]] = 1\n\t\t#encode the rest of the chars\n\t\tfor j,c in enumerate(smile):\n\t\t\tone_hot[i,j+1,char_to_int[c]] = 1\n\t\t#Encode endchar\n\t\tone_hot[i,len(smile)+1:,char_to_int[\"E\"]] = 1\n\t#Return two, one for input and the other for output\n\treturn one_hot[:,0:-1,:], one_hot[:,1:,:]\n\n\nX_train, _ = vectorize(X_train_smiles)\nX_test, _ = vectorize(X_test_smiles)\n\nvocab_size=len(charset)\n\nmodel = Sequential()\nmodel.add(Embedding(vocab_size, 50, input_length=embed-1))\nmodel.add(keras.layers.Conv1D(192,10,activation='relu'))\nmodel.add(BatchNormalization())\nmodel.add(keras.layers.Conv1D(192,5,activation='relu'))\nmodel.add(keras.layers.Conv1D(192,3,activation='relu'))\nmodel.add(Flatten())\nmodel.add(Dense(100, activation='relu'))\nmodel.add(Dropout(0.4))\nmodel.add(Dense(1, activation='linear'))\n\ndef coeff_determination(y_true, y_pred):\n from keras import backend as K\n SS_res = K.sum(K.square( y_true-y_pred ))\n SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )\n return ( 1 - SS_res/(SS_tot + K.epsilon()) )\n\ndef get_lr_metric(optimizer):\n def lr(y_true, y_pred):\n return optimizer.lr\n return lr\noptimizer = adam(lr=0.00025)\nlr_metric = get_lr_metric(optimizer)\nmodel.compile(loss=\"mse\", optimizer=optimizer, metrics=[coeff_determination, lr_metric])\n\nfrom keras.callbacks import ReduceLROnPlateau\ncallbacks_list = [\n ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=5, min_lr=1e-15, verbose=1, mode='auto',cooldown=0),\n ModelCheckpoint(filepath=\"weights.best.hdf5\", monitor='val_loss', save_best_only=True, verbose=1, mode='auto')\n \n]\n\n\n\nhistory =model.fit(x=np.argmax(X_train, axis=2), y=Y_train,\n batch_size=128,\n epochs=150,\n validation_data=(np.argmax(X_test, axis=2),Y_test),\n callbacks=callbacks_list)\n\n\nY_pred_train = model.predict(np.argmax(X_train, axis = 2))\nY_pred_test = model.predict(np.argmax(X_test, axis = 2))\n\ntrainlist = Y_pred_train.flatten()\ntestlist = Y_pred_test.flatten()\n\ntrainlistactivity = Y_train.flatten()\ntestlistactivity = Y_test.flatten()\n\nnp.append(trainlist, testlist)\n\nnp.append(X_train_smiles, X_test_smiles)\n\nnp.append(trainlistactivity, testlistactivity)\n\npredictlist = trainlist\n\nsmileslist = X_train_smiles\n\nactivitylist = trainlistactivity\n\nres= pd.DataFrame({'SMILES':smileslist, 'Actual':activitylist, 'Prediction':predictlist})\nres.to_csv('lstm_results.csv', sep=',')\n\nhist = history.history\n\nplt.figure(figsize=(10, 8))\n\nfor label in ['val_coeff_determination','coeff_determination']:\n plt.subplot(221)\n plt.plot(hist[label], label = label)\n plt.legend()\n 
plt.xlabel(\"Epochs\")\n plt.ylabel(\"coeff_determination\")\n \nfor label in ['val_loss','loss']:\n plt.subplot(222)\n plt.plot(hist[label], label = label)\n plt.legend()\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"loss\")\n\n\n\nplt.subplot(223)\nplt.plot( hist['lr'],hist['val_coeff_determination'] )\nplt.legend()\nplt.xlabel(\"lr\")\nplt.ylabel(\"val_coeff_determination\")\n\n\nplt.subplot(224)\nplt.plot( hist['lr'],hist['val_loss'] )\nplt.legend()\nplt.xlabel(\"lr\")\nplt.ylabel(\"val_loss\")\n\n \nplt.subplots_adjust(top=0.92, bottom=0.08, left=0.10, right=0.95, hspace=0.25,\n wspace=0.35)\n\nplt.savefig('results.png', bbox_inches = 'tight')" ]
[ [ "matplotlib.pyplot.legend", "numpy.append", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "pandas.DataFrame", "numpy.argmax", "matplotlib.pyplot.subplot", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
svandenhoek/NeuralCR
[ "52d2fecc38760c7e7a975c5631241d4ffe457249" ]
[ "train.py" ]
[ "import argparse\nimport ncrmodel \nimport numpy as np\nimport os\nfrom onto import Ontology\nimport json\nimport fastText\nimport pickle \nimport tensorflow as tf\nimport accuracy\nimport annotate_text\nimport eval\nimport tempfile\nimport shutil\nimport random\ntf.enable_eager_execution()\n\ndef save_ont_and_args(ont, args, param_dir):\n pickle.dump(ont, open(param_dir+'/ont.pickle',\"wb\" )) \n with open(param_dir+'/config.json', 'w') as fp:\n json.dump(vars(args),fp)\n\ndef sample_negatives_from_file(file_addr, count):\n max_text_size = 10*1000*1000\n with open(file_addr, errors='replace') as f:\n text = f.read()[:max_text_size]\n\n tokens = ncrmodel.tokenize(text)\n indecies = np.random.choice(len(tokens), count)\n lengths = np.random.randint(1, 10, count)\n negative_samples = [' '.join(tokens[indecies[i]:indecies[i]+lengths[i]])\n for i in range(count)]\n return negative_samples\n\ndef main():\n parser = argparse.ArgumentParser(description='Hello!')\n parser.add_argument('--obofile', help=\"address to the ontology .obo file\")\n parser.add_argument('--oboroot', help=\"the concept in the ontology to be used as root (only this concept and its descendants will be used)\")\n parser.add_argument('--fasttext', help=\"address to the fasttext word vector file\")\n parser.add_argument('--neg_file', help=\"address to the negative corpus\", default=\"\")\n parser.add_argument('--output', help=\"address to the directroy where the trained model will be stored\")\n parser.add_argument('--output_without_early_stopping', help=\"address to the directroy where the trained model will be stored, without considering early stopping\")\n parser.add_argument('--phrase_val', help=\"address to the file containing labeled phrases for validation\")\n parser.add_argument('--flat', action=\"store_true\", help=\"whether utilizing the concepts' hierarchy structure\")\n parser.add_argument('--no_l2norm', action=\"store_true\")\n parser.add_argument('--no_negs', action=\"store_true\")\n parser.add_argument('--verbose', action=\"store_true\")\n parser.add_argument('--cl1', type=int, help=\"cl1\", default=1024)\n parser.add_argument('--cl2', type=int, help=\"cl2\", default=1024)\n parser.add_argument('--lr', type=float, help=\"learning rate\", default=1/512)\n parser.add_argument('--batch_size', type=int, help=\"batch size\", default=256)\n parser.add_argument('--max_sequence_length', type=int, help=\"max sequence length\", default=50)\n parser.add_argument('--epochs', type=int, help=\"number of epochs\", default=80)\n parser.add_argument('--n_ensembles', type=int, help=\"number of ensembles\", default=10)\n parser.add_argument('--num_negs', type=int, help=\"number of negative samples to use\", default=10000)\n parser.add_argument('--validation_rate', type=int, help=\"number of epochs per validation\", default=5)\n parser.add_argument('--sentence_val_input_dir', help=\"address to the directroy where the validation text files are stored\")\n parser.add_argument('--sentence_val_label_dir', help=\"address to the directroy where the validation labels are stored\")\n parser.add_argument('--snomed2icd')\n parser.add_argument('--eval_mimic', action=\"store_true\")\n args = parser.parse_args()\n\n \n\n print('Loading the ontology...')\n ont = Ontology(args.obofile,args.oboroot)\n\n model = ncrmodel.NCR(args, ont, args.fasttext)\n if (not args.no_negs) and args.neg_file != \"\":\n negative_samples = sample_negatives_from_file(args.neg_file, args.num_negs)\n\n raw_data = []\n labels = []\n for c in ont.concepts:\n for name in 
ont.names[c]:\n raw_data.append(name)\n labels.append(ont.concept2id[c]) \n if negative_samples!=None:\n none_id = len(ont.concepts)\n raw_data+=negative_samples\n labels += [none_id]*len(negative_samples)\n training_data = {}\n training_data['seq'], training_data['seq_len'] = ncrmodel.phrase2vec(\n model.word_model, raw_data, args.max_sequence_length)\n training_data['label'] = np.array(labels).astype(np.int32)\n training_data_size = training_data['seq'].shape[0]\n\n optimizers = [tf.train.AdamOptimizer(learning_rate=args.lr)\n for i in range(args.n_ensembles)]\n\n if args.phrase_val != None: \n samples = accuracy.prepare_phrase_samples(model.ont, args.phrase_val, True)\n\n\n if args.snomed2icd != None:\n with open(args.snomed2icd, 'r') as fp:\n snomed2icd = json.load(fp)\n\n best_loss = -1.0\n param_dir = args.output\n best_result = -1.0\n if not os.path.exists(param_dir):\n os.makedirs(param_dir)\n #'''\n if args.sentence_val_input_dir != None:\n tmp_dirpath = tempfile.mkdtemp()\n\n report_len = 20\n for epoch in range(args.epochs):\n epoch_loss = 0\n epoch_ct = 0\n print(\"Epoch :: \"+str(epoch))\n for ens_i, ncr_core in enumerate(model.ncr_cores):\n ct = 0\n report_loss = 0\n shuffled_indecies = list(range(training_data_size))\n random.shuffle(shuffled_indecies)\n for head in range(0, training_data_size, args.batch_size):\n batch_indecies = shuffled_indecies[\n head:head+args.batch_size]\n batch = {}\n for cat in training_data:\n batch[cat] = training_data[cat][batch_indecies]\n\n with tf.GradientTape() as tape:\n logits = ncr_core(batch['seq'])\n loss = tf.reduce_sum(\n tf.losses.sparse_softmax_cross_entropy(batch['label'], logits))\n grads = tape.gradient(loss, ncr_core.trainable_weights)\n optimizers[ens_i].apply_gradients(zip(grads, ncr_core.trainable_weights))\n\n report_loss += loss.numpy()\n epoch_loss += loss.numpy()\n if args.verbose and ct % report_len == report_len-1:\n print(\"Step = \"+str(ct+1)+\"\\tLoss =\"+str(report_loss/report_len))\n report_loss = 0\n ct += 1\n epoch_ct += 1\n print(\"epoch loss:\", epoch_loss/epoch_ct)\n if args.sentence_val_input_dir != None and (epoch==args.epochs-1 or (epoch%args.validation_rate==0 and epoch>min(args.epochs//2, 30))): \n sent_input_stream = annotate_text.DirInputStream(args.sentence_val_input_dir)\n sent_output_stream = annotate_text.DirOutputStream(tmp_dirpath)\n annotate_text.annotate_stream(model, 0.8, sent_input_stream, sent_output_stream)\n file_list = os.listdir(tmp_dirpath)\n if args.eval_mimic:\n results = eval.eval_mimic(args.sentence_val_label_dir, tmp_dirpath, file_list, ont, snomed2icd, column=2)#, args.comp_dir)\n else:\n results = eval.eval(args.sentence_val_label_dir, tmp_dirpath, file_list, ont, column=2)#, args.comp_dir)\n if results['micro']['fmeasure']>best_result:\n best_result = results['micro']['fmeasure']\n model.save_weights(param_dir)\n print(results['micro']['fmeasure'])\n\n if args.phrase_val != None and (epoch%5==0 or epoch==args.epochs-1) and epoch>=0: \n res = model.get_match(list(samples.keys()), 1)\n missed = [x for i,x in enumerate(samples) if samples[x] not in [r[0] for r in res[i]]]\n print(\"R@1: \"+ str((len(samples)-len(missed))/len(samples)))\n\n res = model.get_match(list(samples.keys()), 5)\n missed = [x for i,x in enumerate(samples) if samples[x] not in [r[0] for r in res[i]]]\n print(\"R@5: \"+ str((len(samples)-len(missed))/len(samples)))\n if epoch%5==0 and epoch>0: \n for x in model.get_match('blood examination', 5):\n print(x[0], (ont.names[x[0]] if x[0]!='None' else x[0]), 
x[1])\n\n \n save_ont_and_args(ont, args, param_dir)\n if args.sentence_val_input_dir == None:\n model.save_weights(param_dir)\n else:\n shutil.rmtree(tmp_dirpath)\n os.makedirs(args.output_without_early_stopping)\n if args.output_without_early_stopping!=None:\n model.save_weights(args.output_without_early_stopping)\n save_ont_and_args(ont, args, args.output_without_early_stopping)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "tensorflow.enable_eager_execution", "tensorflow.train.AdamOptimizer", "tensorflow.GradientTape", "tensorflow.losses.sparse_softmax_cross_entropy", "numpy.array", "numpy.random.randint" ] ]
tmsincomb/pyontutils
[ "dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e" ]
[ "ilxutils/ilxutils/ontopandas.py" ]
[ "\"\"\" Converts owl or ttl or raw rdflib graph into a pandas DataFrame. Saved in .pickle format.\n\n Usage:\n Graph2Pandas.py [-h | --help]\n Graph2Pandas.py [-v | --version]\n Graph2Pandas.py [-f=<path>] [-a | -t=<str>] [-o=<path>]\n\n Options:\n -h --help Display this help message\n -v --version Current version of file\n -o --output=<path> Output path of picklized pandas DataFrame\n -f --file=<path> owl | ttl | rdflib.Graph() -> df.to_pickle\n -t --type=<str> type of class you want in the ttl file\n -a --all If seleted you get all types of classes \"\"\"\n\nfrom collections import defaultdict\nfrom docopt import docopt\nimport pandas as pd\nfrom pathlib import Path as p\nimport pickle\nimport rdflib\nfrom rdflib import BNode\nfrom sys import exit\nfrom typing import Union, List, Dict\nfrom ilxutils.tools import create_pickle, open_pickle\nVERSION = '0.5' # fixed NaN issue in empty cells\n\n\nclass OntoPandas:\n\n ''' Goal is to speed up and clean up ontologies into a pd.DataFrame and more object. Can be\n transformed back into whatever original or different ontological format. Sparql is used to\n get a more specific batch from the original ontology in one go. You could get all or some,\n thats the beauty in it. '''\n\n defaultquery = \"\"\"\n select ?subj ?pred ?obj\n where {\n ?subj rdf:type owl:Class ;\n ?pred ?obj .\n } \"\"\"\n\n def __init__(self,\n obj: Union[rdflib.graph.Graph, str],\n query:str=defaultquery,\n curie:bool=True,\n qnamed:bool=False,\n str_vals:bool=False,) -> None:\n self.query = query\n self.qnamed = qnamed\n self.curie = curie\n self.str_vals = str_vals\n self.g = obj # could be path\n self.path = obj # could be graph\n self.df = self.Graph2Pandas_converter()\n\n def create_pickle(self, output: str) -> None:\n with open(output, 'wb') as outfile:\n pickle.dump(data, outfile)\n outfile.close()\n\n def save(self, foldername: str, path_to_folder: str=None) -> None:\n ''' Saves entities into multiple files within the same folder because of pickle-recursive\n errors that would happen if squeezed into one '''\n self.create_pickle((self.g.namespaces, ))\n self.df.to_pickle(output)\n\n def qname(self, uri: str) -> str:\n ''' Returns qname of uri in rdflib graph while also saving it '''\n try:\n prefix, namespace, name = self.g.compute_qname(uri)\n qname = prefix + ':' + name\n return qname\n except:\n try:\n print('prefix:', prefix)\n print('namespace:', namespace)\n print('name:', name)\n except:\n print('Could not print from compute_qname')\n exit('No qname for ' + uri)\n\n def Graph2Pandas_converter(self):\n '''Updates self.g or self.path bc you could only choose 1'''\n\n if isinstance(self.path, str) or isinstance(self.path, p):\n self.path = str(self.path)\n filetype = p(self.path).suffix\n if filetype == '.pickle':\n self.g = pickle.load(open(self.path, 'rb'))\n if isinstance(self.g, rdflib.graph.Graph):\n return self.get_sparql_dataframe()\n else:\n print('WARNING:: function df() wont work unless an ontology source is loaded')\n return self.g\n elif filetype == '.ttl' or filetype == '.rdf':\n self.g = rdflib.Graph()\n self.g.parse(self.path, format='turtle')\n return self.get_sparql_dataframe()\n elif filetype == '.nt':\n self.g = rdflib.Graph()\n self.g.parse(self.path, format='nt')\n return self.get_sparql_dataframe()\n elif filetype == '.owl' or filetype == '.xrdf':\n self.g = rdflib.Graph()\n try:\n self.g.parse(self.path, format='xml')\n except:\n # some owl formats are more rdf than owl\n self.g.parse(self.path, format='turtle')\n return 
self.get_sparql_dataframe()\n else:\n exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')\n try:\n return self.get_sparql_dataframe()\n self.path = None\n except:\n exit('Format options: owl, ttl, df_pickle, rdflib.Graph()')\n\n elif isinstance(self.g, rdflib.graph.Graph):\n self.path = None\n return self.get_sparql_dataframe()\n\n else:\n exit('Obj given is not str, pathlib obj, or an rdflib.Graph()')\n\n def get_sparql_dataframe_deprecated_memory_problem(self):\n ''' Iterates through the sparql table and condenses it into a Pandas DataFrame\n !!! OLD !!! Eats up double the memory and produces lists for everything that has a value '''\n\n self.result = self.g.query(self.query)\n cols = set() # set(['qname'])\n indx = set()\n data = {}\n\n for i, binding in enumerate(self.result.bindings):\n\n subj_binding = binding[rdflib.term.Variable('subj')]\n pred_binding = binding[rdflib.term.Variable('pred')]\n obj_binding = binding[rdflib.term.Variable('obj')]\n\n subj = subj_binding\n pred = self.qname(pred_binding)\n obj = obj_binding\n\n # stops at BNodes; could be exanded here\n if isinstance(subj, BNode):\n continue\n elif isinstance(pred, BNode):\n continue\n elif isinstance(obj, BNode) and obj:\n continue\n else:\n subj = str(subj)\n pred = str(pred)\n obj = str(obj)\n\n # Prepare defaultdict home if it doesn't exist\n if not data.get(subj):\n data[subj] = defaultdict(list)\n # I really dont think i need this...\n # data[subj]['qname'] = self.qname(subj_binding)\n\n data[subj][pred].append(obj)\n cols.add(pred)\n indx.add(subj)\n\n # Building DataFrame\n df = pd.DataFrame(columns=cols, index=indx)\n for key, value in data.items():\n df.loc[str(key)] = pd.Series(value)\n\n del data # to take care of the memory problem if you are stacking this function\n\n df = df.where((pd.notnull(df)), None) # default Null is fricken Float NaN\n\n return df\n\n def get_sparql_dataframe( self ):\n ''' Iterates through the sparql table and condenses it into a Pandas DataFrame '''\n self.result = self.g.query(self.query)\n cols = set() # set(['qname'])\n indx = set()\n data = {}\n curr_subj = None # place marker for first subj to be processed\n bindings = []\n\n for i, binding in enumerate(self.result.bindings):\n subj_binding = binding[rdflib.term.Variable('subj')]\n pred_binding = binding[rdflib.term.Variable('pred')]\n obj_binding = binding[rdflib.term.Variable('obj')]\n\n subj = subj_binding\n pred = pred_binding\n obj = obj_binding\n\n # stops at BNodes; could be exanded here\n if isinstance(subj, BNode):\n continue\n elif isinstance(pred, BNode):\n continue\n elif isinstance(obj, BNode) and obj:\n continue\n\n if self.qnamed:\n pred = self.qname(pred_binding)\n\n if self.str_vals:\n subj = str(subj)\n pred = str(pred)\n obj = str(obj)\n\n cols.add(pred)\n indx.add(subj)\n bindings.append(binding)\n\n bindings = sorted(bindings, key=lambda k: k[rdflib.term.Variable('subj')])\n df = pd.DataFrame(columns=cols, index=indx)\n\n for i, binding in enumerate(bindings):\n\n subj_binding = binding[rdflib.term.Variable('subj')]\n pred_binding = binding[rdflib.term.Variable('pred')]\n obj_binding = binding[rdflib.term.Variable('obj')]\n\n subj = subj_binding\n pred = pred_binding\n obj = obj_binding\n\n # stops at BNodes; could be exanded here\n if isinstance(subj, BNode):\n continue\n elif isinstance(pred, BNode):\n continue\n elif isinstance(obj, BNode) and obj:\n continue\n\n if self.qnamed:\n pred = self.qname(pred_binding)\n\n if self.str_vals:\n subj = str(subj)\n pred = str(pred)\n obj = str(obj)\n\n 
if curr_subj == None:\n curr_subj = subj\n if not data.get(subj): # Prepare defaultdict home if it doesn't exist\n data[subj] = defaultdict(list)\n data[subj][pred].append(obj)\n elif curr_subj != subj:\n curr_subj = subj\n for data_subj, data_pred_objs in data.items():\n for data_pred, data_objs in data_pred_objs.items():\n if len(data_objs) == 1: # clean lists of just 1 value\n data_pred_objs[data_pred] = data_objs[0]\n df.loc[data_subj] = pd.Series(data_pred_objs)\n data = {}\n if not data.get(subj): # Prepare defaultdict home if it doesn't exist\n data[subj] = defaultdict(list)\n data[subj][pred].append(obj)\n else:\n if not data.get(subj): # Prepare defaultdict home if it doesn't exist\n data[subj] = defaultdict(list)\n data[subj][pred].append(obj)\n\n for data_subj, data_pred_objs in data.items():\n for data_pred, data_objs in data_pred_objs.items():\n if len(data_objs) == 1: # clean lists of just 1 value\n data_pred_objs[data_pred] = data_objs[0]\n df.loc[data_subj] = pd.Series(data_pred_objs)\n\n df = df.where((pd.notnull(df)), None) # default Null is fricken Float NaN\n df = df.reset_index().rename(columns={'index':'iri'})\n\n if self.curie:\n df['curie'] = df.apply(lambda row: self.qname(row.iri), axis = 1)\n\n return df\n\n\ndef command_line():\n ''' If you want to use the command line '''\n from docopt import docopt\n doc = docopt( __doc__, version=VERSION )\n args = pd.Series({k.replace('--', ''): v for k, v in doc.items()})\n if args.all:\n graph = Graph2Pandas(args.file, _type='all')\n elif args.type:\n graph = Graph2Pandas(args.file, _type=args.type)\n else:\n graph = Graph2Pandas(args.file)\n graph.save(args.output)\n\n\nif __name__ == '__main__':\n command_line()\n" ]
[ [ "pandas.Series", "pandas.notnull", "pandas.DataFrame" ] ]
gregvw/pyQAOA
[ "59b5abda36d90b45913878e7ffb588a1c146bc38" ]
[ "qaoa/operators/sum_sigma_x_propagator.py" ]
[ "import numpy as np\nfrom qaoa.operators import Kronecker, SumSigmaXOperator, Propagator\n\nclass SumSigmaXPropagator(Propagator):\n \n def __init__(self,D,theta=0):\n assert( isinstance(D,SumSigmaXOperator) )\n self.kronecker = Kronecker(np.eye(2),D.num_qubits(),dtype=complex)\n super().__init__(D,theta)\n\n def __str__(self):\n return \"SumSigmaXPropagator\"\n\n def set_control(self,theta):\n self.theta = theta\n c = np.cos(self.theta)\n s = 1j*np.sin(self.theta)\n self.kronecker.K = np.array(((c,s),(s,c)),dtype=complex)\n\n def apply(self,v,u):\n self.kronecker.apply(v,u)\n\n def apply_adjoint(self,v,u):\n self.kronecker.apply_adjoint(v,u)\n\n def as_matrix(self):\n return self.kronecker.as_matrix()\n" ]
[ [ "numpy.array", "numpy.eye", "numpy.cos", "numpy.sin" ] ]
tkoyama010/pyvista-doc-translations
[ "23bb813387b7f8bfe17e86c2244d5dd2243990db" ]
[ "locale/pot/api/core/_autosummary/pyvista-PolyData-lines-1.py" ]
[ "# Return the lines from a spline.\n#\nimport pyvista\nimport numpy as np\npoints = np.random.random((3, 3))\nspline = pyvista.Spline(points, 10)\nspline.lines\n# Expected:\n## array([10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n" ]
[ [ "numpy.random.random" ] ]
snu-mllab/PuzzleMix
[ "18986c3cd745a18b281e45082704cfd497ad6a9b" ]
[ "imagenet_fast/main_test.py" ]
[ "# This module is adapted from https://github.com/mahyarnajibi/FreeAdversarialTraining/blob/master/main_free.py\n# Which in turn was adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py\nimport init_paths\nimport argparse\nimport os\nimport time\nimport sys\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom torch.autograd import Variable\nimport math\nimport numpy as np\nfrom utils import *\nfrom validation import validate\n#import torchvision.models as models\nimport models\nfrom models.imagenet_resnet import BasicBlock, Bottleneck\n#from torchvision.models.resnet import BasicBlock, Bottleneck\n\n\nfrom apex import amp\nimport copy\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\n parser.add_argument('data', metavar='DIR',\n help='path to dataset')\n parser.add_argument('--output_prefix', default='fast_adv', type=str,\n help='prefix used to define output path')\n parser.add_argument('-c', '--config', default='configs.yml', type=str, metavar='Path',\n help='path to the config file (default: configs.yml)')\n parser.add_argument('--resume', default='', type=str, metavar='PATH',\n help='path to latest checkpoint (default: none)')\n parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',\n help='evaluate model on validation set')\n parser.add_argument('--pretrained', dest='pretrained', action='store_true',\n help='use pre-trained model')\n parser.add_argument('--restarts', default=1, type=int)\n return parser.parse_args()\n\n\n# Parase config file and initiate logging\nconfigs = parse_config_file(parse_args())\nlogger = initiate_logger(configs.output_name, configs.evaluate)\nprint = logger.info\ncudnn.benchmark = True\ncriterion = nn.CrossEntropyLoss().cuda()\ncriterion_batch = nn.CrossEntropyLoss(reduction='none').cuda()\n\nCORRUPTIONS = [\n 'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',\n 'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',\n 'brightness', 'contrast', 'elastic_transform', 'pixelate',\n 'jpeg_compression'\n]\n\n# Raw AlexNet errors taken from https://github.com/hendrycks/robustness\nALEXNET_ERR = [\n 0.886428, 0.894468, 0.922640, 0.819880, 0.826268, 0.785948, 0.798360,\n 0.866816, 0.826572, 0.819324, 0.564592, 0.853204, 0.646056, 0.717840,\n 0.606500\n]\n\ndef compute_mce(corruption_accs):\n \"\"\"Compute mCE (mean Corruption Error) normalized by AlexNet performance.\"\"\"\n mce = 0.\n for i in range(len(CORRUPTIONS)):\n avg_err = 1 - np.mean(corruption_accs[CORRUPTIONS[i]])\n ce = 100 * avg_err / ALEXNET_ERR[i]\n mce += ce / 15\n return mce\n\n\ndef main():\n # Scale and initialize the parameters\n best_prec1 = 0\n\n # Create output folder\n if not os.path.isdir(os.path.join('trained_models', configs.output_name)):\n os.makedirs(os.path.join('trained_models', configs.output_name))\n \n # Log the config details\n logger.info(pad_str(' ARGUMENTS '))\n for k, v in configs.items(): print('{}: {}'.format(k, v))\n logger.info(pad_str(''))\n\n # Create the model\n if configs.pretrained:\n print(\"=> using pre-trained model '{}'\".format(configs.TRAIN.arch))\n model = models.__dict__[configs.TRAIN.arch](pretrained=True)\n else:\n print(\"=> creating model '{}'\".format(configs.TRAIN.arch))\n model = models.__dict__[configs.TRAIN.arch]()\n\n def init_dist_weights(model):\n for m in 
model.modules():\n if isinstance(m, BasicBlock): \n m.bn2.weight = nn.Parameter(torch.zeros_like(m.bn2.weight))\n if isinstance(m, Bottleneck): \n m.bn3.weight = nn.Parameter(torch.zeros_like(m.bn3.weight))\n if isinstance(m, nn.Linear): \n m.weight.data.normal_(0, 0.01)\n\n init_dist_weights(model)\n\n # Wrap the model into DataParallel\n model.cuda()\n\n # reverse mapping\n param_to_moduleName = {}\n for m in model.modules():\n for p in m.parameters(recurse=False):\n param_to_moduleName[p] = str(type(m).__name__)\n\n group_decay = [p for p in model.parameters() if 'BatchNorm' not in param_to_moduleName[p]]\n group_no_decay = [p for p in model.parameters() if 'BatchNorm' in param_to_moduleName[p]]\n groups = [dict(params=group_decay), dict(params=group_no_decay, weight_decay=0)]\n optimizer = torch.optim.SGD(groups, 0,\n momentum=configs.TRAIN.momentum,\n weight_decay=configs.TRAIN.weight_decay)\n\n if configs.TRAIN.half and not configs.evaluate:\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O1\", loss_scale=1024)\n model = torch.nn.DataParallel(model)\n\n # Resume if a valid checkpoint path is provided\n if configs.resume:\n if os.path.isfile(configs.resume):\n print(\"=> loading checkpoint '{}'\".format(configs.resume))\n checkpoint = torch.load(configs.resume)\n configs.TRAIN.start_epoch = checkpoint['epoch']\n best_prec1 = checkpoint['best_prec1']\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(configs.resume, checkpoint['epoch']))\n else:\n print(\"=> no checkpoint found at '{}'\".format(configs.resume))\n \n test_transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n ])\n \n corruption_accs = test_c(model, test_transform) \n \n for c in CORRUPTIONS:\n print('\\t'.join(map(str, [c] + corruption_accs[c])))\n\n print('mCE (normalized by AlexNet):', compute_mce(corruption_accs))\n\ndef train(train_loader, model, optimizer, epoch, lr_schedule, half=False): \n mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])\n mean = mean.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()\n std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])\n std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()\n\n # Initialize the meters\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n # switch to train mode\n model.train()\n end = time.time()\n for i, (input, target) in enumerate(train_loader):\n if configs.TRAIN.methods != 'augmix':\n input = input.cuda(non_blocking=True)\n else:\n input = torch.cat(input, 0).cuda(non_blocking=True)\n\n target = target.cuda(non_blocking=True)\n data_time.update(time.time() - end)\n \n # update learning rate\n lr = lr_schedule(epoch + (i + 1)/len(train_loader))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n optimizer.zero_grad()\n\n input.sub_(mean).div_(std)\n lam = np.random.beta(configs.TRAIN.alpha, configs.TRAIN.alpha)\n if configs.TRAIN.methods == 'manifold' or configs.TRAIN.methods == 'graphcut':\n permuted_idx1 = np.random.permutation(input.size(0)//4)\n permuted_idx2 = permuted_idx1 + input.size(0)//4\n permuted_idx3 = permuted_idx2 + input.size(0)//4\n permuted_idx4 = permuted_idx3 + input.size(0)//4\n permuted_idx = np.concatenate([permuted_idx1, permuted_idx2, permuted_idx3, 
permuted_idx4], axis=0)\n else:\n permuted_idx = torch.tensor(np.random.permutation(input.size(0)))\n\n if configs.TRAIN.methods == 'input':\n input = lam*input + (1-lam)*input[permuted_idx]\n\n elif configs.TRAIN.methods == 'cutmix':\n input, lam = mixup_box(input, lam=lam, permuted_idx=permuted_idx)\n\n elif configs.TRAIN.methods == 'augmix':\n logit = model(input)\n logit_clean, logit_aug1, logit_aug2 = torch.split(logit, logit.size(0)//3)\n output = logit_clean\n\n\n p_clean = F.softmax(logit_clean, dim=1)\n p_aug1 = F.softmax(logit_aug1, dim=1)\n p_aug2 = F.softmax(logit_aug2, dim=1)\n\n p_mixture = torch.clamp((p_clean + p_aug1 + p_aug2) / 3., 1e-7, 1).log()\n loss_JSD = 4 * (F.kl_div(p_mixture, p_clean, reduction='batchmean') +\n F.kl_div(p_mixture, p_aug1, reduction='batchmean') +\n F.kl_div(p_mixture, p_aug2, reduction='batchmean'))\n\n elif configs.TRAIN.methods == 'graphcut':\n input_var = Variable(input, requires_grad=True)\n\n output = model(input_var)\n loss_clean = criterion(output, target)\n\n if half: \n with amp.scale_loss(loss_clean, optimizer) as scaled_loss: \n scaled_loss.backward()\n else: \n loss_clean.backward()\n unary = torch.sqrt(torch.mean(input_var.grad**2, dim=1))\n \n block_num = 2**(np.random.randint(1, 5))\n mask = get_mask(input, unary, block_num, permuted_idx, alpha=lam, mean=mean, std=std)\n output, lam = model(input, graphcut=True, permuted_idx=permuted_idx1, block_num=block_num, mask=mask, unary=unary)\n\n if configs.TRAIN.methods == 'manifold':\n output = model(input, manifold=True, lam=lam, permuted_idx=permuted_idx1)\n elif configs.TRAIN.methods != 'augmix' and configs.TRAIN.methods != 'graphcut':\n output = model(input)\n\n if configs.TRAIN.methods == 'nat':\n loss = criterion(output, target)\n elif configs.TRAIN.methods == 'augmix':\n loss = criterion(output, target) + loss_JSD\n else:\n loss = lam*criterion_batch(output, target) + (1-lam)*criterion_batch(output, target[permuted_idx])\n loss = torch.mean(loss)\n\n # compute gradient and do SGD step\n #optimizer.zero_grad()\n if half: \n with amp.scale_loss(loss, optimizer) as scaled_loss: \n scaled_loss.backward()\n else: \n loss.backward()\n\n optimizer.step()\n\n prec1, prec5 = accuracy(output, target, topk=(1, 5))\n losses.update(loss.item(), input.size(0))\n top1.update(prec1[0], input.size(0))\n top5.update(prec5[0], input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % configs.TRAIN.print_freq == 0:\n print('Train Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {cls_loss.val:.4f} ({cls_loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\\t'\n 'Prec@5 {top5.val:.3f} ({top5.avg:.3f})\\t'\n 'LR {lr:.3f}'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, top1=top1,\n top5=top5,cls_loss=losses, lr=lr))\n sys.stdout.flush()\n\ndef test(net, test_loader):\n \"\"\"Evaluate network on given dataset.\"\"\"\n net.eval()\n total_loss = 0.\n total_correct = 0\n \n mean = torch.Tensor(np.array(configs.TRAIN.mean)[:, np.newaxis, np.newaxis])\n mean = mean.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()\n std = torch.Tensor(np.array(configs.TRAIN.std)[:, np.newaxis, np.newaxis])\n std = std.expand(3, configs.DATA.crop_size, configs.DATA.crop_size).cuda()\n \n with torch.no_grad():\n for images, targets in test_loader:\n images, targets = images.cuda(), targets.cuda()\n 
images.sub_(mean).div_(std)\n logits = net(images)\n loss = F.cross_entropy(logits, targets)\n pred = logits.data.max(1)[1]\n total_loss += float(loss.data)\n total_correct += pred.eq(targets.data).sum().item()\n\n return total_loss / len(test_loader.dataset), total_correct / len(test_loader.dataset)\n\n\ndef test_c(net, test_transform):\n \"\"\"Evaluate network on given corrupted dataset.\"\"\"\n corruption_accs = {}\n for c in CORRUPTIONS:\n print(c)\n for s in range(1, 6):\n valdir = os.path.join('/home/wonhochoo/data/imagenet/imagenet-c/', c, str(s))\n val_loader = torch.utils.data.DataLoader(\n datasets.ImageFolder(valdir, test_transform),\n batch_size=configs.DATA.batch_size, shuffle=False,\n num_workers=configs.DATA.workers, pin_memory=True, drop_last=False)\n \n loss, acc1 = test(net, val_loader)\n if c in corruption_accs:\n corruption_accs[c].append(acc1)\n else:\n corruption_accs[c] = [acc1]\n\n print('\\ts={}: Test Loss {:.3f} | Test Acc1 {:.3f}'.format(\n s, loss, 100. * acc1))\n\n return corruption_accs\n\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.optim.SGD", "torch.load", "torch.mean", "torch.clamp", "torch.nn.functional.softmax", "numpy.concatenate", "torch.no_grad", "torch.zeros_like", "torch.autograd.Variable", "torch.nn.CrossEntropyLoss", "numpy.random.beta", "torch.nn.functional.kl_div", "torch.nn.functional.cross_entropy", "numpy.array", "torch.nn.DataParallel", "numpy.random.randint", "torch.cat", "numpy.mean" ] ]
anirudh-chakravarthy/PropTR
[ "29448a0c73da6c9918d161228d92409d3d1315db" ]
[ "datasets/transforms.py" ]
[ "\"\"\"\nTransforms and data augmentation for sequence level images, bboxes and masks.\n\nMostly copy-paste from https://github.com/Epiphqny/VisTR/blob/master/datasets/transforms.py\n\"\"\"\nimport random\n\nimport PIL\nimport torch\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as F\n\nfrom util.box_ops import box_xyxy_to_cxcywh, box_iou\nfrom util.misc import interpolate\nimport numpy as np\nfrom numpy import random as rand\nfrom PIL import Image\nimport cv2\nimport pdb\n\ndef bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):\n assert mode in ['iou', 'iof']\n bboxes1 = bboxes1.astype(np.float32)\n bboxes2 = bboxes2.astype(np.float32)\n rows = bboxes1.shape[0]\n cols = bboxes2.shape[0]\n ious = np.zeros((rows, cols), dtype=np.float32)\n if rows * cols == 0:\n return ious\n exchange = False\n if bboxes1.shape[0] > bboxes2.shape[0]:\n bboxes1, bboxes2 = bboxes2, bboxes1\n ious = np.zeros((cols, rows), dtype=np.float32)\n exchange = True\n area1 = (bboxes1[:, 2] - bboxes1[:, 0]) * (bboxes1[:, 3] - bboxes1[:, 1])\n area2 = (bboxes2[:, 2] - bboxes2[:, 0]) * (bboxes2[:, 3] - bboxes2[:, 1])\n for i in range(bboxes1.shape[0]):\n x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\n y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\n x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\n y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\n overlap = np.maximum(x_end - x_start, 0) * np.maximum(y_end - y_start, 0)\n if mode == 'iou':\n union = area1[i] + area2 - overlap\n else:\n union = area1[i] if not exchange else area2\n union = np.maximum(union, eps)\n ious[i, :] = overlap / union\n if exchange:\n ious = ious.T\n return ious\n\n\ndef crop(image, target, region):\n cropped_image = F.crop(image, *region)\n\n target = target.copy()\n i, j, h, w = region\n\n # should we do something wrt the original size?\n target[\"size\"] = torch.tensor([h, w])\n\n fields = [\"labels\", \"area\", \"iscrowd\"]\n\n if \"boxes\" in target:\n boxes = target[\"boxes\"]\n max_size = torch.as_tensor([w, h], dtype=torch.float32)\n cropped_boxes = boxes - torch.as_tensor([j, i, j, i])\n cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)\n cropped_boxes = cropped_boxes.clamp(min=0)\n area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)\n target[\"boxes\"] = cropped_boxes.reshape(-1, 4)\n target[\"area\"] = area\n fields.append(\"boxes\")\n\n if \"masks\" in target:\n # FIXME should we update the area here if there are no boxes?\n target['masks'] = target['masks'][:, i:i + h, j:j + w]\n fields.append(\"masks\")\n\n # remove elements for which the boxes or masks that have zero area\n if \"boxes\" in target or \"masks\" in target:\n # favor boxes selection when defining which elements to keep\n # this is compatible with previous implementation\n if \"boxes\" in target:\n cropped_boxes = target['boxes'].reshape(-1, 2, 2)\n keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)\n else:\n keep = target['masks'].flatten(1).any(1)\n\n for field in fields:\n target[field] = target[field][keep]\n\n return cropped_image, target\n\n\ndef hflip(image, target):\n flipped_image = F.hflip(image)\n\n w, h = image.size\n\n target = target.copy()\n if \"boxes\" in target:\n boxes = target[\"boxes\"]\n boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])\n target[\"boxes\"] = boxes\n\n if \"masks\" in target:\n target['masks'] = target['masks'].flip(-1)\n\n return flipped_image, target\n\n\ndef resize(image, target, 
size, max_size=None):\n # size can be min_size (scalar) or (w, h) tuple\n\n def get_size_with_aspect_ratio(image_size, size, max_size=None):\n w, h = image_size\n if max_size is not None:\n min_original_size = float(min((w, h)))\n max_original_size = float(max((w, h)))\n if max_original_size / min_original_size * size > max_size:\n size = int(round(max_size * min_original_size / max_original_size))\n\n if (w <= h and w == size) or (h <= w and h == size):\n return (h, w)\n\n if w < h:\n ow = size\n oh = int(size * h / w)\n else:\n oh = size\n ow = int(size * w / h)\n\n return (oh, ow)\n\n def get_size(image_size, size, max_size=None):\n if isinstance(size, (list, tuple)):\n return size[::-1]\n else:\n return get_size_with_aspect_ratio(image_size, size, max_size)\n\n try:\n size = get_size(image.size, size, max_size)\n except:\n pdb.set_trace()\n rescaled_image = F.resize(image, size)\n\n if target is None:\n return rescaled_image, None\n\n ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))\n ratio_width, ratio_height = ratios\n\n target = target.copy()\n if \"boxes\" in target:\n boxes = target[\"boxes\"]\n scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])\n target[\"boxes\"] = scaled_boxes\n\n if \"area\" in target:\n area = target[\"area\"]\n scaled_area = area * (ratio_width * ratio_height)\n target[\"area\"] = scaled_area\n\n h, w = size\n target[\"size\"] = torch.tensor([h, w])\n\n if \"masks\" in target:\n target['masks'] = interpolate(\n target['masks'][:, None].float(), size, mode=\"nearest\")[:, 0] > 0.5\n\n return rescaled_image, target\n\n\ndef pad(image, target, padding):\n # assumes that we only pad on the bottom right corners\n padded_image = F.pad(image, (0, 0, padding[0], padding[1]))\n if target is None:\n return padded_image, None\n target = target.copy()\n # should we do something wrt the original size?\n target[\"size\"] = torch.tensor(padded_image.size[::-1])\n if \"masks\" in target:\n target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1]))\n return padded_image, target\n\n\nclass RandomCrop(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, img, target):\n region = T.RandomCrop.get_params(img, self.size)\n return crop(img, target, region)\n\n\nclass RandomSizeCrop(object):\n def __init__(self, min_size: int, max_size: int):\n self.min_size = min_size\n self.max_size = max_size\n\n def __call__(self, img: PIL.Image.Image, ref_img: PIL.Image.Image, target: dict, ref_target: dict):\n w = random.randint(self.min_size, min(img.width, self.max_size))\n h = random.randint(self.min_size, min(img.height, self.max_size))\n region = T.RandomCrop.get_params(img, [h, w])\n img, target = crop(img, target, region)\n ref_img, ref_target = crop(ref_img, ref_target, region)\n return img, ref_img, target, ref_target\n\n\nclass CenterCrop(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, img, target):\n image_width, image_height = img.size\n crop_height, crop_width = self.size\n crop_top = int(round((image_height - crop_height) / 2.))\n crop_left = int(round((image_width - crop_width) / 2.))\n return crop(img, target, (crop_top, crop_left, crop_height, crop_width))\n\n\nclass RandomContrast(object):\n def __init__(self, lower=0.5, upper=1.5):\n self.lower = lower\n self.upper = upper\n assert self.upper >= self.lower, \"contrast upper must be >= lower.\"\n assert self.lower >= 0, \"contrast lower must be 
non-negative.\"\n def __call__(self, image, ref_image, target, ref_target):\n \n if rand.randint(2):\n image *= rand.uniform(self.lower, self.upper)\n ref_image *= rand.uniform(self.lower, self.upper)\n return image, ref_image, target, ref_target\n\nclass RandomBrightness(object):\n def __init__(self, delta=32):\n assert delta >= 0.0\n assert delta <= 255.0\n self.delta = delta\n def __call__(self, image, target):\n if rand.randint(2):\n delta = rand.uniform(-self.delta, self.delta)\n image += delta\n return image, target\n\nclass RandomSaturation(object):\n def __init__(self, lower=0.5, upper=1.5):\n self.lower = lower\n self.upper = upper\n assert self.upper >= self.lower, \"contrast upper must be >= lower.\"\n assert self.lower >= 0, \"contrast lower must be non-negative.\"\n\n def __call__(self, image, ref_image, target, ref_target):\n if rand.randint(2):\n image[:, :, 1] *= rand.uniform(self.lower, self.upper)\n ref_image[:, :, 1] *= rand.uniform(self.lower, self.upper)\n return image, ref_image, target, ref_target\n\nclass RandomHue(object): #\n def __init__(self, delta=18.0):\n assert delta >= 0.0 and delta <= 360.0\n self.delta = delta\n\n def __call__(self, image, ref_image, target, ref_target):\n if rand.randint(2):\n image[:, :, 0] += rand.uniform(-self.delta, self.delta)\n image[:, :, 0][image[:, :, 0] > 360.0] -= 360.0\n image[:, :, 0][image[:, :, 0] < 0.0] += 360.0\n ref_image[:, :, 0] += rand.uniform(-self.delta, self.delta)\n ref_image[:, :, 0][ref_image[:, :, 0] > 360.0] -= 360.0\n ref_image[:, :, 0][ref_image[:, :, 0] < 0.0] += 360.0\n return image, ref_image, target, ref_target\n\nclass RandomLightingNoise(object):\n def __init__(self):\n self.perms = ((0, 1, 2), (0, 2, 1),\n (1, 0, 2), (1, 2, 0),\n (2, 0, 1), (2, 1, 0))\n def __call__(self, image, target):\n if rand.randint(2):\n swap = self.perms[rand.randint(len(self.perms))]\n shuffle = SwapChannels(swap) # shuffle channels\n image = shuffle(image)\n return image, target\n\nclass ConvertColor(object):\n def __init__(self, current='BGR', transform='HSV'):\n self.transform = transform\n self.current = current\n\n def __call__(self, image, ref_image, target, ref_target):\n if self.current == 'BGR' and self.transform == 'HSV':\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n ref_image = cv2.cvtColor(ref_image, cv2.COLOR_BGR2HSV)\n elif self.current == 'HSV' and self.transform == 'BGR':\n image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)\n ref_image = cv2.cvtColor(ref_image, cv2.COLOR_HSV2BGR)\n else:\n raise NotImplementedError\n return image, ref_image, target, ref_target\n\nclass SwapChannels(object):\n def __init__(self, swaps):\n self.swaps = swaps\n def __call__(self, image):\n image = image[:, :, self.swaps]\n return image\n\nclass PhotometricDistort(object):\n def __init__(self):\n self.pd = [\n RandomContrast(),\n ConvertColor(transform='HSV'),\n RandomSaturation(),\n RandomHue(),\n ConvertColor(current='HSV', transform='BGR'),\n RandomContrast()\n ]\n self.rand_brightness = RandomBrightness()\n self.rand_light_noise = RandomLightingNoise()\n \n def __call__(self, image, ref_image, target, ref_target):\n image = np.asarray(image).astype('float32')\n ref_image = np.asarray(ref_image).astype('float32')\n image, target = self.rand_brightness(image, target)\n ref_image, ref_target = self.rand_brightness(ref_image, ref_target)\n if rand.randint(2):\n distort = Compose(self.pd[:-1])\n else:\n distort = Compose(self.pd[1:])\n image, ref_image, target, ref_target = distort(image, ref_image, target, ref_target)\n image, 
target = self.rand_light_noise(image, target)\n ref_image, ref_target = self.rand_light_noise(ref_image, ref_target)\n image = Image.fromarray(image.astype('uint8'))\n ref_image = Image.fromarray(ref_image.astype('uint8'))\n return image, ref_image, target, ref_target\n\nclass RandomHorizontalFlip(object):\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, img, ref_img, target, ref_target):\n if random.random() < self.p:\n img, target = hflip(img, target)\n ref_img, ref_target = hflip(ref_img, ref_target)\n return img, ref_img, target, ref_target\n\nclass RandomVerticalFlip(object):\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, img, target):\n if random.random() < self.p:\n return vflip(img, target)\n return img, target\n\n\nclass RandomResize(object):\n def __init__(self, sizes, max_size=None):\n assert isinstance(sizes, (list, tuple))\n self.sizes = sizes\n self.max_size = max_size\n\n def __call__(self, img, ref_img, target=None, ref_target=None):\n size = random.choice(self.sizes)\n img, target = resize(img, target, size, self.max_size)\n ref_img, ref_target = resize(ref_img, ref_target, size, self.max_size)\n return img, ref_img, target, ref_target\n\n\nclass RandomPad(object):\n def __init__(self, max_pad):\n self.max_pad = max_pad\n\n def __call__(self, img, target):\n pad_x = random.randint(0, self.max_pad)\n pad_y = random.randint(0, self.max_pad)\n return pad(img, target, (pad_x, pad_y))\n\n\nclass RandomSelect(object):\n \"\"\"\n Randomly selects between transforms1 and transforms2,\n with probability p for transforms1 and (1 - p) for transforms2\n \"\"\"\n def __init__(self, transforms1, transforms2, p=0.5):\n self.transforms1 = transforms1\n self.transforms2 = transforms2\n self.p = p\n\n def __call__(self, img, target):\n if random.random() < self.p:\n return self.transforms1(img, target)\n return self.transforms2(img, target)\n\n\nclass ToTensor(object):\n def __call__(self, image, ref_image, target, ref_target):\n return F.to_tensor(image), F.to_tensor(ref_image), target, ref_target\n\n\nclass RandomErasing(object):\n\n def __init__(self, *args, **kwargs):\n self.eraser = T.RandomErasing(*args, **kwargs)\n\n def __call__(self, img, target):\n return self.eraser(img), target\n\n\nclass Normalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, image, ref_image, target=None, ref_target=None):\n image = F.normalize(image, mean=self.mean, std=self.std)\n ref_image = F.normalize(ref_image, mean=self.mean, std=self.std)\n if target is None:\n return image, None, ref_image, None\n target = target.copy()\n h, w = image.shape[-2:]\n if \"boxes\" in target:\n boxes = target[\"boxes\"]\n boxes = box_xyxy_to_cxcywh(boxes)\n boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)\n target[\"boxes\"] = boxes\n ref_target = ref_target.copy()\n if \"boxes\" in ref_target:\n boxes = ref_target[\"boxes\"]\n boxes = box_xyxy_to_cxcywh(boxes)\n boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)\n ref_target[\"boxes\"] = boxes\n return image, ref_image, target, ref_target\n\n\nclass Compose(object):\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, image, ref_image, target, ref_target):\n for t in self.transforms:\n image, ref_image, target, ref_target = t(image, ref_image, target, ref_target)\n return image, ref_image, target, ref_target\n\n def __repr__(self):\n format_string = self.__class__.__name__ + \"(\"\n for t in self.transforms:\n 
format_string += \"\\n\"\n format_string += \" {0}\".format(t)\n format_string += \"\\n)\"\n return format_string" ]
[ [ "numpy.random.uniform", "numpy.zeros", "torch.as_tensor", "torch.nn.functional.pad", "torch.tensor", "numpy.asarray", "torch.all", "numpy.maximum", "numpy.random.randint", "numpy.minimum" ] ]
JannisBush/h2o-3
[ "30aa2a86e6bfa1febb5f95f3cb43811337895f7f" ]
[ "h2o-py/h2o/frame.py" ]
[ "# -*- encoding: utf-8 -*-\n\"\"\"\nH2O data frame.\n\n:copyright: (c) 2016 H2O.ai\n:license: Apache License Version 2.0 (see LICENSE for details)\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nfrom h2o.utils.compatibility import * # NOQA\n\nimport csv\nimport datetime\nimport functools\nfrom io import StringIO\nimport os\nimport sys\nimport tempfile\nimport traceback\nfrom types import FunctionType\nimport warnings\n\nimport h2o\nfrom h2o.base import Keyed\nfrom h2o.display import H2ODisplay\nfrom h2o.exceptions import H2OTypeError, H2OValueError, H2ODeprecationWarning\nfrom h2o.expr import ExprNode\nfrom h2o.group_by import GroupBy\nfrom h2o.job import H2OJob\nfrom h2o.utils.config import get_config_value\nfrom h2o.utils.ext_dependencies import get_matplotlib_pyplot\nfrom h2o.utils.shared_utils import (_handle_numpy_array, _handle_pandas_data_frame, _handle_python_dicts,\n _handle_python_lists, _is_list, _is_str_list, _py_tmp_key, _quoted,\n can_use_pandas, quote, normalize_slice, slice_is_normalized, check_frame_id)\nfrom h2o.utils.typechecks import (assert_is_type, assert_satisfies, Enum, I, is_type, numeric, numpy_ndarray,\n numpy_datetime, pandas_dataframe, pandas_timestamp, scipy_sparse, U)\nfrom h2o.model.model_base import _get_numpy\n\n__all__ = (\"H2OFrame\", )\n\n\nclass H2OFrame(Keyed):\n \"\"\"\n Primary data store for H2O.\n\n H2OFrame is similar to pandas' ``DataFrame``, or R's ``data.frame``. One of the critical distinction is that the\n data is generally not held in memory, instead it is located on a (possibly remote) H2O cluster, and thus\n ``H2OFrame`` represents a mere handle to that data.\n\n Create a new H2OFrame object, possibly from some other object.\n\n :param python_obj: object that will be converted to an ``H2OFrame``. This could have multiple types:\n\n - None: create an empty H2OFrame\n - A list/tuple of strings or numbers: create a single-column H2OFrame containing the contents of this list.\n - A dictionary of ``{name: list}`` pairs: create an H2OFrame with multiple columns, each column having the\n provided ``name`` and contents from ``list``. If the source dictionary is not an OrderedDict, then the\n columns in the H2OFrame may appear shuffled.\n - A list of lists of strings/numbers: construct an H2OFrame from a rectangular table of values, with inner\n lists treated as rows of the table. I.e. ``H2OFrame([[1, 'a'], [2, 'b'], [3, 'c']])`` will create a\n frame with 3 rows and 2 columns, one numeric and one string.\n - A Pandas dataframe, or a Numpy ndarray: create a matching H2OFrame.\n - A Scipy sparse matrix: create a matching sparse H2OFrame.\n\n :param int header: if ``python_obj`` is a list of lists, this parameter can be used to indicate whether the\n first row of the data represents headers. The value of -1 means the first row is data, +1 means the first\n row is the headers, 0 (default) allows H2O to guess whether the first row contains data or headers.\n :param List[str] column_names: explicit list of column names for the new H2OFrame. This will override any\n column names derived from the data. If the python_obj does not contain explicit column names, and this\n parameter is not given, then the columns will be named \"C1\", \"C2\", \"C3\", etc.\n :param column_types: explicit column types for the new H2OFrame. This could be either a list of types for\n each column, or a dictionary of {column name: column type} pairs. 
In the latter case you may override\n types for only few columns, and let H2O choose the types of the rest.\n :param na_strings: List of strings in the input data that should be interpreted as missing values. This could\n be given on a per-column basis, either as a list-of-lists, or as a dictionary {column name: list of nas}.\n :param str destination_frame: (internal) name of the target DKV key in the H2O backend.\n :param str separator: (deprecated)\n\n :example:\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame\n \"\"\"\n\n # Temp flag: set this to false for now if encountering path conversion/expansion issues when import files to remote server\n __LOCAL_EXPANSION_ON_SINGLE_IMPORT__ = True\n\n #-------------------------------------------------------------------------------------------------------------------\n # Construction\n #-------------------------------------------------------------------------------------------------------------------\n\n def __init__(self, python_obj=None, destination_frame=None, header=0, separator=\",\",\n column_names=None, column_types=None, na_strings=None, skipped_columns=None):\n \n coltype = U(None, \"unknown\", \"uuid\", \"string\", \"float\", \"real\", \"double\", \"int\", \"long\", \"numeric\",\n \"categorical\", \"factor\", \"enum\", \"time\")\n assert_is_type(python_obj, None, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse)\n assert_is_type(destination_frame, None, str)\n assert_is_type(header, -1, 0, 1)\n assert_is_type(separator, I(str, lambda s: len(s) == 1))\n assert_is_type(column_names, None, [str])\n assert_is_type(column_types, None, [coltype], {str: coltype})\n assert_is_type(na_strings, None, [str], [[str]], {str: [str]})\n check_frame_id(destination_frame)\n\n self._ex = ExprNode()\n self._ex._children = None\n self._is_frame = True # Indicate that this is an actual frame, allowing typechecks to be made\n if python_obj is not None:\n self._upload_python_object(python_obj, destination_frame, header, separator,\n column_names, column_types, na_strings, skipped_columns)\n\n @staticmethod\n def _expr(expr, cache=None):\n # TODO: merge this method with `__init__`\n fr = H2OFrame()\n fr._ex = expr\n if cache is not None:\n fr._ex._cache.fill_from(cache)\n return fr\n\n\n def _upload_python_object(self, python_obj, destination_frame=None, header=0, separator=\",\",\n column_names=None, column_types=None, na_strings=None, skipped_columns=None):\n assert_is_type(python_obj, list, tuple, dict, numpy_ndarray, pandas_dataframe, scipy_sparse)\n if is_type(python_obj, scipy_sparse):\n self._upload_sparse_matrix(python_obj, destination_frame=destination_frame)\n return\n # TODO: all these _handlers should really belong to this class, not to shared_utils.\n processor = (_handle_pandas_data_frame if is_type(python_obj, pandas_dataframe) else\n _handle_numpy_array if is_type(python_obj, numpy_ndarray) else\n _handle_python_dicts if is_type(python_obj, dict) else\n _handle_python_lists)\n col_header, data_to_write = processor(python_obj, header)\n if col_header is None or data_to_write is None:\n raise H2OValueError(\"No data to write\")\n if not column_names:\n column_names = col_header\n\n # create a temporary file that will be written to\n tmp_handle, tmp_path = tempfile.mkstemp(suffix=\".csv\")\n tmp_file = os.fdopen(tmp_handle, 'w')\n # create a new csv writer object thingy\n csv_writer = csv.writer(tmp_file, dialect=\"excel\", quoting=csv.QUOTE_NONNUMERIC)\n 
csv_writer.writerow(column_names)\n if data_to_write and isinstance(data_to_write[0], dict):\n for row in data_to_write:\n csv_writer.writerow([row.get(k, None) for k in col_header])\n else:\n csv_writer.writerows(data_to_write)\n tmp_file.close() # close the streams\n self._upload_parse(tmp_path, destination_frame, 1, separator, column_names, column_types, na_strings, skipped_columns)\n os.remove(tmp_path) # delete the tmp file\n\n\n def _upload_sparse_matrix(self, matrix, destination_frame=None):\n import scipy.sparse as sp\n if not sp.issparse(matrix):\n raise H2OValueError(\"A sparse matrix expected, got %s\" % type(matrix))\n\n tmp_handle, tmp_path = tempfile.mkstemp(suffix=\".svmlight\")\n out = os.fdopen(tmp_handle, \"wt\")\n if destination_frame is None:\n destination_frame = _py_tmp_key(h2o.connection().session_id)\n\n # sp.find(matrix) returns (row indices, column indices, values) of the non-zero elements of A. Unfortunately\n # there is no guarantee that those elements are returned in the correct order, so need to sort\n data = zip(*sp.find(matrix))\n if not isinstance(data, list): data = list(data) # possibly convert from iterator to a list\n data.sort()\n idata = 0 # index of the next element to be consumed from `data`\n for irow in range(matrix.shape[0]):\n if idata < len(data) and data[idata][0] == irow and data[idata][1] == 0:\n y = data[idata][2]\n idata += 1\n else:\n y = 0\n out.write(str(y))\n while idata < len(data) and data[idata][0] == irow:\n out.write(\" \")\n out.write(str(data[idata][1]))\n out.write(\":\")\n out.write(str(data[idata][2]))\n idata += 1\n out.write(\"\\n\")\n out.close()\n\n ret = h2o.api(\"POST /3/PostFile\", filename=tmp_path)\n os.remove(tmp_path)\n rawkey = ret[\"destination_frame\"]\n\n p = {\"source_frames\": [rawkey], \"destination_frame\": destination_frame}\n H2OJob(h2o.api(\"POST /3/ParseSVMLight\", data=p), \"Parse\").poll()\n self._ex._cache._id = destination_frame\n self._ex._cache.fill()\n\n\n @staticmethod\n def get_frame(frame_id, rows=10, rows_offset=0, cols=-1, full_cols=-1, cols_offset=0, light=False):\n \"\"\"\n Retrieve an existing H2OFrame from the H2O cluster using the frame's id.\n\n :param str frame_id: id of the frame to retrieve\n :param int rows: number of rows to fetch for preview (10 by default)\n :param int rows_offset: offset to fetch rows from (0 by default)\n :param int cols: number of columns to fetch (all by default)\n :param full_cols: number of columns to fetch together with backed data\n :param int cols_offset: offset to fetch rows from (0 by default)\n :param bool light: whether to use light frame endpoint or not\n :returns: an existing H2OFrame with the id provided; or None if such frame doesn't exist.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> h2o.get_frame(iris.frame_id)\n \"\"\"\n fr = H2OFrame()\n fr._ex._cache._id = frame_id\n try:\n fr._ex._cache.fill(rows=rows, rows_offset=rows_offset, cols=cols, full_cols=full_cols, cols_offset=cols_offset, light=light)\n except EnvironmentError:\n return None\n return fr\n\n @staticmethod\n def _validate(param, name, required=False, message=None):\n message = message or \"'{}' must be a valid H2OFrame!\".format(name)\n if param is None:\n if required:\n raise ValueError(message)\n else:\n return\n else:\n assert_is_type(param, str, H2OFrame, message=message)\n if is_type(param, str):\n fr = h2o.get_frame(param)\n if fr is None:\n raise ValueError(message)\n return fr\n return 
param\n\n\n def refresh(self):\n \"\"\"\n Reload frame information from the backend H2O server.\n\n :returns: Frame information from the backend H2O server.\n\n :examples:\n\n >>> dataframe = {'A': [1,0,3,4],\n ... 'B': [5,6,-6, -1],\n ... 'C':[-4, -6, -7, 8]}\n >>> frame = h2o.H2OFrame(dataframe)\n >>> frame_asin = frame.asin()\n >>> assert set(frame.names) ==\n ... {\"A\", \"B\", \"C\"},\n ... \"Expected original colnames to remain the same after uniop operation\"\n >>> assert [\"asin(%s)\" % (name) for name in frame.names] ==\n ... frame_asin.names,\"Expected equal col names after\",\n ... \" uniop operation\"\n >>> frame_asin.refresh()\n \"\"\"\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n\n\n\n #-------------------------------------------------------------------------------------------------------------------\n # Frame properties\n #-------------------------------------------------------------------------------------------------------------------\n\n @property\n def key(self):\n \"\"\"\n Displays the unique key representing the object on the backend.\n \n :returns: the unique key representing the object on the backend\n\n :examples:\n\n >>> frame = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> frame.key\n \"\"\"\n return None if self._ex is None else self.frame_id\n\n\n @property\n def names(self):\n \"\"\"\n The list of column names (List[str]).\n\n :returns: The list of column names.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv\")\n >>> iris.names\n \"\"\"\n if not self._ex._cache.names_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n return list(self._ex._cache.names)\n\n @names.setter\n def names(self, value):\n self.set_names(value)\n\n\n @property\n def nrows(self):\n \"\"\"\n Number of rows in the dataframe (int).\n\n :returns: Number of rows in the dataframe.\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv\")\n >>> iris.nrows\n \"\"\"\n if not self._ex._cache.nrows_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n return self._ex._cache.nrows\n\n\n @property\n def ncols(self):\n \"\"\"\n Number of columns in the dataframe (int).\n\n :returns: Number of columns in the dataframe.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv\")\n >>> iris.ncols\n \"\"\"\n if not self._ex._cache.ncols_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n return self._ex._cache.ncols\n\n\n @property\n def shape(self):\n \"\"\"\n Number of rows and columns in the dataframe as a tuple ``(nrows, ncols)``.\n\n :returns: Number of rows and columns in the dataframe as a tuple\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris = iris[:, 0:4]\n >>> iris.shape\n \"\"\"\n return self.nrows, self.ncols\n\n\n @property\n def types(self):\n \"\"\"\n The dictionary of column name/type pairs.\n\n :returns: Dictionary of column name/type pairs.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris.types\n \"\"\"\n if not self._ex._cache.types_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n return dict(self._ex._cache.types)\n\n @property\n def dtype(self):\n \"\"\"\n Returns the 
numpy.dtype of the first column of this data frame.\n Works only for single-column data frames.\n Used mainly for using H2OFrames in conjunction with scikit-learn APIs.\n\n :returns: Numpy dtype of the first column\n \"\"\"\n if not len(self.columns) == 1:\n raise H2OValueError(\"dtype is only supported for one column frames\")\n np = _get_numpy(\"H2OFrame.dtype\")\n type_map = {\n \"enum\": np.str, \n \"string\": np.str, \n \"int\": np.int, \n \"real\": np.float, \n \"time\": np.str,\n \"uuid\": np.str\n }\n types_list = list(self.types.values())\n return np.dtype(type_map[types_list[0]])\n\n @property\n def frame_id(self):\n \"\"\"\n Internal id of the frame (str).\n\n :returns: Internal id of the frame (str).\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> print(iris.frame_id)\n \"\"\"\n return self._frame()._ex._cache._id\n\n @frame_id.setter\n def frame_id(self, newid):\n check_frame_id(newid)\n if self._ex._cache._id is None:\n h2o.assign(self, newid)\n else:\n oldname = self.frame_id\n self._ex._cache._id = newid\n h2o.rapids(\"(rename \\\"{}\\\" \\\"{}\\\")\".format(oldname, newid))\n\n\n def type(self, col):\n \"\"\"\n The type for the given column.\n\n :param col: either a name, or an index of the column to look up\n :returns: type of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``.\n :raises H2OValueError: if such column does not exist in the frame.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris.type(\"C5\")\n \"\"\"\n assert_is_type(col, int, str)\n if not self._ex._cache.types_valid() or not self._ex._cache.names_valid():\n self._ex._cache.flush()\n self._frame(fill_cache=True)\n types = self._ex._cache.types\n if is_type(col, str):\n if col in types:\n return types[col]\n else:\n names = self._ex._cache.names\n if -len(names) <= col < len(names):\n return types[names[col]]\n raise H2OValueError(\"Column '%r' does not exist in the frame\" % col)\n\n\n def _import_parse(self, path, pattern, destination_frame, header, separator, column_names, column_types, na_strings,\n skipped_columns=None, custom_non_data_line_markers=None, partition_by=None, quotechar=None, escapechar=None):\n if H2OFrame.__LOCAL_EXPANSION_ON_SINGLE_IMPORT__ and is_type(path, str) and \"://\" not in path: # fixme: delete those 2 lines, cf. 
PUBDEV-5717\n path = os.path.abspath(path)\n rawkey = h2o.lazy_import(path, pattern)\n self._parse(rawkey, destination_frame, header, separator, column_names, column_types, na_strings,\n skipped_columns, custom_non_data_line_markers, partition_by, quotechar, escapechar)\n return self\n\n\n def _upload_parse(self, path, destination_frame, header, sep, column_names, column_types, na_strings, skipped_columns=None,\n quotechar=None, escapechar=None):\n ret = h2o.api(\"POST /3/PostFile\", filename=path)\n rawkey = ret[\"destination_frame\"]\n self._parse(rawkey, destination_frame, header, sep, column_names, column_types, na_strings, skipped_columns,\n quotechar=quotechar, escapechar=escapechar)\n return self\n\n\n def _parse(self, rawkey, destination_frame=\"\", header=None, separator=None, column_names=None, column_types=None,\n na_strings=None, skipped_columns=None, custom_non_data_line_markers=None, partition_by=None, quotechar=None,\n escapechar=None):\n setup = h2o.parse_setup(rawkey, destination_frame, header, separator, column_names, column_types, na_strings,\n skipped_columns, custom_non_data_line_markers, partition_by, quotechar, escapechar)\n return self._parse_raw(setup)\n\n\n def _parse_raw(self, setup):\n # Parse parameters (None values provided by setup)\n p = {\"destination_frame\": None,\n \"parse_type\": None,\n \"separator\": None,\n \"check_header\": None,\n \"number_columns\": None,\n \"chunk_size\": None,\n \"delete_on_done\": True,\n \"blocking\": False,\n \"column_types\": None,\n \"skipped_columns\":None,\n \"custom_non_data_line_markers\": None,\n \"partition_by\": None,\n \"single_quotes\": None,\n \"escapechar\": None\n }\n\n if setup[\"column_names\"]: p[\"column_names\"] = None\n if setup[\"na_strings\"]: p[\"na_strings\"] = None\n\n p.update({k: v for k, v in viewitems(setup) if k in p})\n\n # Extract only 'name' from each src in the array of srcs\n p['source_frames'] = [_quoted(src['name']) for src in setup['source_frames']]\n\n H2OJob(h2o.api(\"POST /3/Parse\", data=p), \"Parse\").poll()\n # Need to return a Frame here for nearly all callers\n # ... but job stats returns only a dest_key, requiring another REST call to get nrow/ncol\n self._ex._cache._id = p[\"destination_frame\"]\n self._ex._cache.fill()\n\n\n def filter_na_cols(self, frac=0.2):\n \"\"\"\n Filter columns with proportion of NAs greater or equals than ``frac``.\n\n :param float frac: Maximum fraction of NAs in the column to keep.\n\n :returns: A list of indices of columns that have fewer NAs than ``frac``. If all columns are filtered,\n None is returned.\n\n :examples:\n\n >>> prostate = h2o.import_file(\"http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip\")\n >>> include_cols1 = prostate.filter_na_cols()\n >>> include_cols1\n >>> include_cols2 = prostate.filter_na_cols(0.001)\n >>> include_cols2\n \"\"\"\n return ExprNode(\"filterNACols\", self, frac)._eager_scalar()\n\n\n def columns_by_type(self, coltype=\"numeric\"):\n \"\"\"\n Extract columns of the specified type from the frame.\n\n :param str coltype: A character string indicating which column type to filter by. This must be\n one of the following:\n\n - ``\"numeric\"`` - Numeric, but not categorical or time\n - ``\"categorical\"`` - Integer, with a categorical/factor String mapping\n - ``\"string\"`` - String column\n - ``\"time\"`` - Long msec since the Unix Epoch - with a variety of display/parse options\n - ``\"uuid\"`` - UUID\n - ``\"bad\"`` - No none-NA rows (triple negative! 
all NAs or zero rows)\n\n :returns: list of indices of columns that have the requested type\n\n :examples:\n\n >>> frame = h2o.create_frame(rows=10,\n ... integer_fraction=1,\n ... binary_ones_fraction=0,\n ... missing_fraction=0)\n >>> num = frame.columns_by_type(coltype=\"numeric\")\n >>> str = frame.columns_by_type(coltype=\"string\")\n >>> num\n >>> string\n \"\"\"\n assert_is_type(coltype, \"numeric\", \"categorical\", \"string\", \"time\", \"uuid\", \"bad\")\n assert_is_type(self, H2OFrame)\n return ExprNode(\"columnsByType\", self, coltype)._eager_scalar()\n\n\n def __iter__(self):\n return (self[i] for i in range(self.ncol))\n\n def __unicode__(self):\n if sys.gettrace() is None:\n if self._ex is None: return \"This H2OFrame has been removed.\"\n table = self._frame(fill_cache=True)._ex._cache._tabulate(\"simple\", False)\n nrows = \"%d %s\" % (self.nrow, \"row\" if self.nrow == 1 else \"rows\")\n ncols = \"%d %s\" % (self.ncol, \"column\" if self.ncol == 1 else \"columns\")\n return \"%s\\n\\n[%s x %s]\" % (table, nrows, ncols)\n return \"\"\n\n def __repr__(self):\n if sys.gettrace() is None:\n # PUBDEV-2278: using <method>? from IPython caused everything to dump\n stk = traceback.extract_stack()\n if not (\"IPython\" in stk[-2][0] and \"info\" == stk[-2][2]):\n self.show()\n return \"\"\n\n def _has_content(self):\n return self._ex and (self._ex._children or self._ex._cache._id)\n\n def show(self, use_pandas=False, rows=10, cols=200):\n \"\"\"\n Used by the H2OFrame.__repr__ method to print or display a snippet of the data frame.\n\n If called from IPython, displays the results in HTML format. Otherwise, this prints a tabulated result.\n\n :returns: snippet of the data frame.\n\n :examples:\n\n >>> from random import randrange\n >>> import numpy as np\n >>> row_num = randrange(1,10)\n >>> col_num = randrange(1,10)\n >>> python_lists = np.random.randint(-5,5, (row_num,col_num))\n >>> h20frame = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.show(use_pandas=False)\n >>> h2oframe.show(use_pandas=True)\n \"\"\"\n if self._ex is None:\n print(\"This H2OFrame has been removed.\")\n return\n if not self._has_content():\n print(\"This H2OFrame is empty and not initialized.\")\n return\n if self.nrows == 0:\n print(\"This H2OFrame is empty.\")\n return\n if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill()\n if H2ODisplay._in_zep():\n print(\"%html \" + self._ex._cache._tabulate(\"html\", False, rows=rows))\n elif H2ODisplay._in_ipy():\n import IPython.display\n if use_pandas and can_use_pandas():\n IPython.display.display(self.head(rows=rows, cols=cols).as_data_frame(use_pandas=True))\n else:\n IPython.display.display_html(self._ex._cache._tabulate(\"html\", False, rows=rows), raw=True)\n else:\n if use_pandas and can_use_pandas():\n print(self.head(rows=rows, cols=cols).as_data_frame(use_pandas=True))\n else:\n s = self.__unicode__()\n stk = traceback.extract_stack()\n if \"IPython\" in stk[-3][0]:\n s = \"\\n%s\" % s\n try:\n print(s)\n except UnicodeEncodeError:\n print(s.encode(\"ascii\", \"replace\"))\n\n\n def summary(self, return_data=False):\n \"\"\"\n Display summary information about the frame.\n\n Summary includes min/mean/max/sigma and other rollup data.\n\n :param bool return_data: Return a dictionary of the summary output\n :returns: Summary of information about the frame\n\n :examples:\n\n >>> frame = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> frame.summary()\n \"\"\"\n if not self._has_content():\n 
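# No backing frame key and no pending expression graph: there is nothing on the cluster to summarize, so report that and return the (empty) cached rollup data.\n 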
print(\"This H2OFrame is empty and not initialized.\")\n return self._ex._cache._data;\n if not self._ex._cache.is_valid(): self._frame()._ex._cache.fill()\n if not return_data:\n if self.nrows == 0:\n print(\"This H2OFrame is empty.\")\n elif H2ODisplay._in_zep():\n print(\"%html \" + self._ex._cache._tabulate(\"html\", True))\n elif H2ODisplay._in_ipy():\n import IPython.display\n IPython.display.display_html(self._ex._cache._tabulate(\"html\", True), raw=True)\n else:\n print(self._ex._cache._tabulate(\"simple\", True))\n else:\n return self._ex._cache._data\n\n\n def describe(self, chunk_summary=False):\n \"\"\"\n Generate an in-depth description of this H2OFrame.\n\n This will print to the console the dimensions of the frame; names/types/summary statistics for each column;\n and finally first ten rows of the frame.\n\n :param bool chunk_summary: Retrieve the chunk summary along with the distribution summary\n :returns: The dimensions of the frame; names/types/summary statistics for each column; first ten rows of the frame.\n\n :examples:\n\n >>> python_lists = [[1,2,3],[4,5,6],[\"a\",\"b\",\"c\"],[1,0,1]]\n >>> col_names=[\"num1\",\"num2\",\"str1\",\"enum1\"]\n >>> dest_frame=\"newFrame\"\n >>> heads=-1\n >>> sep=','\n >>> col_types=['numeric','numeric','string','enum']\n >>> na_str=['NA']\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists,\n ... destination_frame=dest_frame,\n ... header=heads,\n ... separator=sep,\n ... column_names=col_names,\n ... column_types=col_types,\n ... na_strings=na_str)\n >>> h2oframe.describe(chunk_summary=True)\n \"\"\"\n if self._has_content():\n res = h2o.api(\"GET /3/Frames/%s\" % self.frame_id, data={\"row_count\": 10})[\"frames\"][0]\n self._ex._cache._fill_data(res)\n\n print(\"Rows:{}\".format(self.nrow))\n print(\"Cols:{}\".format(self.ncol))\n\n # The chunk & distribution summaries are not cached, so must be pulled if chunk_summary=True.\n if chunk_summary:\n res[\"chunk_summary\"].show()\n res[\"distribution_summary\"].show()\n print(\"\\n\")\n self.summary()\n\n def detach(self):\n \"\"\"\n Detach the Python object from the backend, usually by clearing its key\n\n :examples: \n\n >>> from random import randrange\n >>> import numpy as np\n >>> row_num = randrange(2,10)\n >>> col_num = randrange(2,10)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.detach()\n >>> h2oframe\n \"\"\"\n self._ex = None\n\n\n def _frame(self, rows=10, rows_offset=0, cols=-1, cols_offset=0, fill_cache=False):\n self._ex._eager_frame()\n if fill_cache:\n self._ex._cache.fill(rows=rows, rows_offset=rows_offset, cols=cols, cols_offset=cols_offset)\n return self\n\n\n def head(self, rows=10, cols=200):\n \"\"\"\n Return the first ``rows`` and ``cols`` of the frame as a new H2OFrame.\n\n :param int rows: maximum number of rows to return\n :param int cols: maximum number of columns to return\n :returns: a new H2OFrame cut from the top left corner of the current frame, and having dimensions at\n most ``rows`` x ``cols``.\n\n :examples:\n\n >>> import numpy as np\n >>> from h2o.frame import H2OFrame\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> df = H2OFrame.from_python(np.random.randn(100, 4).tolist(),\n ... column_names=list(\"ABCD\"),\n ... 
column_types=[\"enum\"] * 4)\n >>> df.head()\n \"\"\"\n assert_is_type(rows, int)\n assert_is_type(cols, int)\n nrows = min(self.nrows, rows)\n ncols = min(self.ncols, cols)\n newdt = self[:nrows, :ncols]\n return newdt._frame(rows=nrows, cols=cols, fill_cache=True)\n\n\n def tail(self, rows=10, cols=200):\n \"\"\"\n Return the last ``rows`` and ``cols`` of the frame as a new H2OFrame.\n\n :param int rows: maximum number of rows to return\n :param int cols: maximum number of columns to return\n :returns: a new H2OFrame cut from the bottom left corner of the current frame, and having dimensions at\n most ``rows`` x ``cols``.\n\n :examples:\n\n >>> from random import randrange\n >>> import numpy as np\n >>> row_num = randrange(2,10)\n >>> col_num = randrange(2,10)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe\n >>> new_row = randrange(1, row_num)\n >>> new_col = randrange(1, col_num)\n >>> h2oframe.tail(rows=new_row, cols=new_col)\n \"\"\"\n assert_is_type(rows, int)\n assert_is_type(cols, int)\n nrows = min(self.nrows, rows)\n ncols = min(self.ncols, cols)\n start_idx = self.nrows - nrows\n newdt = self[start_idx:start_idx + nrows, :ncols]\n return newdt._frame(rows=nrows, cols=cols, fill_cache=True)\n\n\n def logical_negation(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise Logical NOT applied to the current frame.\n\n :returns: New H2OFrame equal to elementwise Logical NOT applied to the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.logical_negation()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"not\", self), cache=self._ex._cache)\n\n\n def _unop(self, op, rtype=\"real\"):\n if self._is_frame:\n for cname, ctype in self.types.items():\n if ctype not in {\"int\", \"real\", \"bool\"}:\n raise H2OValueError(\"Function %s cannot be applied to %s column '%s'\" % (op, ctype, cname))\n ret = H2OFrame._expr(expr=ExprNode(op, self), cache=self._ex._cache)\n ret._ex._cache._names = [\"%s(%s)\" % (op, name) for name in self._ex._cache._names]\n ret._ex._cache._types = {name: rtype for name in ret._ex._cache._names}\n return ret\n\n # Binary operations\n def __add__(self, rhs):\n return _binop(self, \"+\", rhs)\n\n def __sub__(self, rhs):\n return _binop(self, \"-\", rhs)\n\n def __mul__(self, rhs):\n return _binop(self, \"*\", rhs)\n\n def __div__(self, rhs):\n return _binop(self, \"/\", rhs)\n\n def __truediv__(self, rhs):\n return _binop(self, \"/\", rhs)\n\n def __floordiv__(self, rhs):\n return _binop(self, \"intDiv\", rhs)\n\n def __mod__(self, rhs):\n return _binop(self, \"%\", rhs)\n\n def __or__(self, rhs):\n return _binop(self, \"|\", rhs, rtype=\"bool\")\n\n def __and__(self, rhs):\n return _binop(self, \"&\", rhs, rtype=\"bool\")\n\n def __ge__(self, rhs):\n return _binop(self, \">=\", rhs, rtype=\"bool\")\n\n def __gt__(self, rhs):\n return _binop(self, \">\", rhs, rtype=\"bool\")\n\n def __le__(self, rhs):\n return _binop(self, \"<=\", rhs, rtype=\"bool\")\n\n def __lt__(self, rhs):\n return _binop(self, \"<\", rhs, rtype=\"bool\")\n\n def __eq__(self, rhs):\n if rhs is None: rhs = float(\"nan\")\n return _binop(self, \"==\", rhs, rtype=\"bool\")\n\n def __ne__(self, rhs):\n if rhs is None: rhs = float(\"nan\")\n return _binop(self, \"!=\", rhs, rtype=\"bool\")\n\n def __pow__(self, rhs):\n return _binop(self, \"^\", rhs)\n\n def __contains__(self, lhs):\n return all((t == self).any() for t in lhs) if 
_is_list(lhs) else (lhs == self).any()\n\n # rops\n def __rmod__(self, lhs):\n return _binop(lhs, \"%\", self)\n\n def __radd__(self, lhs):\n return _binop(lhs, \"+\", self)\n\n def __rsub__(self, lhs):\n return _binop(lhs, \"-\", self)\n\n def __rand__(self, lhs):\n return _binop(lhs, \"&\", self, rtype=\"bool\")\n\n def __ror__(self, lhs):\n return _binop(lhs, \"|\", self, rtype=\"bool\")\n\n def __rtruediv__(self, lhs):\n return _binop(lhs, \"/\", self)\n\n def __rdiv__(self, lhs):\n return _binop(lhs, \"/\", self)\n\n def __rfloordiv__(self, lhs):\n return _binop(lhs, \"intDiv\", self, rtype=\"int\")\n\n def __rmul__(self, lhs):\n return _binop(lhs, \"*\", self)\n\n def __rpow__(self, lhs):\n return _binop(lhs, \"^\", self)\n\n # unops\n def __abs__(self):\n return self._unop(\"abs\")\n\n def __invert__(self):\n return self._unop(\"!!\", rtype=\"bool\")\n\n def __nonzero__(self):\n if self.nrows > 1 or self.ncols > 1:\n raise H2OValueError(\n 'This operation is not supported on an H2OFrame. Try using parentheses. '\n 'Did you mean & (logical and), | (logical or), or ~ (logical not)?')\n else:\n return self.__len__()\n\n def __int__(self):\n return int(self.flatten())\n\n def __float__(self):\n return float(self.flatten())\n\n\n def flatten(self):\n \"\"\"\n Convert a 1x1 frame into a scalar.\n\n :returns: content of this 1x1 frame as a scalar (``int``, ``float``, or ``str``).\n :raises H2OValueError: if current frame has shape other than 1x1\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame1 = h2o.H2OFrame(python_obj)\n >>> frame1.flatten()\n # Should receive \"H2OValueError: Not a 1x1 Frame\"\n >>> frame2 = h2o.H2OFrame.from_python([\"redrum\"])\n >>> frame2.flatten()\n \"\"\"\n if self.shape != (1, 1): raise H2OValueError(\"Not a 1x1 Frame\")\n return ExprNode(\"flatten\", self)._eager_scalar()\n\n\n def getrow(self):\n \"\"\"\n Convert a 1xn frame into an n-element list.\n\n :returns: content of this 1xn frame as a Python list.\n :raises H2OValueError: if current frame has more than one row.\n\n :examples:\n\n >>> import scipy.sparse as sp\n >>> A = sp.csr_matrix([[1, 2, 0, 5.5], [0, 0, 3, 6.7], [4, 0, 5, 0]])\n >>> fr = h2o.H2OFrame(A)\n >>> assert fr.shape == (3, 4)\n >>> assert fr.as_data_frame(False) ==\n ... [['C1', 'C2', 'C3', 'C4'], ['1', '2', '0', '5.5'],\n ... ['0', '0', '3', '6.7'], ['4', '0', '5', '0.0']]\n >>> A = sp.lil_matrix((1000, 1000))\n >>> A.setdiag(10)\n >>> for i in range(999):\n ... A[i, i + 1] = -3\n ... A[i + 1, i] = -2\n >>> fr = h2o.H2OFrame(A)\n >>> assert fr.shape == (1000, 1000)\n >>> means = fr.mean().getrow()\n >>> assert means == [0.008] + [0.005] * 998 + [0.007]\n >>> means\n \"\"\"\n if self.nrows != 1:\n raise H2OValueError(\"This method can only be applied to single-row frames\")\n return ExprNode(\"getrow\", self)._eager_scalar()\n\n\n def mult(self, matrix):\n \"\"\"\n Multiply this frame, viewed as a matrix, by another matrix.\n\n :param matrix: another frame that you want to multiply the current frame by; must be compatible with the\n current frame (i.e. 
its number of rows must be the same as number of columns in the current frame).\n :returns: new H2OFrame, which is the result of multiplying the current frame by ``matrix``.\n\n :examples:\n\n >>> data = [[random.uniform(-10000,10000)] for c in range(100)]\n >>> h2o_data = h2o.H2OFrame(data)\n >>> h2o_mm = h2o_data.mult(h2o_data.transpose())\n \"\"\"\n if self.ncols != matrix.nrows:\n raise H2OValueError(\"Matrix is not compatible for multiplication with the current frame\")\n return H2OFrame._expr(expr=ExprNode(\"x\", self, matrix))\n\n\n def cos(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise cosine of the current frame.\n \n :returns: New H2OFrame equal to elementwise cosine of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.cos()\n \"\"\"\n return self._unop(\"cos\")\n\n\n def sin(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise sine of the current frame.\n \n :returns: New H2OFrame equal to elementwise sine of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.sin()\n \"\"\"\n return self._unop(\"sin\")\n\n\n def tan(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise tangent of the current frame.\n \n :returns: New H2OFrame equal to elementwise tangent of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.tan()\n \"\"\"\n return self._unop(\"tan\")\n\n\n def acos(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise arc cosine of the current frame.\n \n :returns: New H2OFrame equal to elementwise arc cosine of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.acos()\n \"\"\"\n return self._unop(\"acos\")\n\n\n def asin(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise arc sine of the current frame.\n\n :returns: New H2OFrame equal to elementwise arc sine of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.asin()\n \"\"\"\n return self._unop(\"asin\")\n\n\n def atan(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise arc tangent of the current frame.\n \n :returns: New H2OFrame equal to elementwise arc tangent of the current frame.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.atan()\n \"\"\"\n return self._unop(\"atan\")\n\n\n def cosh(self):\n \"\"\"\n Create a new H2OFrame with values equal to the hyperbolic cosines of the values in the current frame.\n\n :returns: New H2OFrame with values equal to the hyperbolic cosines of the values in the current frame.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.cosh()\n \"\"\"\n return self._unop(\"cosh\")\n\n\n def sinh(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise hyperbolic sine of the current frame.\n\n :returns: New H2OFrame equal to elementwise hyperbolic sine of the current frame.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.sinh()\n \"\"\"\n return self._unop(\"sinh\")\n\n\n def tanh(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise hyperbolic tangent of the current frame.\n \n :returns: New H2OFrame equal to elementwise hyperbolic tangent of the current frame.\n\n :examples:\n\n 
>>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.tanh()\n \"\"\"\n return self._unop(\"tanh\")\n\n\n def acosh(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise inverse hyperbolic cosine of the current frame\n\n :returns: New H2OFrame equal to elementwise inverse hyperbolic cosine of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.acosh()\n \"\"\"\n return self._unop(\"acosh\")\n\n\n def asinh(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise inverse hyperbolic sine of the current frame.\n \n :returns: New H2OFrame equal to elementwise inverse hyperbolic sine of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.asinh()\n \"\"\"\n return self._unop(\"asinh\")\n\n\n def atanh(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise inverse hyperbolic tangent of the current frame.\n \n :returns: New H2OFrame equal to elementwise inverse hyperbolic tangent of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.atanh()\n \"\"\"\n return self._unop(\"atanh\")\n\n\n def cospi(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise cosine of the current frame multiplied by Pi.\n \n :returns: New H2OFrame equal to elementwise cosine of the current frame multiplied by Pi.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.cospi()\n \"\"\"\n return self._unop(\"cospi\")\n\n\n def sinpi(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise sine of the current frame multiplied by Pi.\n \n :returns: New H2OFrame equal to elementwise sine of the current frame multiplied by Pi.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.sinpi()\n \"\"\"\n return self._unop(\"sinpi\")\n\n\n def tanpi(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise tangent of the current frame multiplied by Pi.\n \n :returns: New H2OFrame equal to elementwise tangent of the current frame multiplied by Pi.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.tanpi()\n \"\"\"\n return self._unop(\"tanpi\")\n\n\n def abs(self):\n \"\"\"\n Calculate the absolute value of the current frame.\n \n :returns: new H2OFrame equal to elementwise absolute value of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> abs(frame)\n \"\"\"\n return self._unop(\"abs\")\n\n\n def sign(self):\n \"\"\"\n Return new H2OFrame equal to signs of the values in the frame: -1 , +1, or 0.\n\n :returns: New H2OFrame equal to signs of the values in the frame: -1, +1, or 0.\n \n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris.sign()\n \"\"\"\n return self._unop(\"sign\", rtype=\"int\")\n\n\n def sqrt(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise square root of the current frame.\n\n :returns: New H2OFrame equal to elementwise square root of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.sqrt()\n \"\"\"\n return self._unop(\"sqrt\")\n\n\n def trunc(self):\n \"\"\"\n Apply the numeric truncation function.\n\n ``trunc(x)`` 
is the integer obtained from ``x`` by dropping its decimal tail. This is equal to ``floor(x)``\n if ``x`` is positive, and ``ceil(x)`` if ``x`` is negative. Truncation is also called \"rounding towards zero\".\n\n :returns: new H2OFrame of truncated values of the original frame.\n\n :examples:\n\n >>> import math\n >>> import numpy as np\n >>> from random import randrange\n >>> row_num = randrange(1,10)\n >>> col_num = randrange(1,10)\n >>> length_out_r = math.ceil(0.78*row_num)\n >>> length_out_c = math.ceil(col_num*0.4)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.trunc()\n \"\"\"\n return self._unop(\"trunc\", rtype=\"int\")\n\n\n def ceil(self):\n \"\"\"\n Apply the ceiling function to the current frame.\n\n ``ceil(x)`` is the smallest integer greater or equal to ``x``.\n\n :returns: new H2OFrame of ceiling values of the original frame.\n\n :examples:\n\n >>> from random import randrange\n >>> import math\n >>> import numpy as np\n >>> row_num = randrange(1,10)\n >>> col_num = randrange(1,10)\n >>> length_out_r = math.ceil(0.78*row_num)\n >>> length_out_c = math.ceil(col_num*0.4)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n \"\"\"\n return self._unop(\"ceiling\", rtype=\"int\")\n\n\n def floor(self):\n \"\"\"\n Apply the floor function to the current frame. ``floor(x)`` is the largest integer smaller or equal to ``x``.\n\n :returns: new H2OFrame of floor values of the original frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame\n >>> frame.floor()\n \"\"\"\n return self._unop(\"floor\", rtype=\"int\")\n\n\n def log(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise natural logarithm of the current frame.\n \n :returns: New H2OFrame equal to elementwise natural logarithm of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.log()\n \"\"\"\n return self._unop(\"log\")\n\n\n def log10(self):\n \"\"\"\n Create new H2OFrame equal to elementwise decimal logarithm of the current frame.\n \n :returns: New H2OFrame equal to elementwise decimal logarithm of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.log10()\n \"\"\"\n return self._unop(\"log10\")\n\n\n def log1p(self):\n \"\"\"\n Create a new H2Oframe equal to elementwise ``ln(1 + x)`` for each ``x`` in the current frame.\n \n :returns: New H2OFrame equals to elementwise ``ln(1 + x)`` for each ``x`` in the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.log1p()\n \"\"\"\n return self._unop(\"log1p\")\n\n\n def log2(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise binary logarithm of the current frame.\n \n :returns: New H2OFrame equal to elementwise binary logarithm of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.log2()\n \"\"\"\n return self._unop(\"log2\")\n\n\n def exp(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise exponent (i.e. ``e^x``) of the current frame.\n\n :returns: New H2OFrame equals to elementwise exponent (i.e. 
``e^x``) of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.exp()\n \"\"\"\n return self._unop(\"exp\")\n\n\n def expm1(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise exponent minus 1 (i.e. ``e^x - 1``) of the current frame.\n\n :returns: New H2OFrame equal to elementwise exponent minus 1 (i.e. ``e^x - 1``) of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.expm1()\n \"\"\"\n return self._unop(\"expm1\")\n\n\n def gamma(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise gamma function of the current frame.\n \n :returns: new H2OFrame equal to elementwise gamma function of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.gamma()\n \"\"\"\n return self._unop(\"gamma\")\n\n\n def lgamma(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise logarithm of the gamma function of the current frame.\n \n :returns: New H2OFrame equal to elementwise logarithm of the gamma function of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.lgamma()\n \"\"\"\n return self._unop(\"lgamma\")\n\n\n def digamma(self):\n \"\"\"\n Create a new H2OFrame equal to elementwise digamma function of the current frame.\n \n :returns: New H2OFrame equal to elementwise digamma function of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.digamma()\n \"\"\"\n return self._unop(\"digamma\")\n\n\n def trigamma(self):\n \"\"\"\n Create a new H2OFrame equal to the elementwise trigamma function of the current frame.\n \n :returns: new H2OFrame equal to elementwise trigamma function of the current frame.\n\n :examples:\n\n >>> python_obj = [1, 2, 2.5, -100.9, 0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.trigamma()\n \"\"\"\n return self._unop(\"trigamma\")\n\n\n @staticmethod\n def moment(year=None, month=None, day=None, hour=None, minute=None, second=None, msec=None, date=None, time=None):\n \"\"\"\n Create a time column from individual components.\n\n Each parameter should be either an integer, or a single-column H2OFrame\n containing the corresponding time parts for each row.\n\n The \"date\" part of the timestamp can be specified using either the tuple ``(year, month, day)``, or an\n explicit ``date`` parameter. The \"time\" part of the timestamp is optional, but can be specified either via\n the ``time`` parameter, or via the ``(hour, minute, second, msec)`` tuple.\n\n :param year: the year part of the constructed date\n :param month: the month part of the constructed date\n :param day: the day-of-the-month part of the constructed date\n :param hour: the hours part of the constructed date\n :param minute: the minutes part of the constructed date\n :param second: the seconds part of the constructed date\n :param msec: the milliseconds part of the constructed date\n :param date date: construct the timestamp from Python's native ``datetime.date`` (or ``datetime.datetime``)\n object. If the object passed is of type ``date``, then you can specify the time part using either the\n ``time`` argument, or ``hour`` ... ``msec`` arguments (but not both). 
If the object passed is of type\n ``datetime``, then no other arguments can be provided.\n :param time time: construct the timestamp from this Python's native ``datetime.time`` object. This argument\n cannot be used alone, it should be supplemented with either ``date`` argument, or ``year`` ... ``day``\n tuple.\n\n :returns: H2OFrame with one column containing the date constructed from the provided arguments.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df[\"C1\"].moment(year=df[\"C1\"].year(),\n ... month=df[\"C1\"].month(),\n ... day=df[\"C1\"].day(),\n ... hour=df[\"C1\"].hour(),\n ... minute=df[\"C1\"].minute(),\n ... second=df[\"C1\"].second())\n \"\"\"\n assert_is_type(date, None, datetime.date, numpy_datetime, pandas_timestamp)\n assert_is_type(time, None, datetime.time)\n assert_is_type(year, None, int, H2OFrame)\n assert_is_type(month, None, int, H2OFrame)\n assert_is_type(day, None, int, H2OFrame)\n assert_is_type(hour, None, int, H2OFrame)\n assert_is_type(minute, None, int, H2OFrame)\n assert_is_type(second, None, int, H2OFrame)\n assert_is_type(msec, None, int, H2OFrame)\n if time is not None:\n if hour is not None or minute is not None or second is not None or msec is not None:\n raise H2OValueError(\"Arguments hour, minute, second, msec cannot be used together with `time`.\")\n hour = time.hour\n minute = time.minute\n second = time.second\n msec = time.microsecond // 1000\n if date is not None:\n if is_type(date, pandas_timestamp):\n date = date.to_pydatetime()\n if is_type(date, numpy_datetime):\n date = date.astype(\"M8[ms]\").astype(\"O\")\n if year is not None or month is not None or day is not None:\n raise H2OValueError(\"Arguments year, month and day cannot be used together with `date`.\")\n year = date.year\n month = date.month\n day = date.day\n if isinstance(date, datetime.datetime):\n if time is not None:\n raise H2OValueError(\"Argument `time` cannot be used together with `date` of datetime type.\")\n if hour is not None or minute is not None or second is not None or msec is not None:\n raise H2OValueError(\"Arguments hour, minute, second, msec cannot be used together with `date` \"\n \"of datetime type.\")\n hour = date.hour\n minute = date.minute\n second = date.second\n msec = date.microsecond // 1000\n if year is None or month is None or day is None:\n raise H2OValueError(\"Either arguments (`year`, `month` and `day`) or the `date` are required.\")\n if hour is None: hour = 0\n if minute is None: minute = 0\n if second is None: second = 0\n if msec is None: msec = 0\n\n local_vars = locals()\n res_nrows = None\n for n in [\"year\", \"month\", \"day\", \"hour\", \"minute\", \"second\", \"msec\"]:\n x = local_vars[n]\n if isinstance(x, H2OFrame):\n if x.ncols != 1:\n raise H2OValueError(\"Argument `%s` is a frame with more than 1 column\" % n)\n if x.type(0) not in {\"int\", \"real\"}:\n raise H2OValueError(\"Column `%s` is not numeric (type = %s)\" % (n, x.type(0)))\n if res_nrows is None:\n res_nrows = x.nrows\n if x.nrows == 0 or x.nrows != res_nrows:\n raise H2OValueError(\"Incompatible column `%s` having %d rows\" % (n, x.nrows))\n if res_nrows is None:\n res_nrows = 1\n res = H2OFrame._expr(ExprNode(\"moment\", year, month, day, hour, minute, second, msec))\n res._ex._cache._names = [\"name\"]\n res._ex._cache._types = {\"name\": \"time\"}\n 
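# Record the known shape of the result in its local cache: res_nrows was taken above from the frame-valued arguments (which must all agree on row count) or defaults to 1 when every part is a scalar,\n # and the output is always a single time column, so the shape does not have to be fetched from the backend.\n 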
res._ex._cache._nrows = res_nrows\n res._ex._cache._ncols = 1\n return res\n\n\n def unique(self, include_nas=False):\n \"\"\"\n Extract the unique values in the column.\n \n :param include_nas: If set to true, NAs are included. False (turned off) by default.\n :returns: H2OFrame of just the unique values in the column.\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.randint(-5,5, (100,1))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.unique()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"unique\", self, include_nas))\n\n\n def levels(self):\n \"\"\"\n Get the factor levels.\n\n :returns: A list of lists, one list per column, of levels.\n\n :examples:\n\n >>> import numpy as np\n >>> from random import randrange\n >>> python_lists = np.random.randint(-2,2, (10000,2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists,\n ... column_types=['enum', 'enum'])\n >>> h2oframe.levels()\n \"\"\"\n lol = H2OFrame._expr(expr=ExprNode(\"levels\", self)).as_data_frame(False)\n lol.pop(0) # Remove column headers\n lol = list(zip(*lol))\n return [[ll for ll in l if ll != ''] for l in lol]\n\n\n def nlevels(self):\n \"\"\"\n Get the number of factor levels for each categorical column.\n\n :returns: A list of the number of levels per column.\n\n :examples:\n\n >>> python_lists = np.random.randint(-2,2, (10000,2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists,\n ... column_types=['enum', 'enum'])\n >>> h2oframe.nlevels()\n \"\"\"\n levels = self.levels()\n return [len(l) for l in levels] if levels else 0\n\n\n def set_level(self, level):\n \"\"\"\n A method to set all column values to one of the levels.\n\n :param str level: The level at which the column will be set (a string)\n\n :returns: H2OFrame with entries set to the desired level.\n\n :examples:\n\n >>> import numpy as np\n >>> import random\n >>> python_lists = np.random.randint(-5,5, (10000, 2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> newFrame = h2oframe.asfactor()\n >>> allLevels = newFrame.levels()\n >>> lastLevel = allLevels[0][len(allLevels[0])-1]\n >>> newFrame[0] = newFrame[0].set_level(level=lastLevel)\n >>> firstLevel = allLevels[1][0]\n >>> newFrame[1] = newFrame[1].set_level(level=firstLevel)\n >>> newFrame\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"setLevel\", self, level), cache=self._ex._cache)\n\n\n def set_levels(self, levels):\n \"\"\"\n Replace the levels of a categorical column.\n\n New levels must be aligned with the old domain. This call has copy-on-write semantics.\n\n :param List[str] levels: A list of strings specifying the new levels. 
The number of new\n levels must match the number of old levels.\n :returns: A single-column H2OFrame with the desired levels.\n\n :examples:\n\n >>> import numpy as np\n >>> import random\n >>> python_lists = np.random.randint(-5,5, (10000, 2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> newFrame = h2oframe.asfactor()\n >>> allLevels = newFrame.levels()\n >>> newLevel0 = random.sample(allLevels[0], len(allLevels[0]))\n >>> newLevel1 = random.sample(allLevels[1], len(allLevels[1]))\n >>> newFrame[0] = newFrame[0].set_levels(levels=newLevel0)\n >>> newFrame[1] = newFrame[1].set_levels(levels=newLevel1)\n >>> newFrame\n \"\"\"\n assert_is_type(levels, [str])\n return H2OFrame._expr(expr=ExprNode(\"setDomain\", self, False, levels), cache=self._ex._cache)\n\n\n def rename(self, columns=None):\n \"\"\"\n Change names of columns in the frame.\n\n Dict key is an index or name of the column whose name is to be set.\n Dict value is the new name of the column.\n\n :param columns: dict-like transformations to apply to the column names\n :returns: Renamed columns\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris\n >>> name = iris.rename(columns={'C2':'C1',\n ... 'C1':'C2',\n ... 'C3':'X3',\n ... 'F0':'X0',\n ... 'C3':'Y3'})\n >>> name\n \"\"\"\n assert_is_type(columns, None, dict)\n new_names = self.names\n ncols = self.ncols\n\n for col, name in columns.items():\n col_index = None\n if is_type(col, int) and (-ncols <= col < ncols):\n col_index = (col + ncols) % ncols # handle negative indices\n elif is_type(col, str) and col in self.names:\n col_index = self.names.index(col) # lookup the name\n\n if col_index is not None:\n new_names[col_index] = name\n\n return self.set_names(new_names)\n\n\n def set_names(self, names):\n \"\"\"\n Change names of all columns in the frame.\n\n :param List[str] names: The list of new names for every column in the frame.\n :returns: Frame with all new column names.\n\n :examples:\n\n >>> import numpy as np\n >>> import random\n >>> row_num = random.randrange(1,10)\n >>> col_num = random.randrange(1,10)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> newNames = random.sample(h2oframe.names, col_num)\n >>> h2oframe.set_names(names=newNames)\n \"\"\"\n assert_is_type(names, [str])\n assert_satisfies(names, len(names) == self.ncol)\n self._ex = ExprNode(\"colnames=\", self, range(self.ncol), names) # Update-in-place, but still lazy\n return self\n\n\n def set_name(self, col=None, name=None):\n \"\"\"\n Set a new name for a column.\n\n :param col: index or name of the column whose name is to be set; may be skipped for 1-column frames\n :param name: the new name of the column\n :returns: The renamed column.\n\n :examples:\n\n >>> import numpy as np\n >>> import random\n >>> row_num = random.randrange(1,10)\n >>> col_num = random.randrange(1,10)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> newNames = random.sample(h2oframe.names, col_num)\n >>> h2oframe.set_names(names=newNames)\n >>> newName = \"Dolphine\"\n >>> h2oframe.set_name(col=0, name=newName)\n >>> h2oframe\n \"\"\"\n assert_is_type(col, None, int, str)\n assert_is_type(name, str)\n ncols = self.ncols\n\n col_index = None\n if is_type(col, int):\n if not(-ncols <= col < ncols):\n raise H2OValueError(\"Index %d is out of bounds for a frame with %d columns\" % (col, 
ncols))\n col_index = (col + ncols) % ncols # handle negative indices\n elif is_type(col, str):\n if col not in self.names:\n raise H2OValueError(\"Column %s doesn't exist in the frame.\" % col)\n col_index = self.names.index(col) # lookup the name\n else:\n assert col is None\n if ncols != 1:\n raise H2OValueError(\"The frame has %d columns; please specify which one to rename\" % ncols)\n col_index = 0\n if name != self.names[col_index] and name in self.types:\n raise H2OValueError(\"Column '%s' already exists in the frame\" % name)\n\n oldname = self.names[col_index]\n old_cache = self._ex._cache\n self._ex = ExprNode(\"colnames=\", self, col_index, name) # Update-in-place, but still lazy\n self._ex._cache.fill_from(old_cache)\n if self.names is None:\n self._frame()._ex._cache.fill()\n else:\n self._ex._cache._names = self.names[:col_index] + [name] + self.names[col_index + 1:]\n self._ex._cache._types[name] = self._ex._cache._types.pop(oldname)\n return\n\n\n def as_date(self, format):\n \"\"\"\n Convert the frame (containing strings / categoricals) into the ``date`` format.\n\n :param str format: the format string (e.g. \"%Y-%m-%d\")\n :returns: new H2OFrame with \"int\" column types\n\n :examples:\n\n >>> hdf = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/v-11-eurodate.csv\")\n >>> hdf[\"ds5\"].as_date(\"%d.%m.%y %H:%M\")\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"as.Date\", self, format), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def cumsum(self, axis=0):\n \"\"\"\n Compute cumulative sum over rows / columns of the frame.\n\n :param int axis: 0 for column-wise, 1 for row-wise\n :returns: new H2OFrame with cumulative sums of the original frame.\n\n :examples:\n\n >>> foo = h2o.H2OFrame([[x,y] for x,\n ... y in zip(list(range(10)),\n ... list(range(9,-1,-1)))])\n >>> cumsum1 = foo[0].cumsum()\n >>> cumsum1\n >>> cumsum2 = foo[1].cumsum()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"cumsum\", self, axis), cache=self._ex._cache)\n\n\n def cumprod(self, axis=0):\n \"\"\"\n Compute cumulative product over rows / columns of the frame.\n\n :param int axis: 0 for column-wise, 1 for row-wise\n :returns: new H2OFrame with cumulative products of the original frame.\n\n :examples:\n\n >>> foo = h2o.H2OFrame([[x,y] for x,\n ... y in zip(list(range(10)),\n ... list(range(9,-1,-1)))])\n >>> cumprod1 = foo[1:10,0].cumprod()\n >>> cumprod1\n >>> cumprod2 = foo[0:9,1].cumprod()\n >>> cumprod2\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"cumprod\", self, axis), cache=self._ex._cache)\n\n\n def cummin(self, axis=0):\n \"\"\"\n Compute cumulative minimum over rows / columns of the frame.\n\n :param int axis: 0 for column-wise, 1 for row-wise\n :returns: new H2OFrame with running minimums of the original frame.\n\n :examples:\n\n >>> foo = h2o.H2OFrame([[x,y] for x,\n ... y in zip(list(range(10)),\n ... list(range(9,-1,-1)))])\n >>> cummin1 = foo[0].cummin()\n >>> cummin1\n >>> cummin2 = foo[1].cummin()\n >>> cummin2\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"cummin\", self, axis), cache=self._ex._cache)\n\n\n def cummax(self, axis=0):\n \"\"\"\n Compute cumulative maximum over rows / columns of the frame.\n\n :param int axis: 0 for column-wise, 1 for row-wise\n :returns: new H2OFrame with running maximums of the original frame.\n\n :examples:\n\n >>> foo = h2o.H2OFrame([[x,y] for x,\n ... y in zip(list(range(10)),\n ... 
list(range(9,-1,-1)))])\n >>> cummax1 = foo[0].cummax()\n >>> cummax1\n >>> cummax2 = foo[1].cummax()\n >>> cummax2\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"cummax\", self, axis), cache=self._ex._cache)\n\n\n def prod(self, na_rm=False):\n \"\"\"\n Compute the product of all values across all rows in a single column H2O frame. If you apply\n this command on a multi-column H2O frame, the answer may not be correct.\n\n :param bool na_rm: If True then NAs will be ignored during the computation.\n :returns: product of all values in the frame (a float)\n\n :examples:\n\n >>> import random\n >>> import numpy as np\n >>> data = [[random.uniform(1,10)] for c in range(10)]\n >>> h2o_data = h2o.H2OFrame(data)\n >>> np_data = np.array(data)\n >>> h2o_data.prod(na_rm=True)\n >>> np.prod(np_data)\n \"\"\"\n return ExprNode(\"prod.na\" if na_rm else \"prod\", self)._eager_scalar()\n\n\n def any(self):\n \"\"\"\n Determine whether any element in the frame is either True, non-zero, or NA.\n\n :returns: (bool) True if any element in the frame is either True, non-zero or NA.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.any()\n \"\"\"\n return bool(ExprNode(\"any\", self)._eager_scalar())\n\n\n def any_na_rm(self):\n \"\"\"\n Determine whether any value in the frame is non-zero.\n \n :returns: (bool) True if any value in the frame is non-zero (disregarding all NAs).\n\n :example:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.any_na_rm()\n \"\"\"\n return bool(ExprNode(\"any.na\", self)._eager_scalar())\n\n\n def all(self):\n \"\"\"\n Determine whether every element in the frame is either True, non-zero, or NA.\n\n :returns: (bool) True if every element in the frame is either True, non-zero or NA.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.all()\n \"\"\"\n return bool(ExprNode(\"all\", self)._eager_scalar())\n\n\n def isnumeric(self):\n \"\"\"\n Test which columns in the frame are numeric.\n\n :returns: a list of True/False indicating for each column in the frame whether it is numeric.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris.summary()\n # Look at the column headers: [0:3] are numeric; [4] is not\n >>> iris[0].isnumeric()\n # Return as True\n >>> iris[4].isnumeric()\n # Return as False\n \"\"\"\n return [bool(o) for o in ExprNode(\"is.numeric\", self)._eager_scalar()]\n\n\n def isstring(self):\n \"\"\"\n Test which columns in the frame are string.\n\n :returns: a list of True/False indicating for each column in the frame whether it is string.\n\n :examples:\n\n >>> import numpy as np\n >>> from random import randrange\n >>> row_num = randrange(1,10)\n >>> col_num = randrange(1,10)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.isstring()\n >>> newFrame = h2oframe.asfactor().ascharacter()\n >>> newFrame.isstring()\n \"\"\"\n return [bool(o) for o in ExprNode(\"is.character\", self)._eager_scalar()]\n\n\n def isin(self, item):\n \"\"\"\n Test whether elements of an H2OFrame are contained in the ``item``.\n\n :param items: An item or a list of items to compare the H2OFrame against.\n\n :returns: An H2OFrame of 0s and 1s showing whether each element in the original H2OFrame is contained in item.\n\n :examples:\n\n >>> fr = h2o.create_frame(rows=100, cols=1, 
categorical_fraction=1, factors=3)\n >>> f2 = ~fr[\"C1\"].isin([\"c0.l0\", \"c0.l2\"])\n >>> f2\n \"\"\"\n if is_type(item, list, tuple, set):\n if self.ncols == 1 and (self.type(0) == 'str' or self.type(0) == 'enum'):\n return self.match(item)\n else:\n return functools.reduce(H2OFrame.__or__, (self == i for i in item))\n else:\n return self == item\n\n\n def kfold_column(self, n_folds=3, seed=-1):\n \"\"\"\n Build a fold assignments column for cross-validation.\n\n This method will produce a column having the same data layout as the source frame.\n\n :param int n_folds: An integer specifying the number of validation sets to split the training data into.\n :param int seed: Seed for random numbers as fold IDs are randomly assigned.\n\n :returns: A single column H2OFrame with the fold assignments.\n\n :examples:\n\n >>> from random import randrange\n >>> import numpy as np\n >>> python_lists = np.random.randint(-5,5, (1000, 2))\n >>> k = randrange(2,10)\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> clist = h2oframe.kfold_column(n_folds=k, seed=12345)\n >>> clist\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"kfold_column\", self, n_folds, seed))._frame() # want this to be eager!\n\n\n def modulo_kfold_column(self, n_folds=3):\n \"\"\"\n Build a fold assignments column for cross-validation.\n\n Rows are assigned a fold according to the current row number modulo ``n_folds``.\n\n :param int n_folds: An integer specifying the number of validation sets to split the training data into.\n :returns: A single-column H2OFrame with the fold assignments.\n\n :examples:\n\n >>> from random import randrange\n >>> import numpy as np\n >>> python_lists = np.random.randint(-5,5, (1000, 2))\n >>> k = randrange(2,10)\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.modulo_kfold_column(n_folds=k)\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"modulo_kfold_column\", self, n_folds))._frame() # want this to be eager!\n\n\n def stratified_kfold_column(self, n_folds=3, seed=-1):\n \"\"\"\n Build a fold assignment column with the constraint that each fold has the same class\n distribution as the original column.\n\n :param int n_folds: The number of folds to build.\n :param int seed: A seed for the random number generator.\n\n :returns: A single column H2OFrame with the fold assignments.\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.randint(-3,3, (10000,2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists).asfactor()\n >>> h2oframe[1].stratified_kfold_column(n_folds=3, seed=-1)\n \"\"\"\n return H2OFrame._expr(\n expr=ExprNode(\"stratified_kfold_column\", self, n_folds, seed))._frame() # want this to be eager!\n\n\n def structure(self):\n \"\"\"\n Compactly display the internal structure of an H2OFrame.\n\n :returns: Compact display of the internal structure of an H2OFrame.\n\n :examples:\n\n >>> frame = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> frame.structure()\n \"\"\"\n df = self.as_data_frame(use_pandas=False)\n cn = df.pop(0)\n nr = self.nrow\n nc = self.ncol\n width = max([len(c) for c in cn])\n isfactor = self.isfactor()\n numlevels = self.nlevels()\n lvls = self.levels()\n print(\"H2OFrame: '{}' \\nDimensions: {} obs. 
of {} variables\".format(self.frame_id, nr, nc))\n for i in range(nc):\n print(\"$ {} {}: \".format(cn[i], ' ' * (width - max(0, len(cn[i])))), end=' ')\n if isfactor[i]:\n nl = numlevels[i]\n print(\"Factor w/ {} level(s) {} \".format(nl, '\"' + '\",\"'.join(lvls[i]) + '\"'), end='\\n')\n else:\n print(\"num {}\".format(\" \".join(it[0] if it else \"nan\" for it in h2o.as_list(self[:10, i], False)[1:])))\n\n def as_data_frame(self, use_pandas=True, header=True):\n \"\"\"\n Obtain the dataset as a python-local object.\n\n :param bool use_pandas: If True (default) then return the H2OFrame as a pandas DataFrame (requires that the\n ``pandas`` library was installed). If False, then return the contents of the H2OFrame as plain nested\n list, in a row-wise order.\n :param bool header: If True (default), then column names will be appended as the first row in list\n\n :returns: A python object (a list of lists of strings, each list is a row, if use_pandas=False, otherwise\n a pandas DataFrame) containing this H2OFrame instance's data.\n\n :examples:\n\n >>> airlines= h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip\")\n >>> airlines[\"Year\"]= airlines[\"Year\"].asfactor()\n >>> airlines[\"Month\"]= airlines[\"Month\"].asfactor()\n >>> airlines[\"DayOfWeek\"] = airlines[\"DayOfWeek\"].asfactor()\n >>> airlines[\"Cancelled\"] = airlines[\"Cancelled\"].asfactor()\n >>> airlines['FlightNum'] = airlines['FlightNum'].asfactor()\n >>> df = airlines.as_data_frame()\n >>> df\n \"\"\" \n if can_use_pandas() and use_pandas:\n import pandas\n return pandas.read_csv(StringIO(self.get_frame_data()), low_memory=False, skip_blank_lines=False)\n from h2o.utils.csv.readers import reader\n frame = [row for row in reader(StringIO(self.get_frame_data()))]\n if not header:\n frame.pop(0)\n return frame\n\n def save_to_hive(self, jdbc_url, table_name, format=\"csv\", table_path=None, tmp_path=None):\n \"\"\"\n Save contents of this data frame into a Hive table.\n \n :param jdbc_url: Hive JDBC connection URL.\n :param table_name: Table name into which to store the data. The table must not exist as it will be created\n to match the structure of the the frame. 
The user must be allowed to create tables.\n :param format: Storage format of created Hive table, can be either ``csv`` (default) or ``parquet``.\n :param table_path: If specified, the table will be created as an external table and this is where the data \n will be stored.\n :param tmp_path: Path where to store temporary data.\n\n :examples:\n\n >>> airlines= h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip\")\n >>> airlines[\"Year\"] = airlines[\"Year\"].asfactor()\n >>> airlines.save_to_hive(\"jdbc:hive2://hive-server:10000/default\", \"airlines\")\n \"\"\"\n assert_is_type(jdbc_url, str)\n assert_is_type(table_name, str)\n assert_is_type(format, Enum(\"csv\", \"parquet\"))\n assert_is_type(table_path, str, None)\n assert_is_type(tmp_path, str, None)\n p = {\n \"frame_id\": self.frame_id,\n \"jdbc_url\": jdbc_url,\n \"table_name\": table_name,\n \"format\": format,\n \"table_path\": table_path,\n \"tmp_path\": tmp_path\n }\n h2o.api(\"POST /3/SaveToHiveTable\", data=p)\n\n def get_frame_data(self):\n \"\"\"\n Get frame data as a string in csv format.\n\n This will create a multiline string, where each line will contain a separate row of frame's data, with\n individual values separated by commas.\n \n :returns: Frame data as a string in csv format.\n \n :examples:\n \n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris.get_frame_data()\n \"\"\"\n return h2o.api(\n \"GET /3/DownloadDataset\", \n data={\"frame_id\": self.frame_id, \"hex_string\": False, \"escape_quotes\": True}\n )\n\n def save(self, path, force=True):\n \"\"\"\n Store frame data in H2O's native format.\n\n This will store this frame's data to a file-system location in H2O's native binary format. Stored data can be\n loaded only with a cluster of the same size and same version the the one which wrote the data. The provided\n directory must be accessible from all nodes (HDFS, NFS). \n \n :param path: a filesystem location where to write frame data\n :param force: overwrite already existing files (defaults to true)\n :returns: Frame data as a string in csv format.\n \n :examples:\n \n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris.save(\"hdfs://namenode/h2o_data\")\n \"\"\"\n H2OJob(h2o.api(\n \"POST /3/Frames/%s/save\" % self.frame_id, \n data={\"dir\": path, \"force\": force}\n ), \"Save frame data\").poll()\n\n def __getitem__(self, item):\n \"\"\"\n Frame slicing, supports row and column slicing.\n\n :param item: selector of a subframe. This can be one of the following:\n\n - an int, indicating selection of a single column at the specified index (0-based)\n - a string, selecting a column with the given name\n - a list of ints or strings, selecting several columns with the given indices / names\n - a slice, selecting columns with the indices within this slice\n - a single-column boolean frame, selecting rows for which the selector is true\n - a 2-element tuple, where the first element is a row selector, and the second element is the\n column selector. Here the row selector may be one of: an int, a list of ints, a slice, or\n a boolean frame. The column selector is similarly one of: an int, a list of ints, a string,\n a list of strings, or a slice. 
It is also possible to use the empty slice (``:``) to select\n all elements within one of the dimensions.\n\n :returns: A new frame comprised of some rows / columns of the source frame.\n\n :examples:\n >>> fr[2] # All rows, 3rd column\n >>> fr[-2] # All rows, 2nd column from end\n >>> fr[:, -1] # All rows, last column\n >>> fr[0:5, :] # First 5 rows, all columns\n >>> fr[fr[0] > 1, :] # Only rows where first cell is greater than 1, all columns\n >>> fr[[1, 5, 6]] # Columns 2, 6, and 7\n >>> fr[0:50, [1,2,3]] # First 50 rows, columns 2, 3, and 4\n \"\"\"\n # Select columns based on a string, a list of strings, an int or a slice.\n # Note that the python column selector handles the case of negative\n # selections, or out-of-range selections - without having to compute\n # self._ncols in the front-end - which would force eager evaluation just to\n # range check in the front-end.\n new_ncols = -1\n new_nrows = -1\n new_names = None\n new_types = None\n fr = None\n flatten = False\n if isinstance(item, slice):\n item = normalize_slice(item, self.ncols)\n if is_type(item, str, int, list, slice):\n new_ncols, new_names, new_types, item = self._compute_ncol_update(item)\n new_nrows = self.nrow\n fr = H2OFrame._expr(expr=ExprNode(\"cols_py\", self, item))\n elif isinstance(item, (ExprNode, H2OFrame)):\n new_ncols = self.ncol\n new_names = self.names\n new_types = self.types\n new_nrows = -1 # have a \"big\" predicate column -- update cache later on...\n fr = H2OFrame._expr(expr=ExprNode(\"rows\", self, item))\n elif isinstance(item, tuple):\n rows, cols = item\n allrows = allcols = False\n if isinstance(cols, slice):\n cols = normalize_slice(cols, self.ncols)\n allcols = cols == slice(0, self.ncols, 1)\n if isinstance(rows, slice):\n rows = normalize_slice(rows, self.nrows)\n allrows = rows == slice(0, self.nrows, 1)\n\n if allrows and allcols: return self # fr[:,:] -> all rows and columns.. return self\n if allrows:\n new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols)\n new_nrows = self.nrow\n fr = H2OFrame._expr(expr=ExprNode(\"cols_py\", self, cols)) # fr[:,cols] -> really just a column slice\n if allcols:\n new_ncols = self.ncols\n new_names = self.names\n new_types = self.types\n new_nrows, rows = self._compute_nrow_update(rows)\n fr = H2OFrame._expr(expr=ExprNode(\"rows\", self, rows)) # fr[rows,:] -> really just a row slices\n\n if not allrows and not allcols:\n new_ncols, new_names, new_types, cols = self._compute_ncol_update(cols)\n new_nrows, rows = self._compute_nrow_update(rows)\n fr = H2OFrame._expr(expr=ExprNode(\"rows\", ExprNode(\"cols_py\", self, cols), rows))\n\n flatten = is_type(rows, int) and is_type(cols, str, int)\n else:\n raise ValueError(\"Unexpected __getitem__ selector: \" + str(type(item)) + \" \" + str(item.__class__))\n\n assert fr is not None\n # Pythonic: if the row & col selector turn into ints (or a single col\n # name), then extract the single element out of the Frame. Otherwise\n # return a Frame, EVEN IF the selectors are e.g. 
slices-of-1-value.\n if flatten:\n return fr.flatten()\n\n fr._ex._cache.ncols = new_ncols\n fr._ex._cache.nrows = new_nrows\n fr._ex._cache.names = new_names\n fr._ex._cache.types = new_types\n fr._is_frame = self._is_frame\n return fr\n\n def _compute_ncol_update(self, item): # computes new ncol, names, and types\n new_ncols = -1\n if isinstance(item, list):\n new_ncols = len(item)\n if _is_str_list(item):\n new_types = {k: self.types[k] for k in item}\n new_names = item\n else:\n new_names = [self.names[i] for i in item]\n new_types = {name: self.types[name] for name in new_names}\n elif isinstance(item, slice):\n assert slice_is_normalized(item)\n new_names = self.names[item]\n new_types = {name: self.types[name] for name in new_names}\n elif is_type(item, str, int):\n new_ncols = 1\n if is_type(item, str):\n new_names = [item]\n new_types = None if item not in self.types else {item: self.types[item]}\n else:\n new_names = [self.names[item]]\n new_types = {new_names[0]: self.types[new_names[0]]}\n else:\n raise ValueError(\"Unexpected type: \" + str(type(item)))\n return (new_ncols, new_names, new_types, item)\n\n\n def _compute_nrow_update(self, item):\n if isinstance(item, list):\n new_nrows = len(item)\n elif isinstance(item, slice):\n assert slice_is_normalized(item)\n new_nrows = (item.stop - item.start + item.step - 1) // item.step\n elif isinstance(item, H2OFrame):\n new_nrows = -1\n else:\n new_nrows = 1\n return [new_nrows, item]\n\n\n def __setitem__(self, item, value):\n \"\"\"\n Replace, update or add column(s) in an H2OFrame.\n\n :param item: A 0-based index of a column, or a column name, or a list of column names, or a slice.\n Alternatively, this may also be a two-element tuple where the first element in the tuple is a row selector,\n and the second element is a row selector. Finally, this can also be a boolean frame indicating which\n rows/columns to modify. If ``item`` is a column name that does not exist in the frame, then a new column\n will be appended to the current frame.\n :param value: The value replacing elements at positions given by ``item``. 
This can be either a constant, or\n another frame.\n \"\"\"\n # TODO: add far stronger type checks, so that we never run in a situation where the server has to\n # tell us that we requested an illegal operation.\n assert_is_type(item, str, int, tuple, list, H2OFrame)\n assert_is_type(value, None, numeric, str, H2OFrame)\n col_expr = None\n row_expr = None\n colname = None # When set, we are doing an append\n\n if is_type(item, str): # String column name, could be new or old\n if item in self.names:\n col_expr = self.names.index(item) # Update an existing column\n else:\n col_expr = self.ncols\n colname = item # New, append\n elif is_type(item, int):\n if not(-self.ncols <= item < self.ncols):\n raise H2OValueError(\"Incorrect column index: %d\" % item)\n col_expr = item # Column by number\n if col_expr < 0:\n col_expr += self.ncols\n elif isinstance(item, tuple): # Both row and col specifiers\n # Need more type checks\n row_expr = item[0]\n col_expr = item[1]\n if is_type(col_expr, str): # Col by name\n if col_expr not in self.names: # Append\n colname = col_expr\n col_expr = self.ncol\n elif is_type(col_expr, int):\n if not(-self.ncols <= col_expr < self.ncols):\n raise H2OValueError(\"Incorrect column index: %d\" % item)\n if col_expr < 0:\n col_expr += self.ncols\n elif isinstance(col_expr, slice): # Col by slice\n if col_expr.start is None and col_expr.stop is None:\n col_expr = slice(0, self.ncol) # Slice of all\n if isinstance(row_expr, slice):\n start = row_expr.start\n step = row_expr.step\n stop = row_expr.stop\n if start is None: start = 0\n if stop is None: stop = self.nrows\n row_expr = slice(start, stop, step)\n elif isinstance(item, H2OFrame):\n row_expr = item # Row slicing\n elif isinstance(item, list):\n col_expr = item\n\n if value is None: value = float(\"nan\")\n value_is_own_subframe = isinstance(value, H2OFrame) and self._is_frame_in_self(value)\n old_cache = self._ex._cache\n if colname is None:\n self._ex = ExprNode(\":=\", self, value, col_expr, row_expr)\n self._ex._cache.fill_from(old_cache)\n if isinstance(value, H2OFrame) and \\\n value._ex._cache.types_valid() and \\\n self._ex._cache.types_valid():\n self._ex._cache._types.update(value._ex._cache.types)\n else:\n self._ex._cache.types = None\n else:\n self._ex = ExprNode(\"append\", self, value, colname)\n self._ex._cache.fill_from(old_cache)\n self._ex._cache.names = self.names + [colname]\n self._ex._cache._ncols += 1\n if self._ex._cache.types_valid() and isinstance(value, H2OFrame) and value._ex._cache.types_valid():\n self._ex._cache._types[colname] = list(viewvalues(value._ex._cache.types))[0]\n else:\n self._ex._cache.types = None\n if value_is_own_subframe:\n value._ex = None # wipe out to keep ref counts correct\n\n\n def _is_frame_in_self(self, frame):\n if self._ex is frame._ex: return True\n if frame._ex._children is None: return False\n return any(self._is_expr_in_self(ch) for ch in frame._ex._children)\n\n def _is_expr_in_self(self, expr):\n if not isinstance(expr, ExprNode): return False\n if self._ex is expr: return True\n if expr._children is None: return False\n return any(self._is_expr_in_self(ch) for ch in expr._children)\n\n def drop(self, index, axis=1):\n \"\"\"\n Drop a single column or row or a set of columns or rows from a H2OFrame.\n\n Dropping a column or row is not in-place.\n Indices of rows and columns are zero-based.\n\n :param index: A list of column indices, column names, or row indices to drop; or\n a string to drop a single column by name; or an int to drop a single column 
by index.\n\n :param int axis: If 1 (default), then drop columns; if 0 then drop rows.\n\n :returns: a new H2OFrame with the respective dropped columns or rows. The original H2OFrame remains\n unchanged.\n\n :examples:\n\n >>> pros = h2o.import_file(\"http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip\")\n >>> nc = pros.ncol\n >>> nr = pros.nrow\n >>> dropped_col_int = pros.drop(0)\n >>> dropped_col_int\n \"\"\"\n if axis == 1:\n if not isinstance(index, list):\n #If input is a string, i.e., \"C1\":\n if is_type(index, str):\n #Check if index is an actual column(s) in the frame\n if index not in self.names:\n raise H2OValueError(\"Column(s) selected to drop are not in original frame: %r\" % index)\n index = self.names.index(index)\n #If input is an int indicating a column index, i.e., 3:\n elif is_type(index, int):\n #Check if index is an actual column index in the frame\n if index > self.ncol:\n raise H2OValueError(\"Column index selected to drop is not part of the frame: %r\" % index)\n if index < 0:\n raise H2OValueError(\"Column index selected to drop is not positive: %r\" % index)\n\n fr = H2OFrame._expr(expr=ExprNode(\"cols\", self, -(index + 1)), cache=self._ex._cache)\n fr._ex._cache.ncols -= 1\n fr._ex._cache.names = self.names[:index] + self.names[index + 1:]\n fr._ex._cache.types = {name: self.types[name] for name in fr._ex._cache.names}\n return fr\n\n elif isinstance(index, list):\n #If input is an int array indicating a column index, i.e., [3] or [1,2,3]:\n if is_type(index, [int]):\n if max(index) > self.ncol:\n raise H2OValueError(\"Column index selected to drop is not part of the frame: %r\" % index)\n if min(index) < 0:\n raise H2OValueError(\"Column index selected to drop is not positive: %r\" % index)\n index = [-(i + 1) for i in index]\n #If index is a string array, i.e., [\"C1\", \"C2\"]\n elif is_type(index, [str]):\n #Check if index is an actual column(s) in the frame\n if not set(index).issubset(self.names):\n raise H2OValueError(\"Column(s) selected to drop are not in original frame: %r\" % index)\n index = [-(self.names.index(i) + 1) for i in index]\n fr = H2OFrame._expr(expr=ExprNode(\"cols\", self, index), cache=self._ex._cache)\n fr._ex._cache.ncols -= len(index)\n fr._ex._cache.names = [i for i in self.names\n if self.names.index(i) not in list(map(lambda x: abs(x) - 1, index))]\n fr._ex._cache.types = {name: fr.types[name] for name in fr._ex._cache.names}\n\n else:\n raise ValueError(\"Invalid column index types. Must either be a list of all int indexes, \"\n \"a string list of all column names, a single int index, or\"\n \"a single string for dropping columns.\")\n return fr\n elif axis == 0:\n if is_type(index, [int]):\n #Check if index is an actual column index in the frame\n if max(index) > self.nrow:\n raise H2OValueError(\"Row index selected to drop is not part of the frame: %r\" % index)\n if min(index) < 0:\n raise H2OValueError(\"Row index selected to drop is not positive: %r\" % index)\n index = [-(x + 1) for x in index]\n fr = H2OFrame._expr(expr=ExprNode(\"rows\", self, index), cache=self._ex._cache)\n fr._ex._cache.nrows -= len(index)\n else:\n raise ValueError(\"Invalid row indexes. 
Must be a list of int row indexes to drop from the H2OFrame.\")\n return fr\n\n\n def pop(self, i):\n \"\"\"\n Pop a column from the H2OFrame at index i.\n\n :param i: The index (int) or name (str) of the column to pop.\n :returns: an H2OFrame containing the column dropped from the current frame; the current frame is modified\n in-place and loses the column.\n\n :examples:\n\n >>> prostate = h2o.import_file(\"http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip\")\n >>> nc = prostate.ncol\n >>> prostate\n >>> popped_col = prostate.pop(prostate.names[0])\n >>> prostate\n >>> popped_col\n \"\"\"\n if is_type(i, str): i = self.names.index(i)\n col = H2OFrame._expr(expr=ExprNode(\"cols\", self, i))\n old_cache = self._ex._cache\n self._ex = ExprNode(\"cols\", self, -(i + 1))\n self._ex._cache.ncols -= 1\n self._ex._cache.names = old_cache.names[:i] + old_cache.names[i + 1:]\n self._ex._cache.types = {name: old_cache.types[name] for name in self._ex._cache.names}\n self._ex._cache._data = None\n col._ex._cache.ncols = 1\n col._ex._cache.names = [old_cache.names[i]]\n return col\n\n\n def quantile(self, prob=None, combine_method=\"interpolate\", weights_column=None):\n \"\"\"\n Compute quantiles.\n\n :param List[float] prob: list of probabilities for which quantiles should be computed.\n :param str combine_method: for even samples this setting determines how to combine quantiles. This can be\n one of ``\"interpolate\"``, ``\"average\"``, ``\"low\"``, ``\"high\"``.\n :param weights_column: optional weights for each row. If not given, all rows are assumed to have equal\n importance. This parameter can be either the name of column containing the observation weights in\n this frame, or a single-column separate H2OFrame of observation weights.\n\n :returns: a new H2OFrame containing the quantiles and probabilities.\n\n :examples:\n\n >>> data = [[random.uniform(-10000,10000)] for c in range(1000)]\n >>> h2o_data = h2o.H2OFrame(data)\n >>> np_data = np.array(data)\n >>> h2o_data.quantile(prob=None,\n ... combine_method='interpolate',\n ... weights_column=None)\n \"\"\"\n if len(self) == 0: return self\n if prob is None: prob = [0.01, 0.1, 0.25, 0.333, 0.5, 0.667, 0.75, 0.9, 0.99]\n if weights_column is None:\n weights_column = \"_\"\n else:\n assert_is_type(weights_column, str, I(H2OFrame, lambda wc: wc.ncol == 1 and wc.nrow == self.nrow))\n if isinstance(weights_column, H2OFrame):\n merged = self.cbind(weights_column)\n weights_column = merged.names[-1]\n return H2OFrame._expr(expr=ExprNode(\"quantile\", merged, prob, combine_method, weights_column))\n return H2OFrame._expr(expr=ExprNode(\"quantile\", self, prob, combine_method, weights_column))\n\n\n def concat(self, frames, axis=1):\n \"\"\"\n Append multiple H2OFrames to this frame, column-wise or row-wise.\n\n :param List[H2OFrame] frames: list of frames that should be appended to the current frame.\n :param int axis: if 1 then append column-wise (default), if 0 then append row-wise.\n\n :returns: an H2OFrame of the combined datasets.\n\n :examples:\n\n >>> df1 = h2o.create_frame(integer_fraction=1,binary_fraction=0,\n ... categorical_fraction=0,seed=1)\n >>> df2 = h2o.create_frame(integer_fraction=1,binary_fraction=0,\n ... categorical_fraction=0,seed=2)\n >>> df3 = h2o.create_frame(integer_fraction=1,binary_fraction=0,\n ... categorical_fraction=0,seed=3)\n >>> df123 = df1.concat([df2,df3])\n \"\"\"\n if len(frames) == 0:\n raise ValueError(\"Input list of frames is empty! 
Nothing to concat.\")\n\n if axis == 1:\n df = self.cbind(frames)\n else:\n df = self.rbind(frames)\n return df\n\n\n def cbind(self, data):\n \"\"\"\n Append data to this frame column-wise.\n\n :param H2OFrame data: append columns of frame ``data`` to the current frame. You can also cbind a number,\n in which case it will get converted into a constant column.\n\n :returns: new H2OFrame with all frames in ``data`` appended column-wise.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris = iris.cbind(iris[4] == \"Iris-setosa\")\n >>> iris[5] = iris[5].asfactor()\n >>> iris.set_name(5,\"C6\")\n >>> iris = iris.cbind(iris[4] == \"Iris-virginica\")\n >>> iris[6] = iris[6].asfactor()\n >>> iris.set_name(6, name=\"C7\")\n >>> print(iris)\n \"\"\"\n assert_is_type(data, H2OFrame, numeric, [H2OFrame, numeric])\n frames = [data] if not isinstance(data, list) else data\n new_cols = list(self.columns)\n new_types = dict(self.types)\n for frame in frames:\n if isinstance(frame, H2OFrame):\n if frame.nrow != self.nrow:\n raise H2OValueError(\"Cannot bind a dataframe with %d rows to a data frame with %d rows: \"\n \"the number of rows should match\" % (frame.nrow, self.nrow))\n new_cols += frame.columns\n new_types.update(frame.types)\n else:\n new_cols += [None]\n unique_cols = set(new_cols)\n fr = H2OFrame._expr(expr=ExprNode(\"cbind\", self, *frames), cache=self._ex._cache)\n fr._ex._cache.ncols = len(new_cols)\n if len(new_cols) == len(unique_cols) and None not in unique_cols:\n fr._ex._cache.names = new_cols\n fr._ex._cache.types = new_types\n else:\n # Invalidate names and types since they contain duplicate / unknown names, and the server will choose those.\n fr._ex._cache.names = None\n fr._ex._cache.types = None\n return fr\n\n\n def rbind(self, data):\n \"\"\"\n Append data to this frame row-wise.\n\n :param data: an H2OFrame or a list of H2OFrame's to be combined with current frame row-wise.\n :returns: this H2OFrame with all frames in data appended row-wise.\n\n :examples:\n\n >>> frame = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars.csv\")\n >>> nrows = frame.nrow\n >>> nrows\n >>> frame2 = frame.rbind(frame)\n >>> nrows2 = frame2.nrow\n >>> nrows2\n \"\"\"\n assert_is_type(data, H2OFrame, [H2OFrame])\n frames = [data] if not isinstance(data, list) else data\n for frame in frames:\n if frame.ncol != self.ncol:\n raise H2OValueError(\"Cannot row-bind a dataframe with %d columns to a data frame with %d columns: \"\n \"the columns must match\" % (frame.ncol, self.ncol))\n if frame.columns != self.columns:\n raise H2OValueError(\"Column names must match for rbind() to work\")\n if frame.types != self.types: # compare the whole list here\n validTypes = [u'float', u'real', u'double', u'int', u'long', u'numeric']\n for eachKey in frame.types.keys(): \n sametypes = frame.types[eachKey]==self.types[eachKey]\n bothNumericTypes = (frame.types[eachKey] in validTypes) and (self.types[eachKey] in validTypes)\n if not(sametypes) and not(bothNumericTypes):\n raise H2OValueError(\"Column types must match for rbind() to work. First column type {0}. 
\"\n \"Second column type {1})\".format(self.types[eachKey], frame.types[eachKey]))\n fr = H2OFrame._expr(expr=ExprNode(\"rbind\", self, *frames), cache=self._ex._cache)\n fr._ex._cache.nrows = self.nrow + sum(frame.nrow for frame in frames)\n return fr\n\n\n def split_frame(self, ratios=None, destination_frames=None, seed=None):\n \"\"\"\n Split a frame into distinct subsets of size determined by the given ratios.\n\n The number of subsets is always 1 more than the number of ratios given. Note that\n this does not give an exact split. H2O is designed to be efficient on big data\n using a probabilistic splitting method rather than an exact split. For example\n when specifying a split of 0.75/0.25, H2O will produce a test/train split with\n an expected value of 0.75/0.25 rather than exactly 0.75/0.25. On small datasets,\n the sizes of the resulting splits will deviate from the expected value more than\n on big data, where they will be very close to exact.\n\n :param List[float] ratios: The fractions of rows for each split.\n :param List[str] destination_frames: The names of the split frames.\n :param int seed: seed for the random number generator\n\n :returns: A list of H2OFrames\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris\n >>> train, valid = iris.split_frame(ratios=[.8])\n >>> train\n >>> valid\n \"\"\"\n assert_is_type(ratios, [numeric], None)\n assert_is_type(destination_frames, [str], None)\n assert_is_type(seed, int, None)\n\n if ratios is None:\n ratios = [0.75]\n if not ratios:\n raise ValueError(\"Ratios array may not be empty\")\n\n if destination_frames is not None:\n if len(ratios) + 1 != len(destination_frames):\n raise ValueError(\"The number of provided destination_frames must be one more \"\n \"than the number of provided ratios\")\n\n num_slices = len(ratios) + 1\n boundaries = []\n\n last_boundary = 0\n i = 0\n while i < num_slices - 1:\n ratio = ratios[i]\n if ratio < 0:\n raise ValueError(\"Ratio must be greater than 0\")\n boundary = last_boundary + ratio\n if boundary >= 1.0:\n raise ValueError(\"Ratios must add up to less than 1.0\")\n boundaries.append(boundary)\n last_boundary = boundary\n i += 1\n\n splits = []\n tmp_runif = self.runif(seed)\n\n i = 0\n while i < num_slices:\n if i == 0:\n # lower_boundary is 0.0\n upper_boundary = boundaries[i]\n tmp_slice = self[(tmp_runif <= upper_boundary), :]\n elif i == num_slices - 1:\n lower_boundary = boundaries[i - 1]\n # upper_boundary is 1.0\n tmp_slice = self[(tmp_runif > lower_boundary), :]\n else:\n lower_boundary = boundaries[i - 1]\n upper_boundary = boundaries[i]\n tmp_slice = self[((tmp_runif > lower_boundary) & (tmp_runif <= upper_boundary)), :]\n\n if destination_frames is None:\n splits.append(tmp_slice)\n else:\n destination_frame_id = destination_frames[i]\n tmp_slice.frame_id = destination_frame_id\n splits.append(tmp_slice)\n\n i += 1\n for split in splits:\n split.refresh() # Force the split now (otherwise done lazily) to immediately delete tmp_runif\n h2o.remove(tmp_runif)\n del tmp_runif\n return splits\n\n\n def group_by(self, by):\n \"\"\"\n Return a new ``GroupBy`` object using this frame and the desired grouping columns.\n\n The returned groups are sorted by the natural group-by column sort.\n\n :param by: The columns to group on (either a single column name, or a list of column names, or\n a list of column indices).\n :returns: New ``GroupBy`` object, sorted by the natural group-by column sort.\n \n 
:examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> na_handling = [\"rm\",\"ignore\",\"all\"]\n >>> for na in na_handling:\n ... grouped = iris.group_by(\"class\")\n ... grouped\n ... .count(na=na)\n ... .min(na=na)\n ... .max(na=na)\n ... .mean(na=na)\n ... .var(na=na)\n ... .sd(na=na)\n ... .ss(na=na)\n ... .sum(na=na) \n ... print(grouped.get_frame())\n ... print(grouped.get_frame())\n \"\"\"\n assert_is_type(by, str, int, [str, int])\n return GroupBy(self, by)\n\n def sort(self, by, ascending=[]):\n \"\"\"\n Return a new Frame that is sorted by column(s) in ascending order. A fully distributed and parallel sort.\n Note that the original frame can contain String columns, but sorting cannot be done on String columns.\n Default sorting direction is ascending.\n\n :param by: The column to sort by (either a single column name, or a list of column names, or\n a list of column indices)\n :param ascending: Boolean array to denote sorting direction for each sorting column. True for ascending\n sort and False for descending sort.\n\n :return: a new sorted Frame\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df.sort(\"C1\")\n \"\"\"\n assert_is_type(by, str, int, [str, int])\n if type(by) != list: by = [by]\n if type(ascending) != list: ascending = [ascending] # convert to list\n ascendingI=[1]*len(by) # initialize sorting direction to ascending by default\n for c in by:\n if self.type(c) not in [\"enum\",\"time\",\"int\",\"real\",\"string\"]:\n raise H2OValueError(\"Sort by column: \" + str(c) + \" not of enum, time, int, real, or string type\")\n if len(ascending)>0: # user specified sort directions; validate them and encode ascending as 1, descending as -1\n assert len(ascending)==len(by), \"Sorting direction must be specified for each sorted column.\"\n for index in range(len(by)):\n ascendingI[index]=1 if ascending[index] else -1\n return H2OFrame._expr(expr=ExprNode(\"sort\",self,by,ascendingI))\n\n def fillna(self,method=\"forward\",axis=0,maxlen=1):\n \"\"\"\n Return a new Frame that fills NA along a given axis and along a given direction with a maximum fill length.\n\n :param method: ``\"forward\"`` or ``\"backward\"``\n :param axis: 0 for columnar-wise or 1 for row-wise fill\n :param maxlen: Max number of consecutive NA's to fill\n \n :returns: A new Frame that fills NA along a given axis and along a given direction with a maximum fill length.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0,',',',',',']\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame\n >>> frame.fillna(method='forward', axis=0, maxlen=3)\n \"\"\"\n assert_is_type(axis, 0, 1)\n assert_is_type(method,str)\n assert_is_type(maxlen, int)\n return H2OFrame._expr(expr=ExprNode(\"h2o.fillna\",self,method,axis,maxlen))\n\n def impute(self, column=-1, method=\"mean\", combine_method=\"interpolate\", by=None, group_by_frame=None, values=None):\n \"\"\"\n Impute missing values into the frame, modifying it in-place.\n\n :param int column: Index of the column to impute, or -1 to impute the entire frame.\n :param str method: The method of imputation: ``\"mean\"``, ``\"median\"``, or ``\"mode\"``.\n :param str combine_method: When the method is ``\"median\"``, this setting dictates how to combine quantiles\n for even samples. 
One of ``\"interpolate\"``, ``\"average\"``, ``\"low\"``, ``\"high\"``.\n :param by: The list of columns to group on.\n :param H2OFrame group_by_frame: Impute the values with this pre-computed grouped frame.\n :param List values: The list of impute values, one per column. None indicates to skip the column.\n\n :returns: A list of values used in the imputation or the group-by result used in imputation.\n\n :examples:\n\n >>> prostate = h2o.import_file(\"http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv.zip\")\n >>> prostate.dim\n >>> prostate.impute(\"DPROS\", method=\"mean\")\n \"\"\"\n if is_type(column, str): column = self.names.index(column)\n if is_type(by, str): by = self.names.index(by)\n\n if values is None:\n values = \"_\"\n else:\n assert len(values) == len(self.columns), \"Length of values does not match length of columns\"\n # convert string values to categorical num values\n values2 = []\n for i in range(0,len(values)):\n if self.type(i) == \"enum\":\n try:\n values2.append(self.levels()[i].index(values[i]))\n except:\n raise H2OValueError(\"Impute value of: \" + values[i] + \" not found in existing levels of\"\n \" column: \" + self.col_names[i])\n else:\n values2.append(values[i])\n values = values2\n if group_by_frame is None: group_by_frame = \"_\"\n\n\n # This code below is needed to ensure the frame (self) exists on the server. Without it, self._ex._cache.fill()\n # fails with an assertion that ._id is None.\n # This code should be removed / reworked once we have a more consistent strategy of dealing with frames.\n self._ex._eager_frame()\n\n if by is not None or group_by_frame != \"_\":\n res = H2OFrame._expr(\n expr=ExprNode(\"h2o.impute\", self, column, method, combine_method, by, group_by_frame, values))._frame()\n else:\n res = ExprNode(\"h2o.impute\", self, column, method, combine_method, by, group_by_frame,\n values)._eager_scalar()\n\n self._ex._cache.flush()\n self._ex._cache.fill(10)\n return res\n\n\n def merge(self, other, all_x=False, all_y=False, by_x=None, by_y=None, method=\"auto\"):\n \"\"\"\n Merge two datasets based on common column names. We do not support all_x=True and all_y=True.\n At most one of them can be True. The default merge method is auto and it will default to the\n radix method. The radix method will return the correct merge result regardless of duplicated rows\n in the right frame. In addition, the radix method can perform merge even if you have string columns\n in your frames. If there are duplicated rows in your right frame, they will not be included if you use\n the hash method. The hash method cannot perform merge if you have string columns in your left frame.\n Hence, we consider the radix method superior to the hash method, and it is the default method to use.\n\n :param H2OFrame other: The frame to merge to the current one. By default, must have at least one column in common with\n this frame, and all columns in common are used as the merge key. If you want to use only a subset of the\n columns in common, rename the other columns so the columns are unique in the merged result.\n :param bool all_x: If True, include all rows from the left/self frame\n :param bool all_y: If True, include all rows from the right/other frame\n :param by_x: list of columns in the current frame to use as a merge key.\n :param by_y: list of columns in the ``other`` frame to use as a merge key. 
Should have the same number of\n columns as in the ``by_x`` list.\n :param method: string representing the merge method, one of auto(default), radix or hash.\n\n :returns: New H2OFrame with the result of merging the current frame with the ``other`` frame.\n\n :examples:\n\n >>> col = 10000* [0, 0, 1, 1, 2, 3, 0]\n >>> fr = h2o.H2OFrame(list(zip(*[col])))\n >>> fr.set_names(['rank'])\n >>> mapping = h2o.H2OFrame(list(zip(*[[0,1,2,3],[6,7,8,9]])))\n >>> mapping.set_names(['rank', 'outcome'])\n >>> merged = fr.merge(mapping,\n ... all_x=True,\n ... all_y=False,\n ... by_x=None,\n ... by_y=None,\n ... method='auto')\n >>> merged\n \"\"\"\n\n if by_x is None and by_y is None:\n common_names = list(set(self.names) & set(other.names))\n if not common_names:\n raise H2OValueError(\"No columns in common to merge on!\")\n\n if by_x is None:\n by_x = [self.names.index(c) for c in common_names]\n else:\n by_x = _getValidCols(by_x,self)\n\n if by_y is None:\n by_y = [other.names.index(c) for c in common_names]\n else:\n by_y = _getValidCols(by_y,other)\n\n\n return H2OFrame._expr(expr=ExprNode(\"merge\", self, other, all_x, all_y, by_x, by_y, method))\n\n\n def relevel(self, y):\n \"\"\"\n Reorder levels of an H2O factor for one single column of a H2O frame\n\n The levels of a factor are reordered such that the reference level is at level 0, all remaining levels are\n moved down as needed.\n\n :param str y: The reference level\n :returns: New reordered factor column\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.randint(-5,5, (100, 2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> newFrame = h2oframe.asfactor()\n >>> allLevels = newFrame.levels()\n >>> lastLevels = len(allLevels[0])-1\n >>> newZeroLevel = allLevels[0][lastLevels]\n >>> newFrame[0] = newFrame[0].relevel(newZeroLevel)\n >>> newLevels = newFrame.levels()\n >>> newLevels\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"relevel\", self, quote(y)))\n\n\n def insert_missing_values(self, fraction=0.1, seed=None):\n \"\"\"\n Insert missing values into the current frame, modifying it in-place.\n\n Randomly replaces a user-specified fraction of entries in a H2O dataset with missing\n values.\n\n :param float fraction: A number between 0 and 1 indicating the fraction of entries to replace with missing.\n :param int seed: The seed for the random number generator used to determine which values to make missing.\n\n :returns: the original H2OFrame with missing values inserted.\n\n :examples:\n\n >>> data = [[1, 2, 3, 1, 'a', 1, 9],\n ... [1, 6, 4, 2, 'a', 1, 9],\n ... [2, 3, 8, 6, 'b', 1, 9],\n ... [3, 4, 3, 2, 'b', 3, 8],\n ... [4, 5, 9, 5, 'c', 2, 8],\n ... 
[5, 7, 10,7, 'b', 8, 8]]\n >>> h2o_data = h2o.H2OFrame(data)\n >>> h2o_data.insert_missing_values(fraction = 0.0)\n \"\"\"\n kwargs = {}\n kwargs['dataset'] = self.frame_id # Eager; forces eval now for following REST call\n kwargs['fraction'] = fraction\n if seed is not None: kwargs['seed'] = seed\n job = {}\n job['job'] = h2o.api(\"POST /3/MissingInserter\", data=kwargs)\n H2OJob(job, job_type=(\"Insert Missing Values\")).poll()\n self._ex._cache.flush()\n return self\n\n\n def min(self):\n \"\"\"\n Show the minimum value of all frame entries.\n \n :returns: The minimum value of all frame entries.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris.min()\n >>> iris[\"C1\"].min()\n >>> iris[\"C2\"].min()\n >>> iris[\"C3\"].min()\n >>> iris[\"C4\"].min()\n \"\"\"\n return ExprNode(\"min\", self)._eager_scalar()\n\n\n def max(self):\n \"\"\"\n Show the maximum value of all frame entries.\n \n :returns: The maximum value of all frame entries.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris.max()\n >>> iris[\"C1\"].max()\n >>> iris[\"C2\"].max()\n >>> iris[\"C3\"].max()\n >>> iris[\"C4\"].max()\n \"\"\"\n return ExprNode(\"max\", self)._eager_scalar()\n\n\n def sum(self, skipna=True, axis=0, **kwargs):\n \"\"\"\n Compute the frame's sum by-column (or by-row).\n\n :param bool skipna: If True (default), then NAs are ignored during the computation. Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of sum computation. If 0 (default), then sum is computed columnwise, and the result\n is a frame with 1 row and number of columns as in the original frame. If 1, then sum is computed rowwise\n and the result is a frame with 1 column (called \"sum\"), and number of rows equal to the number of rows\n in the original frame. For row or column sums, the ``return_frame`` parameter must be True.\n :param bool return_frame: A boolean parameter that indicates whether to return an H2O frame or one single aggregated value. Default is False.\n :returns: either an aggregated value with sum of values per-column (old semantic); or an H2OFrame containing sum of values\n per-column/per-row in the original frame (new semantic). 
The new semantic is triggered by either\n providing the ``return_frame=True`` parameter, or having the ``general.allow_breaking_changed`` config\n option turned on.\n\n :examples:\n\n >>> from random import randrange\n >>> import numpy as np\n >>> row_num = randrange(1,10)\n >>> col_num = randrange(1,10)\n >>> python_lists = np.random.randint(-5,5,(row_num,col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.sum(skipna=False,axis=0)\n \"\"\"\n assert_is_type(skipna, bool)\n assert_is_type(axis, 0, 1)\n # Deprecated since 2016-10-14,\n if \"na_rm\" in kwargs:\n warnings.warn(\"Parameter na_rm is deprecated; use skipna instead\", category=H2ODeprecationWarning)\n na_rm = kwargs.pop(\"na_rm\")\n assert_is_type(na_rm, bool)\n skipna = na_rm # don't assign to skipna directly, to help with error reporting\n # Determine whether to return a frame or a list\n return_frame = get_config_value(\"general.allow_breaking_changes\", False)\n if \"return_frame\" in kwargs:\n return_frame = kwargs.pop(\"return_frame\")\n assert_is_type(return_frame, bool)\n if kwargs:\n raise H2OValueError(\"Unknown parameters %r\" % list(kwargs))\n\n if return_frame:\n return H2OFrame._expr(ExprNode(\"sumaxis\", self, skipna, axis))\n else:\n return ExprNode(\"sumNA\" if skipna else \"sum\", self)._eager_scalar()\n\n\n def mean(self, skipna=True, axis=0, **kwargs):\n \"\"\"\n Compute the frame's means by-column (or by-row).\n\n :param bool skipna: If True (default), then NAs are ignored during the computation. Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of mean computation. If 0 (default), then mean is computed columnwise, and the\n result is a frame with 1 row and number of columns as in the original frame. If 1, then mean is computed\n rowwise and the result is a frame with 1 column (called \"mean\"), and number of rows equal to the number\n of rows in the original frame.\n :returns: either a list of mean values per-column (old semantic); or an H2OFrame containing mean values\n per-column/per-row from the original frame (new semantic). 
The new semantic is triggered by either\n providing the ``return_frame=True`` parameter, or having the ``general.allow_breaking_changed`` config\n option turned on.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris.mean()\n \"\"\"\n assert_is_type(skipna, bool)\n assert_is_type(axis, 0, 1)\n # Deprecated since 2016-10-14,\n if \"na_rm\" in kwargs:\n warnings.warn(\"Parameter na_rm is deprecated; use skipna instead\", category=H2ODeprecationWarning)\n na_rm = kwargs.pop(\"na_rm\")\n assert_is_type(na_rm, bool)\n skipna = na_rm # don't assign to skipna directly, to help with error reporting\n # Determine whether to return a frame or a list\n return_frame = get_config_value(\"general.allow_breaking_changes\", False)\n if \"return_frame\" in kwargs:\n return_frame = kwargs.pop(\"return_frame\")\n assert_is_type(return_frame, bool)\n if kwargs:\n raise H2OValueError(\"Unknown parameters %r\" % list(kwargs))\n\n new_frame = H2OFrame._expr(ExprNode(\"mean\", self, skipna, axis))\n if return_frame:\n return new_frame\n else:\n return new_frame.getrow()\n\n\n def skewness(self, na_rm=False):\n \"\"\"\n Compute the skewness of each column in the frame.\n\n :param bool na_rm: If True, then ignore NAs during the computation.\n :returns: A list containing the skewness for each column (NaN for non-numeric columns).\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.uniform(-1,1, (10000,2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.skewness()\n \"\"\"\n return ExprNode(\"skewness\", self, na_rm)._eager_scalar()\n\n\n def kurtosis(self, na_rm=False):\n \"\"\"\n Compute the kurtosis of each column in the frame.\n\n We calculate the common kurtosis, such that kurtosis(normal distribution) is 3.\n\n :param bool na_rm: If True, then ignore NAs during the computation.\n :returns: A list containing the kurtosis for each column (NaN for non-numeric columns).\n\n :examples:\n\n >>> import numpy as np\n >>> from random import randrange\n >>> python_lists = np.random.normal(0,1, (10000, 1))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.kurtosis(na_rm=True)\n \"\"\"\n return ExprNode(\"kurtosis\", self, na_rm)._eager_scalar()\n\n\n def nacnt(self):\n \"\"\"\n Count of NAs for each column in this H2OFrame.\n\n :returns: A list of the na counts (one entry per column).\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv\")\n >>> iris.nacnt()\n \"\"\"\n return ExprNode(\"naCnt\", self)._eager_scalar()\n\n\n def median(self, na_rm=False):\n \"\"\"\n Compute the median of each column in the frame.\n\n :param bool na_rm: If True, then ignore NAs during the computation.\n :returns: A list containing the median for each column (NaN for non-numeric columns).\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris.median()\n \"\"\"\n return ExprNode(\"median\", self, na_rm)._eager_scalar()\n\n\n def var(self, y=None, na_rm=False, use=None):\n \"\"\"\n Compute the variance-covariance matrix of one or two H2OFrames.\n\n :param H2OFrame y: If this parameter is given, then a covariance matrix between the columns of the target\n frame and the columns of ``y`` is computed. If this parameter is not provided then the covariance matrix\n of the target frame is returned. 
If target frame has just a single column, then return the scalar variance\n instead of the matrix. Single rows are treated as single columns.\n :param str use: A string indicating how to handle missing values. This could be one of the following:\n\n - ``\"everything\"``: outputs NaNs whenever one of its contributing observations is missing\n - ``\"all.obs\"``: presence of missing observations will throw an error\n - ``\"complete.obs\"``: discards missing values along with all observations in their rows so that only\n complete observations are used\n :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is\n ``\"everything\"``; and if False then default ``use`` is ``\"complete.obs\"``. This parameter has no effect\n if ``use`` is given explicitly.\n\n :returns: An H2OFrame of the covariance matrix of the columns of this frame (if ``y`` is not given),\n or with the columns of ``y`` (if ``y`` is given). However when this frame and ``y`` are both single rows\n or single columns, then the variance is returned as a scalar.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris.var(y=iris, na_rm=True, use=None)\n \"\"\"\n symmetric = False\n if y is None:\n y = self\n symmetric = True\n if use is None: use = \"complete.obs\" if na_rm else \"everything\"\n if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1):\n return ExprNode(\"var\", self, y, use, symmetric)._eager_scalar()\n return H2OFrame._expr(expr=ExprNode(\"var\", self, y, use, symmetric))._frame()\n\n\n def sd(self, na_rm=False):\n \"\"\"\n Compute the standard deviation for each column in the frame.\n\n :param bool na_rm: if True, then NAs will be removed from the computation.\n :returns: A list containing the standard deviation for each column (NaN for non-numeric columns).\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.uniform(1, 10, (10000,2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> newframe = h2oframe.scale(center=True, scale=True)\n >>> frameMean = newframe.mean()\n >>> newframe.sd()\n \"\"\"\n return ExprNode(\"sd\", self, na_rm)._eager_scalar()\n\n\n def cor(self, y=None, na_rm=False, use=None, method=\"Pearson\"):\n \"\"\"\n Compute the correlation matrix of one or two H2OFrames.\n\n :param H2OFrame y: If this parameter is provided, then compute correlation between the columns of ``y``\n and the columns of the current frame. If this parameter is not given, then just compute the correlation\n matrix for the columns of the current frame.\n :param str use: A string indicating how to handle missing values. This could be one of the following:\n\n - ``\"everything\"``: outputs NaNs whenever one of its contributing observations is missing\n - ``\"all.obs\"``: presence of missing observations will throw an error\n - ``\"complete.obs\"``: discards missing values along with all observations in their rows so that only\n complete observations are used\n :param bool na_rm: an alternative to ``use``: when this is True then default value for ``use`` is\n ``\"everything\"``; and if False then default ``use`` is ``\"complete.obs\"``. This parameter has no effect\n if ``use`` is given explicitly.\n :param str method: Which method to use - value must be in [\"Pearson\", \"Spearman\"]. Defaults to \"Pearson\".\n\n :returns: An H2OFrame of the correlation matrix of the columns of this frame (if ``y`` is not given),\n or with the columns of ``y`` (if ``y`` is given). 
However when this frame and ``y`` are both single rows\n or single columns, then the correlation is returned as a scalar.\n\n :examples:\n\n >>> import numpy as np\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> irisnp = np.genfromtxt((\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\"), delimiter=',', skip_header=1, usecols=(0,1,2,3))\n >>> cor_np = h2o.H2OFrame(np.corrcoef(irisnp,rowvar=0))\n >>> cor_h2o = iris[0:4].cor()\n >>> cor_diff = abs(cor_h2o - cor_np)\n >>> print(cor_diff)\n \"\"\"\n assert_is_type(y, H2OFrame, None)\n assert_is_type(na_rm, bool)\n assert_is_type(use, None, \"everything\", \"all.obs\", \"complete.obs\")\n if y is None:\n y = self\n if use is None: use = \"complete.obs\" if na_rm else \"everything\"\n if self.nrow == 1 or (self.ncol == 1 and y.ncol == 1): return ExprNode(\"cor\", self, y, use, method)._eager_scalar()\n return H2OFrame._expr(expr=ExprNode(\"cor\", self, y, use, method))._frame()\n\n\n def distance(self, y, measure=None):\n \"\"\"\n Compute a pairwise distance measure between all rows of two numeric H2OFrames.\n\n :param H2OFrame y: Frame containing queries (small)\n :param str use: A string indicating what distance measure to use. Must be one of:\n\n - ``\"l1\"``: Absolute distance (L1-norm, >=0)\n - ``\"l2\"``: Euclidean distance (L2-norm, >=0)\n - ``\"cosine\"``: Cosine similarity (-1...1)\n - ``\"cosine_sq\"``: Squared Cosine similarity (0...1)\n :returns: An H2OFrame of the matrix containing pairwise distance / similarity between the \n rows of this frame (N x p) and ``y`` (M x p), with dimensions (N x M).\n\n :examples:\n\n >>> iris_h2o = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> references = iris_h2o[10:150,0:4]\n >>> queries = iris_h2o[0:10,0:4]\n >>> A = references.distance(queries, \"l1\")\n >>> B = references.distance(queries, \"l2\")\n >>> C = references.distance(queries, \"cosine\")\n >>> D = references.distance(queries, \"cosine_sq\")\n >>> E = queries.distance(references, \"l1\")\n >>> (E.transpose() == A).all()\n \"\"\"\n assert_is_type(y, H2OFrame)\n if measure is None: measure = \"l2\"\n return H2OFrame._expr(expr=ExprNode(\"distance\", self, y, measure))._frame()\n\n def drop_duplicates(self, columns, keep=\"first\"):\n \"\"\"\n Drops duplicated rows across specified columns.\n :param columns: Columns to compare during the duplicate detection process.\n :param keep: Which rows to keep. Two possible values: [\"first\", \"last\"]. The \"first\" value (default) keeps\n the first row and deletes the rest. The \"last\" value keeps the last row.\n :return: A new H2OFrame with rows deduplicated\n \"\"\"\n assert_is_type(columns, [int], [str])\n assert_is_type(keep, Enum(\"first\", \"last\"))\n\n return H2OFrame._expr(expr=ExprNode(\"dropdup\", self, columns, keep))._frame()\n\n def strdistance(self, y, measure=None, compare_empty=True):\n \"\"\"\n Compute element-wise string distances between two H2OFrames. Both frames need to have the same\n shape and only contain string/factor columns.\n\n :param H2OFrame y: A comparison frame.\n :param str measure: A string identifier indicating what string distance measure to use. 
Must be one of:\n\n - ``\"lv\"``: Levenshtein distance\n - ``\"lcs\"``: Longest common substring distance\n - ``\"qgram\"``: q-gram distance\n - ``\"jaccard\"``: Jaccard distance between q-gram profiles\n - ``\"jw\"``: Jaro, or Jaro-Winker distance\n - ``\"soundex\"``: Distance based on soundex encoding\n\n :param compare_empty: if set to FALSE, empty strings will be handled as NaNs\n :returns: An H2OFrame of the matrix containing element-wise distance between the\n strings of this frame and ``y``. The returned frame has the same shape as the input frames.\n\n :examples:\n \n >>> x = h2o.H2OFrame.from_python(['Martha', 'Dwayne', 'Dixon'],\n ... column_types=['factor'])\n >>> y = h2o.H2OFrame.from_python(['Marhta', 'Duane', 'Dicksonx'],\n ... column_types=['string'])\n >>> x.strdistance(y, measure=\"jw\")\n \"\"\"\n assert_is_type(y, H2OFrame)\n assert_is_type(measure, Enum('lv', 'lcs', 'qgram', 'jaccard', 'jw', 'soundex'))\n assert_is_type(compare_empty, bool)\n return H2OFrame._expr(expr=ExprNode(\"strDistance\", self, y, measure, compare_empty))._frame()\n\n \n\n def asfactor(self):\n \"\"\"\n Convert columns in the current frame to categoricals.\n\n :returns: new H2OFrame with columns of the \"enum\" type.\n\n :examples:\n\n >>> h2o = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv\")\n >>> h2o['cylinders'] = h2o['cylinders'].asfactor()\n >>> h2o['cylinders']\n \"\"\"\n for colname in self.names:\n t = self.types[colname]\n if t not in {\"bool\", \"int\", \"string\", \"enum\"}:\n raise H2OValueError(\"Only 'int' or 'string' are allowed for \"\n \"asfactor(), got %s:%s \" % (colname, t))\n fr = H2OFrame._expr(expr=ExprNode(\"as.factor\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {name: \"enum\" for name in self.types}\n else:\n raise H2OTypeError(\"Types are not available in result\")\n \n return fr\n\n\n def isfactor(self):\n \"\"\"\n Test which columns in the current frame are categorical.\n\n :returns: a list of True/False indicating for each column in the frame whether it is categorical.\n\n :examples:\n\n >>> aa = {'h1': [1, 8, 4, 3, 6],\n ... 'h2': [\"fish\", \"cat\", \"fish\", \"dog\", \"bird\"],\n ... 'h3': [0, 1, 0, 0, 1]}\n >>> df_hex = h2o.H2OFrame(aa)\n >>> df_hex['h1'].isfactor()\n >>> df_hex['h1'] = df_hex['h1'].asfactor()\n >>> df_hex['h1'].isfactor()\n \"\"\"\n return [bool(o) for o in ExprNode(\"is.factor\", self)._eager_scalar()]\n\n\n def anyfactor(self):\n \"\"\"\n Determine if there are any categorical columns in the frame.\n\n :returns: (bool) True if there are any categorical columns in the frame.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.anyfactor()\n \"\"\"\n return bool(ExprNode(\"any.factor\", self)._eager_scalar())\n\n\n def categories(self):\n \"\"\"\n Make a list of levels for an enum (categorical) column. This function can only be applied to single-column categorical frame.\n\n :returns: The list of levels for an enum column.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> category_list = iris['class'].categories()\n >>> print(category_list)\n \"\"\"\n if self.ncols != 1:\n raise H2OValueError(\"This operation only applies to a single factor column\")\n if self.types[self.names[0]] != \"enum\":\n raise H2OValueError(\"Input is not a factor. 
This operation only applies to a single factor column\")\n return self.levels()[0]\n\n\n def transpose(self):\n \"\"\"\n Transpose rows and columns of this frame.\n\n :returns: new H2OFrame with rows/columns from the original frame transposed.\n\n :examples:\n\n >>> from random import randrange\n >>> import numpy as np\n >>> row_num = randrange(1,10)\n >>> col_num = randrange(1,10)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.transpose()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"t\", self))\n\n\n def strsplit(self, pattern):\n \"\"\"\n Split the strings in the target column on the given regular expression pattern.\n\n :param str pattern: The split pattern.\n :returns: H2OFrame containing columns of the split strings.\n\n :examples:\n \n >>> frame = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> frame[\"C5\"].strsplit(\"-\")\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"strsplit\", self, pattern))\n fr._ex._cache.nrows = self.nrow\n return fr\n\n def tokenize(self, split):\n \"\"\"\n Tokenize String\n\n tokenize() is similar to strsplit(); the difference between them is that tokenize() will store the tokenized\n text into a single column making it easier for additional processing (filtering stop words, word2vec algo, ...).\n\n :param str split: The regular expression to tokenize on.\n \n :returns: An H2OFrame with a single column representing the tokenized Strings. Original rows of the input DF are separated by NA.\n\n :examples:\n\n >>> df1 = h2o.H2OFrame.from_python({'String':\n ... [' this is a string ']})\n >>> df1 = df1.ascharacter()\n >>> df2 = h2o.H2OFrame.from_python({'String':\n ... ['this is another string']})\n >>> df2 = df2.ascharacter()\n >>> df3 = h2o.H2OFrame.from_python({'String':\n ... ['this is a longer string']})\n >>> df3 = df3.ascharacter()\n >>> df4 = h2o.H2OFrame.from_python({'String':\n ... ['this is tall, this is taller']})\n >>> df4 = df4.ascharacter()\n >>> combined = df1.rbind([df2, df3, df4])\n >>> combined\n >>> tokenized = combined.tokenize(\" \")\n >>> tokenized.describe()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"tokenize\", self, split))\n return fr\n\n def countmatches(self, pattern):\n \"\"\"\n For each string in the frame, count the occurrences of the provided pattern. If countmatches is applied to\n a frame, all columns of the frame must be of type string; otherwise, the returned frame will contain errors.\n\n The pattern here is a plain string, not a regular expression. We will search for the occurrences of the\n pattern as a substring in each element of the frame. This function is applicable to frames containing only\n string or categorical columns.\n\n :param str pattern: The pattern to count matches on in each string. 
This can also be a list of strings,\n in which case all of them will be searched for.\n :returns: numeric H2OFrame with the same shape as the original, containing counts of matches of the\n pattern for each cell in the original frame.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> result = iris[\"class\"].countmatches(\"o\")\n >>> result2 = iris[\"class\"].countmatches(\"s\")\n >>> result\n >>> result2\n \"\"\"\n assert_is_type(pattern, str, [str])\n fr = H2OFrame._expr(expr=ExprNode(\"countmatches\", self, pattern))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncols = self.ncol\n return fr\n\n\n def trim(self):\n \"\"\"\n Trim white space on the left and right of strings in a single-column H2OFrame.\n\n :returns: H2OFrame with trimmed strings.\n\n :examples:\n\n >>> frame = h2o.import_file((\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_trim.csv\"),\n ... col_types=[\"string\",\"numeric\",\n ... \"numeric\",\"numeric\",\n ... \"numeric\",\"numeric\",\n ... \"numeric\",\"numeric\"])\n >>> frame[\"name\"].trim()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"trim\", self))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def substring(self, start_index, end_index=None):\n \"\"\"\n For each string, return a new string that is a substring of the original string.\n\n If end_index is not specified, then the substring extends to the end of the original string. If the start_index\n is longer than the length of the string, or is greater than or equal to the end_index, an empty string is\n returned. Negative start_index is coerced to 0.\n\n :param int start_index: The index of the original string at which to start the substring, inclusive.\n :param int end_index: The index of the original string at which to end the substring, exclusive.\n :returns: An H2OFrame containing the specified substrings.\n\n :examples:\n\n >>> frame = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> frame[\"C5\"].substring(0,5)\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"substring\", self, start_index, end_index))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def lstrip(self, set=\" \"):\n \"\"\"\n Return a copy of the column with leading characters removed.\n\n The set argument is a string specifying the set of characters to be removed.\n If omitted, the set argument defaults to removing whitespace.\n\n :param character set: The set of characters to lstrip from strings in column.\n :returns: a new H2OFrame with the same shape as the original frame and having all its values\n trimmed from the left (equivalent of Python's ``str.lstrip()``).\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris[\"C5\"].lstrip(\"Iris-\")\n \"\"\"\n # work w/ None; parity with python lstrip\n if set is None: set = \" \"\n\n fr = H2OFrame._expr(expr=ExprNode(\"lstrip\", self, set))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def rstrip(self, set=\" \"):\n \"\"\"\n Return a copy of the column with trailing characters removed.\n\n The set argument is a string specifying the set of characters to be removed.\n If omitted, the set argument defaults to removing whitespace.\n\n :param character set: The set of characters to rstrip from strings in column\n :returns: a new H2OFrame with the same shape 
as the original frame and having all its values\n trimmed from the right (equivalent of Python's ``str.rstrip()``).\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> iris.levels()\n >>> iris[\"C5\"] = iris[\"C5\"].rstrip(\"color\")\n >>> iris[\"C5\"].levels()[0]\n \"\"\"\n # work w/ None; parity with python rstrip\n if set is None: set = \" \"\n\n fr = H2OFrame._expr(expr=ExprNode(\"rstrip\", self, set))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def entropy(self):\n \"\"\"\n For each string compute its Shannon entropy, if the string is empty the entropy is 0.\n\n :returns: an H2OFrame of Shannon entropies.\n\n :examples:\n\n >>> frame = h2o.H2OFrame.from_python([\"redrum\"])\n >>> frame.entropy()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"entropy\", self))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def num_valid_substrings(self, path_to_words):\n \"\"\"\n For each string, find the count of all possible substrings with 2 characters or more that are contained in\n the line-separated text file whose path is given.\n\n :param str path_to_words: Path to file that contains a line-separated list of strings considered valid.\n :returns: An H2OFrame with the number of substrings that are contained in the given word list.\n\n :examples:\n\n >>> path = \"https://raw.githubusercontent.com/dwyl/english-words/master/words.txt\"\n # test empty strings\n >>> string = h2o.H2OFrame.from_python([''],\n ... column_types=['string'])\n >>> enum = h2o.H2OFrame.from_python([''],\n ... column_types=['enum'])\n >>> string.num_valid_substrings(path)[0,0] == 0\n >>> enum.num_valid_substrings(path)[0,0] == 0\n \"\"\"\n assert_is_type(path_to_words, str)\n fr = H2OFrame._expr(expr=ExprNode(\"num_valid_substrings\", self, path_to_words))\n fr._ex._cache.nrows = self.nrow\n fr._ex._cache.ncol = self.ncol\n return fr\n\n\n def nchar(self):\n \"\"\"\n Count the length of each string in a single-column H2OFrame of string type.\n\n :returns: A single-column H2OFrame containing the per-row character count.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv\")\n >>> iris[4].nchar()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"strlen\", self))\n\n\n def table(self, data2=None, dense=True):\n \"\"\"\n Compute the counts of values appearing in a column, or co-occurence counts between two columns.\n\n :param H2OFrame data2: An optional single column to aggregate counts by.\n :param bool dense: If True (default) then use dense representation, which lists only non-zero counts,\n 1 combination per row. 
Set to False to expand counts across all combinations.\n :returns: H2OFrame of the counts at each combination of factor levels\n\n :examples:\n\n >>> df = h2o.import_file(\"http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate_cat.csv\")\n >>> df[['DPROS', 'RACE']].table(data2=None,dense=True)\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"table\", self, data2, dense)) if data2 is not None else H2OFrame._expr(\n expr=ExprNode(\"table\", self, dense))\n\n\n def hist(self, breaks=\"sturges\", plot=True, **kwargs):\n \"\"\"\n Compute a histogram over a numeric column.\n\n :param breaks: Can be one of ``\"sturges\"``, ``\"rice\"``, ``\"sqrt\"``, ``\"doane\"``, ``\"fd\"``, ``\"scott\"``;\n or a single number for the number of breaks; or a list containing the split points, e.g:\n ``[-50, 213.2123, 9324834]``. If breaks is \"fd\", the MAD is used over the IQR in computing bin width.\n :param bool plot: If True (default), then a plot will be generated using ``matplotlib``.\n\n :returns: If ``plot`` is False, return H2OFrame with these columns: breaks, counts, mids_true,\n mids, and density; otherwise this method draws a plot and returns nothing.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris.describe()\n >>> iris[0].hist(breaks=5,plot=False)\n \"\"\"\n import matplotlib\n server = kwargs.pop(\"server\") if \"server\" in kwargs else False\n assert_is_type(breaks, int, [numeric], Enum(\"sturges\", \"rice\", \"sqrt\", \"doane\", \"fd\", \"scott\"))\n assert_is_type(plot, bool)\n assert_is_type(server, bool)\n if kwargs:\n raise H2OValueError(\"Unknown parameters to hist(): %r\" % kwargs)\n hist = H2OFrame._expr(expr=ExprNode(\"hist\", self, breaks))._frame()\n\n if plot:\n plt = get_matplotlib_pyplot(server)\n if plt is None:\n return\n\n hist[\"widths\"] = hist[\"breaks\"].difflag1()\n # [2:] because we're removing the title and the first row (which consists of NaNs)\n lefts = [float(c[0]) for c in h2o.as_list(hist[\"breaks\"], use_pandas=False)[2:]]\n widths = [float(c[0]) for c in h2o.as_list(hist[\"widths\"], use_pandas=False)[2:]]\n counts = [float(c[0]) for c in h2o.as_list(hist[\"counts\"], use_pandas=False)[2:]]\n\n plt.xlabel(self.names[0])\n plt.ylabel(\"Frequency\")\n plt.title(\"Histogram of %s\" % self.names[0])\n\n # matplotlib deprecated \"left\" arg in 2.1.0 and removed in 3.0.0\n version_number = matplotlib.__version__\n major = version_number.split('.')[0]\n minor = version_number.split('.')[1]\n major = int(major)\n minor = int(minor)\n if major == 2 and minor >= 1 or major >= 3:\n plt.bar(x=lefts, width=widths, height=counts, bottom=0)\n else:\n plt.bar(left=lefts, height=counts, width=widths, bottom=0)\n\n if not server:\n plt.show()\n else:\n hist[\"density\"] = hist[\"counts\"] / (hist[\"breaks\"].difflag1() * hist[\"counts\"].sum())\n return hist\n\n\n def isax(self, num_words, max_cardinality, optimize_card=False, **kwargs):\n \"\"\"\n Compute the iSAX index for DataFrame which is assumed to be numeric time series data.\n\n References:\n\n - http://www.cs.ucr.edu/~eamonn/SAX.pdf\n - http://www.cs.ucr.edu/~eamonn/iSAX_2.0.pdf\n\n :param int num_words: Number of iSAX words for the timeseries, i.e. granularity along the time series\n :param int max_cardinality: Maximum cardinality of the iSAX word. 
Each word can have less than the max\n :param bool optimized_card: An optimization flag that will find the max cardinality regardless of what is\n passed in for ``max_cardinality``.\n\n :returns: An H2OFrame with the name of time series, string representation of iSAX word, followed by\n binary representation.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=1,\n ... cols=256,\n ... real_fraction=1.0,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df2 = df.cumsum(axis=1)\n >>> res = df2.isax(num_words=10,max_cardinality=10)\n >>> res\n \"\"\"\n if num_words <= 0: raise H2OValueError(\"num_words must be greater than 0\")\n if max_cardinality <= 0: raise H2OValueError(\"max_cardinality must be greater than 0\")\n return H2OFrame._expr(expr=ExprNode(\"isax\", self, num_words, max_cardinality, optimize_card))\n\n def convert_H2OFrame_2_DMatrix(self, predictors, yresp, h2oXGBoostModel, return_pandas=False):\n '''\n This method requires that you import the following toolboxes: xgboost, pandas, numpy and scipy.sparse.\n\n This method will convert an H2OFrame to a DMatrix that can be used by native XGBoost. The H2OFrame contains\n numerical and enum columns alone. Note that H2O one-hot-encoding introduces a missing(NA)\n column. There can be NAs in any columns.\n\n Follow the steps below to compare H2OXGBoost and native XGBoost:\n\n 1. Train the H2OXGBoost model with H2OFrame trainFile and generate a prediction:\n h2oModelD = H2OXGBoostEstimator(**h2oParamsD) # parameters specified as a dict()\n h2oModelD.train(x=myX, y=y, training_frame=trainFile) # train with H2OFrame trainFile\n h2oPredict = h2oPredictD = h2oModelD.predict(trainFile)\n\n 2. Derive the DMatrix from H2OFrame:\n nativeDMatrix = trainFile.convert_H2OFrame_2_DMatrix(myX, y, h2oModelD)\n\n 3. Derive the parameters for native XGBoost:\n nativeParams = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()\n\n 4. Train your native XGBoost model and generate a prediction:\n nativeModel = xgb.train(params=nativeParams[0], dtrain=nativeDMatrix, num_boost_round=nativeParams[1])\n nativePredict = nativeModel.predict(data=nativeDMatrix, ntree_limit=nativeParams[1].\n\n 5. Compare the predictions h2oPredict from H2OXGBoost, nativePredict from native \n XGBoost.\n\n :param h2oFrame: H2OFrame to be converted to DMatrix for native XGBoost\n :param predictors: List of predictor columns, can be column names or indices\n :param yresp: response column, can be column index or name\n :param h2oXGBoostModel: H2OXGboost model that are built with the same H2OFrame as input earlier\n :param return_pandas: Whether to return `pandas.DataFrame` or DMatrix. Default to `False`\n :return: DMatrix that can be an input to a native XGBoost model, or `pandas.DataFrame`\n\n :examples:\n\n >>> import xgboost as xgb\n >>> from h2o.estimators.xgboost import *\n >>> data = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/adult_data_modified.csv\")\n >>> data[14] = data[14].asfactor()\n >>> myX = list(range(0, 13))\n >>> y='income'\n >>> h2oParamsD = {\"ntrees\":30, \"max_depth\":4, \"seed\":2,\n ... \"learn_rate\":0.7,\"col_sample_rate_per_tree\" : 0.9,\n ... \"min_rows\" : 5, \"score_tree_interval\": 30+1,\n ... 
\"tree_method\": \"exact\", \"backend\":\"cpu\"}\n >>> h2oModelD = H2OXGBoostEstimator(**h2oParamsD)\n >>> h2oModelD.train(x=myX, y=y, training_frame=data)\n >>> h2oPredictD = h2oModelD.predict(data)\n >>> nativeXGBoostParam = h2oModelD.convert_H2OXGBoostParams_2_XGBoostParams()\n >>> nativeXGBoostInput = data.convert_H2OFrame_2_DMatrix(myX,\n ... y,\n ... h2oModelD)\n >>> nativeModel = xgb.train(params=nativeXGBoostParam[0],\n ... dtrain=nativeXGBoostInput,\n ... num_boost_round=nativeXGBoostParam[1])\n >>> nativePred = nativeModel.predict(data=nativeXGBoostInput,\n ... ntree_limit=nativeXGBoostParam[1])\n '''\n import xgboost as xgb\n import pandas as pd\n import numpy as np\n from scipy.sparse import csr_matrix\n\n assert isinstance(predictors, list) or isinstance(predictors, tuple)\n assert h2oXGBoostModel._model_json['algo'] == 'xgboost', \\\n \"convert_H2OFrame_2_DMatrix is used for H2OXGBoost model only.\"\n\n tempFrame = self[predictors].cbind(self[yresp])\n colnames = tempFrame.names\n if type(predictors[0])==type(1): # convert integer indices to column names\n temp = []\n for colInd in predictors:\n temp.append(colnames[colInd])\n predictors = temp\n\n if (type(yresp) == type(1)):\n tempy = colnames[yresp]\n yresp = tempy # column name of response column\n\n enumCols = [] # extract enum columns out to process them\n enumColsIndices = [] # store enum column indices\n typeDict = self.types\n for predName in predictors:\n if str(typeDict[predName])=='enum':\n enumCols.append(predName)\n enumColsIndices.append(colnames.index(predName))\n\n pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True)\n nrows = tempFrame.nrow\n\n # convert H2OFrame to DMatrix starts here\n if len(enumCols) > 0: # enumCols contain all enum column names\n allDomain = tempFrame.levels() # list all domain levels with column indices\n domainLen = []\n for enumIndex in enumColsIndices:\n if len(allDomain[enumIndex])>0:\n domainLen.append(len(allDomain[enumIndex])*-1)\n incLevel = np.argsort(domainLen) # indices of enum column indices with decreasing domain length\n\n # need to move enum columns to the front, highest level first\n c2 = tempFrame[enumCols[incLevel[0]]]\n tempFrame = tempFrame.drop(enumCols[incLevel[0]])\n for index in range(1, len(incLevel)):\n c2 = c2.cbind(tempFrame[enumCols[incLevel[index]]])\n tempFrame = tempFrame.drop(enumCols[incLevel[index]])\n \n enumCols = c2.names\n tempFrame = c2.cbind(tempFrame)\n pandaFtrain = tempFrame.as_data_frame(use_pandas=True, header=True) # redo translation from H2O to panda\n \n pandaTrainPart = generatePandaEnumCols(pandaFtrain, enumCols[0], nrows, tempFrame[enumCols[0]].categories())\n pandaFtrain.drop([enumCols[0]], axis=1, inplace=True)\n\n for colInd in range(1, len(enumCols)):\n cname=enumCols[colInd]\n ctemp = generatePandaEnumCols(pandaFtrain, cname, nrows, tempFrame[enumCols[colInd]].categories())\n pandaTrainPart=pd.concat([pandaTrainPart, ctemp], axis=1)\n pandaFtrain.drop([cname], axis=1, inplace=True)\n\n pandaFtrain = pd.concat([pandaTrainPart, pandaFtrain], axis=1)\n\n c0= tempFrame[yresp].asnumeric().as_data_frame(use_pandas=True, header=True)\n pandaFtrain.drop([yresp], axis=1, inplace=True)\n pandaF = pd.concat([c0, pandaFtrain], axis=1)\n pandaF.rename(columns={c0.columns[0]:yresp}, inplace=True)\n if return_pandas:\n return pandaF\n newX = list(pandaFtrain.columns.values)\n data = pandaF[newX].values\n label = pandaF[[yresp]].values\n\n return xgb.DMatrix(data=csr_matrix(data), label=label, feature_names=newX) \\\n if 
h2oXGBoostModel._model_json['output']['sparse'] else xgb.DMatrix(data=data, \n label=label, feature_names=newX)\n\n def pivot(self, index, column, value):\n \"\"\"\n Pivot the frame designated by the three columns: index, column, and value. Index and column should be\n of type enum, int, or time.\n For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame.\n\n :param index: Index is a column that will be the row label\n :param column: The labels for the columns in the pivoted Frame\n :param value: The column of values for the given index and column label\n :returns: Returns a new H2OFrame with pivoted columns.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=1000000,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=1234)\n >>> pdf = df.as_data_frame()\n >>> ppdf = pdf.pivot(values=\"C3\",index=\"C1\",columns=\"C2\")\n >>> ppdf = ppdf.fillna(0.0)\n >>> ppdfh2o = h2o.H2OFrame(ppdf)\n >>> ppdfh2o\n \"\"\"\n assert_is_type(index, str)\n assert_is_type(column, str)\n assert_is_type(value, str)\n col_names = self.names\n if index not in col_names:\n raise H2OValueError(\"Index not in H2OFrame\")\n if column not in col_names:\n raise H2OValueError(\"Column not in H2OFrame\")\n if value not in col_names:\n raise H2OValueError(\"Value column not in H2OFrame\")\n if self.type(column) not in [\"enum\",\"time\",\"int\"]:\n raise H2OValueError(\"'column' argument is not type enum, time or int\")\n if self.type(index) not in [\"enum\",\"time\",\"int\"]:\n raise H2OValueError(\"'index' argument is not type enum, time or int\")\n return H2OFrame._expr(expr=ExprNode(\"pivot\",self,index,column,value))\n\n def melt(self, id_vars, value_vars=None, var_name=\"variable\", value_name=\"value\", skipna=False):\n \"\"\"\n Converts an H2OFrame to key-value representation while optionally skipping NA values.\n Inverse operation to pivot.\n\n :param id_vars: Columns used as identifiers.\n :param value_vars: What columns will be converted to key-value pairs (default: complement to id_vars).\n :param var_name: Name of the key-column (default: \"variable\").\n :param value_name: Name of the value-column (default: \"value\").\n :param skipna: If enabled, do not include NAs in the result. \n :returns: Returns an unpivoted H2OFrame.\n\n :examples:\n\n >>> import pandas as pd\n >>> from h2o.frame import H2OFrame\n >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},\n ... 'B': {0: 1, 2: 5},\n ... 'C': {0: 2, 1: 4, 2: 6}})\n >>> df\n >>> frozen_h2o = H2OFrame(df)\n >>> frozen_h2o\n >>> melted = frozen_h2o.melt(id_vars=[\"A\"], value_vars=[\"B\"])\n >>> melted\n \"\"\"\n assert_is_type(id_vars, [str])\n assert_is_type(value_vars, [str], None)\n assert_is_type(var_name, str)\n assert_is_type(value_name, str)\n assert_is_type(skipna, bool)\n return H2OFrame._expr(expr=ExprNode(\"melt\", self, id_vars, value_vars, var_name, value_name, skipna))\n\n def rank_within_group_by(self, group_by_cols, sort_cols, ascending=[], new_col_name=\"New_Rank_column\", sort_cols_sorted=False):\n \"\"\"\n This function will add a new column rank where the ranking is produced as follows:\n \n 1. Sorts the H2OFrame by columns sorted in by columns specified in group_by_cols and sort_cols in the directions\n specified by the ascending for the sort_cols. The sort directions for the group_by_cols are ascending only.\n\n 2. 
A new rank column is added to the frame which will contain a rank assignment performed next. The user can\n choose to assign a name to this new column. The default name is New_Rank_column.\n\n 3. For each groupby groups, a rank is assigned to the row starting from 1, 2, ... to the end of that \n group.\n\n 4. If sort_cols_sorted is TRUE, a final sort on the frame will be performed frame according to the sort_cols and\n the sort directions in ascending. If sort_cols_sorted is FALSE (by default), the frame from step 3 will be\n returned as is with no extra sort. This may provide a small speedup if desired.\n\n :param group_by_cols: The columns to group on (either a single column name/index, or a list of column names\n or column indices\n :param sort_cols: The columns to sort on (either a single column name/index, or a list of column names or\n column indices\n :param ascending: Optional Boolean array to denote sorting direction for each sorting column. True for\n ascending, False for descending. Default is ascending sort. Sort direction for enums will be ignored.\n :param new_col_name: Optional String to denote the new column names. Default to New_Rank_column.\n :param sort_cols_sorted: Optional Boolean to denote if the returned frame should be sorted according to sort_cols\n and sort directions specified in ascending. Default is False.\n\n :returns: A new Frame with new rank (sorted by columns in sort_cols) column within the grouping \n specified by the group_by_cols.\n\n :examples:\n\n >>> air = h2o.import_file(\"https://s3.amazonaws.com/h2o-airlines-unpacked/allyears2k.csv\")\n # slice out all but the following five columns\n >>> df = air[:, [\"ArrDelay\", \"DepDelay\", \"Origin\", \"Dest\", \"Distance\"]]\n # group by \"Distance\" and sort by \"Origin\"\n >>> ranked1 = df.rank_within_group_by(group_by_cols=\"Distance\", sort_cols=\"Origin\")\n # group by \"ArrDelay\" and sort by \"Origin\"\n >>> ranked2 = df.rank_within_group_by(group_by_cols=\"ArrDelay\", sort_cols=\"Origin\")\n # group by \"DepDelay\" and sort by \"Dest\"\n >>> ranked3 = df.rank_within_group_by(group_by_cols=\"DepDelay\", sort_cols=\"Dest\")\n \"\"\"\n assert_is_type(group_by_cols, str, int, [str, int])\n if type(group_by_cols) != list: group_by_cols = [group_by_cols]\n if type(sort_cols) != list: sort_cols = [sort_cols]\n\n if type(ascending) != list: ascending = [ascending] # convert to list\n ascendingI=[1]*len(sort_cols) # intitalize sorting direction to ascending by default\n for c in sort_cols:\n if self.type(c) not in [\"enum\",\"time\",\"int\",\"real\"]:\n raise H2OValueError(\"Sort by column: \" + str(c) + \" not of enum, time, int or real type\")\n for c in group_by_cols:\n if self.type(c) not in [\"enum\",\"time\",\"int\",\"real\"]:\n raise H2OValueError(\"Group by column: \" + str(c) + \" not of enum, time, int or real type\")\n\n if len(ascending)>0: # user specify sort direction, assume all columns ascending\n assert len(ascending)==len(sort_cols), \"Sorting direction must be specified for each sorted column.\"\n for index in range(len(sort_cols)):\n ascendingI[index]=1 if ascending[index] else -1\n\n finalSortedOrder=0\n if (sort_cols_sorted):\n finalSortedOrder=1\n return H2OFrame._expr(expr=ExprNode(\"rank_within_groupby\",self,group_by_cols,sort_cols,ascendingI,new_col_name, finalSortedOrder))\n\n def topNBottomN(self, column=0, nPercent=10, grabTopN=-1):\n \"\"\"\n Given a column name or one column index, a percent N, this function will return the top or bottom N% of the\n values of the column of a 
frame. The column must be a numerical column.\n \n :param column: a string for column name or an integer index\n :param nPercent: a top or bottom percentage of the column values to return\n :param grabTopN: -1 to grab bottom N percent and 1 to grab top N percent\n :returns: a H2OFrame containing two columns. The first column contains the original row indices where\n the top/bottom values are extracted from. The second column contains the values.\n\n :examples:\n\n >>> import numpy as np\n >>> from random import randint\n >>> dataFrame = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/jira/TopBottomNRep4.csv.zip\")\n >>> topAnswer = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/Top20Per.csv.zip\")\n >>> bottomAnswer = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/Bottom20Per.csv.zip\")\n >>> nPercentages = [1,2,3,4]\n >>> frameNames = dataFrame.names\n >>> tolerance=1e-12\n >>> nsample=100\n >>> nP = nPercentages[randint(0, len(nPercentages)-1)]\n >>> colIndex = randint(0, len(frameNames)-2)\n >>> dataFrame.topNBottomN(frameNames[colIndex], nP, grabTopN=1)\n >>> dataFrame.topNBottomN(frameNames[colIndex], nP, grabTopN=-1)\n \"\"\"\n assert (nPercent >= 0) and (nPercent<=100.0), \"nPercent must be between 0.0 and 100.0\"\n assert round(nPercent*0.01*self.nrows)>0, \"Increase nPercent. Current value will result in top 0 row.\"\n\n if isinstance(column, int):\n if (column < 0) or (column>=self.ncols):\n raise H2OValueError(\"Invalid column index H2OFrame\")\n else:\n colIndex = column\n else: # column is a column name\n col_names = self.names\n if column not in col_names:\n raise H2OValueError(\"Column name not found H2OFrame\")\n else:\n colIndex = col_names.index(column)\n\n if not(self[colIndex].isnumeric()):\n raise H2OValueError(\"Wrong column type! Selected column must be numeric.\")\n\n return H2OFrame._expr(expr=ExprNode(\"topn\", self, colIndex, nPercent, grabTopN))\n\n def topN(self, column=0, nPercent=10):\n \"\"\"\n Given a column name or one column index, a percent N, this function will return the top N% of the values\n of the column of a frame. The column must be a numerical column.\n \n :param column: a string for column name or an integer index\n :param nPercent: a top percentage of the column values to return\n :returns: a H2OFrame containing two columns. The first column contains the original row indices where\n the top values are extracted from. The second column contains the top nPercent values.\n\n :examples:\n\n >>> import numpy as np\n >>> from random import randint\n >>> dataFrame = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/jira/TopBottomNRep4.csv.zip\")\n >>> topAnswer = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/Top20Per.csv.zip\")\n >>> nPercentages = [1,2,3,4]\n >>> frameNames = dataFrame.names\n >>> tolerance=1e-12\n >>> nsample=100\n >>> nP = nPercentages[randint(0, len(nPercentages)-1)]\n >>> colIndex = randint(0, len(frameNames)-2)\n >>> dataFrame.topN(frameNames[colIndex], nP)\n \"\"\"\n return self.topNBottomN(column, nPercent, 1)\n\n def bottomN(self, column=0, nPercent=10):\n \"\"\"\n Given a column name or one column index, a percent N, this function will return the bottom N% of the values\n of the column of a frame. 
The column must be a numerical column.\n \n :param column: a string for column name or an integer index\n :param nPercent: a bottom percentage of the column values to return\n :returns: a H2OFrame containing two columns. The first column contains the original row indices where\n the bottom values are extracted from. The second column contains the bottom nPercent values.\n\n :examples:\n\n >>> import numpy as np\n >>> from random import randint\n >>> dataFrame = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/jira/TopBottomNRep4.csv.zip\")\n >>> bottomAnswer = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/jira/Bottom20Per.csv.zip\")\n >>> nPercentages = [1,2,3,4]\n >>> frameNames = dataFrame.names\n >>> tolerance=1e-12\n >>> nsample=100\n >>> nP = nPercentages[randint(0, len(nPercentages)-1)]\n >>> colIndex = randint(0, len(frameNames)-2)\n >>> dataFrame.bottomN(frameNames[colIndex], nP)\n \"\"\"\n return self.topNBottomN(column, nPercent, -1)\n\n def sub(self, pattern, replacement, ignore_case=False):\n \"\"\"\n Substitute the first occurrence of pattern in a string with replacement.\n\n :param str pattern: A regular expression.\n :param str replacement: A replacement string.\n :param bool ignore_case: If True then pattern will match case-insensitively.\n :returns: an H2OFrame with all values matching ``pattern`` replaced with ``replacement``.\n\n :examples:\n\n >>> frame = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> frame[\"C5\"].sub('s', 'z', ignore_case=False)\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"replacefirst\", self, pattern, replacement, ignore_case))\n\n\n def gsub(self, pattern, replacement, ignore_case=False):\n \"\"\"\n Globally substitute occurrences of pattern in a string with replacement.\n\n :param str pattern: A regular expression.\n :param str replacement: A replacement string.\n :param bool ignore_case: If True then pattern will match case-insensitively.\n :returns: an H2OFrame with all occurrences of ``pattern`` in all values replaced with ``replacement``.\n\n :examples:\n\n >>> iris = h2o.import_file((\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\"),\n ... col_types=[\"numeric\",\"numeric\",\n ... \"numeric\",\"numeric\",\n ... \"string\"])\n >>> iris[\"C5\"].gsub(\"s\",\"z\",ignore_case=False)\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"replaceall\", self, pattern, replacement, ignore_case))\n\n\n def interaction(self, factors, pairwise, max_factors, min_occurrence, destination_frame=None):\n \"\"\"\n Categorical Interaction Feature Creation in H2O.\n\n Creates a frame in H2O with n-th order interaction features between categorical columns, as specified by\n the user.\n\n :param factors: list of factor columns (either indices or column names).\n :param bool pairwise: Whether to create pairwise interactions between factors (otherwise create one\n higher-order interaction). Only applicable if there are 3 or more factors.\n :param int max_factors: Max. number of factor levels in pair-wise interaction terms (if enforced, one extra\n catch-all factor will be made).\n :param int min_occurrence: Min. 
occurrence threshold for factor levels in pair-wise interaction terms.\n :param str destination_frame: (internal) string indicating the key for the frame created.\n\n :returns: an H2OFrame\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris = iris.cbind(iris[4] == \"Iris-setosa\")\n >>> iris[5] = iris[5].asfactor()\n >>> iris.set_name(5,\"C6\")\n >>> iris = iris.cbind(iris[4] == \"Iris-virginica\")\n >>> iris[6] = iris[6].asfactor()\n >>> iris.set_name(6, name=\"C7\")\n >>> two_way_interactions = h2o.interaction(iris,\n ... factors=[4,5,6],\n ... pairwise=True,\n ... max_factors=10000,\n ... min_occurrence=1)\n >>> two_way_interactions\n \"\"\"\n return h2o.interaction(data=self, factors=factors, pairwise=pairwise, max_factors=max_factors,\n min_occurrence=min_occurrence, destination_frame=destination_frame)\n\n\n def toupper(self):\n \"\"\"\n Translate characters from lower to upper case for a particular column.\n\n :returns: new H2OFrame with all strings in the current frame converted to the uppercase.\n\n :examples:\n\n >>> frame = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> frame[\"C5\"]\n >>> frame[\"C5\"].toupper()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"toupper\", self), cache=self._ex._cache)\n\n def grep(self,pattern, ignore_case = False, invert = False, output_logical = False):\n \"\"\"\n Searches for matches to argument `pattern` within each element\n of a string column.\n\n Default behavior is to return indices of the elements matching the pattern. Parameter\n `output_logical` can be used to return a logical vector indicating if the element matches\n the pattern (1) or not (0).\n\n :param str pattern: A character string containing a regular expression.\n :param bool ignore_case: If True, then case is ignored during matching.\n :param bool invert: If True, then identify elements that do not match the pattern.\n :param bool output_logical: If True, then return logical vector of indicators instead of list of matching positions\n :return: H2OFrame holding the matching positions or a logical list if `output_logical` is enabled.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> pattern = \"Iris-setosa\"\n >>> iris[\"class\"].grep(pattern, output_logical=True)\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"grep\", self, pattern, ignore_case, invert, output_logical))\n\n def tolower(self):\n \"\"\"\n Translate characters from upper to lower case for a particular column.\n\n :returns: new H2OFrame with all strings in the current frame converted to the lowercase.\n\n :examples:\n\n >>> frame = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> frame[\"C5\"]\n >>> frame[\"C5\"].tolower()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"tolower\", self), cache=self._ex._cache)\n\n\n def rep_len(self, length_out):\n \"\"\"\n Create a new frame replicating the current frame.\n\n If the source frame has a single column, then the new frame will be replicating rows and its dimensions\n will be ``length_out x 1``. However if the source frame has more than 1 column, then then new frame\n will be replicating data in columnwise direction, and its dimensions will be ``nrows x length_out``,\n where ``nrows`` is the number of rows in the source frame. 
Also note that if ``length_out`` is smaller\n than the corresponding dimension of the source frame, then the new frame will actually be a truncated\n version of the original.\n\n :param int length_out: Number of columns (rows) of the resulting H2OFrame\n :returns: new H2OFrame with repeated data from the current frame.\n\n :examples:\n\n >>> from random import randrange\n >>> import numpy as np\n >>> import math\n >>> row_num = randrange(1,10)\n >>> col_num = randrange(1,10)\n >>> length_out_r = math.ceil(0.78*row_num)\n >>> python_lists = np.random.randint(-5,5, (row_num, col_num))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe\n >>> one_column = h2oframe[0].rep_len(length_out=(length_out_r+row_num))\n >>> one_column\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"rep_len\", self, length_out))\n\n\n def scale(self, center=True, scale=True):\n \"\"\"\n Center and/or scale the columns of the current frame.\n\n :param center: If True, then demean the data. If False, no shifting is done. If ``center`` is a list of\n numbers then shift each column by the corresponding amount.\n :param scale: If True, then scale the data by each column's standard deviation. If False, no scaling\n is done. If ``scale`` is a list of numbers, then scale each column by the requested amount.\n :returns: an H2OFrame with scaled values from the current frame.\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.uniform(1, 10, (10000,2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe\n >>> newframe = h2oframe.scale(center=True, scale=True)\n >>> newframe\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"scale\", self, center, scale), cache=self._ex._cache)\n\n\n def signif(self, digits=6):\n \"\"\"\n Round doubles/floats to the given number of significant digits.\n\n :param int digits: Number of significant digits to retain.\n :returns: new H2OFrame with rounded values from the original frame.\n\n :examples:\n\n >>> data = [[0.2348, 1.2380, 8.9032134],\n ... [4.321321, 4.907432, 6.3]]\n >>> h2o_data = h2o.H2OFrame(data)\n >>> h2o_data\n >>> h2o_data.signif(digits = 2)\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"signif\", self, digits), cache=self._ex._cache)\n\n\n def round(self, digits=0):\n \"\"\"\n Round doubles/floats to the given number of decimal places.\n\n :param int digits: The number of decimal places to retain. Rounding to a negative number of decimal places is\n not supported. For rounding we use the \"round half to even\" mode (IEC 60559 standard), so that\n ``round(2.5) = 2`` and ``round(3.5) = 4``.\n :returns: new H2OFrame with rounded values from the original frame.\n\n :examples:\n\n >>> data = [[0.2348, 1.2380, 8.9032134],\n ... 
[4.321321, 4.907432, 6.3]]\n >>> h2o_data = h2o.H2OFrame(data)\n >>> h2o_data.round(digits = 4)\n >>> h2o_data.round(digits = 0)\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"round\", self, digits), cache=self._ex._cache)\n\n\n def asnumeric(self):\n \"\"\"\n Create a new frame with all columns converted to numeric.\n \n :returns: New frame with all columns converted to numeric.\n\n :examples:\n\n >>> cars = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv\")\n >>> cars.asnumeric()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"as.numeric\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"real\" for k in fr._ex._cache.types.keys()}\n return fr\n\n\n def ascharacter(self):\n \"\"\"\n Convert all columns in the frame into strings.\n\n :returns: new H2OFrame with columns of \"string\" type.\n\n :examples:\n\n >>> h2o = h2o.import_file(\"https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv\")\n >>> h2o['cylinders'] = h2o['cylinders'].asfactor()\n >>> h2o['cylinders'].ascharacter()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"as.character\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"string\" for k in fr._ex._cache.types.keys()}\n return fr\n\n\n def na_omit(self):\n \"\"\"\n Remove rows with NAs from the H2OFrame.\n\n :returns: new H2OFrame with all rows from the original frame containing any NAs removed.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv\")\n >>> iris\n >>> newframe=iris.na_omit()\n >>> newframe\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"na.omit\", self), cache=self._ex._cache)\n fr._ex._cache.nrows = -1\n return fr\n\n\n def difflag1(self):\n \"\"\"\n Conduct a diff-1 transform on a numeric frame column.\n\n :returns: an H2OFrame where each element is equal to the corresponding element in the source\n frame minus the previous-row element in the same frame.\n\n :examples:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> df = pd.DataFrame(np.random.randint(0,100,size=(1000000, 1)),\n ... columns=list('A'))\n >>> df_diff = df.diff()\n >>> df_diff_h2o = h2o.H2OFrame(df_diff)\n >>> fr = h2o.H2OFrame(df)\n >>> fr_diff = fr.difflag1()\n >>> fr_diff\n \"\"\"\n if self.ncols > 1:\n raise H2OValueError(\"Only single-column frames supported\")\n if self.types[self.columns[0]] not in {\"real\", \"int\", \"bool\"}:\n raise H2OValueError(\"Numeric column expected\")\n fr = H2OFrame._expr(expr=ExprNode(\"difflag1\", self), cache=self._ex._cache)\n return fr\n\n\n def isna(self):\n \"\"\"\n For each element in an H2OFrame, determine if it is NA or not.\n\n :returns: an H2OFrame of 1s and 0s, where 1s mean the values were NAs.\n\n :examples:\n\n >>> from collections import OrderedDict\n >>> frame = h2o.H2OFrame.from_python(OrderedDict([\n ... (\"A\", [1, 0, 3, 4, 8, 4, 7]),\n ... (\"B\", [2, nan, -1, nan, nan, 9, 0]),\n ... (\"C\", [\"one\", \"\", \"two\", \"\", \"seventeen\", \"1\", \"\"]),\n ... (\"D\", [\"oneteen\", \"\", \"twoteen\", \"\", \"sixteen\", \"twenteen\", \"\"])\n ... ]), na_strings=[\"\"],\n ... 
column_types={\"C\": \"enum\", \"D\": \"string\"})\n >>> frame.isna()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"is.na\", self))\n fr._ex._cache.nrows = self._ex._cache.nrows\n fr._ex._cache.ncols = self._ex._cache.ncols\n if self._ex._cache.names:\n fr._ex._cache.names = [\"isNA(%s)\" % n for n in self._ex._cache.names]\n fr._ex._cache.types = {\"isNA(%s)\" % n: \"int\" for n in self._ex._cache.names}\n return fr\n\n\n def year(self):\n \"\"\"\n Extract the \"year\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"year\" part from the source frame.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df[\"C1\"].year()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"year\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def month(self):\n \"\"\"\n Extract the \"month\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"month\" part from the source frame.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df[\"C1\"].month()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"month\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def week(self):\n \"\"\"\n Extract the \"week\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"week\" part from the source frame.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df[\"C1\"].week()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"week\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def day(self):\n \"\"\"\n Extract the \"day\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"day\" part from the source frame.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df[\"C1\"].day()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"day\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def dayOfWeek(self):\n \"\"\"\n Extract the \"day-of-week\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"day-of-week\" part from the source frame.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... 
seed=123)\n >>> df[\"C1\"].dayOfWeek()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"dayOfWeek\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def hour(self):\n \"\"\"\n Extract the \"hour-of-day\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"hour-of-day\" part from the source frame.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df[\"C1\"].hour()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"hour\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def minute(self):\n \"\"\"\n Extract the \"minute\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"minute\" part from the source frame.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... seed=123)\n >>> df[\"C1\"].minute()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"minute\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def second(self):\n \"\"\"\n Extract the \"second\" part from a date column.\n\n :returns: a single-column H2OFrame containing the \"second\" part from the source frame.\n\n :examples:\n\n >>> df = h2o.create_frame(rows=10,\n ... cols=3,\n ... factors=10,\n ... categorical_fraction=1.0/3,\n ... time_fraction=1.0/3,\n ... real_fraction=1.0/3,\n ... real_range=100,\n ... missing_fraction=0.0,\n ... 
seed=123)\n >>> df[\"C1\"].second()\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"second\", self), cache=self._ex._cache)\n if fr._ex._cache.types_valid():\n fr._ex._cache.types = {k: \"int\" for k in self._ex._cache.types.keys()}\n return fr\n\n\n def runif(self, seed=None):\n \"\"\"\n Generate a column of random numbers drawn from a uniform distribution [0,1) and\n having the same data layout as the source frame.\n\n :param int seed: seed for the random number generator.\n\n :returns: Single-column H2OFrame filled with doubles sampled uniformly from [0,1).\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.uniform(0,1, 10000)\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.runif(seed=None)\n \"\"\"\n fr = H2OFrame._expr(expr=ExprNode(\"h2o.runif\", self, -1 if seed is None else seed))\n fr._ex._cache.ncols = 1\n fr._ex._cache.nrows = self.nrow\n return fr\n\n\n def stratified_split(self, test_frac=0.2, seed=-1):\n \"\"\"\n Construct a column that can be used to perform a random stratified split.\n\n :param float test_frac: The fraction of rows that will belong to the \"test\".\n :param int seed: The seed for the random number generator.\n\n :returns: an H2OFrame having single categorical column with two levels: ``\"train\"`` and ``\"test\"``.\n\n :examples:\n \n >>> import numpy as np\n >>> python_lists = np.random.randint(-3,3, (10000,2))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists).asfactor()\n >>> h2oframe[1].stratified_split(test_frac=0.2, seed=-1)\n \"\"\"\n return H2OFrame._expr(expr=ExprNode('h2o.random_stratified_split', self, test_frac, seed))\n\n\n def match(self, table, nomatch=0):\n \"\"\"\n Make a vector of the positions of (first) matches of its first argument in its second.\n\n Only applicable to single-column categorical/string frames.\n\n :param List table: the list of items to match against\n :param int nomatch: value that should be returned when there is no match.\n :returns: a new H2OFrame containing for each cell from the source frame the index where\n the pattern ``table`` first occurs within that cell.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv\")\n >>> matchFrame = iris[\"C5\"].match(['Iris-versicolor'])\n >>> matchFrame\n >>> matchFrame = iris[\"C5\"].match(['Iris-setosa'])\n >>> matchFrame\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"match\", self, table, nomatch, None))\n\n\n def cut(self, breaks, labels=None, include_lowest=False, right=True, dig_lab=3):\n \"\"\"\n Cut a numeric vector into categorical \"buckets\".\n\n This method is only applicable to a single-column numeric frame.\n\n :param List[float] breaks: The cut points in the numeric vector.\n :param List[str] labels: Labels for categorical levels produced. Defaults to set notation of\n intervals defined by the breaks.\n :param bool include_lowest: By default, cuts are defined as intervals ``(lo, hi]``. If this parameter\n is True, then the interval becomes ``[lo, hi]``.\n :param bool right: Include the high value: ``(lo, hi]``. If False, get ``(lo, hi)``.\n :param int dig_lab: Number of digits following the decimal point to consider.\n\n :returns: Single-column H2OFrame of categorical data.\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.uniform(-2,2,(100,1))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> breaks = [-2,1,0,1,2]\n >>> newframe = h2oframe.cut(breaks,\n ... labels=None,\n ... include_lowest=False,\n ... 
right=True,\n ... dig_lab=3)\n >>> newframe\n \"\"\"\n assert_is_type(breaks, [numeric])\n if self.ncols != 1: raise H2OValueError(\"Single-column frame is expected\")\n if self.types[self.names[0]] not in {\"int\", \"real\"}: raise H2OValueError(\"A numeric column is expected\")\n fr = H2OFrame._expr(expr=ExprNode(\"cut\", self, breaks, labels, include_lowest, right, dig_lab),\n cache=self._ex._cache)\n fr._ex._cache.types = {k: \"enum\" for k in self.names}\n return fr\n\n\n def which(self):\n \"\"\"\n Compose the list of row indices for which the frame contains non-zero values.\n\n Only applicable to integer single-column frames.\n Equivalent to comprehension ``[index for index, value in enumerate(self) if value]``.\n\n :returns: a new single-column H2OFrame containing indices of those rows in the original frame\n that contained non-zero values.\n\n :examples:\n\n >>> import numpy as np\n >>> python_lists = np.random.randint(1,5, (100,1))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> h2oframe.which()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"which\", self))\n\n def idxmax(self,skipna=True, axis=0):\n \"\"\"\n Get the index of the max value in a column or row\n\n :param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of finding the max index. If 0 (default), then the max index is searched columnwise, and the\n result is a frame with 1 row and number of columns as in the original frame. If 1, then the max index is searched\n rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.\n :returns: either a list of max index values per-column or an H2OFrame containing max index values\n per-row from the original frame.\n\n :examples:\n\n >>> f1 = h2o.create_frame(rows = 10000,\n ... cols = 100,\n ... categorical_fraction = 0,\n ... missing_fraction = 0,\n ... seed=1234)\n >>> f1.idxmax()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"which.max\", self, skipna, axis))\n\n def idxmin(self,skipna=True, axis=0):\n \"\"\"\n Get the index of the min value in a column or row\n\n :param bool skipna: If True (default), then NAs are ignored during the search. Otherwise presence\n of NAs renders the entire result NA.\n :param int axis: Direction of finding the min index. If 0 (default), then the min index is searched columnwise, and the\n result is a frame with 1 row and number of columns as in the original frame. If 1, then the min index is searched\n rowwise and the result is a frame with 1 column, and number of rows equal to the number of rows in the original frame.\n :returns: either a list of min index values per-column or an H2OFrame containing min index values\n per-row from the original frame.\n\n :examples:\n\n >>> f1 = h2o.create_frame(rows = 10000,\n ... cols = 100,\n ... categorical_fraction = 0,\n ... missing_fraction = 0,\n ... seed=1234)\n >>> f1.idxmin()\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"which.min\", self, skipna, axis))\n\n\n def ifelse(self, yes, no):\n \"\"\"\n Equivalent to ``[y if t else n for t,y,n in zip(self,yes,no)]``.\n\n Based on the booleans in the test vector, the output has the values of the\n yes and no vectors interleaved (or merged together). All Frames must have\n the same row count. Single column frames are broadened to match wider\n Frames. 
Scalars are allowed, and are also broadened to match wider frames.\n\n :param yes: Frame to use if ``test`` is true; may be a scalar or single column\n :param no: Frame to use if ``test`` is false; may be a scalar or single column\n\n :returns: an H2OFrame of the merged yes/no frames/scalars according to the test input frame.\n\n :examples:\n\n >>> import numpy as np\n >>> from h2o.frame import H2OFrame\n >>> python_lists = np.random.uniform(-1,1, (5,5))\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists)\n >>> newFrame = (h2oframe>0).ifelse(1, -1)\n >>> newFrame\n \"\"\"\n return H2OFrame._expr(expr=ExprNode(\"ifelse\", self, yes, no))\n\n\n def apply(self, fun=None, axis=0):\n \"\"\"\n Apply a lambda expression to an H2OFrame.\n\n :param fun: a lambda expression to be applied per row or per column.\n :param axis: 0 = apply to each column; 1 = apply to each row\n :returns: a new H2OFrame with the results of applying ``fun`` to the current frame.\n\n :examples:\n\n >>> python_lists = [[1,2,3,4], [1,2,3,4]]\n >>> h2oframe = h2o.H2OFrame(python_obj=python_lists,\n ... na_strings=['NA'])\n >>> colMean = h2oframe.apply(lambda x: x.mean(), axis=0)\n >>> rowMean = h2oframe.apply(lambda x: x.mean(), axis=1)\n >>> colMean\n >>> rowMean\n \"\"\"\n from .astfun import lambda_to_expr\n assert_is_type(axis, 0, 1)\n assert_is_type(fun, FunctionType)\n assert_satisfies(fun, fun.__name__ == \"<lambda>\")\n res = lambda_to_expr(fun)\n return H2OFrame._expr(expr=ExprNode(\"apply\", self, 1 + (axis == 0), *res))\n\n\n #-------------------------------------------------------------------------------------------------------------------\n # Synonyms + Deprecated\n #-------------------------------------------------------------------------------------------------------------------\n # Here we have all methods that are provided as alternative names to some other names defined above. This also\n # includes methods that we rename as part of the deprecation process (but keeping the old name for the sake of\n # backward compatibility). We gather them all down here to have a slightly cleaner code.\n\n @staticmethod\n def mktime(year=1970, month=0, day=0, hour=0, minute=0, second=0, msec=0):\n \"\"\"\n Deprecated, use :func:`moment` instead.\n\n This function was left for backward-compatibility purposes only. It is\n not very stable, and counterintuitively uses 0-based months and days,\n so \"January 4th, 2001\" should be entered as ``mktime(2001, 0, 3)``.\n \"\"\"\n return H2OFrame._expr(ExprNode(\"mktime\", year, month, day, hour, minute, second, msec))\n\n @property\n def columns(self):\n \"\"\"\n Displays the column names. Same as ``self.names``.\n\n :returns: Column names.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.columns\n \"\"\"\n return self.names\n\n @columns.setter\n def columns(self, value):\n self.set_names(value)\n\n @property\n def col_names(self):\n \"\"\"\n Displays the column names. 
Same as ``self.names``.\n\n :returns: Column names.\n\n :examples:\n\n >>> python_obj = [1,2,2.5,-100.9,0]\n >>> frame = h2o.H2OFrame(python_obj)\n >>> frame.col_names\n \"\"\"\n return self.names\n\n @col_names.setter\n def col_names(self, value):\n self.set_names(value)\n\n def __len__(self):\n \"\"\"Number of rows in the dataframe, same as ``self.nrows``.\"\"\"\n return self.nrows\n\n @property\n def nrow(self):\n \"\"\"\n Same as ``self.nrows``.\n\n :returns: Number of rows in the dataframe.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv\")\n >>> iris.nrow\n \"\"\"\n return self.nrows\n\n @property\n def ncol(self):\n \"\"\"\n Same as ``self.ncols``.\n\n :returns: Number of columns in the dataframe.\n\n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader_NA_2.csv\")\n >>> iris.ncol\n \"\"\"\n return self.ncols\n\n @property\n def dim(self):\n \"\"\"\n Gives the dimensions of the frame. Same as ``list(self.shape)``.\n\n :returns: Frame dimensions.\n \n :examples:\n\n >>> iris = h2o.import_file(\"http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv\")\n >>> iris.dim\n \"\"\"\n return [self.nrow, self.ncol]\n\n #@property\n #def frame_id(self):\n # \"\"\"Same as ``frame.id``.\"\"\"\n # return self.id\n\n #@frame_id.setter\n #def frame_id(self, value):\n # self.id = value\n\n @staticmethod\n def from_python(python_obj, destination_frame=None, header=0, separator=\",\", column_names=None,\n column_types=None, na_strings=None):\n \"\"\"[DEPRECATED] Use constructor ``H2OFrame()`` instead.\"\"\"\n return H2OFrame(python_obj, destination_frame, header, separator, column_names, column_types,\n na_strings)\n\n\n def ischaracter(self):\n \"\"\"[DEPRECATED] Use ``frame.isstring()``.\"\"\"\n return self.isstring()\n\n\n\n#-----------------------------------------------------------------------------------------------------------------------\n# Helpers\n#-----------------------------------------------------------------------------------------------------------------------\n\ndef _getValidCols(by_idx, fr): # so user can input names of the columns as well is idx num\n tmp = []\n for i in by_idx:\n if type(i) == str:\n if i not in fr.names:\n raise H2OValueError(\"Column: \" + i + \" not in frame.\")\n tmp.append(fr.names.index(i))\n elif type(i) != int:\n raise H2OValueError(\"Join on column: \" + i + \" not of type int\")\n else:\n tmp.append(i)\n return list(set(tmp))\n\ndef _binop(lhs, op, rhs, rtype=None):\n assert_is_type(lhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame)\n assert_is_type(rhs, str, numeric, datetime.date, pandas_timestamp, numpy_datetime, H2OFrame)\n if isinstance(lhs, H2OFrame) and isinstance(rhs, H2OFrame) and lhs._is_frame and rhs._is_frame:\n lrows, lcols = lhs.shape\n rrows, rcols = rhs.shape\n compatible = ((lcols == rcols and lrows == rrows) or\n (lcols == 1 and lrows == rrows) or\n (lcols == 1 and lrows == 1) or\n (rcols == 1 and lrows == rrows) or\n (rcols == 1 and rrows == 1) or\n (lrows == 1 and lcols == rcols) or\n (rrows == 1 and lcols == rcols)\n )\n if not compatible:\n raise H2OValueError(\"Attempting to operate on incompatible frames: (%d x %d) and (%d x %d)\"\n % (lrows, lcols, rrows, rcols))\n\n if is_type(lhs, pandas_timestamp, numpy_datetime, datetime.date):\n lhs = H2OFrame.moment(date=lhs)\n if is_type(rhs, pandas_timestamp, numpy_datetime, datetime.date):\n rhs = 
H2OFrame.moment(date=rhs)\n\n cache = lhs._ex._cache if isinstance(lhs, H2OFrame) else rhs._ex._cache\n res = H2OFrame._expr(expr=ExprNode(op, lhs, rhs), cache=cache)\n if rtype is not None and res._ex._cache._names is not None:\n res._ex._cache._types = {name: rtype for name in res._ex._cache._names}\n return res\n\n\n\n\ndef generatePandaEnumCols(pandaFtrain, cname, nrows, domainL):\n \"\"\"\n For an H2O Enum column, we perform one-hot-encoding here and add one more column, \"missing(NA)\" to it.\n\n :param pandaFtrain: panda frame derived from H2OFrame\n :param cname: column name of enum col\n :param nrows: number of rows of enum col\n :return: panda frame with enum col encoded correctly for native XGBoost\n \"\"\"\n import numpy as np\n import pandas as pd\n \n cmissingNames=[cname+\".missing(NA)\"]\n tempnp = np.zeros((nrows,1), dtype=np.int)\n # check for nan and assign it correct value\n colVals = pandaFtrain[cname]\n for ind in range(nrows):\n try:\n if not(colVals[ind] in domainL):\n tempnp[ind]=1\n except ValueError:\n pass\n zeroFrame = pd.DataFrame(tempnp)\n zeroFrame.columns=cmissingNames\n temp = pd.get_dummies(pandaFtrain[cname], prefix=cname, drop_first=False)\n tempNames = list(temp) # get column names\n colLength = len(tempNames)\n newNames = ['a']*colLength\n\n for ind in range(0,colLength):\n newNames[ind]=cname+\"_\"+domainL[ind]\n ftemp = temp[newNames]\n ctemp = pd.concat([ftemp, zeroFrame], axis=1)\n return ctemp\n" ]
[ [ "scipy.sparse.issparse", "scipy.sparse.find", "pandas.DataFrame", "scipy.sparse.csr_matrix", "pandas.concat", "pandas.get_dummies" ] ]
guptakhil12/show-tell
[ "5003898894425e902bed10d4cf57207842ee3bcb" ]
[ "rnn.py" ]
[ "\nfrom cnn import ResNet\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch\nimport torch.nn as nn\n\n\n\nclass RNN(torch.nn.Module):\n\n def __init__(self, embed_dim, num_hidden_units, vocab_size, num_layers):\n '''\n Args:\n embed_dim (int) : Embedding dimension between CNN and RNN\n num_hidden_units (int) : Number of hidden units\n vocab_size (int) : Size of the vocabulary\n num_layers (int) : # of layers\n '''\n\n super(RNN, self).__init__()\n\n self.embeddings = nn.Embedding(vocab_size, embed_dim)\n self.unit = nn.GRU(embed_dim, num_hidden_units, num_layers, batch_first=True)\n self.linear = nn.Linear(num_hidden_units, vocab_size)\n\n def forward(self, cnn_feature, image_caption, caption_size):\n\n caption_embedding = self.embeddings(image_caption)\n torch_raw_embeddings = torch.cat((cnn_feature.unsqueeze(1), caption_embedding), 1)\n torch_packed_embeddings = nn.utils.rnn.pack_padded_sequence(torch_raw_embeddings, caption_size, batch_first=True)\n torch_packed_embeddings_unit= self.unit(torch_packed_embeddings)[0]\n tokenized_predicted_sentence = self.linear(torch_packed_embeddings_unit[0])\n\n return tokenized_predicted_sentence\n\n def sentence_index(self, cnn_feature, beam_size=0):\n \n caption_max_size = 25 \n rnn_hidden_state = None\n rnn_data = cnn_feature.unsqueeze(1)\n\n # the previous code, which gives the same result as when beam_size=1\n if beam_size == 0: \n predicted_sentence_idx = []\n \n for idx in range(caption_max_size):\n \n next_state, rnn_hidden_state = self.unit(rnn_data, rnn_hidden_state)\n result_state = self.linear(next_state.squeeze(1))\n predicted_tokenized_word = result_state.max(1)[1]\n predicted_sentence_idx.append(predicted_tokenized_word)\n rnn_data = self.embeddings(predicted_tokenized_word)\n rnn_data = rnn_data.unsqueeze(1)\n \n predicted_sentence_idx = torch.stack(predicted_sentence_idx, 1).squeeze()\n \n return predicted_sentence_idx\n \n # the new code to implement beam search, now only works when batch_size=1\n next_state, rnn_hidden_state = self.unit(rnn_data, rnn_hidden_state)\n result_state = self.linear(next_state.squeeze(1))\n topk_predicted_tokenized_word = result_state.topk(k=beam_size,dim=1)[1]\n \n old_beam_sentence = [] \n old_beam_word = []\n for k in range(beam_size):\n kth_predicted_tokenized_word = topk_predicted_tokenized_word[:,k]\n old_beam_word.append(kth_predicted_tokenized_word)\n \n \n kth_predicted_sentence_idx = []\n kth_predicted_sentence_idx.append(kth_predicted_tokenized_word) \n\n old_beam_sentence.append(kth_predicted_sentence_idx) \n\n idx = 1\n while (idx < caption_max_size):\n idx = idx + 1\n new_beam_sentence = [] \n new_beam_word = [] \n new_beam_prob = []\n for k in range(beam_size):\n \n rnn_data = self.embeddings(old_beam_word[k])\n rnn_data = rnn_data.unsqueeze(1)\n next_state, rnn_hidden_state = self.unit(rnn_data, rnn_hidden_state)\n result_state = self.linear(next_state.squeeze(1))\n \n topk_predicted_tokenized_word = result_state.topk(k=beam_size,dim=1)[1]\n topk_predicted_tokenized_word_prob = result_state.topk(k=beam_size,dim=1)[0] ##\n \n for j in range(beam_size):\n previous_predicted_sentence_idx = old_beam_sentence[k].copy()\n jth_predicted_tokenized_word = topk_predicted_tokenized_word[:,j]\n jth_sentence = previous_predicted_sentence_idx\n jth_sentence.append(jth_predicted_tokenized_word) # \n new_beam_sentence.append(jth_sentence)\n new_beam_word.append(jth_predicted_tokenized_word)\n new_beam_prob.append(topk_predicted_tokenized_word_prob[:,j])\n \n old_beam_sentence 
= [x for _,x in sorted(zip(new_beam_prob,new_beam_sentence), reverse=True)][0:beam_size]\n old_beam_word = [x for _,x in sorted(zip(new_beam_prob,new_beam_word), reverse=True)][0:beam_size]\n\n\n predicted_sentence_idx = old_beam_sentence[0]\n predicted_sentence_idx = torch.stack(predicted_sentence_idx, 1).squeeze()\n return predicted_sentence_idx\n\n\n\n \nif __name__ == \"__main__\":\n \n # Just to show how the rnn works. Never mind what's the input here.\n # Now our beam search only works with batch_size=1.\n \n cnn = ResNet(resnet_version=18)\n rnn = RNN(embed_dim=256, num_hidden_units=512, vocab_size=8000, num_layers=1)\n cnn.eval()\n rnn.eval()\n transform_train = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32, padding=4),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))\n ]) \n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n trainset = torchvision.datasets.CIFAR100(root='~/scratch/',\n train=True,download=True, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=1, shuffle=True, num_workers=8)\n for images, labels in trainloader:\n images = images.to(device)\n cnn_feature = cnn(images)\n \n # previous result\n rnn_tokenized_sentence_prediction = rnn.sentence_index(cnn_feature)\n print(rnn_tokenized_sentence_prediction)\n \n # same as when beam_size=1\n rnn_tokenized_sentence_prediction = rnn.sentence_index(cnn_feature, beam_size=1)\n print(rnn_tokenized_sentence_prediction)\n \n # beam_size=20\n rnn_tokenized_sentence_prediction = rnn.sentence_index(cnn_feature, beam_size=20)\n print(rnn_tokenized_sentence_prediction)\n\n\n break\n \n \n\n" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "torch.nn.Linear", "torch.nn.utils.rnn.pack_padded_sequence", "torch.nn.Embedding", "torch.nn.GRU", "torch.cuda.is_available" ] ]
suneric/face_detection
[ "bf345a29690729eca912d830f3f5d690245de995" ]
[ "face_in_image.py" ]
[ "import face_recognition\nimport cv2\nimport argparse\nimport os\nimport numpy as np\nimport math\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--input', type=str, default=\"img\")\n parser.add_argument('--output', type=str, default=None)\n parser.add_argument('--source', type=str, default=None )\n parser.add_argument('--task', type=str, default='display') # sort, convert\n return parser.parse_args()\n\ndef distance(pt1, pt2):\n return math.sqrt((pt1[0]-pt2[0])*(pt1[0]-pt2[0]) + (pt1[1]-pt2[1])*(pt1[1]-pt2[1]))\n\ndef pose(file):\n f = feature(file)\n if f == None:\n print(\"no feature\", file)\n return None\n leb, reb, nose, chin = f[0], f[1], f[2], f[3] \n ld = 0.5*(distance(leb[0],nose[0])+distance(leb[-1],nose[0]))\n rd = 0.5*(distance(reb[0],nose[0])+distance(reb[-1],nose[0])) \n return ld-rd\n\ndef sort(files, input, output):\n d = len(files)*[0.0]\n for i in range(0, len(files)):\n d[i] = pose(os.path.join(input, files[i]))\n sd = d.copy()\n sd.sort()\n # print(d)\n # print(sd)\n for i in range(0,len(sd)):\n index, = np.where(np.isclose(d,sd[i]))\n # print(index)\n for j in range(0,len(index)):\n source = cv2.imread(os.path.join(input, files[index[j]]))\n cv2.imwrite(os.path.join(output, str(i)+'_'+str(j)+'.jpg'), source)\n\ndef feature(file):\n image = face_recognition.load_image_file(file)\n face_landmarks_list = face_recognition.face_landmarks(image)\n if len(face_landmarks_list) == 0:\n print(\"no feature found\")\n return None\n\n face_landmarks = face_landmarks_list[0]\n image = cv2.polylines(image, np.array(face_landmarks[\"left_eyebrow\"]).reshape((-1,1,2)), 1, (0,0,255),2)\n image = cv2.polylines(image, np.array(face_landmarks[\"right_eyebrow\"]).reshape((-1,1,2)), 1, (0,0,255),2)\n image = cv2.polylines(image, np.array(face_landmarks[\"left_eye\"]).reshape((-1,1,2)), 1, (0,0,255),2)\n image = cv2.polylines(image, np.array(face_landmarks[\"right_eye\"]).reshape((-1,1,2)), 1, (0,0,255,2))\n image = cv2.polylines(image, np.array(face_landmarks[\"top_lip\"]).reshape((-1,1,2)), 1, (0,0,255),2)\n image = cv2.polylines(image, np.array(face_landmarks[\"bottom_lip\"]).reshape((-1,1,2)), 1, (0,0,255),2)\n image = cv2.polylines(image, np.array(face_landmarks[\"nose_bridge\"]).reshape((-1,1,2)), 1, (0,0,255),2)\n image = cv2.polylines(image, np.array(face_landmarks[\"nose_tip\"]).reshape((-1,1,2)), 1, (0,0,255),2)\n image = cv2.polylines(image, np.array(face_landmarks[\"chin\"]).reshape((-1,1,2)), 1, (0,0,255),2)\n cv2.namedWindow(\"Akira\", cv2.WND_PROP_FULLSCREEN)\n cv2.imshow('Akira', image)\n cv2.waitKey(3)\n\n left_eb = face_landmarks[\"left_eyebrow\"]\n right_eb = face_landmarks[\"right_eyebrow\"]\n nose = face_landmarks[\"nose_bridge\"]\n chin = face_landmarks[\"chin\"]\n\n return (left_eb, right_eb, nose, chin)\n \ndef process(file, encode):\n image = face_recognition.load_image_file(file)\n # print(face_landmarks_list)\n (h, w) = image.shape[:2]\n #image = cv2.resize(image, None, fx=0.5, fy=0.5)\n face_locations = face_recognition.face_locations(image)\n\n index = 0\n if encode != None:\n unknown_encode = face_recognition.face_encodings(image)\n for i in range(0,len(unknown_encode)):\n results = face_recognition.compare_faces([encode], unknown_encode[i])\n if results[0]:\n index = i\n break\n\n if len(face_locations) > index:\n # draw image with face recognition location\n (t,r,b,l) = face_locations[index]\n cv2.rectangle(image, (l,t), (r,b), (0,255,0), 2)\n cv2.putText(image, \"AKIRA\", (l,t-10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 
1)\n cv2.namedWindow(\"Akira\", cv2.WND_PROP_FULLSCREEN)\n cv2.imshow('Akira', image)\n cv2.waitKey(3)\n\n f = feature(file)\n nose = f[2]\n ref0,ref1 = nose[0],nose[-1]\n fh = 3*int(distance(ref0,ref1))\n fw = 2*fh\n # print(ref0, ref1)\n \n t1 = ref0[1] - 2*fh # u, height\n b1 = ref0[1] + 5*fh \n r1 = ref0[0] + 2*fw # v, width\n l1 = ref0[0] - 2*fw\n \n if t1 < 0:\n t1 = 0 \n if b1 > h:\n b1 = h\n if l1 < 0:\n l1 = 0 \n if r1 > w:\n r1 = w\n \n oh = 800\n origin = cv2.imread(file, cv2.IMREAD_COLOR)\n crop = origin[int(t1):int(b1), int(l1):int(r1)]\n (h, w) = crop.shape[:2]\n r = float(oh/h)\n ow = int(r*w)\n resize = cv2.resize(crop, (ow, oh))\n if ow > 700:\n resize = resize[0:799,int(0.5*ow)-350:int(0.5*ow)+350] \n dst = cv2.blur(resize,(3,3))\n # dst = cv2.detailEnhance(resize, sigma_s=10, sigma_r=0.01)\n return dst\n else:\n return cv2.imread(file, cv2.IMREAD_COLOR)\n\n\nif __name__ == \"__main__\":\n args = get_args()\n task = args.task \n path = os.walk(args.input)\n\n if task == 'convert':\n known_encode = None\n if args.source != None:\n known_image = face_recognition.load_image_file(args.source)\n known_encode = face_recognition.face_encodings(known_image)[0]\n\n for root, directories, files in path:\n for directory in directories:\n print(directory)\n for file in files:\n inputname = os.path.join(args.input, file) \n print(inputname)\n img = process(inputname, known_encode)\n cv2.imwrite(os.path.join(args.output, file), img)\n elif task == 'display':\n for root, directories, files in path:\n for directory in directories:\n print(directory)\n for file in files:\n inputname = os.path.join(args.input, file) \n print(inputname)\n feature(inputname)\n elif task == 'sort':\n for root, directories, files in path:\n sort(files,args.input,args.output)\n" ]
[ [ "numpy.array", "numpy.isclose" ] ]
ultrainren/robosuite
[ "4a32bde5f8f6247733a2ca439318bedc5a83a941" ]
[ "robosuite/environments/panda.py" ]
[ "from collections import OrderedDict\nimport numpy as np\n\nimport robosuite.utils.transform_utils as T\nfrom robosuite.environments import MujocoEnv\n\nfrom robosuite.models.grippers import gripper_factory\nfrom robosuite.models.robots import Panda\n\nfrom robosuite.controllers.arm_controller import *\nfrom collections import deque\nimport hjson\n\n\nclass PandaEnv(MujocoEnv):\n \"\"\"Initializes a Panda robot environment.\"\"\"\n\n def __init__(\n self,\n controller_config_file,\n controller,\n gripper_type=None,\n gripper_visualization=False,\n use_indicator_object=False,\n has_renderer=False,\n has_offscreen_renderer=True,\n render_collision_mesh=False,\n render_visual_mesh=True,\n control_freq=10,\n horizon=1000,\n ignore_done=False,\n use_camera_obs=False,\n camera_name=\"frontview\",\n camera_height=256,\n camera_width=256,\n camera_depth=False,\n impedance_ctrl=True, # TODO\n initial_policy=None, # TODO - currently not included in the config file (should be a function)\n **kwargs\n ):\n \"\"\"\n Args:\n controller_config_file (str): filepath to the corresponding controller config file that contains the\n associated controller parameters\n\n controller (str): Can be 'position', 'position_orientation', 'joint_velocity', 'joint_impedance', or\n 'joint_torque'. Specifies the type of controller to be used for dynamic trajectories\n\n gripper_type (str): type of gripper, used to instantiate\n gripper models from gripper factory.\n\n gripper_visualization (bool): True if using gripper visualization.\n Useful for teleoperation.\n\n use_indicator_object (bool): if True, sets up an indicator object that\n is useful for debugging.\n\n has_renderer (bool): If true, render the simulation state in\n a viewer instead of headless mode.\n\n has_offscreen_renderer (bool): True if using off-screen rendering.\n\n render_collision_mesh (bool): True if rendering collision meshes\n in camera. False otherwise.\n\n render_visual_mesh (bool): True if rendering visual meshes\n in camera. False otherwise.\n\n control_freq (float): how many control signals to receive\n in every second. This sets the amount of simulation time\n that passes between every action input.\n\n horizon (int): Every episode lasts for exactly @horizon timesteps.\n\n ignore_done (bool): True if never terminating the environment (ignore @horizon).\n\n use_camera_obs (bool): if True, every observation includes a\n rendered image.\n\n camera_name (str): name of camera to be rendered. 
Must be\n set if @use_camera_obs is True.\n\n camera_height (int): height of camera frame.\n\n camera_width (int): width of camera frame.\n\n camera_depth (bool): True if rendering RGB-D, and RGB otherwise.\n\n impedance_ctrl (bool) : True if we want to control impedance of the end effector\n\n #########\n **kwargs includes additional params that may be specified and will override values found in\n the controller configuration file if the names match\n \"\"\"\n\n self.initial_policy = initial_policy\n self.impedance_ctrl = impedance_ctrl\n if self.impedance_ctrl:\n # Load the appropriate controller\n self._load_controller(controller, controller_config_file, kwargs)\n\n if 'residual_policy_multiplier' in kwargs:\n self.residual_policy_multiplier = kwargs['residual_policy_multiplier']\n else:\n self.residual_policy_multiplier = None\n\n self.goal = np.zeros(3)\n self.goal_orientation = np.zeros(3)\n self.desired_force = np.zeros(3)\n self.desired_torque = np.zeros(3)\n if 'residual_policy_multiplier' in kwargs:\n self.residual_policy_multiplier = kwargs['residual_policy_multiplier']\n else:\n self.residual_policy_multiplier = None\n\n self.initial_policy = initial_policy\n\n self.control_freq = control_freq\n self.timestep = 0\n\n # self.position_limits = [[0,0,0],[0,0,0]]\n # self.orientation_limits = [[0,0,0],[0,0,0]]\n\n self.ee_force = np.zeros(3)\n self.ee_force_bias = np.zeros(3)\n self.contact_threshold = 1 # Maximum contact variation allowed without contact [N]\n\n self.ee_torque = np.zeros(3)\n self.ee_torque_bias = np.zeros(3)\n\n # self.controller = controller\n # TODO - check that these are updated properly\n self.total_kp = np.zeros(6)\n self.total_damping = np.zeros(6)\n\n self.n_avg_ee_acc = 10\n\n self.has_gripper = gripper_type is not None\n self.gripper_type = gripper_type\n self.gripper_visualization = gripper_visualization\n self.use_indicator_object = use_indicator_object\n super().__init__(\n has_renderer=has_renderer,\n has_offscreen_renderer=has_offscreen_renderer,\n render_collision_mesh=render_collision_mesh,\n render_visual_mesh=render_visual_mesh,\n control_freq=control_freq,\n horizon=horizon,\n ignore_done=ignore_done,\n use_camera_obs=use_camera_obs,\n camera_name=camera_name,\n camera_height=camera_height,\n camera_width=camera_height,\n camera_depth=camera_depth,\n )\n\n # Current and previous policy step q values, joint torques, ft ee applied and actions\n self.prev_pstep_ft = np.zeros(6)\n self.curr_pstep_ft = np.zeros(6)\n self.prev_pstep_a = np.zeros(self.dof)\n self.curr_pstep_a = np.zeros(self.dof)\n self.prev_pstep_q = np.zeros(len(self._ref_joint_vel_indexes))\n self.curr_pstep_q = np.zeros(len(self._ref_joint_vel_indexes))\n self.prev_pstep_t = np.zeros(len(self._ref_joint_vel_indexes))\n self.curr_pstep_t = np.zeros(len(self._ref_joint_vel_indexes))\n self.prev_pstep_ee_v = np.zeros(6)\n self.curr_pstep_ee_v = np.zeros(6)\n self.buffer_pstep_ee_v = deque(np.zeros(6) for _ in range(self.n_avg_ee_acc))\n self.ee_acc = np.zeros(6)\n\n self.total_ee_acc = np.zeros(6) # used to compute average\n self.total_js_energy = np.zeros(len(self._ref_joint_vel_indexes))\n\n self.torque_total = 0\n self.joint_torques = 0\n\n self.prev_ee_pos = np.zeros(7)\n self.ee_pos = np.zeros(7)\n\n ## counting joint limits\n self.joint_limit_count = 0\n\n def _load_controller(self, controller_type, controller_file, kwargs):\n \"\"\"\n Loads controller to be used for dynamic trajectories\n\n Controller_type is a specified controller, and controller_params is a config file 
containing the appropriate\n parameters for that controller\n\n Kwargs is kwargs passed from init call and represents individual params to override in controller config file\n \"\"\"\n\n # Load the controller config file\n try:\n with open(controller_file) as f:\n params = hjson.load(f)\n except FileNotFoundError:\n print(\"Controller config file '{}' not found. Please check filepath and try again.\".format(\n controller_file))\n\n controller_params = params[controller_type]\n\n # Load additional arguments from kwargs and override the prior config-file loaded ones\n for key, value in kwargs.items():\n if key in controller_params:\n controller_params[key] = value\n\n if controller_type == ControllerType.POS:\n self.controller = PositionController(**controller_params)\n elif controller_type == ControllerType.POS_ORI:\n self.controller = PositionOrientationController(**controller_params)\n elif controller_type == ControllerType.JOINT_IMP:\n self.controller = JointImpedanceController(**controller_params)\n elif controller_type == ControllerType.JOINT_TORQUE:\n self.controller = JointTorqueController(**controller_params)\n else:\n self.controller = JointVelocityController(**controller_params)\n\n def _load_model(self):\n \"\"\"\n Loads robot and optionally add grippers.\n \"\"\"\n super()._load_model()\n # Use xml that has motor torque actuators enabled\n self.mujoco_robot = Panda(xml_path=\"robots/panda/robot_torque.xml\")\n\n if self.has_gripper:\n self.gripper = gripper_factory(self.gripper_type)\n if not self.gripper_visualization:\n self.gripper.hide_visualization()\n self.mujoco_robot.add_gripper(\"right_hand\", self.gripper)\n\n def _reset_internal(self):\n \"\"\"\n Sets initial pose of arm and grippers.\n \"\"\"\n super()._reset_internal()\n self.sim.data.qpos[self._ref_joint_pos_indexes] = self.mujoco_robot.init_qpos\n\n if self.has_gripper:\n self.sim.data.qpos[\n self._ref_joint_gripper_actuator_indexes\n ] = self.gripper.init_qpos\n\n self.goal = np.zeros(3)\n self.goal_orientation = np.zeros(3)\n self.desired_force = np.zeros(3)\n self.desired_torque = np.zeros(3)\n self.prev_pstep_q = np.array(self.mujoco_robot.init_qpos)\n self.curr_pstep_q = np.array(self.mujoco_robot.init_qpos)\n self.prev_pstep_a = np.zeros(self.dof)\n self.curr_pstep_a = np.zeros(self.dof)\n self.prev_pstep_ee_v = np.zeros(6)\n self.curr_pstep_ee_v = np.zeros(6)\n self.buffer_pstep_ee_v = deque(np.zeros(6) for _ in range(self.n_avg_ee_acc))\n self.ee_acc = np.zeros(6)\n self.total_ee_acc = np.zeros(6) # used to compute average\n self.total_kp = np.zeros(6)\n self.total_damping = np.zeros(6)\n self.total_js_energy = np.zeros(len(self._ref_joint_vel_indexes))\n self.prev_ee_pos = np.zeros(7)\n self.ee_pos = np.zeros(7)\n self.total_joint_torque = 0\n self.joint_torques = 0\n\n def _get_reference(self):\n \"\"\"\n Sets up necessary reference for robots, grippers, and objects.\n \"\"\"\n super()._get_reference()\n\n # indices for joints in qpos, qvel\n self.robot_joints = list(self.mujoco_robot.joints)\n self._ref_joint_pos_indexes = [\n self.sim.model.get_joint_qpos_addr(x) for x in self.robot_joints\n ]\n self._ref_joint_vel_indexes = [\n self.sim.model.get_joint_qvel_addr(x) for x in self.robot_joints\n ]\n\n if self.use_indicator_object:\n ind_qpos = self.sim.model.get_joint_qpos_addr(\"pos_indicator\")\n self._ref_indicator_pos_low, self._ref_indicator_pos_high = ind_qpos\n\n ind_qvel = self.sim.model.get_joint_qvel_addr(\"pos_indicator\")\n self._ref_indicator_vel_low, self._ref_indicator_vel_high = 
ind_qvel\n\n self.indicator_id = self.sim.model.body_name2id(\"pos_indicator\")\n\n # indices for grippers in qpos, qvel\n if self.has_gripper:\n self.gripper_joints = list(self.gripper.joints)\n self._ref_gripper_joint_pos_indexes = [\n self.sim.model.get_joint_qpos_addr(x) for x in self.gripper_joints\n ]\n self._ref_gripper_joint_vel_indexes = [\n self.sim.model.get_joint_qvel_addr(x) for x in self.gripper_joints\n ]\n\n # indices for joint pos actuation, joint vel actuation, gripper actuation\n self._ref_joint_pos_actuator_indexes = [\n self.sim.model.actuator_name2id(actuator)\n for actuator in self.sim.model.actuator_names\n if actuator.startswith(\"pos\")\n ]\n\n self._ref_joint_vel_actuator_indexes = [\n self.sim.model.actuator_name2id(actuator)\n for actuator in self.sim.model.actuator_names\n if actuator.startswith(\"vel\")\n ]\n\n if self.has_gripper:\n self._ref_joint_gripper_actuator_indexes = [\n self.sim.model.actuator_name2id(actuator)\n for actuator in self.sim.model.actuator_names\n if actuator.startswith(\"gripper\")\n ]\n\n # IDs of sites for gripper visualization\n self.eef_site_id = self.sim.model.site_name2id(\"grip_site\")\n self.eef_cylinder_id = self.sim.model.site_name2id(\"grip_site_cylinder\")\n\n def move_indicator(self, pos):\n \"\"\"\n Sets 3d position of indicator object to @pos.\n \"\"\"\n if self.use_indicator_object:\n index = self._ref_indicator_pos_low\n self.sim.data.qpos[index : index + 3] = pos\n\n def _pre_action(self, action, policy_step):\n \"\"\"\n Overrides the superclass method to actuate the robot with the\n passed joint velocities and gripper control.\n\n Args:\n action (numpy array): The control to apply to the robot. The first\n @self.mujoco_robot.dof dimensions should be the desired\n normalized joint velocities and if the robot has\n a gripper, the next @self.gripper.dof dimensions should be\n actuation controls for the gripper.\n \"\"\"\n\n self.policy_step = policy_step\n\n # Make sure action length is correct\n assert len(action) == self.dof, \"environment got invalid action dimension\"\n\n # i.e.: not using new controller\n if not self.impedance_ctrl:\n\n # clip actions into valid range\n low, high = self.action_spec\n action = np.clip(action, low, high)\n\n if self.has_gripper:\n arm_action = action[: self.mujoco_robot.dof]\n gripper_action_in = action[\n self.mujoco_robot.dof: self.mujoco_robot.dof + self.gripper.dof\n ]\n gripper_action_actual = self.gripper.format_action(gripper_action_in)\n action = np.concatenate([arm_action, gripper_action_actual])\n\n # rescale normalized action to control ranges\n ctrl_range = self.sim.model.actuator_ctrlrange\n bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])\n weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])\n applied_action = bias + weight * action\n self.sim.data.ctrl[self._ref_joint_vel_indexes] = applied_action\n\n # gravity compensation\n self.sim.data.qfrc_applied[\n self._ref_joint_vel_indexes\n ] = self.sim.data.qfrc_bias[self._ref_joint_vel_indexes]\n\n if self.use_indicator_object:\n self.sim.data.qfrc_applied[\n self._ref_indicator_vel_low: self._ref_indicator_vel_high\n ] = self.sim.data.qfrc_bias[\n self._ref_indicator_vel_low: self._ref_indicator_vel_high\n ]\n\n # using new controller\n else:\n # Split action into joint control and peripheral (i.e.: gripper) control (as specified by individual gripper)\n gripper_action = []\n if self.has_gripper:\n gripper_action = action[self.controller.control_dim:] # all indexes past controller dimension indexes\n action = 
action[:self.controller.control_dim]\n\n # TODO\n # First, get joint space action\n # action = action.copy() # ensure that we don't change the action outside of this scope\n self.controller.update_model(self.sim, id_name='right_hand', joint_index=self._ref_joint_pos_indexes)\n torques = self.controller.action_to_torques(action,\n self.policy_step) # this scales and clips the actions correctly\n\n if self.initial_policy:\n initial_policy_torques = self.initial_policy.action_to_torques(self.sim, 'right_hand',\n self._ref_joint_pos_indexes,\n self.initial_policy_action,\n self.policy_step)\n self.residual_torques = torques\n self.initial_torques = initial_policy_torques\n if self.residual_policy_multiplier is not None:\n torques = self.residual_policy_multiplier * torques + initial_policy_torques\n else:\n torques = torques + initial_policy_torques # TODO\n\n self.total_joint_torque += np.sum(abs(torques))\n self.joint_torques = torques\n\n # Get gripper action, if applicable\n if self.has_gripper:\n gripper_action_actual = self.gripper.format_action(gripper_action)\n # rescale normalized gripper action to control ranges\n ctrl_range = self.sim.model.actuator_ctrlrange[self._ref_gripper_joint_vel_indexes]\n bias = 0.5 * (ctrl_range[:, 1] + ctrl_range[:, 0])\n weight = 0.5 * (ctrl_range[:, 1] - ctrl_range[:, 0])\n applied_gripper_action = bias + weight * gripper_action_actual\n self.sim.data.ctrl[self._ref_gripper_joint_vel_indexes] = applied_gripper_action\n\n # Now, control both gripper and joints\n self.sim.data.ctrl[self._ref_joint_vel_indexes] = self.sim.data.qfrc_bias[\n self._ref_joint_vel_indexes] + torques\n\n if self.policy_step:\n self.prev_pstep_q = np.array(self.curr_pstep_q)\n self.curr_pstep_q = np.array(self.sim.data.qpos[self._ref_joint_vel_indexes])\n self.prev_pstep_a = np.array(self.curr_pstep_a)\n self.curr_pstep_a = np.array(action) # .copy()) # TODO\n self.prev_pstep_t = np.array(self.curr_pstep_t)\n self.curr_pstep_t = np.array(self.sim.data.ctrl[self._ref_joint_vel_indexes])\n self.prev_pstep_ft = np.array(self.curr_pstep_ft)\n\n # Assumes a ft sensor on the wrist\n force_sensor_id = self.sim.model.sensor_name2id(\"force_ee\")\n force_ee = self.sim.data.sensordata[force_sensor_id * 3: force_sensor_id * 3 + 3]\n torque_sensor_id = self.sim.model.sensor_name2id(\"torque_ee\")\n torque_ee = self.sim.data.sensordata[torque_sensor_id * 3: torque_sensor_id * 3 + 3]\n self.curr_pstep_ft = np.concatenate([force_ee, torque_ee])\n\n self.prev_pstep_ee_v = self.curr_pstep_ee_v\n self.curr_pstep_ee_v = np.concatenate(\n [self.sim.data.body_xvelp[self.sim.model.body_name2id(\"right_hand\")],\n self.sim.data.body_xvelr[self.sim.model.body_name2id(\"right_hand\")]])\n\n self.buffer_pstep_ee_v.popleft()\n self.buffer_pstep_ee_v.append(self.curr_pstep_ee_v)\n\n # convert to matrix\n buffer_mat = []\n for v in self.buffer_pstep_ee_v:\n buffer_mat += [v]\n buffer_mat = np.vstack(buffer_mat)\n\n diffs = np.diff(buffer_mat, axis=0)\n diffs *= self.control_freq\n diffs = np.vstack([self.ee_acc, diffs])\n diffs.reshape((self.n_avg_ee_acc, 6))\n\n self.ee_acc = np.array(\n [np.convolve(col, np.ones((self.n_avg_ee_acc,)) / self.n_avg_ee_acc, mode='valid')[0] for col in\n diffs.transpose()])\n\n def _post_action(self, action):\n \"\"\"\n (Optional) does gripper visualization after actions.\n \"\"\"\n self.prev_ee_pos = self.ee_pos\n self.ee_pos = np.array(self.sim.data.body_xpos[self.sim.model.body_name2id('right_hand')])\n\n force_sensor_id = self.sim.model.sensor_name2id(\"force_ee\")\n 
self.ee_force = np.array(self.sim.data.sensordata[force_sensor_id * 3: force_sensor_id * 3 + 3])\n\n if np.linalg.norm(self.ee_force_bias) == 0:\n self.ee_force_bias = self.ee_force\n\n torque_sensor_id = self.sim.model.sensor_name2id(\"torque_ee\")\n self.ee_torque = np.array(self.sim.data.sensordata[torque_sensor_id * 3: torque_sensor_id * 3 + 3])\n\n if np.linalg.norm(self.ee_torque_bias) == 0:\n self.ee_torque_bias = self.ee_torque\n\n ret = super()._post_action(action)\n self._gripper_visualization()\n return ret\n\n def _get_observation(self):\n \"\"\"\n Returns an OrderedDict containing observations [(name_string, np.array), ...].\n\n Important keys:\n robot-state: contains robot-centric information.\n \"\"\"\n\n di = super()._get_observation()\n\n # camera observations\n if self.use_camera_obs:\n camera_obs = self.sim.render(camera_name=self.camera_name,\n width=self.camera_width,\n height=self.camera_height,\n depth=self.camera_depth)\n if self.camera_depth:\n di['image'], di['depth'] = camera_obs\n else:\n di['image'] = camera_obs\n\n # Skip for now, not worth importing cv2 just for this\n # if self.visualize_offscreen and not self.real_robot:\n # cv2.imshow('Robot observation', np.flip(camera_obs[..., ::-1], 0))\n # cv2.waitKey(10)\n\n # proprioceptive features\n di[\"joint_pos\"] = np.array(\n [self.sim.data.qpos[x] for x in self._ref_joint_pos_indexes]\n )\n di[\"joint_vel\"] = np.array(\n [self.sim.data.qvel[x] for x in self._ref_joint_vel_indexes]\n )\n\n robot_states = [\n np.sin(di[\"joint_pos\"]),\n np.cos(di[\"joint_pos\"]),\n di[\"joint_vel\"],\n ]\n\n if self.has_gripper:\n di[\"gripper_qpos\"] = np.array(\n [self.sim.data.qpos[x] for x in self._ref_gripper_joint_pos_indexes]\n )\n di[\"gripper_qvel\"] = np.array(\n [self.sim.data.qvel[x] for x in self._ref_gripper_joint_vel_indexes]\n )\n\n di[\"eef_pos\"] = np.array(self.sim.data.body_xpos[self.sim.model.body_name2id('right_hand')])\n di[\"eef_quat\"] = T.convert_quat(\n self.sim.data.get_body_xquat(\"right_hand\"), to=\"xyzw\"\n )\n di[\"eef_vlin\"] = np.array(self.sim.data.get_body_xvelp('right_hand'))\n di[\"eef_vang\"] = np.array(self.sim.data.get_body_xvelr('right_hand'))\n\n # add in gripper information\n robot_states.extend([di[\"gripper_qpos\"], di[\"eef_pos\"], di[\"eef_quat\"], di[\"eef_vlin\"], di[\"eef_vang\"]])\n\n di[\"robot-state\"] = np.concatenate(robot_states)\n\n di[\"prev-act\"] = self.prev_pstep_a\n\n # Adding binary contact observation\n in_contact = np.linalg.norm(self.ee_force - self.ee_force_bias) > self.contact_threshold\n di[\"contact-obs\"] = in_contact\n\n return di\n\n @property\n def action_spec(self):\n \"\"\"\n Action lower/upper limits per dimension.\n \"\"\"\n low = np.ones(self.dof) * -1.\n high = np.ones(self.dof) * 1.\n return low, high\n\n @property\n def dof(self):\n \"\"\"\n Returns the DoF of the robot (with grippers).\n \"\"\"\n if self.impedance_ctrl:\n dof = self.controller.action_dim\n else:\n dof = self.mujoco_robot.dof\n\n if self.has_gripper:\n dof += self.gripper.dof\n return dof\n\n def pose_in_base_from_name(self, name):\n \"\"\"\n A helper function that takes in a named data field and returns the pose\n of that object in the base frame.\n \"\"\"\n\n pos_in_world = self.sim.data.get_body_xpos(name)\n rot_in_world = self.sim.data.get_body_xmat(name).reshape((3, 3))\n pose_in_world = T.make_pose(pos_in_world, rot_in_world)\n\n base_pos_in_world = self.sim.data.get_body_xpos(\"base\")\n base_rot_in_world = self.sim.data.get_body_xmat(\"base\").reshape((3, 3))\n 
base_pose_in_world = T.make_pose(base_pos_in_world, base_rot_in_world)\n world_pose_in_base = T.pose_inv(base_pose_in_world)\n\n pose_in_base = T.pose_in_A_to_pose_in_B(pose_in_world, world_pose_in_base)\n return pose_in_base\n\n def set_robot_joint_positions(self, jpos):\n \"\"\"\n Helper method to force robot joint positions to the passed values.\n \"\"\"\n self.sim.data.qpos[self._ref_joint_pos_indexes] = jpos\n self.sim.forward()\n\n @property\n def _right_hand_joint_cartesian_pose(self):\n \"\"\"\n Returns the cartesian pose of the last robot joint in base frame of robot.\n \"\"\"\n return self.pose_in_base_from_name(\"right_l6\")\n\n @property\n def _right_hand_pose(self):\n \"\"\"\n Returns eef pose in base frame of robot.\n \"\"\"\n return self.pose_in_base_from_name(\"right_hand\")\n\n @property\n def _right_hand_quat(self):\n \"\"\"\n Returns eef quaternion in base frame of robot.\n \"\"\"\n return T.mat2quat(self._right_hand_orn)\n\n @property\n def _right_hand_total_velocity(self):\n \"\"\"\n Returns the total eef velocity (linear + angular) in the base frame\n as a numpy array of shape (6,)\n \"\"\"\n\n # Use jacobian to translate joint velocities to end effector velocities.\n Jp = self.sim.data.get_body_jacp(\"right_hand\").reshape((3, -1))\n Jp_joint = Jp[:, self._ref_joint_vel_indexes]\n\n Jr = self.sim.data.get_body_jacr(\"right_hand\").reshape((3, -1))\n Jr_joint = Jr[:, self._ref_joint_vel_indexes]\n\n eef_lin_vel = Jp_joint.dot(self._joint_velocities)\n eef_rot_vel = Jr_joint.dot(self._joint_velocities)\n return np.concatenate([eef_lin_vel, eef_rot_vel])\n\n @property\n def _right_hand_pos(self):\n \"\"\"\n Returns position of eef in base frame of robot.\n \"\"\"\n eef_pose_in_base = self._right_hand_pose\n return eef_pose_in_base[:3, 3]\n\n @property\n def _right_hand_orn(self):\n \"\"\"\n Returns orientation of eef in base frame of robot as a rotation matrix.\n \"\"\"\n eef_pose_in_base = self._right_hand_pose\n return eef_pose_in_base[:3, :3]\n\n @property\n def _right_hand_vel(self):\n \"\"\"\n Returns velocity of eef in base frame of robot.\n \"\"\"\n return self._right_hand_total_velocity[:3]\n\n @property\n def _right_hand_ang_vel(self):\n \"\"\"\n Returns angular velocity of eef in base frame of robot.\n \"\"\"\n return self._right_hand_total_velocity[3:]\n\n @property\n def _joint_positions(self):\n \"\"\"\n Returns a numpy array of joint positions.\n Panda robots have 7 joints and positions are in rotation angles.\n \"\"\"\n return self.sim.data.qpos[self._ref_joint_pos_indexes]\n\n @property\n def _joint_velocities(self):\n \"\"\"\n Returns a numpy array of joint velocities.\n Panda robots have 7 joints and velocities are angular velocities.\n \"\"\"\n return self.sim.data.qvel[self._ref_joint_vel_indexes]\n\n def _gripper_visualization(self):\n \"\"\"\n Do any needed visualization here.\n \"\"\"\n\n # By default, don't do any coloring.\n self.sim.model.site_rgba[self.eef_site_id] = [0., 0., 0., 0.]\n\n def _check_contact(self):\n \"\"\"\n Returns True if the gripper is in contact with another object.\n \"\"\"\n return False\n\n def _check_arm_contact(self):\n \"\"\"\n Returns True if the arm is in contact with another object.\n \"\"\"\n collision = False\n for contact in self.sim.data.contact[:self.sim.data.ncon]:\n if self.sim.model.geom_id2name(contact.geom1) in self.mujoco_robot.contact_geoms or \\\n self.sim.model.geom_id2name(contact.geom2) in self.mujoco_robot.contact_geoms:\n collision = True\n break\n return collision\n\n def _check_q_limits(self):\n 
\"\"\"\n Returns True if the arm is in joint limits or very close to.\n \"\"\"\n joint_limits = False\n tolerance = 0.1\n for (idx, (q, q_limits)) in enumerate(\n zip(self.sim.data.qpos[self._ref_joint_pos_indexes], self.sim.model.jnt_range)):\n if not (q > q_limits[0] + tolerance and q < q_limits[1] - tolerance):\n print(\"Joint limit reached in joint \" + str(idx))\n joint_limits = True\n self.joint_limit_count += 1\n return joint_limits\n\n def _compute_q_delta(self):\n \"\"\"\n Returns the change in joint space configuration between previous and current steps\n \"\"\"\n q_delta = self.prev_pstep_q - self.curr_pstep_q\n\n return q_delta\n\n def _compute_t_delta(self):\n \"\"\"\n Returns the change in joint space configuration between previous and current steps\n \"\"\"\n t_delta = self.prev_pstep_t - self.curr_pstep_t\n\n return t_delta\n\n def _compute_a_delta(self):\n \"\"\"\n Returns the change in policy action between previous and current steps\n \"\"\"\n\n a_delta = self.prev_pstep_a - self.curr_pstep_a\n\n return a_delta\n\n def _compute_ft_delta(self):\n \"\"\"\n Returns the change in policy action between previous and current steps\n \"\"\"\n\n ft_delta = self.prev_pstep_ft - self.curr_pstep_ft\n\n return ft_delta\n\n def _compute_js_energy(self):\n \"\"\"\n Returns the energy consumed by each joint between previous and current steps\n \"\"\"\n # Mean torque applied\n mean_t = self.prev_pstep_t - self.curr_pstep_t\n\n # We assume in the motors torque is proportional to current (and voltage is constant)\n # In that case the amount of power scales proportional to the torque and the energy is the\n # time integral of that\n js_energy = np.abs((1.0 / self.control_freq) * mean_t)\n\n return js_energy\n\n def _compute_ee_ft_integral(self):\n \"\"\"\n Returns the integral over time of the applied ee force-torque\n \"\"\"\n\n mean_ft = self.prev_pstep_ft - self.curr_pstep_ft\n integral_ft = np.abs((1.0 / self.control_freq) * mean_ft)\n\n return integral_ft\n\n def render_additional_image(self, camera_name, camera_width, camera_height, camera_depth):\n img = self.sim.render(camera_name=camera_name,\n width=camera_width,\n height=camera_height,\n depth=camera_depth)\n return img\n\n" ]
[ [ "numpy.vstack", "numpy.ones", "numpy.zeros", "numpy.diff", "numpy.abs", "numpy.cos", "numpy.clip", "numpy.array", "numpy.sin", "numpy.concatenate", "numpy.linalg.norm" ] ]
msobrevillac/sockeye
[ "6ed5e2dbe003673d03272987b79f73bdee86283d" ]
[ "sockeye/build_input_embeddings.py" ]
[ "import numpy as np\nimport json\n\nsrc_embed_file = '/home/msobrevillac/Projects/phd/NLG/sockeye/pre-embeddings/embed-in-src.npy'\nsrc_vocab_file = '/home/msobrevillac/Projects/phd/NLG/sockeye/pre-embeddings/vocab-in-src.json'\n\nvocab = {}\nvectors = []\n\nwith open('/home/msobrevillac/Projects/phd/Resources/Embeddings/glove/glove.6B.300d.txt', 'rb') as f:\n index = 0\n for line in f:\n fields = line.split()\n word = fields[0].decode('utf-8')\n vocab[word] = index\n index += 1\n vector = np.fromiter((float(x) for x in fields[1:]),\n dtype=np.float)\n vectors.append(vector)\n\nsrc_array = np.array(vectors)\n\nnp.save(src_embed_file, src_array)\n\nwith open(src_vocab_file, \"w\", encoding=\"utf-8\") as out:\n json.dump(vocab, out, indent=4, ensure_ascii=False) \n" ]
[ [ "numpy.array", "numpy.save" ] ]
Zber5/OpenRadar
[ "701cf166203c3f3e1ba4873cd132a7ccba4f0863" ]
[ "FER/em_network/train_3dCNNfusion_baseline_full_size.py" ]
[ "import torch\nimport torch.nn as nn\nimport numpy as np\nimport random\nimport time\n\nfrom utils import device, AverageMeter, dir_path, write_log, accuracy, save_checkpoint\nfrom models.c3d import C3DFusionBaselineFull\nfrom dataset import HeatmapDataset\nfrom torch.utils.data import DataLoader\nimport os\nimport pandas as pd\nfrom FER.utils import ROOT_PATH\ntorch.autograd.set_detect_anomaly(True)\n\nos.chdir(ROOT_PATH)\n\n# set seed, make result reporducable\nSEED = 1234\nrandom.seed(SEED)\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\n\n\ndef train(model, data_loader, criterion, optimizer, epoch=0, to_log=None, print_freq=25):\n # create Average Meters\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n top1 = AverageMeter()\n top5 = AverageMeter()\n train_loss = []\n\n # switch to train mode\n model.train()\n # record start time\n start = time.time()\n\n for i, (azi, ele, target) in enumerate(data_loader):\n # prepare input and target to device\n azi = azi.to(device, dtype=torch.float)\n ele = ele.to(device, dtype=torch.float)\n target = target.to(device, dtype=torch.long)\n\n # measure data loading time\n data_time.update(time.time() - start)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n # print(\" {} \".format(i))\n\n # gradient and do SGD step\n output = model(azi, ele)\n loss = criterion(output, target)\n # print(loss.item())\n\n # L2 regularization\n # l2_lambda = config['l2_lambda']\n # l2_norm = sum(p.pow(2.0).sum()\n # for p in model.parameters())\n #\n # loss = loss + l2_lambda * l2_norm\n\n # L1 regularization\n # l1_lambda = 0.001\n # l1_norm = sum(p.abs(2.0).sum()\n # for p in model.parameters())\n #\n # loss = loss + l1_lambda * l1_norm\n\n train_loss.append(loss.item())\n loss.backward()\n optimizer.step()\n\n # measure accuracy and record loss\n prec1, prec5 = accuracy(output.data, target, topk=(1, 5))\n losses.update(loss.item(), data_loader.batch_size)\n top1.update(prec1.item(), data_loader.batch_size)\n top5.update(prec5.item(), data_loader.batch_size)\n\n # measure elapsed time\n batch_time.update(time.time() - start)\n start = time.time()\n\n # print training info\n if i % print_freq == 0:\n str = ('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Prec@1 {top1.val:3.3f} ({top1.avg:3.3f})\\t'\n 'Prec@5 {top5.val:3.3f} ({top5.avg:3.3f})'.format(\n epoch, i, len(data_loader), batch_time=batch_time,\n data_time=data_time, loss=losses, top1=top1, top5=top5))\n print(str)\n\n if to_log is not None:\n write_log(str + '\\n', to_log)\n\n return train_loss\n\n\ndef test(model, test_loader, criterion, to_log=None):\n model.eval()\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for (azi, ele, target) in test_loader:\n # prepare input and target to device\n azi = azi.to(device, dtype=torch.float)\n ele = ele.to(device, dtype=torch.float)\n target = target.to(device, dtype=torch.long)\n\n output = model(azi, ele)\n loss = criterion(output, target)\n test_loss += loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.sampler)\n test_loss *= test_loader.batch_size\n acc = 100. 
* correct / len(test_loader.sampler)\n format_str = 'Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\\n'.format(\n test_loss, correct, len(test_loader.sampler), acc\n )\n print(format_str)\n if to_log is not None:\n write_log(format_str, to_log)\n return test_loss.item(), acc\n\n\nif __name__ == \"__main__\":\n\n config = dict(num_epochs=60,\n lr=0.0003,\n lr_step_size=20,\n lr_decay_gamma=0.2,\n num_classes=7,\n batch_size=16,\n h_num_frames=300,\n # l2_lambda=0.008,\n # l1_lambda=0.002\n )\n\n emotion_list = ['Joy', 'Surprise', 'Anger', 'Sadness', 'Fear', 'Disgust', 'Neutral']\n\n # results dir\n result_dir = \"FER/results\"\n\n # heatmap root dir\n heatmap_root = \"C:/Users/Zber/Desktop/Subjects_Heatmap_Large\"\n\n # annotation dir\n annotation_train = os.path.join(heatmap_root, \"heatmap_annotation_full_train.txt\")\n annotation_test = os.path.join(heatmap_root, \"heatmap_annotation_full_test.txt\")\n\n # load data\n dataset_train = HeatmapDataset(heatmap_root, annotation_train)\n dataset_test = HeatmapDataset(heatmap_root, annotation_test)\n train_loader = DataLoader(dataset_train, batch_size=config['batch_size'], num_workers=4, pin_memory=True)\n test_loader = DataLoader(dataset_test, batch_size=config['batch_size'], num_workers=4, pin_memory=True)\n\n # log path\n path = dir_path(\"sensor_heatmap_3dcnn_fusion_baseline_full_size_full_length\", result_dir)\n\n # create model\n model = C3DFusionBaselineFull(sample_duration=config['h_num_frames'], num_classes=config['num_classes'])\n model = model.to(device)\n\n # initialize critierion and optimizer\n # could add weighted loss e.g. pos_weight = torch.ones([64])\n # criterion = nn.BCELoss()\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])\n\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=config['lr_step_size'],\n gamma=config['lr_decay_gamma'])\n\n metrics_dic = {\n 'loss': [],\n 'precision': []\n }\n\n best_acc = 0\n for epoch in range(config['num_epochs']):\n train_loss = train(model, data_loader=train_loader, criterion=criterion,\n optimizer=optimizer, epoch=epoch,\n to_log=path['log'])\n test_loss, acc = test(model, test_loader=test_loader, criterion=criterion, to_log=path['log'])\n if acc >= best_acc:\n best_acc = acc\n save_checkpoint(model.state_dict(), is_best=True, checkpoint=path['dir'])\n else:\n save_checkpoint(model.state_dict(), is_best=False, checkpoint=path['dir'])\n\n lr_scheduler.step()\n\n metrics_dic['loss'].append(test_loss)\n metrics_dic['precision'].append(acc)\n\n # print best acc after training\n write_log(\"<<<<< Best Accuracy = {:.2f} >>>>>\".format(best_acc), path['log'])\n\n # save csv log\n df = pd.DataFrame.from_dict(metrics_dic)\n df.to_csv(path['metrics'], sep='\\t', encoding='utf-8')\n" ]
[ [ "torch.utils.data.DataLoader", "torch.optim.lr_scheduler.StepLR", "torch.cuda.manual_seed", "torch.manual_seed", "torch.no_grad", "numpy.random.seed", "torch.nn.CrossEntropyLoss", "torch.autograd.set_detect_anomaly", "pandas.DataFrame.from_dict" ] ]
nivosco/detection-tools
[ "bfe461b668cdcd9bcfcfccbdad92342d6fb12fd1" ]
[ "detection_tools/core/post_processing.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Post-processing operations on detected boxes.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow.compat.v1 as tf\n\nfrom detection_tools.core import box_list\nfrom detection_tools.core import box_list_ops\nfrom detection_tools.core import keypoint_ops\nfrom detection_tools.core import standard_fields as fields\nfrom detection_tools.utils import shape_utils\n\n_NMS_TILE_SIZE = 512\n\n\ndef batch_iou(boxes1, boxes2):\n \"\"\"Calculates the overlap between proposal and ground truth boxes.\n\n Some `boxes2` may have been padded. The returned `iou` tensor for these\n boxes will be -1.\n\n Args:\n boxes1: a tensor with a shape of [batch_size, N, 4]. N is the number of\n proposals before groundtruth assignment. The last dimension is the pixel\n coordinates in [ymin, xmin, ymax, xmax] form.\n boxes2: a tensor with a shape of [batch_size, MAX_NUM_INSTANCES, 4]. This\n tensor might have paddings with a negative value.\n\n Returns:\n iou: a tensor with as a shape of [batch_size, N, MAX_NUM_INSTANCES].\n \"\"\"\n with tf.name_scope('BatchIOU'):\n y1_min, x1_min, y1_max, x1_max = tf.split(\n value=boxes1, num_or_size_splits=4, axis=2)\n y2_min, x2_min, y2_max, x2_max = tf.split(\n value=boxes2, num_or_size_splits=4, axis=2)\n\n # Calculates the intersection area.\n intersection_xmin = tf.maximum(x1_min, tf.transpose(x2_min, [0, 2, 1]))\n intersection_xmax = tf.minimum(x1_max, tf.transpose(x2_max, [0, 2, 1]))\n intersection_ymin = tf.maximum(y1_min, tf.transpose(y2_min, [0, 2, 1]))\n intersection_ymax = tf.minimum(y1_max, tf.transpose(y2_max, [0, 2, 1]))\n intersection_area = tf.maximum(\n (intersection_xmax - intersection_xmin), 0) * tf.maximum(\n (intersection_ymax - intersection_ymin), 0)\n\n # Calculates the union area.\n area1 = (y1_max - y1_min) * (x1_max - x1_min)\n area2 = (y2_max - y2_min) * (x2_max - x2_min)\n # Adds a small epsilon to avoid divide-by-zero.\n union_area = area1 + tf.transpose(area2,\n [0, 2, 1]) - intersection_area + 1e-8\n\n # Calculates IoU.\n iou = intersection_area / union_area\n\n # Fills -1 for padded ground truth boxes.\n padding_mask = tf.logical_and(\n tf.less(intersection_xmax, 0), tf.less(intersection_ymax, 0))\n iou = tf.where(padding_mask, -tf.ones_like(iou), iou)\n\n return iou\n\n\ndef _self_suppression(iou, iou_threshold, loop_condition, iou_sum):\n \"\"\"Bounding-boxes self-suppression loop body.\n\n Args:\n iou: A float Tensor with shape [1, num_boxes, max_num_instance]: IOUs.\n iou_threshold: A scalar, representing IOU threshold.\n loop_condition: The loop condition returned from last iteration.\n iou_sum: iou_sum_new returned from last iteration.\n\n Returns:\n iou_suppressed: 
A float Tensor with shape [1, num_boxes, max_num_instance],\n IOU after suppression.\n iou_threshold: A scalar, representing IOU threshold.\n loop_condition: Bool Tensor of shape [], the loop condition.\n iou_sum_new: The new IOU sum.\n \"\"\"\n del loop_condition\n can_suppress_others = tf.cast(\n tf.reshape(tf.reduce_max(iou, 1) <= iou_threshold, [1, -1, 1]), iou.dtype)\n iou_suppressed = tf.reshape(\n tf.cast(\n tf.reduce_max(can_suppress_others * iou, 1) <= iou_threshold,\n iou.dtype), [1, -1, 1]) * iou\n iou_sum_new = tf.reduce_sum(iou_suppressed, [1, 2])\n return [\n iou_suppressed, iou_threshold,\n tf.reduce_any(iou_sum - iou_sum_new > iou_threshold), iou_sum_new\n ]\n\n\ndef _cross_suppression(boxes, box_slice, iou_threshold, inner_idx):\n \"\"\"Bounding-boxes cross-suppression loop body.\n\n Args:\n boxes: A float Tensor of shape [1, anchors, 4], representing boxes.\n box_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile\n returned from last iteration\n iou_threshold: A scalar, representing IOU threshold.\n inner_idx: A scalar, representing inner index.\n\n Returns:\n boxes: A float Tensor of shape [1, anchors, 4], representing boxes.\n ret_slice: A float Tensor of shape [1, _NMS_TILE_SIZE, 4], the box tile\n after suppression\n iou_threshold: A scalar, representing IOU threshold.\n inner_idx: A scalar, inner index incremented.\n \"\"\"\n new_slice = tf.slice(boxes, [0, inner_idx * _NMS_TILE_SIZE, 0],\n [1, _NMS_TILE_SIZE, 4])\n iou = batch_iou(new_slice, box_slice)\n ret_slice = tf.expand_dims(\n tf.cast(tf.reduce_all(iou < iou_threshold, [1]), box_slice.dtype),\n 2) * box_slice\n return boxes, ret_slice, iou_threshold, inner_idx + 1\n\n\ndef _suppression_loop_body(boxes, iou_threshold, output_size, idx):\n \"\"\"Process boxes in the range [idx*_NMS_TILE_SIZE, (idx+1)*_NMS_TILE_SIZE).\n\n Args:\n boxes: a tensor with a shape of [1, anchors, 4].\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n output_size: an int32 tensor of size [1]. 
Representing the number of\n selected boxes.\n idx: an integer scalar representing induction variable.\n\n Returns:\n boxes: updated boxes.\n iou_threshold: pass down iou_threshold to the next iteration.\n output_size: the updated output_size.\n idx: the updated induction variable.\n \"\"\"\n num_tiles = tf.shape(boxes)[1] // _NMS_TILE_SIZE\n\n # Iterates over tiles that can possibly suppress the current tile.\n box_slice = tf.slice(boxes, [0, idx * _NMS_TILE_SIZE, 0],\n [1, _NMS_TILE_SIZE, 4])\n _, box_slice, _, _ = tf.while_loop(\n lambda _boxes, _box_slice, _threshold, inner_idx: inner_idx < idx,\n _cross_suppression, [boxes, box_slice, iou_threshold,\n tf.constant(0)])\n\n # Iterates over the current tile to compute self-suppression.\n iou = batch_iou(box_slice, box_slice)\n mask = tf.expand_dims(\n tf.reshape(tf.range(_NMS_TILE_SIZE), [1, -1]) > tf.reshape(\n tf.range(_NMS_TILE_SIZE), [-1, 1]), 0)\n iou *= tf.cast(tf.logical_and(mask, iou >= iou_threshold), iou.dtype)\n suppressed_iou, _, _, _ = tf.while_loop(\n lambda _iou, _threshold, loop_condition, _iou_sum: loop_condition,\n _self_suppression,\n [iou, iou_threshold,\n tf.constant(True),\n tf.reduce_sum(iou, [1, 2])])\n suppressed_box = tf.reduce_sum(suppressed_iou, 1) > 0\n box_slice *= tf.expand_dims(1.0 - tf.cast(suppressed_box, box_slice.dtype), 2)\n\n # Uses box_slice to update the input boxes.\n mask = tf.reshape(\n tf.cast(tf.equal(tf.range(num_tiles), idx), boxes.dtype), [1, -1, 1, 1])\n boxes = tf.tile(tf.expand_dims(box_slice, [1]),\n [1, num_tiles, 1, 1]) * mask + tf.reshape(\n boxes, [1, num_tiles, _NMS_TILE_SIZE, 4]) * (1 - mask)\n boxes = tf.reshape(boxes, [1, -1, 4])\n\n # Updates output_size.\n output_size += tf.reduce_sum(\n tf.cast(tf.reduce_any(box_slice > 0, [2]), tf.int32), [1])\n return boxes, iou_threshold, output_size, idx + 1\n\n\ndef partitioned_non_max_suppression_padded(boxes,\n scores,\n max_output_size,\n iou_threshold=0.5,\n score_threshold=float('-inf')):\n \"\"\"A tiled version of [`tf.image.non_max_suppression_padded`](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression_padded).\n\n The overall design of the algorithm is to handle boxes tile-by-tile:\n\n boxes = boxes.pad_to_multiple_of(tile_size)\n num_tiles = len(boxes) // tile_size\n output_boxes = []\n for i in range(num_tiles):\n box_tile = boxes[i*tile_size : (i+1)*tile_size]\n for j in range(i - 1):\n suppressing_tile = boxes[j*tile_size : (j+1)*tile_size]\n iou = batch_iou(box_tile, suppressing_tile)\n # if the box is suppressed in iou, clear it to a dot\n box_tile *= _update_boxes(iou)\n # Iteratively handle the diagonal tile.\n iou = _box_overlap(box_tile, box_tile)\n iou_changed = True\n while iou_changed:\n # boxes that are not suppressed by anything else\n suppressing_boxes = _get_suppressing_boxes(iou)\n # boxes that are suppressed by suppressing_boxes\n suppressed_boxes = _get_suppressed_boxes(iou, suppressing_boxes)\n # clear iou to 0 for boxes that are suppressed, as they cannot be used\n # to suppress other boxes any more\n new_iou = _clear_iou(iou, suppressed_boxes)\n iou_changed = (new_iou != iou)\n iou = new_iou\n # remaining boxes that can still suppress others, are selected boxes.\n output_boxes.append(_get_suppressing_boxes(iou))\n if len(output_boxes) >= max_output_size:\n break\n\n Args:\n boxes: A 2-D float `Tensor` of shape `[num_boxes, 4]`.\n scores: A 1-D float `Tensor` of shape `[num_boxes]` representing a single\n score corresponding to each box (each row of boxes).\n max_output_size: a scalar 
integer `Tensor` representing the maximum number\n of boxes to be selected by non max suppression.\n iou_threshold: a float representing the threshold for deciding whether boxes\n overlap too much with respect to IOU.\n score_threshold: A float representing the threshold for deciding when to\n remove boxes based on score.\n\n Returns:\n selected_indices: a tensor of shape [anchors].\n num_valid_boxes: a scalar int tensor.\n nms_proposals: a tensor with a shape of [anchors, 4]. It has\n same dtype as input boxes.\n nms_scores: a tensor with a shape of [anchors]. It has same\n dtype as input scores.\n argsort_ids: a tensor of shape [anchors], mapping from input order of boxes\n to output order of boxes.\n \"\"\"\n num_boxes = tf.shape(boxes)[0]\n pad = tf.cast(\n tf.ceil(tf.cast(num_boxes, tf.float32) / _NMS_TILE_SIZE),\n tf.int32) * _NMS_TILE_SIZE - num_boxes\n\n scores, argsort_ids = tf.nn.top_k(scores, k=num_boxes, sorted=True)\n boxes = tf.gather(boxes, argsort_ids)\n num_boxes = tf.shape(boxes)[0]\n num_boxes += pad\n boxes = tf.pad(\n tf.cast(boxes, tf.float32), [[0, pad], [0, 0]], constant_values=-1)\n scores = tf.pad(tf.cast(scores, tf.float32), [[0, pad]])\n\n # mask boxes to -1 by score threshold\n scores_mask = tf.expand_dims(\n tf.cast(scores > score_threshold, boxes.dtype), axis=1)\n boxes = ((boxes + 1.) * scores_mask) - 1.\n\n boxes = tf.expand_dims(boxes, axis=0)\n scores = tf.expand_dims(scores, axis=0)\n\n def _loop_cond(unused_boxes, unused_threshold, output_size, idx):\n return tf.logical_and(\n tf.reduce_min(output_size) < max_output_size,\n idx < num_boxes // _NMS_TILE_SIZE)\n\n selected_boxes, _, output_size, _ = tf.while_loop(\n _loop_cond, _suppression_loop_body,\n [boxes, iou_threshold,\n tf.zeros([1], tf.int32),\n tf.constant(0)])\n idx = num_boxes - tf.cast(\n tf.nn.top_k(\n tf.cast(tf.reduce_any(selected_boxes > 0, [2]), tf.int32) *\n tf.expand_dims(tf.range(num_boxes, 0, -1), 0), max_output_size)[0],\n tf.int32)\n idx = tf.minimum(idx, num_boxes - 1 - pad)\n idx = tf.reshape(idx + tf.reshape(tf.range(1) * num_boxes, [-1, 1]), [-1])\n num_valid_boxes = tf.reduce_sum(output_size)\n return (idx, num_valid_boxes, tf.reshape(boxes, [-1, 4]),\n tf.reshape(scores, [-1]), argsort_ids)\n\n\ndef _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh,\n change_coordinate_frame, clip_window):\n \"\"\"Validates boxes, scores and iou_thresh.\n\n This function validates the boxes, scores, iou_thresh\n and if change_coordinate_frame is True, clip_window must be specified.\n\n Args:\n boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either\n number of classes or 1 depending on whether a separate box is predicted\n per class.\n scores: A [k, num_classes] float32 tensor containing the scores for each of\n the k detections. 
The scores have to be non-negative when\n pad_to_max_output_size is True.\n iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap\n with previously selected boxes are removed).\n change_coordinate_frame: Whether to normalize coordinates after clipping\n relative to clip_window (this can only be set to True if a clip_window is\n provided)\n clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]\n representing the window to clip and normalize boxes to before performing\n non-max suppression.\n\n Raises:\n ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not\n have a valid scores field.\n \"\"\"\n if not 0 <= iou_thresh <= 1.0:\n raise ValueError('iou_thresh must be between 0 and 1')\n if scores.shape.ndims != 2:\n raise ValueError('scores field must be of rank 2')\n if shape_utils.get_dim_as_int(scores.shape[1]) is None:\n raise ValueError('scores must have statically defined second ' 'dimension')\n if boxes.shape.ndims != 3:\n raise ValueError('boxes must be of rank 3.')\n if not (shape_utils.get_dim_as_int(\n boxes.shape[1]) == shape_utils.get_dim_as_int(scores.shape[1]) or\n shape_utils.get_dim_as_int(boxes.shape[1]) == 1):\n raise ValueError('second dimension of boxes must be either 1 or equal '\n 'to the second dimension of scores')\n if shape_utils.get_dim_as_int(boxes.shape[2]) != 4:\n raise ValueError('last dimension of boxes must be of size 4.')\n if change_coordinate_frame and clip_window is None:\n raise ValueError('if change_coordinate_frame is True, then a clip_window'\n 'must be specified.')\n\n\ndef _clip_window_prune_boxes(sorted_boxes, clip_window, pad_to_max_output_size,\n change_coordinate_frame):\n \"\"\"Prune boxes with zero area.\n\n Args:\n sorted_boxes: A BoxList containing k detections.\n clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]\n representing the window to clip and normalize boxes to before performing\n non-max suppression.\n pad_to_max_output_size: flag indicating whether to pad to max output size or\n not.\n change_coordinate_frame: Whether to normalize coordinates after clipping\n relative to clip_window (this can only be set to True if a clip_window is\n provided).\n\n Returns:\n sorted_boxes: A BoxList containing k detections after pruning.\n num_valid_nms_boxes_cumulative: Number of valid NMS boxes\n \"\"\"\n sorted_boxes = box_list_ops.clip_to_window(\n sorted_boxes,\n clip_window,\n filter_nonoverlapping=not pad_to_max_output_size)\n # Set the scores of boxes with zero area to -1 to keep the default\n # behaviour of pruning out zero area boxes.\n sorted_boxes_size = tf.shape(sorted_boxes.get())[0]\n non_zero_box_area = tf.cast(box_list_ops.area(sorted_boxes), tf.bool)\n sorted_boxes_scores = tf.where(\n non_zero_box_area, sorted_boxes.get_field(fields.BoxListFields.scores),\n -1 * tf.ones(sorted_boxes_size))\n sorted_boxes.add_field(fields.BoxListFields.scores, sorted_boxes_scores)\n num_valid_nms_boxes_cumulative = tf.reduce_sum(\n tf.cast(tf.greater_equal(sorted_boxes_scores, 0), tf.int32))\n sorted_boxes = box_list_ops.sort_by_field(sorted_boxes,\n fields.BoxListFields.scores)\n if change_coordinate_frame:\n sorted_boxes = box_list_ops.change_coordinate_frame(sorted_boxes,\n clip_window)\n if sorted_boxes.has_field(fields.BoxListFields.keypoints):\n sorted_keypoints = sorted_boxes.get_field(fields.BoxListFields.keypoints)\n sorted_keypoints = keypoint_ops.change_coordinate_frame(sorted_keypoints,\n clip_window)\n sorted_boxes.set_field(fields.BoxListFields.keypoints, 
sorted_keypoints)\n return sorted_boxes, num_valid_nms_boxes_cumulative\n\n\nclass NullContextmanager(object):\n\n def __enter__(self):\n pass\n\n def __exit__(self, type_arg, value_arg, traceback_arg):\n return False\n\n\ndef multiclass_non_max_suppression(boxes,\n scores,\n score_thresh,\n iou_thresh,\n max_size_per_class,\n max_total_size=0,\n clip_window=None,\n change_coordinate_frame=False,\n masks=None,\n boundaries=None,\n pad_to_max_output_size=False,\n use_partitioned_nms=False,\n additional_fields=None,\n soft_nms_sigma=0.0,\n use_hard_nms=False,\n use_cpu_nms=False,\n scope=None):\n \"\"\"Multi-class version of non maximum suppression.\n\n This op greedily selects a subset of detection bounding boxes, pruning\n away boxes that have high IOU (intersection over union) overlap (> thresh)\n with already selected boxes. It operates independently for each class for\n which scores are provided (via the scores field of the input box_list),\n pruning boxes with score less than a provided threshold prior to\n applying NMS.\n\n Please note that this operation is performed on *all* classes, therefore any\n background classes should be removed prior to calling this function.\n\n Selected boxes are guaranteed to be sorted in decreasing order by score (but\n the sort is not guaranteed to be stable).\n\n Args:\n boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either\n number of classes or 1 depending on whether a separate box is predicted\n per class.\n scores: A [k, num_classes] float32 tensor containing the scores for each of\n the k detections. The scores have to be non-negative when\n pad_to_max_output_size is True.\n score_thresh: scalar threshold for score (low scoring boxes are removed).\n iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap\n with previously selected boxes are removed).\n max_size_per_class: maximum number of retained boxes per class.\n max_total_size: maximum number of boxes retained over all classes. By\n default returns all boxes retained after capping boxes per class.\n clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]\n representing the window to clip and normalize boxes to before performing\n non-max suppression.\n change_coordinate_frame: Whether to normalize coordinates after clipping\n relative to clip_window (this can only be set to True if a clip_window\n is provided)\n masks: (optional) a [k, q, mask_height, mask_width] float32 tensor\n containing box masks. `q` can be either number of classes or 1 depending\n on whether a separate mask is predicted per class.\n boundaries: (optional) a [k, q, boundary_height, boundary_width] float32\n tensor containing box boundaries. `q` can be either number of classes or 1\n depending on whether a separate boundary is predicted per class.\n pad_to_max_output_size: If true, the output nmsed boxes are padded to be of\n length `max_size_per_class`. Defaults to false.\n use_partitioned_nms: If true, use partitioned version of\n non_max_suppression.\n additional_fields: (optional) If not None, a dictionary that maps keys to\n tensors whose first dimensions are all of size `k`. After non-maximum\n suppression, all tensors corresponding to the selected boxes will be\n added to resulting BoxList.\n soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;\n See Bodla et al, https://arxiv.org/abs/1704.04503). When\n `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)\n NMS. 
Soft NMS is currently only supported when pad_to_max_output_size is\n False.\n use_hard_nms: Enforce the usage of hard NMS.\n use_cpu_nms: Enforce NMS to run on CPU.\n scope: name scope.\n\n Returns:\n A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a\n BoxList holds M boxes with a rank-1 scores field representing\n corresponding scores for each box with scores sorted in decreasing order\n and a rank-1 classes field representing a class label for each box. The\n num_valid_nms_boxes is a 0-D integer tensor representing the number of\n valid elements in `BoxList`, with the valid elements appearing first.\n\n Raises:\n ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have\n a valid scores field.\n ValueError: if Soft NMS (tf.image.non_max_suppression_with_scores) is not\n supported in the current TF version and `soft_nms_sigma` is nonzero.\n \"\"\"\n _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh,\n change_coordinate_frame, clip_window)\n if pad_to_max_output_size and soft_nms_sigma != 0.0:\n raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not '\n 'supported when pad_to_max_output_size is True.')\n\n with tf.name_scope(scope, 'MultiClassNonMaxSuppression'), tf.device(\n 'cpu:0') if use_cpu_nms else NullContextmanager():\n num_scores = tf.shape(scores)[0]\n num_classes = shape_utils.get_dim_as_int(scores.get_shape()[1])\n\n selected_boxes_list = []\n num_valid_nms_boxes_cumulative = tf.constant(0)\n per_class_boxes_list = tf.unstack(boxes, axis=1)\n if masks is not None:\n per_class_masks_list = tf.unstack(masks, axis=1)\n if boundaries is not None:\n per_class_boundaries_list = tf.unstack(boundaries, axis=1)\n boxes_ids = (range(num_classes) if len(per_class_boxes_list) > 1\n else [0] * num_classes)\n for class_idx, boxes_idx in zip(range(num_classes), boxes_ids):\n per_class_boxes = per_class_boxes_list[boxes_idx]\n boxlist_and_class_scores = box_list.BoxList(per_class_boxes)\n class_scores = tf.reshape(\n tf.slice(scores, [0, class_idx], tf.stack([num_scores, 1])), [-1])\n\n boxlist_and_class_scores.add_field(fields.BoxListFields.scores,\n class_scores)\n if masks is not None:\n per_class_masks = per_class_masks_list[boxes_idx]\n boxlist_and_class_scores.add_field(fields.BoxListFields.masks,\n per_class_masks)\n if boundaries is not None:\n per_class_boundaries = per_class_boundaries_list[boxes_idx]\n boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries,\n per_class_boundaries)\n if additional_fields is not None:\n for key, tensor in additional_fields.items():\n boxlist_and_class_scores.add_field(key, tensor)\n\n nms_result = None\n selected_scores = None\n if pad_to_max_output_size:\n max_selection_size = max_size_per_class\n if use_partitioned_nms:\n (selected_indices, num_valid_nms_boxes,\n boxlist_and_class_scores.data['boxes'],\n boxlist_and_class_scores.data['scores'],\n _) = partitioned_non_max_suppression_padded(\n boxlist_and_class_scores.get(),\n boxlist_and_class_scores.get_field(fields.BoxListFields.scores),\n max_selection_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh)\n else:\n selected_indices, num_valid_nms_boxes = (\n tf.image.non_max_suppression_padded(\n boxlist_and_class_scores.get(),\n boxlist_and_class_scores.get_field(\n fields.BoxListFields.scores),\n max_selection_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh,\n pad_to_max_output_size=True))\n nms_result = box_list_ops.gather(boxlist_and_class_scores,\n selected_indices)\n selected_scores = 
nms_result.get_field(fields.BoxListFields.scores)\n else:\n max_selection_size = tf.minimum(max_size_per_class,\n boxlist_and_class_scores.num_boxes())\n if (hasattr(tf.image, 'non_max_suppression_with_scores') and\n tf.compat.forward_compatible(2019, 6, 6) and not use_hard_nms):\n (selected_indices, selected_scores\n ) = tf.image.non_max_suppression_with_scores(\n boxlist_and_class_scores.get(),\n boxlist_and_class_scores.get_field(fields.BoxListFields.scores),\n max_selection_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh,\n soft_nms_sigma=soft_nms_sigma)\n num_valid_nms_boxes = tf.shape(selected_indices)[0]\n selected_indices = tf.concat(\n [selected_indices,\n tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0)\n selected_scores = tf.concat(\n [selected_scores,\n tf.zeros(max_selection_size-num_valid_nms_boxes,\n tf.float32)], -1)\n nms_result = box_list_ops.gather(boxlist_and_class_scores,\n selected_indices)\n else:\n if soft_nms_sigma != 0:\n raise ValueError('Soft NMS not supported in current TF version!')\n selected_indices = tf.image.non_max_suppression(\n boxlist_and_class_scores.get(),\n boxlist_and_class_scores.get_field(fields.BoxListFields.scores),\n max_selection_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh)\n num_valid_nms_boxes = tf.shape(selected_indices)[0]\n selected_indices = tf.concat(\n [selected_indices,\n tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0)\n nms_result = box_list_ops.gather(boxlist_and_class_scores,\n selected_indices)\n selected_scores = nms_result.get_field(fields.BoxListFields.scores)\n # Make the scores -1 for invalid boxes.\n valid_nms_boxes_indices = tf.less(\n tf.range(max_selection_size), num_valid_nms_boxes)\n\n nms_result.add_field(\n fields.BoxListFields.scores,\n tf.where(valid_nms_boxes_indices,\n selected_scores, -1*tf.ones(max_selection_size)))\n num_valid_nms_boxes_cumulative += num_valid_nms_boxes\n\n nms_result.add_field(\n fields.BoxListFields.classes, (tf.zeros_like(\n nms_result.get_field(fields.BoxListFields.scores)) + class_idx))\n selected_boxes_list.append(nms_result)\n selected_boxes = box_list_ops.concatenate(selected_boxes_list)\n sorted_boxes = box_list_ops.sort_by_field(selected_boxes,\n fields.BoxListFields.scores)\n if clip_window is not None:\n # When pad_to_max_output_size is False, it prunes the boxes with zero\n # area.\n sorted_boxes, num_valid_nms_boxes_cumulative = _clip_window_prune_boxes(\n sorted_boxes, clip_window, pad_to_max_output_size,\n change_coordinate_frame)\n\n if max_total_size:\n max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes())\n sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size))\n num_valid_nms_boxes_cumulative = tf.where(\n max_total_size > num_valid_nms_boxes_cumulative,\n num_valid_nms_boxes_cumulative, max_total_size)\n # Select only the valid boxes if pad_to_max_output_size is False.\n if not pad_to_max_output_size:\n sorted_boxes = box_list_ops.gather(\n sorted_boxes, tf.range(num_valid_nms_boxes_cumulative))\n\n return sorted_boxes, num_valid_nms_boxes_cumulative\n\n\ndef class_agnostic_non_max_suppression(boxes,\n scores,\n score_thresh,\n iou_thresh,\n max_classes_per_detection=1,\n max_total_size=0,\n clip_window=None,\n change_coordinate_frame=False,\n masks=None,\n boundaries=None,\n pad_to_max_output_size=False,\n use_partitioned_nms=False,\n additional_fields=None,\n soft_nms_sigma=0.0,\n scope=None):\n \"\"\"Class-agnostic version of non maximum suppression.\n\n This op 
greedily selects a subset of detection bounding boxes, pruning\n away boxes that have high IOU (intersection over union) overlap (> thresh)\n with already selected boxes. It operates on all the boxes using\n max scores across all classes for which scores are provided (via the scores\n field of the input box_list), pruning boxes with score less than a provided\n threshold prior to applying NMS.\n\n Please note that this operation is performed in a class-agnostic way,\n therefore any background classes should be removed prior to calling this\n function.\n\n Selected boxes are guaranteed to be sorted in decreasing order by score (but\n the sort is not guaranteed to be stable).\n\n Args:\n boxes: A [k, q, 4] float32 tensor containing k detections. `q` can be either\n number of classes or 1 depending on whether a separate box is predicted\n per class.\n scores: A [k, num_classes] float32 tensor containing the scores for each of\n the k detections. The scores have to be non-negative when\n pad_to_max_output_size is True.\n score_thresh: scalar threshold for score (low scoring boxes are removed).\n iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap\n with previously selected boxes are removed).\n max_classes_per_detection: maximum number of retained classes per detection\n box in class-agnostic NMS.\n max_total_size: maximum number of boxes retained over all classes. By\n default returns all boxes retained after capping boxes per class.\n clip_window: A float32 tensor of the form [y_min, x_min, y_max, x_max]\n representing the window to clip and normalize boxes to before performing\n non-max suppression.\n change_coordinate_frame: Whether to normalize coordinates after clipping\n relative to clip_window (this can only be set to True if a clip_window is\n provided)\n masks: (optional) a [k, q, mask_height, mask_width] float32 tensor\n containing box masks. `q` can be either number of classes or 1 depending\n on whether a separate mask is predicted per class.\n boundaries: (optional) a [k, q, boundary_height, boundary_width] float32\n tensor containing box boundaries. `q` can be either number of classes or 1\n depending on whether a separate boundary is predicted per class.\n pad_to_max_output_size: If true, the output nmsed boxes are padded to be of\n length `max_size_per_class`. Defaults to false.\n use_partitioned_nms: If true, use partitioned version of\n non_max_suppression.\n additional_fields: (optional) If not None, a dictionary that maps keys to\n tensors whose first dimensions are all of size `k`. After non-maximum\n suppression, all tensors corresponding to the selected boxes will be added\n to resulting BoxList.\n soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;\n See Bodla et al, https://arxiv.org/abs/1704.04503). When\n `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)\n NMS. Soft NMS is currently only supported when pad_to_max_output_size is\n False.\n scope: name scope.\n\n Returns:\n A tuple of sorted_boxes and num_valid_nms_boxes. The sorted_boxes is a\n BoxList holds M boxes with a rank-1 scores field representing\n corresponding scores for each box with scores sorted in decreasing order\n and a rank-1 classes field representing a class label for each box. 
The\n num_valid_nms_boxes is a 0-D integer tensor representing the number of\n valid elements in `BoxList`, with the valid elements appearing first.\n\n Raises:\n ValueError: if iou_thresh is not in [0, 1] or if input boxlist does not have\n a valid scores field or if non-zero soft_nms_sigma is provided when\n pad_to_max_output_size is True.\n \"\"\"\n _validate_boxes_scores_iou_thresh(boxes, scores, iou_thresh,\n change_coordinate_frame, clip_window)\n if pad_to_max_output_size and soft_nms_sigma != 0.0:\n raise ValueError('Soft NMS (soft_nms_sigma != 0.0) is currently not '\n 'supported when pad_to_max_output_size is True.')\n\n if max_classes_per_detection > 1:\n raise ValueError('Max classes per detection box >1 not supported.')\n q = shape_utils.get_dim_as_int(boxes.shape[1])\n if q > 1:\n class_ids = tf.expand_dims(\n tf.argmax(scores, axis=1, output_type=tf.int32), axis=1)\n boxes = tf.batch_gather(boxes, class_ids)\n if masks is not None:\n masks = tf.batch_gather(masks, class_ids)\n if boundaries is not None:\n boundaries = tf.batch_gather(boundaries, class_ids)\n boxes = tf.squeeze(boxes, axis=[1])\n if masks is not None:\n masks = tf.squeeze(masks, axis=[1])\n if boundaries is not None:\n boundaries = tf.squeeze(boundaries, axis=[1])\n\n with tf.name_scope(scope, 'ClassAgnosticNonMaxSuppression'):\n boxlist_and_class_scores = box_list.BoxList(boxes)\n max_scores = tf.reduce_max(scores, axis=-1)\n classes_with_max_scores = tf.argmax(scores, axis=-1)\n boxlist_and_class_scores.add_field(fields.BoxListFields.scores, max_scores)\n if masks is not None:\n boxlist_and_class_scores.add_field(fields.BoxListFields.masks, masks)\n if boundaries is not None:\n boxlist_and_class_scores.add_field(fields.BoxListFields.boundaries,\n boundaries)\n\n if additional_fields is not None:\n for key, tensor in additional_fields.items():\n boxlist_and_class_scores.add_field(key, tensor)\n\n nms_result = None\n selected_scores = None\n if pad_to_max_output_size:\n max_selection_size = max_total_size\n if use_partitioned_nms:\n (selected_indices, num_valid_nms_boxes,\n boxlist_and_class_scores.data['boxes'],\n boxlist_and_class_scores.data['scores'],\n argsort_ids) = partitioned_non_max_suppression_padded(\n boxlist_and_class_scores.get(),\n boxlist_and_class_scores.get_field(fields.BoxListFields.scores),\n max_selection_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh)\n classes_with_max_scores = tf.gather(classes_with_max_scores,\n argsort_ids)\n else:\n selected_indices, num_valid_nms_boxes = (\n tf.image.non_max_suppression_padded(\n boxlist_and_class_scores.get(),\n boxlist_and_class_scores.get_field(fields.BoxListFields.scores),\n max_selection_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh,\n pad_to_max_output_size=True))\n nms_result = box_list_ops.gather(boxlist_and_class_scores,\n selected_indices)\n selected_scores = nms_result.get_field(fields.BoxListFields.scores)\n else:\n max_selection_size = tf.minimum(max_total_size,\n boxlist_and_class_scores.num_boxes())\n if (hasattr(tf.image, 'non_max_suppression_with_scores') and\n tf.compat.forward_compatible(2019, 6, 6)):\n (selected_indices, selected_scores\n ) = tf.image.non_max_suppression_with_scores(\n boxlist_and_class_scores.get(),\n boxlist_and_class_scores.get_field(fields.BoxListFields.scores),\n max_selection_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh,\n soft_nms_sigma=soft_nms_sigma)\n num_valid_nms_boxes = tf.shape(selected_indices)[0]\n selected_indices = tf.concat([\n 
selected_indices,\n tf.zeros(max_selection_size - num_valid_nms_boxes, tf.int32)\n ], 0)\n selected_scores = tf.concat(\n [selected_scores,\n tf.zeros(max_selection_size-num_valid_nms_boxes, tf.float32)], -1)\n nms_result = box_list_ops.gather(boxlist_and_class_scores,\n selected_indices)\n else:\n if soft_nms_sigma != 0:\n raise ValueError('Soft NMS not supported in current TF version!')\n selected_indices = tf.image.non_max_suppression(\n boxlist_and_class_scores.get(),\n boxlist_and_class_scores.get_field(fields.BoxListFields.scores),\n max_selection_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh)\n num_valid_nms_boxes = tf.shape(selected_indices)[0]\n selected_indices = tf.concat(\n [selected_indices,\n tf.zeros(max_selection_size-num_valid_nms_boxes, tf.int32)], 0)\n nms_result = box_list_ops.gather(boxlist_and_class_scores,\n selected_indices)\n selected_scores = nms_result.get_field(fields.BoxListFields.scores)\n valid_nms_boxes_indices = tf.less(\n tf.range(max_selection_size), num_valid_nms_boxes)\n nms_result.add_field(\n fields.BoxListFields.scores,\n tf.where(valid_nms_boxes_indices,\n selected_scores, -1*tf.ones(max_selection_size)))\n\n selected_classes = tf.gather(classes_with_max_scores, selected_indices)\n selected_classes = tf.cast(selected_classes, tf.float32)\n nms_result.add_field(fields.BoxListFields.classes, selected_classes)\n selected_boxes = nms_result\n sorted_boxes = box_list_ops.sort_by_field(selected_boxes,\n fields.BoxListFields.scores)\n\n if clip_window is not None:\n # When pad_to_max_output_size is False, it prunes the boxes with zero\n # area.\n sorted_boxes, num_valid_nms_boxes = _clip_window_prune_boxes(\n sorted_boxes, clip_window, pad_to_max_output_size,\n change_coordinate_frame)\n\n if max_total_size:\n max_total_size = tf.minimum(max_total_size, sorted_boxes.num_boxes())\n sorted_boxes = box_list_ops.gather(sorted_boxes, tf.range(max_total_size))\n num_valid_nms_boxes = tf.where(max_total_size > num_valid_nms_boxes,\n num_valid_nms_boxes, max_total_size)\n # Select only the valid boxes if pad_to_max_output_size is False.\n if not pad_to_max_output_size:\n sorted_boxes = box_list_ops.gather(sorted_boxes,\n tf.range(num_valid_nms_boxes))\n\n return sorted_boxes, num_valid_nms_boxes\n\n\ndef batch_multiclass_non_max_suppression(boxes,\n scores,\n score_thresh,\n iou_thresh,\n max_size_per_class,\n max_total_size=0,\n clip_window=None,\n change_coordinate_frame=False,\n num_valid_boxes=None,\n masks=None,\n additional_fields=None,\n soft_nms_sigma=0.0,\n scope=None,\n use_static_shapes=False,\n use_partitioned_nms=False,\n parallel_iterations=32,\n use_class_agnostic_nms=False,\n max_classes_per_detection=1,\n use_dynamic_map_fn=False,\n use_combined_nms=False,\n use_hard_nms=False,\n use_cpu_nms=False):\n \"\"\"Multi-class version of non maximum suppression that operates on a batch.\n\n This op is similar to `multiclass_non_max_suppression` but operates on a batch\n of boxes and scores. See documentation for `multiclass_non_max_suppression`\n for details.\n\n Args:\n boxes: A [batch_size, num_anchors, q, 4] float32 tensor containing\n detections. If `q` is 1 then same boxes are used for all classes\n otherwise, if `q` is equal to number of classes, class-specific boxes are\n used.\n scores: A [batch_size, num_anchors, num_classes] float32 tensor containing\n the scores for each of the `num_anchors` detections. 
The scores have to be\n non-negative when use_static_shapes is set True.\n score_thresh: scalar threshold for score (low scoring boxes are removed).\n iou_thresh: scalar threshold for IOU (new boxes that have high IOU overlap\n with previously selected boxes are removed).\n max_size_per_class: maximum number of retained boxes per class.\n max_total_size: maximum number of boxes retained over all classes. By\n default returns all boxes retained after capping boxes per class.\n clip_window: A float32 tensor of shape [batch_size, 4] where each entry is\n of the form [y_min, x_min, y_max, x_max] representing the window to clip\n boxes to before performing non-max suppression. This argument can also be\n a tensor of shape [4] in which case, the same clip window is applied to\n all images in the batch. If clip_widow is None, all boxes are used to\n perform non-max suppression.\n change_coordinate_frame: Whether to normalize coordinates after clipping\n relative to clip_window (this can only be set to True if a clip_window is\n provided)\n num_valid_boxes: (optional) a Tensor of type `int32`. A 1-D tensor of shape\n [batch_size] representing the number of valid boxes to be considered for\n each image in the batch. This parameter allows for ignoring zero\n paddings.\n masks: (optional) a [batch_size, num_anchors, q, mask_height, mask_width]\n float32 tensor containing box masks. `q` can be either number of classes\n or 1 depending on whether a separate mask is predicted per class.\n additional_fields: (optional) If not None, a dictionary that maps keys to\n tensors whose dimensions are [batch_size, num_anchors, ...].\n soft_nms_sigma: A scalar float representing the Soft NMS sigma parameter;\n See Bodla et al, https://arxiv.org/abs/1704.04503). When\n `soft_nms_sigma=0.0` (which is default), we fall back to standard (hard)\n NMS. Soft NMS is currently only supported when pad_to_max_output_size is\n False.\n scope: tf scope name.\n use_static_shapes: If true, the output nmsed boxes are padded to be of\n length `max_size_per_class` and it doesn't clip boxes to max_total_size.\n Defaults to false.\n use_partitioned_nms: If true, use partitioned version of\n non_max_suppression.\n parallel_iterations: (optional) number of batch items to process in\n parallel.\n use_class_agnostic_nms: If true, this uses class-agnostic non max\n suppression\n max_classes_per_detection: Maximum number of retained classes per detection\n box in class-agnostic NMS.\n use_dynamic_map_fn: If true, images in the batch will be processed within a\n dynamic loop. Otherwise, a static loop will be used if possible.\n use_combined_nms: If true, it uses tf.image.combined_non_max_suppression (\n multi-class version of NMS that operates on a batch).\n It greedily selects a subset of detection bounding boxes, pruning away\n boxes that have high IOU (intersection over union) overlap (> thresh) with\n already selected boxes. It operates independently for each batch.\n Within each batch, it operates independently for each class for which\n scores are provided (via the scores field of the input box_list),\n pruning boxes with score less than a provided threshold prior to applying\n NMS. 
This operation is performed on *all* batches and *all* classes\n in the batch, therefore any background classes should be removed prior to\n calling this function.\n Masks and additional fields are not supported.\n See argument checks in the code below for unsupported arguments.\n use_hard_nms: Enforce the usage of hard NMS.\n use_cpu_nms: Enforce NMS to run on CPU.\n\n Returns:\n 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor\n containing the non-max suppressed boxes.\n 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing\n the scores for the boxes.\n 'nmsed_classes': A [batch_size, max_detections] float32 tensor\n containing the class for boxes.\n 'nmsed_masks': (optional) a\n [batch_size, max_detections, mask_height, mask_width] float32 tensor\n containing masks for each selected box. This is set to None if input\n `masks` is None.\n 'nmsed_additional_fields': (optional) a dictionary of\n [batch_size, max_detections, ...] float32 tensors corresponding to the\n tensors specified in the input `additional_fields`. This is not returned\n if input `additional_fields` is None.\n 'num_detections': A [batch_size] int32 tensor indicating the number of\n valid detections per batch item. Only the top num_detections[i] entries in\n nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the\n entries are zero paddings.\n\n Raises:\n ValueError: if `q` in boxes.shape is not 1 or not equal to number of\n classes as inferred from scores.shape.\n \"\"\"\n if use_combined_nms:\n if change_coordinate_frame:\n raise ValueError(\n 'change_coordinate_frame (normalizing coordinates'\n ' relative to clip_window) is not supported by combined_nms.')\n if num_valid_boxes is not None:\n raise ValueError('num_valid_boxes is not supported by combined_nms.')\n if masks is not None:\n raise ValueError('masks is not supported by combined_nms.')\n if soft_nms_sigma != 0.0:\n raise ValueError('Soft NMS is not supported by combined_nms.')\n if use_class_agnostic_nms:\n raise ValueError('class-agnostic NMS is not supported by combined_nms.')\n if clip_window is not None:\n tf.logging.warning(\n 'clip_window is not supported by combined_nms unless it is'\n ' [0. 0. 1. 1.] 
for each image.')\n if additional_fields is not None:\n tf.logging.warning('additional_fields is not supported by combined_nms.')\n if parallel_iterations != 32:\n tf.logging.warning('Number of batch items to be processed in parallel is'\n ' not configurable by combined_nms.')\n if max_classes_per_detection > 1:\n tf.logging.warning(\n 'max_classes_per_detection is not configurable by combined_nms.')\n\n with tf.name_scope(scope, 'CombinedNonMaxSuppression'):\n (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes,\n batch_num_detections) = tf.image.combined_non_max_suppression(\n boxes=boxes,\n scores=scores,\n max_output_size_per_class=max_size_per_class,\n max_total_size=max_total_size,\n iou_threshold=iou_thresh,\n score_threshold=score_thresh,\n pad_per_class=use_static_shapes)\n # Not supported by combined_non_max_suppression.\n batch_nmsed_masks = None\n # Not supported by combined_non_max_suppression.\n batch_nmsed_additional_fields = None\n return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes,\n batch_nmsed_masks, batch_nmsed_additional_fields,\n batch_num_detections)\n\n q = shape_utils.get_dim_as_int(boxes.shape[2])\n num_classes = shape_utils.get_dim_as_int(scores.shape[2])\n if q != 1 and q != num_classes:\n raise ValueError('third dimension of boxes must be either 1 or equal '\n 'to the third dimension of scores.')\n if change_coordinate_frame and clip_window is None:\n raise ValueError('if change_coordinate_frame is True, then a clip_window'\n 'must be specified.')\n original_masks = masks\n\n # Create ordered dictionary using the sorted keys from\n # additional fields to ensure getting the same key value assignment\n # in _single_image_nms_fn(). The dictionary is thus a sorted version of\n # additional_fields.\n if additional_fields is None:\n ordered_additional_fields = collections.OrderedDict()\n else:\n ordered_additional_fields = collections.OrderedDict(\n sorted(additional_fields.items(), key=lambda item: item[0]))\n\n with tf.name_scope(scope, 'BatchMultiClassNonMaxSuppression'):\n boxes_shape = boxes.shape\n batch_size = shape_utils.get_dim_as_int(boxes_shape[0])\n num_anchors = shape_utils.get_dim_as_int(boxes_shape[1])\n\n if batch_size is None:\n batch_size = tf.shape(boxes)[0]\n if num_anchors is None:\n num_anchors = tf.shape(boxes)[1]\n\n # If num valid boxes aren't provided, create one and mark all boxes as\n # valid.\n if num_valid_boxes is None:\n num_valid_boxes = tf.ones([batch_size], dtype=tf.int32) * num_anchors\n\n # If masks aren't provided, create dummy masks so we can only have one copy\n # of _single_image_nms_fn and discard the dummy masks after map_fn.\n if masks is None:\n masks_shape = tf.stack([batch_size, num_anchors, q, 1, 1])\n masks = tf.zeros(masks_shape)\n\n if clip_window is None:\n clip_window = tf.stack([\n tf.reduce_min(boxes[:, :, :, 0]),\n tf.reduce_min(boxes[:, :, :, 1]),\n tf.reduce_max(boxes[:, :, :, 2]),\n tf.reduce_max(boxes[:, :, :, 3])\n ])\n if clip_window.shape.ndims == 1:\n clip_window = tf.tile(tf.expand_dims(clip_window, 0), [batch_size, 1])\n\n def _single_image_nms_fn(args):\n \"\"\"Runs NMS on a single image and returns padded output.\n\n Args:\n args: A list of tensors consisting of the following:\n per_image_boxes - A [num_anchors, q, 4] float32 tensor containing\n detections. 
If `q` is 1 then same boxes are used for all classes\n otherwise, if `q` is equal to number of classes, class-specific\n boxes are used.\n per_image_scores - A [num_anchors, num_classes] float32 tensor\n containing the scores for each of the `num_anchors` detections.\n per_image_masks - A [num_anchors, q, mask_height, mask_width] float32\n tensor containing box masks. `q` can be either number of classes\n or 1 depending on whether a separate mask is predicted per class.\n per_image_clip_window - A 1D float32 tensor of the form\n [ymin, xmin, ymax, xmax] representing the window to clip the boxes\n to.\n per_image_additional_fields - (optional) A variable number of float32\n tensors each with size [num_anchors, ...].\n per_image_num_valid_boxes - A tensor of type `int32`. A 1-D tensor of\n shape [batch_size] representing the number of valid boxes to be\n considered for each image in the batch. This parameter allows for\n ignoring zero paddings.\n\n Returns:\n 'nmsed_boxes': A [max_detections, 4] float32 tensor containing the\n non-max suppressed boxes.\n 'nmsed_scores': A [max_detections] float32 tensor containing the scores\n for the boxes.\n 'nmsed_classes': A [max_detections] float32 tensor containing the class\n for boxes.\n 'nmsed_masks': (optional) a [max_detections, mask_height, mask_width]\n float32 tensor containing masks for each selected box. This is set to\n None if input `masks` is None.\n 'nmsed_additional_fields': (optional) A variable number of float32\n tensors each with size [max_detections, ...] corresponding to the\n input `per_image_additional_fields`.\n 'num_detections': A [batch_size] int32 tensor indicating the number of\n valid detections per batch item. Only the top num_detections[i]\n entries in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. 
The\n rest of the entries are zero paddings.\n \"\"\"\n per_image_boxes = args[0]\n per_image_scores = args[1]\n per_image_masks = args[2]\n per_image_clip_window = args[3]\n # Make sure that the order of elements passed in args is aligned with\n # the iteration order of ordered_additional_fields\n per_image_additional_fields = {\n key: value\n for key, value in zip(ordered_additional_fields, args[4:-1])\n }\n per_image_num_valid_boxes = args[-1]\n if use_static_shapes:\n total_proposals = tf.shape(per_image_scores)\n per_image_scores = tf.where(\n tf.less(tf.range(total_proposals[0]), per_image_num_valid_boxes),\n per_image_scores,\n tf.fill(total_proposals, np.finfo('float32').min))\n else:\n per_image_boxes = tf.reshape(\n tf.slice(per_image_boxes, 3 * [0],\n tf.stack([per_image_num_valid_boxes, -1, -1])), [-1, q, 4])\n per_image_scores = tf.reshape(\n tf.slice(per_image_scores, [0, 0],\n tf.stack([per_image_num_valid_boxes, -1])),\n [-1, num_classes])\n per_image_masks = tf.reshape(\n tf.slice(per_image_masks, 4 * [0],\n tf.stack([per_image_num_valid_boxes, -1, -1, -1])),\n [-1, q, shape_utils.get_dim_as_int(per_image_masks.shape[2]),\n shape_utils.get_dim_as_int(per_image_masks.shape[3])])\n if per_image_additional_fields is not None:\n for key, tensor in per_image_additional_fields.items():\n additional_field_shape = tensor.get_shape()\n additional_field_dim = len(additional_field_shape)\n per_image_additional_fields[key] = tf.reshape(\n tf.slice(\n per_image_additional_fields[key],\n additional_field_dim * [0],\n tf.stack([per_image_num_valid_boxes] +\n (additional_field_dim - 1) * [-1])), [-1] + [\n shape_utils.get_dim_as_int(dim)\n for dim in additional_field_shape[1:]\n ])\n if use_class_agnostic_nms:\n nmsed_boxlist, num_valid_nms_boxes = class_agnostic_non_max_suppression(\n per_image_boxes,\n per_image_scores,\n score_thresh,\n iou_thresh,\n max_classes_per_detection,\n max_total_size,\n clip_window=per_image_clip_window,\n change_coordinate_frame=change_coordinate_frame,\n masks=per_image_masks,\n pad_to_max_output_size=use_static_shapes,\n use_partitioned_nms=use_partitioned_nms,\n additional_fields=per_image_additional_fields,\n soft_nms_sigma=soft_nms_sigma)\n else:\n nmsed_boxlist, num_valid_nms_boxes = multiclass_non_max_suppression(\n per_image_boxes,\n per_image_scores,\n score_thresh,\n iou_thresh,\n max_size_per_class,\n max_total_size,\n clip_window=per_image_clip_window,\n change_coordinate_frame=change_coordinate_frame,\n masks=per_image_masks,\n pad_to_max_output_size=use_static_shapes,\n use_partitioned_nms=use_partitioned_nms,\n additional_fields=per_image_additional_fields,\n soft_nms_sigma=soft_nms_sigma,\n use_hard_nms=use_hard_nms,\n use_cpu_nms=use_cpu_nms)\n\n if not use_static_shapes:\n nmsed_boxlist = box_list_ops.pad_or_clip_box_list(\n nmsed_boxlist, max_total_size)\n num_detections = num_valid_nms_boxes\n nmsed_boxes = nmsed_boxlist.get()\n nmsed_scores = nmsed_boxlist.get_field(fields.BoxListFields.scores)\n nmsed_classes = nmsed_boxlist.get_field(fields.BoxListFields.classes)\n nmsed_masks = nmsed_boxlist.get_field(fields.BoxListFields.masks)\n nmsed_additional_fields = []\n # Sorting is needed here to ensure that the values stored in\n # nmsed_additional_fields are always kept in the same order\n # across different execution runs.\n for key in sorted(per_image_additional_fields.keys()):\n nmsed_additional_fields.append(nmsed_boxlist.get_field(key))\n return ([nmsed_boxes, nmsed_scores, nmsed_classes, nmsed_masks] +\n nmsed_additional_fields + 
[num_detections])\n\n num_additional_fields = 0\n if ordered_additional_fields:\n num_additional_fields = len(ordered_additional_fields)\n num_nmsed_outputs = 4 + num_additional_fields\n\n if use_dynamic_map_fn:\n map_fn = tf.map_fn\n else:\n map_fn = shape_utils.static_or_dynamic_map_fn\n\n batch_outputs = map_fn(\n _single_image_nms_fn,\n elems=([boxes, scores, masks, clip_window] +\n list(ordered_additional_fields.values()) + [num_valid_boxes]),\n dtype=(num_nmsed_outputs * [tf.float32] + [tf.int32]),\n parallel_iterations=parallel_iterations)\n\n batch_nmsed_boxes = batch_outputs[0]\n batch_nmsed_scores = batch_outputs[1]\n batch_nmsed_classes = batch_outputs[2]\n batch_nmsed_masks = batch_outputs[3]\n batch_nmsed_values = batch_outputs[4:-1]\n\n batch_nmsed_additional_fields = {}\n if num_additional_fields > 0:\n # Sort the keys to ensure arranging elements in same order as\n # in _single_image_nms_fn.\n batch_nmsed_keys = list(ordered_additional_fields.keys())\n for i in range(len(batch_nmsed_keys)):\n batch_nmsed_additional_fields[\n batch_nmsed_keys[i]] = batch_nmsed_values[i]\n\n batch_num_detections = batch_outputs[-1]\n\n if original_masks is None:\n batch_nmsed_masks = None\n\n if not ordered_additional_fields:\n batch_nmsed_additional_fields = None\n\n return (batch_nmsed_boxes, batch_nmsed_scores, batch_nmsed_classes,\n batch_nmsed_masks, batch_nmsed_additional_fields,\n batch_num_detections)\n" ]
[ [ "tensorflow.compat.v1.image.combined_non_max_suppression", "tensorflow.compat.v1.logging.warning", "tensorflow.compat.v1.expand_dims", "tensorflow.compat.v1.unstack", "tensorflow.compat.v1.reduce_sum", "tensorflow.compat.v1.shape", "tensorflow.compat.v1.compat.forward_compatible", "tensorflow.compat.v1.reduce_max", "tensorflow.compat.v1.where", "tensorflow.compat.v1.logical_and", "tensorflow.compat.v1.gather", "tensorflow.compat.v1.device", "tensorflow.compat.v1.constant", "tensorflow.compat.v1.maximum", "tensorflow.compat.v1.cast", "tensorflow.compat.v1.argmax", "tensorflow.compat.v1.nn.top_k", "tensorflow.compat.v1.stack", "tensorflow.compat.v1.reshape", "tensorflow.compat.v1.transpose", "tensorflow.compat.v1.reduce_all", "tensorflow.compat.v1.slice", "tensorflow.compat.v1.reduce_any", "tensorflow.compat.v1.ones", "tensorflow.compat.v1.minimum", "tensorflow.compat.v1.batch_gather", "tensorflow.compat.v1.split", "numpy.finfo", "tensorflow.compat.v1.less", "tensorflow.compat.v1.greater_equal", "tensorflow.compat.v1.zeros", "tensorflow.compat.v1.range", "tensorflow.compat.v1.ones_like", "tensorflow.compat.v1.reduce_min", "tensorflow.compat.v1.squeeze", "tensorflow.compat.v1.name_scope" ] ]
lukeandshuo/sensiac_fast_rcnn_quebec
[ "4b23dad3d0490b3e288bc538dd0f83512f595149" ]
[ "experiments/curves/Recall/6:2/DrawRecall.py" ]
[ "import cPickle\nimport matplotlib.pyplot as plt\nimport os\nimport os.path as op\nbase_path= os.path.split(os.path.abspath(__file__))[0]\nxlabel=[0.5,0.6,0.7,0.8,0.9]\nIR=[0.469,0.231,0.050,0.001,0.00]\nVisible =[0.919,0.692,0.371,0.108,0.005]\nplt.plot(xlabel,Visible,'r',label='Visible =0.3731')\nplt.plot(xlabel,IR,'b',label='IR = 0.124')\nplt.xlabel('IOU')\nplt.ylabel('Recall')\nplt.xlim([0.5,0.9])\nplt.ylim([0.0,1.0])\nplt.legend(loc='upper right')\nplt.savefig('recall.png')\nplt.show()" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.savefig", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
Mishne-Lab/SOAP-Self-supervised-Online-Adversarial-Purification
[ "09535124ef13e3f957d25b3a4e54af7f5f713a73" ]
[ "models.py" ]
[ "import torch\nimport torch.nn as nn\nimport torchvision.models as models\nimport torchvision.transforms as transforms\n\nfrom resnet import *\nfrom wide_resnet import *\n\nimport os\n\ndef load_model(name, dataset, n_class=10, in_channel=3, save_dir=None, substitute=False):\n if name == 'fcnet':\n model = FCNet(n_class=n_class, in_dim=784, hidden_dim=128)\n elif name == 'cnet':\n model = CNet(n_class=n_class, in_channel=in_channel)\n elif name == 'ae':\n model = AutoEncoder(n_class=n_class, in_dim=784, hidden_dim=128)\n elif name == 'cae':\n model = ConvAutoEncoder(n_class=n_class, in_channel=in_channel)\n elif name == 'resnet':\n model = ResNet_(18, n_class)\n elif name == 'wide-resnet':\n model = Wide_ResNet_(28, 10, 0.3, n_class)\n elif name == 'resnet-rot':\n model = ResNet(n_class=n_class)\n elif name == 'wide-resnet-rot':\n model = WResNet(n_class=n_class)\n else:\n raise TypeError(\"Unrecognized model name: {}\".format(name))\n\n if dataset == 'cifar10':\n model.add_normalizer(normalizer(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]))\n elif dataset == 'cifar100':\n model.add_normalizer(normalizer(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761]))\n\n if save_dir is not None:\n if substitute:\n model.load_state_dict(torch.load(os.path.join(save_dir, 'substitute_{}.pth'.format(name)), map_location='cpu'))\n else:\n model.load_state_dict(torch.load(os.path.join(save_dir, 'latest_model.pth'), map_location='cpu'))\n return model\n\n\nclass normalizer(nn.Module):\n\n def __init__(self, mean, std):\n super(normalizer, self).__init__()\n self.mean = torch.FloatTensor(mean)[:, None, None]\n self.std = torch.FloatTensor(std)[:, None, None]\n\n def forward(self, x):\n return (x - self.mean.to(x.device)) / self.std.to(x.device)\n\n\nclass add_noise(nn.Module):\n\n def __init__(self, std):\n super(add_noise, self).__init__()\n self.std = std\n\n def forward(self, x):\n return (x + torch.randn_like(x)*self.std).clamp(0,1)\n\n\nclass FCNet(nn.Module):\n\n def __init__(self, n_class, in_dim, hidden_dim=128, nonlinear='Relu'):\n super(FCNet, self).__init__()\n self.fc1 = nn.Linear(in_dim, hidden_dim*2)\n self.fc2 = nn.Linear(hidden_dim*2, hidden_dim)\n self.cls = nn.Linear(hidden_dim, n_class)\n self.relu = nn.ReLU(inplace=True)\n self.dropout = nn.Dropout(p=0.5)\n\n def forward(self, x, return_reps=False):\n\n x = x.flatten(start_dim=1)\n x = self.relu(self.fc1(x))\n x = self.dropout(x)\n x = self.relu(self.fc2(x))\n x = self.dropout(x)\n x = self.cls(x)\n return x\n\n\nclass CNet(nn.Module):\n def __init__(self, n_class, in_channel=3, hidden_dim=1024):\n\n super(CNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channel, 32, 5, padding=2)\n self.conv2 = nn.Conv2d(32, 64, 5, padding=2)\n self.fc1 = nn.Linear(7*7*64, hidden_dim)\n self.cls = nn.Linear(hidden_dim, n_class)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(2)\n\n def forward(self, x, return_reps=False):\n\n x = self.relu(self.conv1(x))\n x = self.maxpool(x)\n x = self.relu(self.conv2(x))\n x = self.maxpool(x)\n x = x.view(-1, 7*7*64)\n x = self.relu(self.fc1(x))\n x = self.cls(x)\n return x\n\n\nclass AutoEncoder(nn.Module):\n\n def __init__(self, n_class, in_dim, hidden_dim=128):\n\n super(AutoEncoder, self).__init__()\n self.fc1 = nn.Linear(in_dim, hidden_dim*2)\n self.fc2 = nn.Linear(hidden_dim*2, hidden_dim)\n self.fc3 = nn.Linear(hidden_dim, hidden_dim*2)\n self.fc4 = nn.Linear(hidden_dim*2, in_dim)\n self.cls = nn.Linear(hidden_dim, n_class)\n \n self.relu = nn.ReLU(inplace=True)\n 
self.sigmoid = nn.Sigmoid()\n self.dropout = nn.Dropout(p=0.5)\n\n self.aux = False\n\n def forward(self, x, add_noise=False, return_reps=False):\n\n if add_noise:\n x = (x + torch.randn_like(x)*0.5).clamp(0,1)\n size = x.shape\n x = x.flatten(start_dim=1)\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n\n if return_reps:\n return x\n\n l = self.cls(x)\n self.pred = l\n \n x = self.relu(self.fc3(x))\n x = self.sigmoid(self.fc4(x))\n self.r = x.reshape(*size)\n if self.aux:\n return self.r\n\n return l\n\n\nclass ConvAutoEncoder(nn.Module):\n\n def __init__(self, n_class, in_channel=3, hidden_dim=1024, out_channel=None):\n\n super(ConvAutoEncoder, self).__init__()\n if not out_channel:\n out_channel = in_channel\n self.conv1 = nn.Conv2d(in_channel, 32, 3, padding=1, stride=2)\n self.conv2 = nn.Conv2d(32, 64, 3, padding=1, stride=2)\n self.fc1 = nn.Linear(7*7*64, hidden_dim)\n self.cls = nn.Linear(hidden_dim, n_class)\n\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n self.fc2 = nn.Linear(hidden_dim, 7*7*64)\n self.conv3 = nn.ConvTranspose2d(64, 32, 3, stride=2, padding=1, output_padding=1)\n self.conv4 = nn.ConvTranspose2d(32, out_channel, 3, stride=2, padding=1, output_padding=1)\n\n self.aux = False\n\n def forward(self, x, add_noise=False, return_reps=False):\n\n size = x.shape\n if add_noise:\n x = (x + torch.randn_like(x)*0.5).clamp(0,1)\n x = self.relu(self.conv1(x))\n x = self.relu(self.conv2(x))\n\n x = x.view(-1, 7*7*64)\n x = self.relu(self.fc1(x))\n\n if return_reps:\n return x\n\n l = self.cls(x)\n self.pred = l\n \n x = self.relu(self.fc2(x))\n x = x.view(-1, 64, 7, 7)\n x = self.relu(self.conv3(x))\n x = self.sigmoid(self.conv4(x))\n \n self.r = x.reshape(*size)\n if self.aux:\n return self.r\n return l\n\n\nclass FCNet_rotate(nn.Module):\n\n def __init__(self, n_class, in_dim, hidden_dim=128):\n\n super(FCNet_rotate, self).__init__()\n self.fc1 = nn.Linear(in_dim, hidden_dim)\n self.fc2 = nn.Linear(hidden_dim, 32)\n self.fc3 = nn.Linear(32, n_class)\n self.fc4 = nn.Linear(32, 4)\n self.relu = nn.ReLU(inplace=True)\n self.aux = False\n\n def forward(self, x):\n\n size = x.shape\n x = x.flatten(start_dim=1)\n x = self.relu(self.fc1(x))\n x = self.relu(self.fc2(x))\n self.pred_deg = self.fc4(x)\n if self.aux:\n return self.pred_deg\n return self.fc3(x)\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, n_class):\n\n super(ResNet, self).__init__()\n self.resnet = ResNet_(18, n_class)\n self.fc1 = nn.Linear(64, 4)\n self.aux = False\n\n def forward(self, x, add_noise=False, return_reps=False):\n\n if add_noise:\n x = (x + torch.randn_like(x)*0.1).clamp(0,1)\n l = self.resnet(x)\n\n if return_reps:\n return self.resnet.x\n self.pred_deg = self.fc1(self.resnet.x)\n if self.aux:\n return self.pred_deg\n self.pred = l\n return l\n\n def add_normalizer(self, normalizer):\n self.resnet.add_normalizer(normalizer) \n\n\nclass WResNet(nn.Module):\n\n def __init__(self, n_class, k=10):\n\n super(WResNet, self).__init__()\n self.resnet = Wide_ResNet_(28, k, 0.3, n_class)\n self.fc1 = nn.Linear(k*64, 4)\n self.aux = False\n\n def forward(self, x, add_noise=False, return_reps=False):\n\n if add_noise:\n x = (x + torch.randn_like(x)*0.1).clamp(0,1)\n # normalization in wide-resnet\n l = self.resnet(x)\n\n if return_reps:\n return self.resnet.x\n \n self.pred_deg = self.fc1(self.resnet.x)\n if self.aux:\n return self.pred_deg\n self.pred = l\n return l\n\n def add_normalizer(self, normalizer):\n self.resnet.add_normalizer(normalizer) \n" ]
[ [ "torch.nn.MaxPool2d", "torch.FloatTensor", "torch.nn.Linear", "torch.randn_like", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.ReLU", "torch.nn.Dropout", "torch.nn.ConvTranspose2d" ] ]
SoliareofAstora/DeepFRI
[ "a7de5c4e3036109b5a5711ca97715d0ec340cd8d" ]
[ "deepfrier/DeepCNN.py" ]
[ "import glob\nimport tensorflow as tf\n\nfrom .utils import get_batched_dataset\nfrom .layers import FuncPredictor\n\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n\n\nclass DeepCNN(object):\n \"\"\" Class containig the CNN model for predicting protein function. \"\"\"\n def __init__(self, output_dim, n_channels=26, num_filters=[100], filter_lens=[3], lr=0.0002, drop=0.3, l2_reg=0.001,\n lm_model_name=None, model_name_prefix=None):\n \"\"\" Initialize the model\n :param output_dim: {int} number of GO terms/EC numbers\n :param n_channels: {int} number of input features per residue (26 for 1-hot encoding)\n :param num_filters: {list <int>} number of filters in the first CNN layer\n :param filter_lens: {list <int>} filter lengths in the first CNN layer\n :param lr: {float} learning rate for Adam optimizer\n :param drop: {float} dropout fraction for Dense layers\n :param l2_reg: {float} l2 regularization parameter\n :lm_model: {string} name of the pre-trained LSTM language model to be loaded\n \"\"\"\n self.output_dim = output_dim\n self.n_channels = n_channels\n self.model_name_prefix = model_name_prefix\n\n if lm_model_name is not None:\n lm_model = tf.keras.models.load_model(lm_model_name)\n lm_model = tf.keras.Model(inputs=lm_model.input, outputs=lm_model.get_layer(\"LSTM2\").output)\n lm_model.trainable = False\n else:\n lm_model = None\n\n # build and compile model\n self._build_model(num_filters, filter_lens, n_channels, output_dim, lr, drop, l2_reg, lm_model=lm_model)\n\n def _build_model(self, num_filters, filter_lens, n_channels, output_dim, lr, drop, l2_reg, lm_model=None):\n print (\"### Compiling DeepCNN model...\")\n\n input_seq = tf.keras.layers.Input(shape=(None, n_channels), name='seq')\n\n # Encoding layers\n x = input_seq\n if lm_model is not None:\n x_lm = tf.keras.layers.Dense(128, use_bias=False, name='LM_embedding')(lm_model(x))\n x_aa = tf.keras.layers.Dense(128, name='AA_embedding')(x)\n x = tf.keras.layers.Add(name='Emedding')([x_lm, x_aa])\n x = tf.keras.layers.Activation('relu')(x)\n\n # Conv layers\n x_concat = []\n for l in range(0, len(num_filters)):\n x_l = tf.keras.layers.Conv1D(filters=num_filters[l], kernel_size=filter_lens[l],\n padding='same', kernel_regularizer=tf.keras.regularizers.l2(l2_reg))(x)\n x_concat.append(x_l)\n\n x = tf.keras.layers.Concatenate()(x_concat)\n x = tf.keras.layers.BatchNormalization()(x)\n x = tf.keras.layers.Activation('relu', name='CNN_concatenate')(x)\n x = tf.keras.layers.Dropout(drop)(x)\n\n # 1-d features\n x = tf.keras.layers.GlobalMaxPooling1D()(x)\n x = tf.keras.layers.Dropout(2*drop)(x)\n\n # Output layer\n output_layer = FuncPredictor(output_dim=output_dim, name='labels')(x)\n\n self.model = tf.keras.Model(inputs=input_seq, outputs=output_layer)\n optimizer = tf.keras.optimizers.Adam(lr=lr, beta_1=0.95, beta_2=0.99)\n pred_loss = tf.keras.losses.CategoricalCrossentropy()\n self.model.compile(optimizer=optimizer, loss=pred_loss, metrics=['acc'])\n print (self.model.summary())\n\n def train(self, train_tfrecord_fn, valid_tfrecord_fn, epochs=100, batch_size=64, pad_len=1000, ont='mf', class_weight=None):\n n_train_records = sum(1 for f in glob.glob(train_tfrecord_fn) for _ in tf.data.TFRecordDataset(f))\n n_valid_records = sum(1 for f in glob.glob(valid_tfrecord_fn) for _ in tf.data.TFRecordDataset(f))\n print (\"### Training on: \", n_train_records, \"contact maps.\")\n print (\"### Validating on: \", n_valid_records, \"contact maps.\")\n\n # train tfrecords\n batch_train = 
get_batched_dataset(train_tfrecord_fn,\n batch_size=batch_size,\n pad_len=pad_len,\n n_goterms=self.output_dim,\n channels=self.n_channels,\n gcn=False,\n ont=ont)\n\n # validation tfrecords\n batch_valid = get_batched_dataset(valid_tfrecord_fn,\n batch_size=batch_size,\n pad_len=pad_len,\n n_goterms=self.output_dim,\n channels=self.n_channels,\n gcn=False,\n ont=ont)\n\n # early stopping\n es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', mode='min', patience=5)\n\n # model checkpoint\n mc = tf.keras.callbacks.ModelCheckpoint(self.model_name_prefix + '_best_train_model.h5', monitor='val_loss', mode='min', verbose=2,\n save_best_only=True, save_weights_only=True)\n\n # fit model\n history = self.model.fit(batch_train,\n epochs=epochs,\n validation_data=batch_valid,\n class_weight=class_weight,\n steps_per_epoch=n_train_records//batch_size,\n validation_steps=n_valid_records//batch_size,\n callbacks=[es, mc])\n\n self.history = history.history\n\n def predict(self, input_data):\n return self.model(input_data).numpy()[0][:, 0]\n\n def plot_losses(self):\n plt.figure()\n plt.plot(self.history['loss'], '-')\n plt.plot(self.history['val_loss'], '-')\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.savefig(self.model_name_prefix + '_model_loss.png', bbox_inches='tight')\n\n plt.figure()\n plt.plot(self.history['acc'], '-')\n plt.plot(self.history['val_acc'], '-')\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'validation'], loc='upper left')\n plt.savefig(self.model_name_prefix + '_model_accuracy.png', bbox_inches='tight')\n\n def save_model(self):\n self.model.save(self.model_name_prefix + '.hdf5')\n\n def load_model(self):\n self.model = tf.keras.models.load_model(self.model_name_prefix,\n custom_objects={'FuncPredictor': FuncPredictor})\n" ]
[ [ "tensorflow.data.TFRecordDataset", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.layers.Concatenate", "tensorflow.keras.callbacks.EarlyStopping", "matplotlib.pyplot.ylabel", "tensorflow.keras.layers.GlobalMaxPooling1D", "tensorflow.keras.layers.Add", "matplotlib.pyplot.plot", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "tensorflow.keras.layers.Activation", "matplotlib.pyplot.title", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "tensorflow.keras.regularizers.l2", "matplotlib.pyplot.legend", "tensorflow.keras.models.load_model", "matplotlib.pyplot.switch_backend", "tensorflow.keras.Model", "tensorflow.keras.losses.CategoricalCrossentropy", "tensorflow.keras.callbacks.ModelCheckpoint", "matplotlib.pyplot.xlabel", "tensorflow.keras.layers.Input" ] ]
arnabgho/infoGAN-pytorch
[ "60f31010768f3e07010ac60845411a4a41fa1bba" ]
[ "common_net.py" ]
[ "import torch\nimport torch.nn as nn\n\ndef gaussian_weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\nclass BATCHResBlock(nn.Module):\n def __init__(self,num_neurons,dropout=0.0):\n super(BATCHResBlock, self).__init__()\n\n model = []\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.BatchNorm1d(num_neurons)]\n model += [nn.ReLU(inplace=True)]\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.BatchNorm1d(num_neurons)]\n if dropout > 0:\n model += [nn.Dropout(p=dropout)]\n self.model = nn.Sequential(*model)\n\n def forward(self,x):\n residual=x\n out=self.model(x)\n out+=residual\n return out\n\n\nclass INSResBlock(nn.Module):\n def __init__(self,num_neurons,dropout=0.0):\n super(INSResBlock, self).__init__()\n\n model = []\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.InstanceNorm1d(num_neurons)]\n model += [nn.ReLU(inplace=True)]\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.InstanceNorm1d(num_neurons)]\n if dropout > 0:\n model += [nn.Dropout(p=dropout)]\n self.model = nn.Sequential(*model)\n\n def forward(self,x):\n residual=x\n out=self.model(x)\n out+=residual\n return out\n\nclass ResBlock(nn.Module):\n def __init__(self,num_neurons,dropout=0.0):\n super(ResBlock, self).__init__()\n\n model = []\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.BatchNorm1d(num_neurons)] # Just testing might be removed\n model += [nn.ReLU(inplace=True)]\n model += [nn.Linear(num_neurons,num_neurons)]\n #model += [nn.BatchNorm1d(num_neurons)] # Just testing might be removed\n if dropout > 0:\n model += [nn.Dropout(p=dropout)]\n self.model = nn.Sequential(*model)\n\n def forward(self,x):\n residual=x\n out=self.model(x)\n out+=residual\n return out\n\nclass GatedResBlock(nn.Module):\n def __init__(self,num_neurons,dropout=0.0):\n super(GatedResBlock, self).__init__()\n\n model = []\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.ReLU(inplace=True)]\n model += [nn.Linear(num_neurons,num_neurons)]\n if dropout > 0:\n model += [nn.Dropout(p=dropout)]\n self.model = nn.Sequential(*model)\n\n def forward(self,x,alpha):\n residual=x\n out=alpha*self.model(x)\n out+=residual\n return out\n\nclass GatedConvResBlock(nn.Module):\n def conv3x3(self, inplanes, out_planes, stride=1):\n return nn.Conv2d(inplanes, out_planes, kernel_size=3, stride=stride, padding=1)\n\n def __init__(self, inplanes, planes, stride=1, dropout=0.0):\n super(GatedConvResBlock, self).__init__()\n model = []\n model += [self.conv3x3(inplanes, planes, stride)]\n model += [nn.BatchNorm2d(planes)] #[nn.InstanceNorm2d(planes)]\n model += [nn.ReLU(inplace=True)]\n model += [self.conv3x3(planes, planes)]\n model += [nn.BatchNorm2d(planes)] #[nn.InstanceNorm2d(planes)]\n if dropout > 0:\n model += [nn.Dropout(p=dropout)]\n self.model = nn.Sequential(*model)\n #self.model.apply(gaussian_weights_init)\n\n def forward(self, x,alpha):\n residual = x\n out = alpha*self.model(x)\n out += residual\n return out\n\nclass ConvResBlock(nn.Module):\n def conv3x3(self, inplanes, out_planes, stride=1):\n return nn.Conv2d(inplanes, out_planes, kernel_size=3, stride=stride, padding=1)\n\n def __init__(self, inplanes, planes, stride=1, dropout=0.0):\n super(ConvResBlock, self).__init__()\n model = []\n model += [self.conv3x3(inplanes, planes, stride)]\n #model += [nn.InstanceNorm2d(planes)]\n model += 
[nn.ReLU(inplace=True)]\n model += [self.conv3x3(planes, planes)]\n #model += [nn.InstanceNorm2d(planes)]\n if dropout > 0:\n model += [nn.Dropout(p=dropout)]\n self.model = nn.Sequential(*model)\n #self.model.apply(gaussian_weights_init)\n\n def forward(self, x):\n residual = x\n out = self.model(x)\n out += residual\n return out\n\n\nclass MAX_SELECTResBlock(nn.Module):\n def __init__(self,num_neurons,dropout=0.0):\n super(MAX_SELECTResBlock, self).__init__()\n\n model = []\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.ReLU(inplace=True)]\n model += [nn.Linear(num_neurons,num_neurons)]\n if dropout > 0:\n model += [nn.Dropout(p=dropout)]\n self.model = nn.Sequential(*model)\n\n def forward(self,x):\n residual=x\n out=self.model(x)\n out=torch.max(out,residual)\n return out\n\nclass MAX_PARALLELResBlock(nn.Module):\n def __init__(self,num_neurons,dropout=0.0):\n super(MAX_PARALLELResBlock, self).__init__()\n\n model_1 = []\n model_1 += [nn.Linear(num_neurons,num_neurons)]\n model_1 += [nn.ReLU(inplace=True)]\n model_1 += [nn.Linear(num_neurons,num_neurons)]\n if dropout > 0:\n model_1 += [nn.Dropout(p=dropout)]\n self.model_1 = nn.Sequential(*model_1)\n\n model_2 = []\n model_2 += [nn.Linear(num_neurons,num_neurons)]\n model_2 += [nn.ReLU(inplace=True)]\n model_2 += [nn.Linear(num_neurons,num_neurons)]\n if dropout > 0:\n model_2 += [nn.Dropout(p=dropout)]\n self.model_2 = nn.Sequential(*model_2)\n\n\n def forward(self,x):\n residual=x\n out_1=self.model_1(x)\n out_2=self.model_2(x)\n out_max=torch.max(out_1,out_2)\n out = residual + out_max\n return out\n\n\nclass RELUResBlock(nn.Module):\n def __init__(self,num_neurons,dropout=0.0):\n super(RELUResBlock, self).__init__()\n\n model = []\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.ReLU(inplace=True)]\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.ReLU()]\n if dropout > 0:\n model += [nn.Dropout(p=dropout)]\n self.model = nn.Sequential(*model)\n\n def forward(self,x):\n residual=x\n out=self.model(x)\n out+=residual\n return out\n\nclass LinearRELUBlock(nn.Module):\n def __init__(self,num_neurons,dropout=0.0):\n super(LinearRELUBlock,self).__init__()\n model=[]\n model += [nn.Linear(num_neurons,num_neurons)]\n model += [nn.ReLU()]\n if dropout>0:\n model+= [nn.Dropout(p=dropout)]\n\n self.model = nn.Sequential(*model)\n\n def forward(self,x):\n out=self.model(x)\n return out\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.InstanceNorm1d", "torch.nn.Linear", "torch.nn.BatchNorm1d", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.max", "torch.nn.ReLU", "torch.nn.Dropout" ] ]
isl-mt/xnmt-isl
[ "0f25c8f7f90cd5fa39de03302219a66d640a44f0" ]
[ "xnmt/xnmt_run_experiments.py" ]
[ "#!/usr/bin/env python3\n\n\"\"\"\nReads experiments descriptions in the passed configuration file\nand runs them sequentially, logging outputs\n\"\"\"\nimport argparse\nimport logging\nimport os\nimport random\nimport sys\nimport socket\nimport datetime\nimport faulthandler\nfaulthandler.enable()\nimport traceback\n\nimport numpy as np\nfrom xnmt.settings import settings\n\nfrom xnmt import logger, file_logger\nfrom xnmt.tee import log_preamble\nfrom xnmt.param_collections import ParamManager\nfrom xnmt import tee\nfrom xnmt.persistence import YamlPreloader, save_to_file, initialize_if_needed\nfrom xnmt import utils\n\nif settings.RESOURCE_WARNINGS:\n import warnings\n warnings.simplefilter('always', ResourceWarning)\n\ndef main(overwrite_args=None):\n\n with tee.Tee(), tee.Tee(error=True):\n argparser = argparse.ArgumentParser()\n utils.add_dynet_argparse(argparser)\n argparser.add_argument(\"--settings\", type=str, default=\"standard\", help=\"settings (standard, debug, or unittest)\"\n \"must be given in '=' syntax, e.g.\"\n \" --settings=standard\")\n argparser.add_argument(\"--resume\", action='store_true', help=\"whether a saved experiment is being resumed, and\"\n \"locations of output files should be re-used.\")\n argparser.add_argument(\"experiments_file\")\n argparser.add_argument(\"experiment_name\", nargs='*', help=\"Run only the specified experiments\")\n argparser.set_defaults(generate_doc=False)\n args = argparser.parse_args(overwrite_args)\n\n if args.dynet_seed:\n random.seed(args.dynet_seed)\n np.random.seed(args.dynet_seed)\n\n if args.dynet_gpu:\n if settings.CHECK_VALIDITY:\n settings.CHECK_VALIDITY = False\n log_preamble(\"disabling CHECK_VALIDITY because it is not supported on GPU currently\", logging.WARNING)\n\n config_experiment_names = YamlPreloader.experiment_names_from_file(args.experiments_file)\n\n results = []\n\n # Check ahead of time that all experiments exist, to avoid bad surprises\n experiment_names = args.experiment_name or config_experiment_names\n\n if args.experiment_name:\n nonexistent = set(experiment_names).difference(config_experiment_names)\n if len(nonexistent) != 0:\n raise Exception(\"Experiments {} do not exist\".format(\",\".join(list(nonexistent))))\n\n log_preamble(f\"running XNMT revision {tee.get_git_revision()} on {socket.gethostname()} on {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\")\n for experiment_name in experiment_names:\n\n ParamManager.init_param_col()\n\n uninitialized_exp_args = YamlPreloader.preload_experiment_from_file(args.experiments_file, experiment_name,\n resume=args.resume)\n\n logger.info(f\"=> Running {experiment_name}\")\n\n glob_args = uninitialized_exp_args.data.exp_global\n log_file = glob_args.log_file\n\n if os.path.isfile(log_file) and not settings.OVERWRITE_LOG:\n logger.warning(f\"log file {log_file} already exists, skipping experiment; please delete log file by hand if you want to overwrite it \"\n f\"(or activate OVERWRITE_LOG, by either specifying an environment variable as OVERWRITE_LOG=1, \"\n f\"or specifying --settings=debug, or changing xnmt.settings.Standard.OVERWRITE_LOG manually)\")\n continue\n\n tee.set_out_file(log_file, exp_name=experiment_name)\n\n try:\n\n model_file = glob_args.model_file\n\n uninitialized_exp_args.data.exp_global.commandline_args = vars(args)\n\n # Create the model\n experiment = initialize_if_needed(uninitialized_exp_args)\n ParamManager.param_col.model_file = experiment.exp_global.model_file\n ParamManager.param_col.save_num_checkpoints = 
experiment.exp_global.save_num_checkpoints\n ParamManager.populate()\n\n # Run the experiment\n eval_scores = experiment(save_fct = lambda: save_to_file(model_file, experiment))\n results.append((experiment_name, eval_scores))\n print_results(results)\n\n except Exception as e:\n file_logger.error(traceback.format_exc())\n raise e\n finally:\n tee.unset_out_file()\n \ndef print_results(results):\n print(\"\")\n print(\"{:<30}|{:<40}\".format(\"Experiment\", \" Final Scores\"))\n print(\"-\" * (70 + 1))\n\n for experiment_name, eval_scores in results:\n for i in range(len(eval_scores)):\n print(\"{:<30}| {:<40}\".format((experiment_name if i==0 else \"\"), str(eval_scores[i])))\n\n\nif __name__ == '__main__':\n sys.exit(main())\n" ]
[ [ "numpy.random.seed" ] ]
rhayes777/PyAutoFit
[ "ac0a424cf97103106b292e124911de43635fc15a" ]
[ "autofit/non_linear/samples/mcmc.py" ]
[ "import math\nfrom typing import List, Optional\n\nimport numpy as np\n\nfrom autofit.mapper.model_mapper import ModelMapper\nfrom autofit.non_linear.mcmc.auto_correlations import AutoCorrelationsSettings\nfrom autofit.non_linear.samples.pdf import PDFSamples\nfrom .samples import Samples\nfrom .sample import Sample, load_from_table\n\n\nclass MCMCSamples(PDFSamples):\n\n def __init__(\n self,\n model: ModelMapper,\n sample_list: List[Sample],\n auto_correlation_settings: AutoCorrelationsSettings,\n unconverged_sample_size: int = 100,\n time: Optional[float] = None,\n ):\n\n self.auto_correlation_settings = auto_correlation_settings\n\n super().__init__(\n model=model,\n sample_list=sample_list,\n unconverged_sample_size=unconverged_sample_size,\n time=time,\n )\n\n @property\n def total_walkers(self):\n raise NotImplementedError\n\n @property\n def total_steps(self):\n raise NotImplementedError\n\n @property\n def auto_correlations(self):\n raise NotImplementedError\n\n @classmethod\n def from_table(self, filename: str, model: ModelMapper, number_live_points: int = None):\n \"\"\"\n Write a table of parameters, posteriors, priors and likelihoods\n\n Parameters\n ----------\n filename\n Where the table is to be written\n \"\"\"\n\n sample_list = load_from_table(filename=filename)\n\n return Samples(\n model=model,\n sample_list=sample_list\n )\n\n @property\n def info_json(self):\n return {\n \"times\": None,\n \"check_size\": self.auto_correlations.check_size,\n \"required_length\": self.auto_correlations.required_length,\n \"change_threshold\": self.auto_correlations.change_threshold,\n \"total_walkers\": self.total_walkers,\n \"total_steps\": self.total_steps,\n \"time\": self.time,\n }\n\n @property\n def pdf_converged(self):\n \"\"\"\n To analyse and visualize samples using corner.py, the analysis must be sufficiently converged to produce\n smooth enough PDF for analysis. This property checks whether the non-linear search's samples are sufficiently\n converged for corner.py use.\n\n Emcee samples can be analysed by corner.py irrespective of how long the sampler has run, albeit low run times\n will likely produce inaccurate results.\n \"\"\"\n try:\n samples_after_burn_in = self.samples_after_burn_in\n if len(samples_after_burn_in) == 0:\n return False\n return True\n except ValueError:\n return False\n\n @property\n def samples_after_burn_in(self) -> [List]:\n \"\"\"\n The emcee samples with the initial burn-in samples removed.\n\n The burn-in period is estimated using the auto-correlation times of the parameters.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def converged(self) -> bool:\n \"\"\"\n Whether the emcee samples have converged on a solution or if they are still in a burn-in period, based on the\n auto correlation times of parameters.\n \"\"\"\n return self.auto_correlations.check_if_converged(\n total_samples=self.total_samples\n )\n\n @property\n def median_pdf_vector(self) -> [float]:\n \"\"\"\n The median of the probability density function (PDF) of every parameter marginalized in 1D, returned\n as a list of values.\n\n This is computed by binning all sampls after burn-in into a histogram and take its median (e.g. 
50%) value.\n \"\"\"\n\n if self.pdf_converged:\n return [\n float(np.percentile(self.samples_after_burn_in[:, i], [50]))\n for i in range(self.model.prior_count)\n ]\n\n return self.max_log_likelihood_vector\n\n def vector_at_sigma(self, sigma: float) -> [float]:\n \"\"\"\n The value of every parameter marginalized in 1D at an input sigma value of its probability density function\n (PDF), returned as two lists of values corresponding to the lower and upper values parameter values.\n\n For example, if sigma is 1.0, the marginalized values of every parameter at 31.7% and 68.2% percentiles of each\n PDF is returned.\n\n This does not account for covariance between parameters. For example, if two parameters (x, y) are degenerate\n whereby x decreases as y gets larger to give the same PDF, this function will still return both at their\n upper values. Thus, caution is advised when using the function to reperform a model-fits.\n\n For Emcee, if the samples have converged this is estimated by binning the samples after burn-in into a\n histogram and taking the parameter values at the input PDF %.\n\n Parameters\n ----------\n sigma\n The sigma within which the PDF is used to estimate errors (e.g. sigma = 1.0 uses 0.6826 of the PDF).\n \"\"\"\n limit = math.erf(0.5 * sigma * math.sqrt(2))\n\n if self.pdf_converged:\n samples = self.samples_after_burn_in\n\n return [\n tuple(\n np.percentile(samples[:, i], [100.0 * (1.0 - limit), 100.0 * limit])\n )\n for i in range(self.model.prior_count)\n ]\n\n parameters_min = list(\n np.min(self.parameter_lists[-self.unconverged_sample_size:], axis=0)\n )\n parameters_max = list(\n np.max(self.parameter_lists[-self.unconverged_sample_size:], axis=0)\n )\n\n return [\n (parameters_min[index], parameters_max[index])\n for index in range(len(parameters_min))\n ]\n\n @property\n def log_evidence(self):\n return None\n" ]
[ [ "numpy.percentile", "numpy.max", "numpy.min" ] ]
tgen/bisbee
[ "d12ded4e4ee497e18a8b8e4929dac0e08de9f7f6" ]
[ "prot/utils.py" ]
[ "\nimport pyensembl\nimport Bio.SeqIO\nimport Bio.Seq\nimport pandas as pd\nimport sys\nimport re\nfrom Bio import pairwise2\n\n\ndef get_transcript_adj_exons(ensembl,gene_id,exon_coord):\n try:\n transcript_ids=ensembl.transcript_ids_of_gene_id(gene_id)\n except:\n print('Warning: ' + gene_id + ' not found')\n transcript_ids=[]\n transcript_list=[]\n for tid in transcript_ids:\n transcript=ensembl.transcript_by_id(tid)\n transcript.exon_intervals.sort()\n if exon_coord[0] in transcript.exon_intervals:\n idx=transcript.exon_intervals.index(exon_coord[0])\n if exon_coord == transcript.exon_intervals[idx:idx+len(exon_coord)]:\n transcript_list.append(transcript)\n return transcript_list\n\ndef has_coding_transcript(transcript_list):\n has_coding=False\n for transcript in transcript_list:\n if transcript.biotype=='protein_coding':\n has_coding=True\n return has_coding\n\ndef get_transcript_contain_exons(ensembl,gene_id,exon_coord):\n try:\n transcript_ids=ensembl.transcript_ids_of_gene_id(gene_id)\n except:\n print('Warning: ' + gene_id + ' not found')\n transcript_ids=[]\n transcript_list=[]\n for tid in transcript_ids:\n transcript=ensembl.transcript_by_id(tid)\n if set(exon_coord).issubset(set(transcript.exon_intervals)):\n transcript_list.append(transcript)\n return transcript_list\n\ndef find_overlap(rangeDF,coord):\n return (coord[0]<=rangeDF.loc[:,'end']) & (coord[1]>=rangeDF.loc[:,'start'])\n\ndef make_seq_from_coord(ref,contig,coordDF,strand):\n seq=''\n if not contig in ref:\n contig=\"chr\"+str(contig)\n if not contig in ref:\n print(contig + \"not found in ref\")\n return seq\n for index,row in coordDF.iterrows():\n if strand=='+':\n seq=seq+str(ref[contig].seq[int(row.start)-1:int(row.end)])\n else:\n seq=str(ref[contig].seq[int(row.start)-1:int(row.end)].reverse_complement())+seq\n return seq\n\ndef find_seq_diff(seq1,seq2):\n align_list=pairwise2.align.globalms(seq1,seq2,2,-1,-10,0)\n if len(align_list)==0:\n seq1_diff_pos=(0,len(seq1)-1)\n seq2_diff_pos=(0,len(seq2)-1)\n elif seq1==seq2:\n seq1_diff_pos=(float('nan'),float('nan'))\n seq2_diff_pos=(float('nan'),float('nan'))\n else:\n align_data=pairwise2.format_alignment(*align_list[0]).split('\\n')\n #print(align_data)\n seq_comp=pd.DataFrame({\"seq1\": list(align_data[0]), \"seq2\": list(align_data[2])})\n seq_comp=seq_comp.assign(seq1_pos= seq_comp.seq1.isin(list(Bio.Seq.Alphabet.IUPAC.IUPACProtein.letters)).cumsum())\n seq_comp=seq_comp.assign(seq2_pos= seq_comp.seq2.isin(list(Bio.Seq.Alphabet.IUPAC.IUPACProtein.letters)).cumsum())\n seq_comp=seq_comp.assign(match=seq_comp.seq1==seq_comp.seq2)\n #print(seq_comp.head())\n #print(seq_comp[seq_comp.match==False])\n first_mismatch=seq_comp.loc[seq_comp.match==False].index[0]\n if first_mismatch==0:\n seq1_diff_pos=(-1,max(seq_comp.seq1_pos[seq_comp.match==False]))\n seq2_diff_pos=(-1,max(seq_comp.seq2_pos[seq_comp.match==False]))\n else:\n seq1_diff_pos=(seq_comp.seq1_pos[first_mismatch-1],max(seq_comp.seq1_pos[seq_comp.match==False]))\n seq2_diff_pos=(seq_comp.seq2_pos[first_mismatch-1],max(seq_comp.seq2_pos[seq_comp.match==False]))\n\n return seq1_diff_pos, seq2_diff_pos\n\ndef is_coding_effect(transcript,effect_coord):\n coding_effect=dict()\n if transcript.biotype!='protein_coding' or not transcript.contains_stop_codon or not transcript.contains_start_codon:\n coding_effect['type']='NoncodingOrIncompleteTranscript'\n coding_effect['is_coding']=False\n else:\n coding_coord=pd.DataFrame(transcript.coding_sequence_position_ranges,columns=['start','end'])\n 
coding_effect['coord']=coding_coord.append(pd.Series({'start':min(transcript.stop_codon_positions),'end':max(transcript.stop_codon_positions)}),ignore_index=True).sort_values(by=\"start\")\n coding_effect['overlap']=find_overlap(coding_effect['coord'],effect_coord)\n if effect_coord[1]<coding_coord.start.min() or effect_coord[0]>coding_coord.end.max():\n coding_effect['type']='UTR'\n coding_effect['is_coding']=False\n else:\n coding_effect['is_coding']=True\n return coding_effect\n\ndef make_prot_from_coord(transcript,coord,ref):\n trans=Bio.Seq.translate(make_seq_from_coord(ref,transcript.contig,coord,transcript.strand))\n stop_count=trans.count('*')\n if trans.startswith('M'):\n start_lost=False\n else:\n start_lost=True\n if stop_count==0:\n stop_lost=True\n else:\n stop_lost=False\n\n if stop_lost and not start_lost:\n if transcript.strand=='+':\n coord.iloc[-1,1]=transcript.exon_intervals[-1][1]\n else:\n coord.iloc[0,0]=transcript.exon_intervals[0][0]\n trans=Bio.Seq.translate(make_seq_from_coord(ref,transcript.contig,coord,transcript.strand))\n stop_count=trans.count('*')\n\n if start_lost or stop_count==0:\n prot_seq=''\n else:\n prot_seq=trans.split('*')[0]+'*'\n\n if start_lost:\n effect='StartLost'\n elif stop_count==0:\n effect='StopLost'\n elif stop_lost:\n effect='PostStop'\n elif stop_count==1 and trans.endswith('*'):\n effect='InFrame'\n else:\n effect='PrematureStop'\n return prot_seq, effect\n\ndef get_event_coords(event_info,event_type):\n event_coords=pd.DataFrame(columns=(\"isoform\",\"start\",\"end\"))\n if event_type==\"IR\":\n event_coords=event_coords.append({\"isoform\":\"iso2\",\"start\":event_info[\"exon1_end\"],\"end\":event_info[\"exon2_start\"]},ignore_index=True)\n elif event_type==\"ES\":\n event_coords=event_coords.append({\"isoform\":\"iso1\",\"start\":event_info[\"exon_pre_end\"],\"end\":event_info[\"exon_aft_start\"]},ignore_index=True)\n event_coords=event_coords.append({\"isoform\":\"iso2\",\"start\":event_info[\"exon_pre_end\"],\"end\":event_info[\"exon_start\"]},ignore_index=True)\n event_coords=event_coords.append({\"isoform\":\"iso2\",\"start\":event_info[\"exon_end\"],\"end\":event_info[\"exon_aft_start\"]},ignore_index=True)\n elif event_type==\"MUT\":\n event_coords=event_coords.append({\"isoform\":\"iso1\",\"start\":event_info[\"exon_pre_end\"],\"end\":event_info[\"exon1_start\"]},ignore_index=True)\n event_coords=event_coords.append({\"isoform\":\"iso1\",\"start\":event_info[\"exon1_end\"],\"end\":event_info[\"exon_aft_start\"]},ignore_index=True)\n event_coords=event_coords.append({\"isoform\":\"iso2\",\"start\":event_info[\"exon_pre_end\"],\"end\":event_info[\"exon2_start\"]},ignore_index=True)\n event_coords=event_coords.append({\"isoform\":\"iso2\",\"start\":event_info[\"exon2_end\"],\"end\":event_info[\"exon_aft_start\"]},ignore_index=True)\n elif (event_type==\"A3\" and event_info[\"strand\"]==\"+\") or (event_type==\"A5\" and event_info[\"strand\"]==\"-\"):\n event_coords=event_coords.append({\"isoform\":\"iso2\",\"start\":event_info[\"exon_const_end\"],\"end\":event_info[\"exon_alt1_start\"]},ignore_index=True)\n event_coords=event_coords.append({\"isoform\":\"iso1\",\"start\":event_info[\"exon_const_end\"],\"end\":event_info[\"exon_alt2_start\"]},ignore_index=True)\n elif (event_type==\"A3\" and event_info[\"strand\"]==\"-\") or (event_type==\"A5\" and event_info[\"strand\"]==\"+\"):\n 
event_coords=event_coords.append({\"isoform\":\"iso2\",\"start\":event_info[\"exon_alt1_end\"],\"end\":event_info[\"exon_const_start\"]},ignore_index=True)\n event_coords=event_coords.append({\"isoform\":\"iso1\",\"start\":event_info[\"exon_alt2_end\"],\"end\":event_info[\"exon_const_start\"]},ignore_index=True)\n return event_coords\n\ndef jid_to_coords(event_jid):\n event_coords=pd.DataFrame(columns=(\"isoform\",\"start\",\"end\"))\n iso1_coords=event_jid.split(\"g.\")[1].split(\">\")[0].split('_')\n for coord in iso1_coords:\n if not coord==\"NONE\":\n event_coords=event_coords.append({\"isoform\":\"iso1\",\"start\":int(coord.split('j')[0]),\"end\":int(coord.split('j')[1])},ignore_index=True)\n iso2_coords=event_jid.split(\"g.\")[1].split(\">\")[1].split(\"[\")[0].split('_')\n for coord in iso2_coords:\n if not coord==\"NONE\":\n event_coords=event_coords.append({\"isoform\":\"iso2\",\"start\":int(coord.split('j')[0]),\"end\":int(coord.split('j')[1])},ignore_index=True)\n return event_coords\n\n\ndef find_matching_transcripts(ensembl,gene_id,event_coords):\n try:\n transcript_ids=ensembl.transcript_ids_of_gene_id(gene_id[0:15])\n except:\n print('Warning: ' + gene_id[0:15] + ' not found')\n transcript_ids=[]\n transcript_table=pd.DataFrame(columns=[\"coding\",\"matching_isoform\"],index=transcript_ids)\n for tid in transcript_ids:\n transcript=ensembl.transcript_by_id(tid)\n if transcript.biotype!='protein_coding' or not transcript.contains_stop_codon or not transcript.contains_start_codon:\n transcript_table.loc[tid,\"coding\"]=False\n else:\n transcript_table.loc[tid,\"coding\"]=True\n transcript_table.loc[tid,\"matching_isoform\"]=get_matching_isoform(transcript,event_coords)\n return transcript_table\n\ndef get_matching_isoform(transcript,event_coords):\n exons=pd.DataFrame(transcript.exon_intervals,columns=[\"start\",\"end\"]).sort_values(by=\"start\")\n event_region=[event_coords.start.min(),event_coords.end.max()]\n exons=exons[find_overlap(exons,event_region)]\n junctions=pd.DataFrame(columns=[\"start\",\"end\"])\n junctions[\"end\"]=exons.start[1:].values\n junctions[\"start\"]=exons.end[0:-1].values\n if junctions.equals(event_coords.loc[event_coords.isoform==\"iso2\",[\"start\",\"end\"]].reset_index(drop=True).astype(int)):\n matching_isoform=\"iso2\"\n elif sum(event_coords.isoform==\"iso1\")==0:\n if len(exons.start)>0:\n if event_coords.start[0]>exons.start.iloc[0] and event_coords.end[0]<exons.end.iloc[0]:\n matching_isoform=\"iso1\"\n else:\n matching_isoform=\"none\"\n else:\n matching_isoform=\"none\"\n elif junctions.equals(event_coords.loc[event_coords.isoform==\"iso1\",[\"start\",\"end\"]].reset_index(drop=True).astype(int)):\n matching_isoform=\"iso1\"\n else:\n matching_isoform=\"none\"\n return matching_isoform\n\ndef get_new_coord(matching_isoform,event_coords,old_coord):\n novel_junc=event_coords.loc[event_coords.isoform!=matching_isoform]\n overlap=find_overlap(old_coord,[event_coords.start.min(),event_coords.end.max()])\n if novel_junc.size>0 and overlap.any():\n new_coord=pd.DataFrame(columns=[\"start\",\"end\"])\n new_coord=new_coord.append({'start':old_coord[overlap].start.iloc[0],'end':novel_junc.start.iloc[0]},ignore_index=True)\n new_coord=new_coord.append(pd.DataFrame({'start':novel_junc.end.iloc[0:-1].values,'end':novel_junc.start.iloc[1:].values}),ignore_index=True)\n new_coord=new_coord.append({'start':novel_junc.end.iloc[-1],'end':old_coord[overlap].end.iloc[-1]},ignore_index=True)\n 
new_coord=new_coord.append(old_coord.loc[overlap==False],ignore_index=True).sort_values(by=\"start\")\n elif overlap.any():\n new_coord=pd.DataFrame(columns=[\"start\",\"end\"])\n new_coord=new_coord.append({'start':old_coord[overlap].start.iloc[0],'end':old_coord[overlap].end.iloc[-1]},ignore_index=True)\n new_coord=new_coord.append(old_coord.loc[overlap==False],ignore_index=True).sort_values(by=\"start\")\n else:\n new_coord=old_coord\n return new_coord\n" ]
[ [ "pandas.DataFrame" ] ]
CEMES-CNRS/pymodaq_plugins_physical_measurements
[ "1259e239a0a80e0a6937060d47c7c30eb6cb6b4d" ]
[ "src/pymodaq_plugins_physical_measurements/daq_viewer_plugins/plugins_0D/daq_0Dviewer_Keithley_Pico.py" ]
[ "from PyQt5.QtCore import pyqtSignal\nfrom easydict import EasyDict as edict\nfrom pymodaq.daq_utils.daq_utils import ThreadCommand, getLineInfo, DataFromPlugins\nfrom pymodaq.daq_viewer.utility_classes import DAQ_Viewer_base\nfrom collections import OrderedDict\nimport numpy as np\nfrom enum import IntEnum\nfrom pymodaq.daq_viewer.utility_classes import comon_parameters\n\nclass DAQ_0DViewer_Keithley_Pico_type(IntEnum):\n \"\"\"\n Enum class of Keithley_Pico_type\n\n =============== =========\n **Attributes** **Type**\n *Pico_648X* int\n *Pico_6430* int\n *Pico_6514* int\n =============== =========\n \"\"\"\n Pico_648X=0\n Pico_6430=1\n Pico_6514=2\n @classmethod\n def names(cls):\n return [name for name, member in cls.__members__.items()]\n\n\nclass DAQ_0DViewer_Keithley_Pico(DAQ_Viewer_base):\n \"\"\"\n ==================== ========================\n **Attributes** **Type**\n *data_grabed_signal* instance of pyqtSignal\n *VISA_rm* ResourceManager\n *com_ports* \n *params* dictionnary list\n *keithley*\n *settings*\n ==================== ========================\n \"\"\"\n data_grabed_signal=pyqtSignal(list)\n\n ##checking VISA ressources\n\n from pyvisa import ResourceManager\n VISA_rm=ResourceManager()\n com_ports=list(VISA_rm.list_resources())\n# import serial.tools.list_ports;\n# com_ports=[comport.device for comport in serial.tools.list_ports.comports()]\n\n params= comon_parameters+[\n {'title': 'VISA:','name': 'VISA_ressources', 'type': 'list', 'values': com_ports },\n {'title': 'Keithley Type:','name': 'keithley_type', 'type': 'list', 'values': DAQ_0DViewer_Keithley_Pico_type.names()},\n {'title': 'Id:', 'name': 'id', 'type': 'text', 'value': \"\" },\n {'title': 'Timeout (ms):', 'name': 'timeout', 'type': 'int', 'value': 10000, 'default': 10000, 'min': 2000 },\n {'title': 'Configuration:', 'name': 'config', 'type': 'group', 'children':[\n {'title': 'Meas. 
type:', 'name': 'meas_type', 'type': 'list', 'value': 'CURR', 'default': 'CURR', 'values': ['CURR','VOLT','RES','CHAR'] },\n\n\n ] },\n ]\n\n def __init__(self,parent=None,params_state=None):\n super(DAQ_0DViewer_Keithley_Pico,self).__init__(parent,params_state)\n from pyvisa import ResourceManager\n self.VISA_rm=ResourceManager()\n self.controller=None\n\n def ini_detector(self, controller=None):\n \"\"\"\n Initialisation procedure of the detector.\n\n Returns\n -------\n\n The initialized status.\n\n See Also\n --------\n daq_utils.ThreadCommand\n \"\"\"\n self.status.update(edict(initialized=False,info=\"\",x_axis=None,y_axis=None,controller=None))\n try:\n\n if self.settings.child(('controller_status')).value()==\"Slave\":\n if controller is None: \n raise Exception('no controller has been defined externally while this detector is a slave one')\n else:\n self.controller=controller\n else:\n self.controller=self.VISA_rm.open_resource(self.settings.child(('VISA_ressources')).value(), read_termination='\\r')\n\n self.controller.timeout=self.settings.child(('timeout')).value()\n\n self.controller.write(\"*rst; status:preset; *cls;\")\n txt=self.controller.query('*IDN?')\n self.settings.child(('id')).setValue(txt)\n self.controller.write('CONF:'+self.settings.child('config','meas_type').value())\n self.controller.write(':FORM:ELEM READ;DATA ASC;')\n self.controller.write('ARM:SOUR IMM;')\n self.controller.write('ARM:COUNt 1;')\n self.controller.write('TRIG:SOUR IMM;')\n #%%\n data=self.controller.query_ascii_values('READ?')\n\n self.status.initialized=True\n self.status.controller=self.controller\n return self.status\n\n except Exception as e:\n self.emit_status(ThreadCommand('Update_Status',[getLineInfo()+ str(e),'log']))\n self.status.info=getLineInfo()+ str(e)\n self.status.initialized=False\n return self.status\n\n\n def commit_settings(self, param):\n \"\"\"\n Activate the parameters changes in the hardware.\n\n =============== ================================= ============================\n **Parameters** **Type** **Description**\n *param* instance of pyqtgraph.parameter The parameter to be checked.\n =============== ================================= ============================\n\n See Also\n --------\n daq_utils.ThreadCommand\n \"\"\"\n try:\n if param.name()=='timeout':\n self.controller.timeout=self.settings.child(('timeout')).value()\n elif param.name()=='meas_type':\n self.controller.write('CONF:'+param.value())\n\n\n except Exception as e:\n self.emit_status(ThreadCommand('Update_Status',[getLineInfo()+ str(e),'log']))\n\n def close(self):\n \"\"\"\n close the current instance of Keithley viewer.\n \"\"\"\n self.controller.close()\n\n def grab_data(self, Naverage=1, **kwargs):\n \"\"\"\n | Start new acquisition.\n | grab the current values with keithley profile procedure.\n | Send the data_grabed_signal once done.\n\n =============== ======== ===============================================\n **Parameters** **Type** **Description**\n *Naverage* int Number of values to average\n =============== ======== ===============================================\n \"\"\"\n data_tot=[]\n self.controller.write('ARM:SOUR IMM;')\n self.controller.write('ARM:COUNt 1;')\n self.controller.write('TRIG:SOUR IMM;')\n self.controller.write('TRIG:COUN {:};'.format(Naverage))\n data_tot=self.controller.query_ascii_values('READ?')\n #for ind in range(Naverage):\n # data_tot.append(self.controller.query_ascii_values('READ?')[0])\n data_tot=[np.array([np.mean(np.array(data_tot))])]\n 
self.data_grabed_signal.emit([DataFromPlugins(name='Keithley',data=data_tot, dim='Data0D')])\n\n\n def stop(self):\n \"\"\"\n not implemented?\n \"\"\"\n return \"\"\n" ]
[ [ "numpy.array" ] ]
mangye16/deep-person-reid
[ "42eaccea712c3ebcc5273085a3cfea400a4783b8" ]
[ "torchreid/models/pcb.py" ]
[ "from __future__ import division, absolute_import\nimport torch.utils.model_zoo as model_zoo\nfrom torch import nn\nfrom torch.nn import functional as F\n\n__all__ = ['pcb_p6', 'pcb_p4']\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False\n )\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(\n planes,\n planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False\n )\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(\n planes, planes * self.expansion, kernel_size=1, bias=False\n )\n self.bn3 = nn.BatchNorm2d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass DimReduceLayer(nn.Module):\n\n def __init__(self, in_channels, out_channels, nonlinear):\n super(DimReduceLayer, self).__init__()\n layers = []\n layers.append(\n nn.Conv2d(\n in_channels, out_channels, 1, stride=1, padding=0, bias=False\n )\n )\n layers.append(nn.BatchNorm2d(out_channels))\n\n if nonlinear == 'relu':\n layers.append(nn.ReLU(inplace=True))\n elif nonlinear == 'leakyrelu':\n layers.append(nn.LeakyReLU(0.1))\n\n self.layers = nn.Sequential(*layers)\n\n def forward(self, x):\n return self.layers(x)\n\n\nclass PCB(nn.Module):\n\n \"\"\"Part-based Convolutional Baseline.\n\n Reference:\n Sun et al. Beyond Part Models: Person Retrieval with Refined\n Part Pooling (and A Strong Convolutional Baseline). 
ECCV 2018.\n\n Public keys:\n - ``pcb_p4``: PCB with 4-part strips.\n - ``pcb_p6``: PCB with 6-part strips.\n \"\"\"\n\n def __init__(\n self,\n num_classes,\n loss,\n block,\n layers,\n parts=6,\n reduced_dim=256,\n nonlinear='relu',\n **kwargs\n ):\n self.inplanes = 64\n super(PCB, self).__init__()\n self.loss = loss\n self.parts = parts\n self.feature_dim = 512 * block.expansion\n\n # backbone network\n self.conv1 = nn.Conv2d(\n 3, 64, kernel_size=7, stride=2, padding=3, bias=False\n )\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=1)\n\n # pcb layers\n self.parts_avgpool = nn.AdaptiveAvgPool2d((self.parts, 1))\n self.dropout = nn.Dropout(p=0.5)\n self.conv5 = DimReduceLayer(\n 512 * block.expansion, reduced_dim, nonlinear=nonlinear\n )\n self.feature_dim = reduced_dim\n self.classifier = nn.ModuleList(\n [\n nn.Linear(self.feature_dim, num_classes)\n for _ in range(self.parts)\n ]\n )\n\n self._init_params()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(\n self.inplanes,\n planes * block.expansion,\n kernel_size=1,\n stride=stride,\n bias=False\n ),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def _init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(\n m.weight, mode='fan_out', nonlinearity='relu'\n )\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm1d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n def featuremaps(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n def forward(self, x):\n f = self.featuremaps(x)\n v_g = self.parts_avgpool(f)\n\n if not self.training:\n v_g = F.normalize(v_g, p=2, dim=1)\n return v_g.view(v_g.size(0), -1)\n\n v_g = self.dropout(v_g)\n v_h = self.conv5(v_g)\n\n y = []\n for i in range(self.parts):\n v_h_i = v_h[:, :, i, :]\n v_h_i = v_h_i.view(v_h_i.size(0), -1)\n y_i = self.classifier[i](v_h_i)\n y.append(y_i)\n\n if self.loss == 'softmax':\n return y\n elif self.loss == 'triplet':\n v_g = F.normalize(v_g, p=2, dim=1)\n return y, v_g.view(v_g.size(0), -1)\n else:\n raise KeyError('Unsupported loss: {}'.format(self.loss))\n\n\ndef init_pretrained_weights(model, model_url):\n \"\"\"Initializes model with pretrained weights.\n \n Layers that don't match with pretrained layers in name or size are kept unchanged.\n \"\"\"\n pretrain_dict = model_zoo.load_url(model_url)\n model_dict = model.state_dict()\n pretrain_dict = {\n k: v\n for k, v in pretrain_dict.items()\n if k in model_dict 
and model_dict[k].size() == v.size()\n }\n model_dict.update(pretrain_dict)\n model.load_state_dict(model_dict)\n\n\ndef pcb_p6(num_classes, loss='softmax', pretrained=True, **kwargs):\n model = PCB(\n num_classes=num_classes,\n loss=loss,\n block=Bottleneck,\n layers=[3, 4, 6, 3],\n last_stride=1,\n parts=6,\n reduced_dim=256,\n nonlinear='relu',\n **kwargs\n )\n if pretrained:\n init_pretrained_weights(model, model_urls['resnet50'])\n return model\n\n\ndef pcb_p4(num_classes, loss='softmax', pretrained=True, **kwargs):\n model = PCB(\n num_classes=num_classes,\n loss=loss,\n block=Bottleneck,\n layers=[3, 4, 6, 3],\n last_stride=1,\n parts=4,\n reduced_dim=256,\n nonlinear='relu',\n **kwargs\n )\n if pretrained:\n init_pretrained_weights(model, model_urls['resnet50'])\n return model\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.nn.MaxPool2d", "torch.nn.init.kaiming_normal_", "torch.nn.Linear", "torch.nn.init.constant_", "torch.nn.AdaptiveAvgPool2d", "torch.nn.functional.normalize", "torch.nn.init.normal_", "torch.nn.Conv2d", "torch.nn.Sequential", "torch.utils.model_zoo.load_url", "torch.nn.ReLU", "torch.nn.Dropout", "torch.nn.LeakyReLU" ] ]
tianweiy/openseg.pytorch
[ "e4159e0b2db86d22149c44f220c5f2e3070a3042" ]
[ "lib/datasets/data_loader.py" ]
[ "##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n## Created by: Donny You, RainbowSecret, JingyiXie\n## Microsoft Research\n## [email protected]\n## Copyright (c) 2019\n##\n## This source code is licensed under the MIT-style license found in the\n## LICENSE file in the root directory of this source tree \n##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nfrom torch.utils import data\n\nimport lib.datasets.tools.transforms as trans\nimport lib.datasets.tools.cv2_aug_transforms as cv2_aug_trans\nimport lib.datasets.tools.pil_aug_transforms as pil_aug_trans\nfrom lib.datasets.loader.default_loader import DefaultLoader, CSDataTestLoader\nfrom lib.datasets.loader.ade20k_loader import ADE20KLoader\nfrom lib.datasets.loader.lip_loader import LipLoader\nfrom lib.datasets.tools.collate import collate\nfrom lib.utils.tools.logger import Logger as Log\n\nfrom lib.utils.distributed import get_world_size, get_rank, is_distributed\n\nimport pdb\n\n\nclass DataLoader(object):\n\n def __init__(self, configer):\n self.configer = configer\n\n if self.configer.get('data', 'image_tool') == 'pil':\n self.aug_train_transform = pil_aug_trans.PILAugCompose(self.configer, split='train')\n self.aug_val_transform = pil_aug_trans.PILAugCompose(self.configer, split='val')\n elif self.configer.get('data', 'image_tool') == 'cv2':\n self.aug_train_transform = cv2_aug_trans.CV2AugCompose(self.configer, split='train')\n self.aug_val_transform = cv2_aug_trans.CV2AugCompose(self.configer, split='val')\n else:\n Log.error('Not support {} image tool.'.format(self.configer.get('data', 'image_tool')))\n exit(1)\n\n self.img_transform = trans.Compose([\n trans.ToTensor(),\n trans.Normalize(div_value=self.configer.get('normalize', 'div_value'),\n mean=self.configer.get('normalize', 'mean'),\n std=self.configer.get('normalize', 'std')), ])\n\n self.label_transform = trans.Compose([\n trans.ToLabel(),\n trans.ReLabel(255, -1), ])\n\n\n def get_trainloader(self):\n if self.configer.exists('data', 'use_edge') and self.configer.get('data', 'use_edge') == 'ce2p':\n \"\"\"\n ce2p manner:\n load both the ground-truth label and edge.\n \"\"\"\n Log.info('use edge (follow ce2p) for train...')\n trainloader = data.DataLoader(\n LipLoader(root_dir=self.configer.get('data', 'data_dir'), dataset='train',\n aug_transform=self.aug_train_transform,\n img_transform=self.img_transform,\n label_transform=self.label_transform,\n configer=self.configer),\n batch_size=self.configer.get('train', 'batch_size'), pin_memory=True,\n num_workers=self.configer.get('data', 'workers'),\n shuffle=True, drop_last=self.configer.get('data', 'drop_last'),\n collate_fn=lambda *args: collate(\n *args, trans_dict=self.configer.get('train', 'data_transformer')\n )\n )\n return trainloader\n\n elif self.configer.exists('train', 'loader') and \\\n (self.configer.get('train', 'loader') == 'ade20k' \n or self.configer.get('train', 'loader') == 'pascal_context'\n or self.configer.get('train', 'loader') == 'pascal_voc'\n or self.configer.get('train', 'loader') == 'coco_stuff'):\n \"\"\"\n ADE20KLoader manner:\n support input images of different shapes.\n \"\"\"\n Log.info('use ADE20KLoader (diverse input shape) for train...')\n trainloader = data.DataLoader(\n ADE20KLoader(root_dir=self.configer.get('data', 'data_dir'), dataset='train',\n aug_transform=self.aug_train_transform,\n 
img_transform=self.img_transform,\n label_transform=self.label_transform,\n configer=self.configer),\n batch_size=self.configer.get('train', 'batch_size'), pin_memory=True,\n num_workers=self.configer.get('data', 'workers'),\n shuffle=True, drop_last=self.configer.get('data', 'drop_last'),\n collate_fn=lambda *args: collate(\n *args, trans_dict=self.configer.get('train', 'data_transformer')\n )\n )\n return trainloader\n\n else:\n \"\"\"\n Default manner:\n support input images of the same shapes.\n \"\"\"\n dataset = DefaultLoader(\n root_dir=self.configer.get('data', 'data_dir'), dataset='train',\n aug_transform=self.aug_train_transform,\n img_transform=self.img_transform,\n label_transform=self.label_transform,\n configer=self.configer\n )\n if is_distributed():\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) \n else:\n sampler = None\n Log.info('use the DefaultLoader for train...')\n trainloader = data.DataLoader(\n dataset,\n batch_size=self.configer.get('train', 'batch_size') // get_world_size(), pin_memory=True,\n num_workers=self.configer.get('data', 'workers') // get_world_size(),\n sampler=sampler,\n shuffle=(sampler is None),\n drop_last=self.configer.get('data', 'drop_last'),\n collate_fn=lambda *args: collate(\n *args, trans_dict=self.configer.get('train', 'data_transformer')\n )\n )\n return trainloader\n \n\n def get_valloader(self, dataset=None):\n dataset = 'val' if dataset is None else dataset\n\n if self.configer.get('method') == 'fcn_segmentor':\n \"\"\"\n default manner:\n load the ground-truth label.\n \"\"\" \n Log.info('use DefaultLoader for val ...')\n valloader = data.DataLoader(\n DefaultLoader(root_dir=self.configer.get('data', 'data_dir'), dataset=dataset,\n aug_transform=self.aug_val_transform,\n img_transform=self.img_transform,\n label_transform=self.label_transform,\n configer=self.configer),\n batch_size=self.configer.get('val', 'batch_size'), pin_memory=True,\n num_workers=self.configer.get('data', 'workers'), shuffle=False,\n collate_fn=lambda *args: collate(\n *args, trans_dict=self.configer.get('val', 'data_transformer')\n )\n )\n return valloader\n\n else:\n Log.error('Method: {} loader is invalid.'.format(self.configer.get('method')))\n return None\n\n\n def get_testloader(self, dataset=None):\n dataset = 'test' if dataset is None else dataset\n\n if self.configer.get('method') == 'fcn_segmentor':\n Log.info('use CSDataTestLoader for test ...')\n testloader = data.DataLoader(\n CSDataTestLoader(root_dir=self.configer.get('data', 'data_dir'), dataset=dataset,\n img_transform=self.img_transform,\n configer=self.configer),\n batch_size=self.configer.get('test', 'batch_size'), pin_memory=True,\n num_workers=self.configer.get('data', 'workers'), shuffle=False,\n collate_fn=lambda *args: collate(\n *args, trans_dict=self.configer.get('test', 'data_transformer')\n )\n )\n return testloader\n\n\nif __name__ == \"__main__\":\n pass\n" ]
[ [ "torch.utils.data.distributed.DistributedSampler" ] ]
JJHibbard/ares
[ "4b185747f2182524d732ef8316bff3a709bd85f2" ]
[ "ares/sources/Source.py" ]
[ "\"\"\"\n\nSource.py\n\nAuthor: Jordan Mirocha\nAffiliation: University of Colorado at Boulder\nCreated on: Sun Jul 22 16:28:08 2012\n\nDescription: Initialize a radiation source.\n\n\"\"\"\nfrom __future__ import print_function\nimport re, os\nimport numpy as np\nfrom scipy.integrate import quad\nfrom ..util import ParameterFile\nfrom ..physics.Hydrogen import Hydrogen\nfrom ..physics.Cosmology import Cosmology\nfrom ..util.ParameterFile import ParameterFile\nfrom ..static.IntegralTables import IntegralTable\nfrom ..static.InterpolationTables import LookupTable\nfrom ..physics.Constants import erg_per_ev, E_LL, s_per_myr\nfrom ..util.SetDefaultParameterValues import SourceParameters, \\\n CosmologyParameters\nfrom ..physics.CrossSections import PhotoIonizationCrossSection as sigma_E\n\ntry:\n import h5py\nexcept ImportError:\n pass\n\nnp.seterr(all='ignore') # exp overflow occurs when integrating BB\n # will return 0 as it should for x large\n\nclass Source(object):\n def __init__(self, grid=None, cosm=None, logN=None, init_tabs=True,\n **kwargs):\n \"\"\"\n Initialize a radiation source object.\n\n ..note:: This is inherited by all other ares.sources classes.\n\n Parameters\n ----------\n grid: rt1d.static.Grid.Grid instance\n logN: column densities over which to tabulate integral quantities\n\n \"\"\"\n\n self.pf = ParameterFile(**kwargs)\n self._cosm_ = cosm\n\n # Create lookup tables for integral quantities\n if init_tabs and (grid is not None):\n self._create_integral_table(logN=logN)\n\n @property\n def Emin(self):\n return self.pf['source_Emin']\n @property\n def Emax(self):\n return self.pf['source_Emax']\n\n @property\n def EminNorm(self):\n if not hasattr(self, '_EminNorm'):\n if self.pf['source_EminNorm'] == None:\n self._EminNorm = self.pf['source_Emin']\n else:\n self._EminNorm = self.pf['source_EminNorm']\n\n return self._EminNorm\n\n @property\n def EmaxNorm(self):\n if not hasattr(self, '_EmaxNorm'):\n if self.pf['source_EmaxNorm'] == None:\n self._EmaxNorm = self.pf['source_Emax']\n else:\n self._EmaxNorm = self.pf['source_EmaxNorm']\n\n return self._EmaxNorm\n\n @property\n def info(self):\n \"\"\"\n Print info like Nlw etc in various units!\n \"\"\"\n pass\n\n @property\n def is_delta(self):\n return self.pf['source_sed'] == 'delta'\n\n def SourceOn(self, t):\n if t < self.tau:\n return True\n else:\n return False\n\n @property\n def tau(self):\n if not hasattr(self, '_tau'):\n self._tau = self.pf['source_lifetime'] * s_per_myr\n return self._tau\n\n @property\n def cosm(self):\n if not hasattr(self, '_cosm'):\n if self._cosm_ is not None:\n self._cosm = self._cosm_\n elif self.grid is not None:\n self._cosm = self.grid.cosm\n else:\n self._cosm = Cosmology(pf=self.pf, **self.pf)\n\n return self._cosm\n\n @property\n def multi_freq(self):\n if not hasattr(self, '_multi_freq'):\n self._multi_freq = self.discrete and not self.pf['source_multigroup']\n\n return self._multi_freq\n\n @property\n def multi_group(self):\n if not hasattr(self, '_multi_group'):\n self._multi_group = self.discrete and self.pf['source_multigroup']\n\n return self._multi_group\n\n @property\n def ionizing(self):\n # See if source emits ionizing photons\n # Should also be function of absorbers\n if not hasattr(self, '_ionizing'):\n self._ionizing = self.pf['source_Emax'] > E_LL\n\n return self._ionizing\n\n @property\n def grid(self):\n if not hasattr(self, '_grid'):\n self._grid = None\n\n return self._grid\n\n @grid.setter\n def grid(self, value):\n self._grid = value\n\n @property\n def 
discrete(self):\n if not hasattr(self, '_discrete'):\n self._discrete = (self.pf['source_E'] != None) or \\\n (self.pf['source_sed'] in ['eldridge2009', 'eldridge2017', \n 'leitherer1999'])\n\n return self._discrete\n\n @property\n def continuous(self):\n if not hasattr(self, '_continuous'):\n self._continuous = not self.discrete\n\n return self._continuous\n\n @property\n def hydr(self):\n if not hasattr(self, '_hydr'):\n self._hydr = None\n\n return self._hydr\n\n @hydr.setter\n def hydr(self, value):\n self._hydr = value\n\n @property\n def frec(self):\n \"\"\"\n Compute average recycling fraction (i.e., spectrum-weighted frec).\n \"\"\"\n\n if self.hydr is None:\n return None\n\n n = np.arange(2, self.hydr.nmax)\n En = np.array(list(map(self.hydr.ELyn, n)))\n In = np.array(list(map(self.Spectrum, En))) / En\n fr = np.array(list(map(self.hydr.frec, n)))\n\n return np.sum(fr * In) / np.sum(In)\n\n @property\n def intrinsic_hardening(self):\n if not hasattr(self, '_intrinsic_hardening'):\n if 'source_hardening' in self.pf:\n self._intrinsic_hardening = \\\n self.pf['source_hardening'] == 'intrinsic'\n else:\n self._intrinsic_hardening = False\n\n return self._intrinsic_hardening\n\n def _hardening_factor(self, E):\n return np.exp(-10.**self.logN \\\n * (sigma_E(E, 0) + self.cosm.y * sigma_E(E, 1)))\n\n @property\n def logN(self):\n if not hasattr(self, '_logN'):\n if 'source_logN' in self.pf:\n self._logN = self.pf['source_logN']\n else:\n self._logN = -np.inf\n\n return self._logN\n\n @property\n def sharp_points(self):\n if not hasattr(self, '_sharp_points'):\n if self.pf['source_sed_sharp_at'] is not None:\n self._sharp_points = [self.pf['source_sed_sharp_at']]\n else:\n self._sharp_points = None\n\n return self._sharp_points\n\n @property\n def _normL(self):\n if not hasattr(self, '_normL_'):\n if self.is_delta:\n self._normL_ = 1. #/ self.pf['source_Emax']#/ self._Intensity(self.pf['source_Emax'])\n elif self.pf['source_Enorm'] is not None:\n En = self.pf['source_Enorm']\n\n if self.intrinsic_hardening:\n self._normL_ = 1. / self._Intensity(En),\n else:\n self._normL_ = 1. / (self._Intensity(En) / self._hardening_factor(En))\n else:\n if self.intrinsic_hardening:\n self._normL_ = 1. / quad(self._Intensity,\n self.pf['source_EminNorm'],\n self.pf['source_EmaxNorm'], points=self.sharp_points)[0]\n else:\n integrand = lambda EE: self._Intensity(EE) / self._hardening_factor(EE)\n self._normL_ = 1. / quad(integrand,\n self.pf['source_EminNorm'],\n self.pf['source_EmaxNorm'], points=self.sharp_points)[0]\n\n return self._normL_\n\n #def _load_spectrum(self):\n # \"\"\" Modify a few parameters if spectrum_file provided. 
\"\"\"\n #\n # fn = self.pf['spectrum_file']\n #\n # if fn is None:\n # return\n #\n # # Read spectrum - expect hdf5 with (at least) E, LE, and t datasets.\n # if re.search('.hdf5', fn):\n # f = h5py.File(fn)\n # try:\n # self.pf['tables_times'] = f['t'].value\n # except:\n # self.pf['tables_times'] = None\n # self.pf['spectrum_evolving'] = False\n #\n # self.pf['spectrum_E'] = f['E'].value\n # self.pf['spectrum_LE'] = f['LE'].value\n # f.close()\n #\n # if len(self.pf['spectrum_LE'].shape) > 1 \\\n # and not self.pf['spectrum_evolving']:\n # self.pf['spectrum_LE'] = self.pf['spectrum_LE'][0]\n # else:\n # spec = readtab(fn)\n # if len(spec) == 2:\n # self.pf['spectrum_E'], self.pf['spectrum_LE'] = spec\n # else:\n # self.pf['spectrum_E'], self.pf['spectrum_LE'], \\\n # self.pf['spectrum_t'] = spec\n\n @property\n def tables(self):\n if not hasattr(self, '_tables'):\n self._create_integral_table()\n return self._tables\n\n @property\n def tab(self):\n if not hasattr(self, '_tab'):\n self._create_integral_table()\n return self._tab\n\n @property\n def tabs(self):\n if not hasattr(self, '_tabs'):\n self._create_integral_table()\n return self._tabs\n\n def _create_integral_table(self, logN=None):\n \"\"\"\n Take tables and create interpolation functions.\n \"\"\"\n\n if self.discrete:\n return\n\n if self._name == 'diffuse':\n return\n\n if self.pf['source_table'] is None:\n # Overide defaults if supplied - this is dangerous\n if logN is not None:\n self.pf.update({'tables_dlogN': [np.diff(tmp) for tmp in logN]})\n self.pf.update({'tables_logNmin': [np.min(tmp) for tmp in logN]})\n self.pf.update({'tables_logNmax': [np.max(tmp) for tmp in logN]})\n\n # Tabulate away!\n self._tab = IntegralTable(self.pf, self, self.grid, logN)\n self._tabs = self.tab.TabulateRateIntegrals()\n else:\n self._tab = IntegralTable(self.pf, self, self.grid, logN)\n self._tabs = self.tab.load(self.pf['source_table'])\n\n self._setup_interp()\n\n def _setup_interp(self):\n self._tables = {}\n for tab in self.tabs:\n self._tables[tab] = \\\n LookupTable(self.pf, tab, self.tab.logN, self.tabs[tab],\n self.tab.logx, self.tab.t)\n\n @property\n def sigma(self):\n \"\"\"\n Compute bound-free absorption cross-section for all frequencies.\n \"\"\"\n if not self.discrete:\n return None\n if not hasattr(self, '_sigma_all'):\n self._sigma_all = np.array(list(map(sigma_E, self.E)))\n\n return self._sigma_all\n\n def Qdot(self, t=None):\n \"\"\"\n Returns number of photons emitted (s^-1) at all frequencies.\n \"\"\"\n #if not hasattr(self, '_Qdot_all'):\n self._Qdot_all = self.Lbol(t) * self.LE / self.E / erg_per_ev\n\n return self._Qdot_all\n\n def hnu_bar(self, t=0):\n \"\"\"\n Average ionizing (per absorber) photon energy in eV.\n \"\"\"\n if not hasattr(self, '_hnu_bar_all'):\n self._hnu_bar_all = {}\n if not hasattr(self, '_qdot_bar_all'):\n self._qdot_bar_all = {}\n\n if t in self._hnu_bar_all:\n return self._hnu_bar_all[t]\n\n self._hnu_bar_all[t] = np.zeros_like(self.grid.zeros_absorbers)\n self._qdot_bar_all[t] = np.zeros_like(self.grid.zeros_absorbers)\n for i, absorber in enumerate(self.grid.absorbers):\n self._hnu_bar_all[t][i], self._qdot_bar_all[t][i] = \\\n self._FrequencyAveragedBin(absorber=absorber, t=t)\n\n return self._hnu_bar_all\n\n def AveragePhotonEnergy(self, Emin, Emax):\n \"\"\"\n Return average photon energy in supplied band.\n \"\"\"\n\n integrand = lambda EE: self.Spectrum(EE) * EE\n norm = lambda EE: self.Spectrum(EE)\n\n return quad(integrand, Emin, Emax, points=self.sharp_points)[0] \\\n / 
quad(norm, Emin, Emax, points=self.sharp_points)[0]\n\n @property\n def qdot_bar(self):\n \"\"\"\n Average ionizing photon luminosity (per absorber) in s^-1.\n \"\"\"\n if not hasattr(self, '_qdot_bar_all'):\n hnu_bar = self.hnu_bar\n\n return self._qdot_bar_all\n\n def erg_per_phot(self, Emin, Emax):\n return self.eV_per_phot(Emin, Emax) * erg_per_ev\n\n def eV_per_phot(self, Emin, Emax):\n \"\"\"\n Compute the average energy per photon (in eV) in some band.\n \"\"\"\n\n i1 = lambda E: self.Spectrum(E)\n i2 = lambda E: self.Spectrum(E) / E\n\n # Must convert units\n final = quad(i1, Emin, Emax, points=self.sharp_points)[0] \\\n / quad(i2, Emin, Emax, points=self.sharp_points)[0]\n\n return final\n\n @property\n def sigma_bar(self):\n \"\"\"\n Frequency averaged cross section (single bandpass).\n \"\"\"\n if not hasattr(self, '_sigma_bar_all'):\n self._sigma_bar_all = np.zeros_like(self.grid.zeros_absorbers)\n for i, absorber in enumerate(self.grid.absorbers):\n integrand = lambda x: self.Spectrum(x) \\\n * self.grid.bf_cross_sections[absorber](x) / x\n\n self._sigma_bar_all[i] = self.Lbol \\\n * quad(integrand, self.grid.ioniz_thresholds[absorber],\n self.Emax, points=self.sharp_points)[0] / self.qdot_bar[i] / erg_per_ev\n\n return self._sigma_bar_all\n\n @property\n def sigma_tilde(self):\n if not hasattr(self, '_sigma_tilde_all'):\n self._sigma_tilde_all = np.zeros_like(self.grid.zeros_absorbers)\n for i, absorber in enumerate(self.grid.absorbers):\n integrand = lambda x: self.Spectrum(x) \\\n * self.grid.bf_cross_sections[absorber](x)\n self._sigma_tilde_all[i] = quad(integrand,\n self.grid.ioniz_thresholds[absorber], self.Emax,\n points=self.sharp_points)[0] \\\n / self.fLbol_ionizing[i]\n\n return self._sigma_tilde_all\n\n @property\n def fLbol_ionizing(self, absorber=0):\n \"\"\"\n Fraction of bolometric luminosity emitted above all ionization\n thresholds.\n \"\"\"\n if not hasattr(self, '_fLbol_ioniz_all'):\n self._fLbol_ioniz_all = np.zeros_like(self.grid.zeros_absorbers)\n for i, absorber in enumerate(self.grid.absorbers):\n self._fLbol_ioniz_all[i] = quad(self.Spectrum,\n self.grid.ioniz_thresholds[absorber], self.Emax,\n points=self.sharp_points)[0]\n\n return self._fLbol_ioniz_all\n\n @property\n def Gamma_bar(self):\n \"\"\"\n Return ionization rate (as a function of radius) assuming optical\n depth to cells and of cells is small.\n \"\"\"\n if not hasattr(self, '_Gamma_bar_all'):\n self._Gamma_bar_all = \\\n np.zeros([self.grid.dims, self.grid.N_absorbers])\n for i, absorber in enumerate(self.grid.absorbers):\n self._Gamma_bar_all[..., i] = self.Lbol * self.sigma_bar[i] \\\n * self.fLbol_ionizing[i] / 4. 
/ np.pi / self.grid.r_mid**2 \\\n / self.hnu_bar[i] / erg_per_ev\n\n return self._Gamma_bar_all\n\n @property\n def gamma_bar(self):\n \"\"\"\n Return ionization rate (as a function of radius) assuming optical\n depth to cells and of cells is small.\n \"\"\"\n if not hasattr(self, '_gamma_bar_all'):\n self._gamma_bar_all = \\\n np.zeros([self.grid.dims, self.grid.N_absorbers,\n self.grid.N_absorbers])\n\n if not self.pf['secondary_ionization']:\n return self._gamma_bar_all\n\n for i, absorber in enumerate(self.grid.absorbers):\n for j, otherabsorber in enumerate(self.grid.absorbers):\n self._gamma_bar_all[..., i, j] = self.Gamma_bar[j] \\\n * (self.hnu_bar[j] * self.sigma_tilde[j] \\\n / self.hnu_bar[i] / self.sigma_bar[j] \\\n - self.grid.ioniz_thresholds[otherabsorber] \\\n / self.grid.ioniz_thresholds[absorber])\n\n return self._gamma_bar_all\n\n @property\n def Heat_bar(self):\n \"\"\"\n Return ionization rate (as a function of radius) assuming optical\n depth to cells and of cells is small.\n \"\"\"\n if not hasattr(self, '_Heat_bar_all'):\n self._Heat_bar_all = \\\n np.zeros([self.grid.dims, self.grid.N_absorbers])\n for i, absorber in enumerate(self.grid.absorbers):\n self._Heat_bar_all[..., i] = self.Gamma_bar[..., i] \\\n * erg_per_ev * (self.hnu_bar[i] * self.sigma_tilde[i] \\\n / self.sigma_bar[i] - self.grid.ioniz_thresholds[absorber])\n\n return self._Heat_bar_all\n\n def IonizingPhotonLuminosity(self, t=0, bin=None):\n \"\"\"\n Return Qdot (photons / s) for this source at energy E.\n \"\"\"\n\n if self.pf['source_type'] in [0, 1, 2]:\n return self.Qdot[bin]\n else:\n # Currently only BHs have a time-varying bolometric luminosity\n return self.BolometricLuminosity(t) * self.LE[bin] / self.E[bin] / erg_per_ev\n\n #def _Intensity(self, E, i, Type, t=0, absorb=True):\n # \"\"\"\n # Return quantity *proportional* to fraction of bolometric luminosity emitted\n # at photon energy E. Normalization handled separately.\n # \"\"\"\n #\n # Lnu = self.src._Intensity(E, i, Type, t=t)\n #\n # # Apply absorbing column\n # if self.SpectrumPars['logN'][i] > 0 and absorb:\n # return Lnu * np.exp(-10.**self.SpectrumPars['logN'][i] \\\n # * (sigma_E(E, 0) + y * sigma_E(E, 1)))\n # else:\n # return Lnu\n #\n def Spectrum(self, E, t=0.0):\n r\"\"\"\n Return fraction of bolometric luminosity emitted at energy E.\n\n Elsewhere denoted as :math:`I_{\\nu}`, normalized such that\n :math:`\\int I_{\\nu} d\\nu = 1`\n\n Parameters\n ----------\n E: float\n Emission energy in eV\n t: float\n Time in seconds since source turned on.\n i: int\n Index of component to include. If None, includes contribution\n from all components.\n\n Returns\n -------\n Fraction of bolometric luminosity emitted at E in units of\n eV\\ :sup:`-1`\\.\n\n \"\"\"\n\n if self.pf['source_Ekill'] is not None:\n if self.pf['source_Ekill'][0] <= E <= self.pf['source_Ekill'][1]:\n return 0.0\n\n return self._normL * self._Intensity(E, t=t)\n\n def BolometricLuminosity(self, t=0.0, M=None):\n \"\"\"\n Returns the bolometric luminosity of a source in units of erg/s.\n For accreting black holes, the bolometric luminosity will increase\n with time, hence the optional 't' and 'M' arguments.\n \"\"\"\n\n if self._name == 'bh':\n return self.Luminosity(t, M)\n else:\n return self.Luminosity(t)\n\n def _FrequencyAveragedBin(self, absorber='h_1', Emin=None, Emax=None,\n energy_weighted=False, t=0):\n \"\"\"\n Bolometric luminosity / number of ionizing photons in spectrum in bandpass\n spanning interval (Emin, Emax). 
Returns mean photon energy and number of\n ionizing photons in band.\n \"\"\"\n\n if Emin is None:\n Emin = max(self.grid.ioniz_thresholds[absorber], self.Emin)\n if Emax is None:\n Emax = self.Emax\n\n if energy_weighted:\n f = lambda x: x\n else:\n f = lambda x: 1.0\n\n L = self.Lbol * quad(lambda x: self.Spectrum(x) * f(x), Emin, Emax,\n points=self.sharp_points)[0]\n Q = self.Lbol * quad(lambda x: self.Spectrum(x) * f(x) / x, Emin,\n Emax, points=self.sharp_points)[0] / erg_per_ev\n\n return L / Q / erg_per_ev, Q\n\n def dump(self, fn, E, clobber=False):\n \"\"\"\n Write SED out to file.\n\n Parameters\n ----------\n fn : str\n Filename, suffix determines type. If 'hdf5' or 'h5' will write\n to HDF5 file, otherwise, to ASCII.\n E : np.ndarray\n Array of photon energies at which to sample SED. Units = eV.\n\n \"\"\"\n\n if os.path.exists(fn) and (clobber == False):\n raise OSError('{!s} exists!'.format(fn))\n\n if re.search('.hdf5', fn) or re.search('.h5', fn):\n out = 'hdf5'\n else:\n out = 'ascii'\n\n LE = list(map(self.Spectrum, E))\n\n if out == 'hdf5':\n f = h5py.File(fn, 'w')\n f.create_dataset('E', data=E)\n f.create_dataset('LE', data=LE)\n f.close()\n else:\n f = open(fn, 'w')\n print(\"# E LE\", file=f)\n for i, nrg in enumerate(E):\n print(\"{0:.8e} {1:.8e}\".format(nrg, LE[i]), file=f)\n f.close()\n\n print(\"Wrote {!s}.\".format(fn))\n\n def sed_name(self, i=0):\n \"\"\"\n Return name of output file based on SED properties.\n \"\"\"\n\n name = ('{0!s}_logM_{1:.2g}_Gamma_{2:.3g}_fsc_{3:.3g}_' +\\\n 'logE_{4:.2g}-{5:.2g}').format(self.SpectrumPars['type'][i],\\\n np.log10(self.src.M0), self.src.spec_pars['alpha'][i],\n self.src.spec_pars['fsc'][i], np.log10(self.Emin), np.log10(self.Emax))\n\n return name\n" ]
[ [ "numpy.zeros_like", "numpy.sum", "scipy.integrate.quad", "numpy.zeros", "numpy.diff", "numpy.seterr", "numpy.arange", "numpy.max", "numpy.log10", "numpy.min" ] ]
xxxxHolic/scikit-beam
[ "d1b4e746ff3daba8d0e8d5163259462fc9298b9a" ]
[ "skbeam/core/accumulators/binned_statistic.py" ]
[ "\"\"\"\nCopyright 2001, 2002 Enthought, Inc.\nAll rights reserved.\n\nCopyright 2003-2013 SciPy Developers.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions\nare met:\n\n- Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n\n- Redistributions in binary form must reproduce the above\n copyright notice, this list of conditions and the following\n disclaimer in the documentation and/or other materials provided\n with the distribution.\n\n- Neither the name of Enthought nor the names of the SciPy Developers\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR\nCONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\nEXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\nPROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\nOF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport warnings\n\nimport numpy as np\nfrom ..utils import radial_grid, angle_grid, bin_edges_to_centers\n\n\nclass BinnedStatisticDD(object):\n std_ = ('mean', 'median', 'count', 'sum', 'std')\n\n def __init__(self, sample, statistic='mean',\n bins=10, range=None, mask=None):\n \"\"\"\n Compute a multidimensional binned statistic for a set of data.\n\n This is a generalization of a histogramdd function. A\n histogram divides the space into bins, and returns the count\n of the number of points in each bin. This function allows the\n computation of the sum, mean, median, or other statistic of\n the values within each bin.\n\n Parameters\n ----------\n sample : array_like\n Data to histogram passed as a sequence of D arrays of length N, or\n as an (N,D) array.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean'). To compute multiple\n statistics efficiently, override this at __call__ time.\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. 
Empty bins will be\n represented by function([]), or NaN if this returns an error.\n bins : sequence or int, optional\n The bin specification:\n\n * A sequence of arrays describing the bin edges along each\n dimension.\n * The number of bins for each dimension (nx, ny, ... =bins)\n * The number of bins for all dimensions (nx=ny=...=bins).\n range : sequence, optional\n A sequence of lower and upper bin edges to be used if the\n edges are not given explicitely in `bins`. Defaults to the\n minimum and maximum values along each dimension.\n mask : array_like\n array of ones and zeros with total size N (see documentation\n for `sample`). Values with mask==0 will be ignored.\n\n Note: If using numpy versions < 1.10.0, you may notice slow behavior of\n this constructor. This has to do with digitize, which was optimized\n from 1.10.0 onwards.\n \"\"\"\n\n # This code is based on np.histogramdd\n try:\n # Sample is an ND-array.\n N, self.D = sample.shape\n except (AttributeError, ValueError):\n # Sample is a sequence of 1D arrays.\n sample = np.atleast_2d(sample).T\n N, self.D = sample.shape\n\n self.nbin = np.empty(self.D, int)\n self.edges = self.D * [None]\n self._centers = self.D * [None]\n dedges = self.D * [None]\n\n try:\n M = len(bins)\n if M != self.D:\n raise AttributeError('The dimension of bins must be equal '\n 'to the dimension of the sample x.')\n except TypeError:\n bins = self.D * [bins]\n\n # Select range for each dimension\n # Used only if number of bins is given.\n if range is None:\n smin = np.atleast_1d(np.array(sample.min(0), float))\n smax = np.atleast_1d(np.array(sample.max(0), float))\n else:\n smin = np.zeros(self.D)\n smax = np.zeros(self.D)\n for i in np.arange(self.D):\n smin[i], smax[i] = range[i]\n\n # Make sure the bins have a finite width.\n for i in np.arange(len(smin)):\n if smin[i] == smax[i]:\n smin[i] = smin[i] - .5\n smax[i] = smax[i] + .5\n\n # Create edge arrays\n for i in np.arange(self.D):\n if np.isscalar(bins[i]):\n self.nbin[i] = bins[i] + 2 # +2 for outlier bins\n self.edges[i] = np.linspace(smin[i], smax[i], self.nbin[i] - 1)\n else:\n self.edges[i] = np.asarray(bins[i], float)\n self.nbin[i] = len(self.edges[i]) + 1 # +1 for outlier bins\n self._centers[i] = bin_edges_to_centers(self.edges[i])\n dedges[i] = np.diff(self.edges[i])\n\n self.nbin = np.asarray(self.nbin)\n\n # Compute the bin number each sample falls into.\n Ncount = {}\n for i in np.arange(self.D):\n # Apply mask in a non-ideal way by setting value outside range.\n # Would be better to do this using bincount \"weights\", perhaps.\n thissample = sample[:, i]\n if mask is not None:\n thissample[mask == 0] = (self.edges[i][0] -\n 0.01 * (\n 1 + np.fabs(self.edges[i][0])))\n Ncount[i] = np.digitize(thissample, self.edges[i])\n\n # Using digitize, values that fall on an edge are put in the\n # right bin. 
For the rightmost bin, we want values equal to\n # the right edge to be counted in the last bin, and not as an\n # outlier.\n\n for i in np.arange(self.D):\n # Rounding precision\n decimal = int(-np.log10(dedges[i].min())) + 6\n # Find which points are on the rightmost edge.\n on_edge = np.where(np.around(sample[:, i], decimal) ==\n np.around(self.edges[i][-1], decimal))[0]\n # Shift these points one bin to the left.\n Ncount[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened statistic matrix.\n self.ni = self.nbin.argsort()\n self.xy = np.zeros(N, int)\n for i in np.arange(0, self.D - 1):\n self.xy += Ncount[self.ni[i]] * self.nbin[self.ni[i + 1:]].prod()\n self.xy += Ncount[self.ni[-1]]\n self._flatcount = None # will be computed if needed\n self._argsort_index = None\n self.statistic = statistic\n\n @property\n def binmap(self):\n ''' Return the map of the bins per dimension.\n i.e. reverse transformation of flattened to unflattened bins\n\n Returns\n -------\n D np.ndarrays of length N where D is the number of dimensions\n and N is the number of data points.\n For each dimension, the min bin id is 0 and max n+1 where n is\n the number of bins in that dimension. The ids 0 and n+1 mark\n the outliers of the bins.\n '''\n N, = self.xy.shape\n binmap = np.zeros((self.D, N), dtype=int)\n denominator = 1\n\n for i in range(self.D):\n ind = self.D - i - 1\n subbinmap = (self.xy // denominator)\n if i < self.D - 1:\n subbinmap = subbinmap % self.nbin[self.ni[ind - 1]]\n binmap[ind] = subbinmap\n denominator *= self.nbin[self.ni[ind]]\n\n return binmap\n\n @property\n def flatcount(self):\n # Compute flatcount the first time it is accessed. Some statistics\n # never access it.\n if self._flatcount is None:\n self._flatcount = np.bincount(self.xy, None)\n return self._flatcount\n\n @property\n def argsort_index(self):\n # Compute argsort the first time it is accessed. Some statistics\n # never access it.\n if self._argsort_index is None:\n self._argsort_index = self.xy.argsort()\n return self._argsort_index\n\n @property\n def bin_edges(self):\n \"\"\"\n bin_edges : array of dtype float\n Return the bin edges ``(length(statistic)+1)``.\n \"\"\"\n return self.edges\n\n @property\n def bin_centers(self):\n \"\"\"\n bin_centers : array of dtype float\n Return the bin centers ``(length(statistic))``.\n \"\"\"\n return self._centers\n\n @property\n def statistic(self):\n return self._statistic\n\n @statistic.setter\n def statistic(self, new_statistic):\n if not callable(new_statistic) and new_statistic not in self.std_:\n raise ValueError('invalid statistic %r' % (new_statistic,))\n else:\n self._statistic = new_statistic\n\n def __call__(self, values, statistic=None):\n \"\"\"\n Parameters\n ----------\n values : array_like\n The values on which the statistic will be computed. This must be\n the same shape as `sample` in the constructor.\n statistic : string or callable, optional\n The statistic to compute (default is whatever was passed in when\n this object was instantiated).\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. 
`values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n Returns\n -------\n statistic_values : array\n The values of the selected statistic in each bin.\n \"\"\"\n if statistic is None:\n statistic = self.statistic\n\n self.result = np.empty(self.nbin.prod(), float)\n if statistic == 'mean':\n self.result.fill(np.nan)\n flatsum = np.bincount(self.xy, values)\n a = self.flatcount.nonzero()\n self.result[a] = flatsum[a] / self.flatcount[a]\n elif statistic == 'std':\n self.result.fill(0)\n flatsum = np.bincount(self.xy, values)\n flatsum2 = np.bincount(self.xy, values ** 2)\n a = self.flatcount.nonzero()\n self.result[a] = np.sqrt(flatsum2[a] / self.flatcount[a] -\n (flatsum[a] / self.flatcount[a]) ** 2)\n elif statistic == 'count':\n self.result.fill(0)\n a = np.arange(len(self.flatcount))\n self.result[a] = self.flatcount\n elif statistic == 'sum':\n self.result.fill(0)\n flatsum = np.bincount(self.xy, values)\n a = np.arange(len(flatsum))\n self.result[a] = flatsum\n elif callable(statistic) or statistic == 'median':\n if statistic == 'median':\n internal_statistic = np.median\n else:\n internal_statistic = statistic\n with warnings.catch_warnings():\n # Numpy generates a warnings for mean/std/... with empty list\n warnings.filterwarnings('ignore', category=RuntimeWarning)\n old = np.seterr(invalid='ignore')\n try:\n null = internal_statistic([])\n except:\n null = np.nan\n np.seterr(**old)\n self.result.fill(null)\n\n vfs = values[self.argsort_index]\n i = 0\n for j, k in enumerate(self.flatcount):\n if k > 0:\n self.result[j] = internal_statistic(vfs[i: i + k])\n i += k\n\n # Shape into a proper matrix\n self.result = self.result.reshape(np.sort(self.nbin))\n ni = np.copy(self.ni)\n for i in np.arange(self.nbin.size):\n j = ni.argsort()[i]\n self.result = self.result.swapaxes(i, j)\n ni[i], ni[j] = ni[j], ni[i]\n\n # Remove outliers (indices 0 and -1 for each dimension).\n core = self.D * [slice(1, -1)]\n self.result = self.result[core]\n\n if (self.result.shape != self.nbin - 2).any():\n raise RuntimeError('Internal Shape Error')\n\n return self.result\n\n\nclass BinnedStatistic1D(BinnedStatisticDD):\n def __init__(self, x, statistic='mean',\n bins=10, range=None, mask=None):\n \"\"\"\n A refactored version of scipy.stats.binned_statistic to improve\n performance for the case where binning doesn't need to be\n re-initialized on every call.\n\n Compute a binned statistic for a set of data.\n\n This is a generalization of a histogram function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean,\n median, or other statistic of the values within each bin.\n\n Parameters\n ----------\n x : array_like\n A sequence of values to be binned.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. 
Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n bins : int or sequence of scalars, optional\n If `bins` is an int, it defines the number of equal-width bins in\n the given range (10 by default). If `bins` is a sequence, it\n defines the bin edges, including the rightmost edge, allowing for\n non-uniform bin widths. Values in `x` that are smaller than lowest\n bin edge are assigned to bin number 0, values beyond the highest\n bin are assigned to ``bins[-1]``.\n range : (float, float) or [(float, float)], optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(x.min(), x.max())``. Values outside the range are\n ignored.\n mask : array_like\n ones and zeros with the same shape as `x`.\n Values with mask==0 will be ignored.\n\n See Also\n --------\n numpy.histogram, binned_statistic_2d, binned_statistic_dd\n\n Notes\n -----\n All but the last (righthand-most) bin is half-open. In other words, if\n `bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including\n 1, but excluding 2) and the second ``[2, 3)``. The last bin, however,\n is ``[3, 4]``, which *includes* 4.\n \"\"\"\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1:\n bins = [np.asarray(bins, float)]\n\n if range is not None:\n if len(range) == 2:\n range = [range]\n\n super(BinnedStatistic1D, self).__init__([x], statistic=statistic,\n bins=bins, range=range,\n mask=mask)\n\n @property\n def bin_edges(self):\n \"\"\"\n bin_edges : 1D array of dtype float\n Return the bin edges.\n \"\"\"\n return super(BinnedStatistic1D, self).bin_edges[0]\n\n @property\n def bin_centers(self):\n \"\"\"\n bin_centers : 1D array of dtype float\n Return the bin centers.\n \"\"\"\n return super(BinnedStatistic1D, self).bin_centers[0]\n\n\nclass BinnedStatistic2D(BinnedStatisticDD):\n \"\"\"\n Compute a bidimensional binned statistic for a set of data.\n\n This is a generalization of a histogram2d function. A histogram divides\n the space into bins, and returns the count of the number of points in\n each bin. This function allows the computation of the sum, mean, median,\n or other statistic of the values within each bin.\n\n Parameters\n ----------\n x : (N,) array_like\n A sequence of values to be binned along the first dimension.\n y : (M,) array_like\n A sequence of values to be binned along the second dimension.\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. 
`values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n bins : int or [int, int] or array_like or [array, array], optional\n The bin specification:\n\n * the number of bins for the two dimensions (nx=ny=bins),\n * the number of bins in each dimension (nx, ny = bins),\n * the bin edges for the two dimensions (x_edges = y_edges = bins),\n * the bin edges in each dimension (x_edges, y_edges = bins).\n\n range : (2,2) array_like, optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n [[xmin, xmax], [ymin, ymax]]. All values outside of this range will be\n considered outliers and not tallied in the histogram.\n mask : array_like\n ones and zeros with the same shape as `x`.\n Values with mask==0 will be ignored.\n\n See Also\n --------\n numpy.histogram2d, binned_statistic, binned_statistic_dd\n\n \"\"\"\n\n def __init__(self, x, y, statistic='mean',\n bins=10, range=None, mask=None):\n # This code is based on np.histogram2d\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N != 1 and N != 2:\n xedges = yedges = np.asarray(bins, float)\n bins = [xedges, yedges]\n\n super(BinnedStatistic2D, self).__init__([x, y], statistic=statistic,\n bins=bins, range=range,\n mask=mask)\n\n def __call__(self, values, statistic=None):\n \"\"\"\n Parameters\n ----------\n values : array_like\n The values on which the statistic will be computed. This must\n match the dimensions of ``x`` and ``y`` that were passed in when\n this object was instantiated.\n statistic : string or callable, optional\n The statistic to compute (default is whatever was passed in when\n this object was instantiated).\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. 
Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n Returns\n -------\n statistic_values : array\n The values of the selected statistic in each bin.\n \"\"\"\n return super(BinnedStatistic2D, self).__call__(values, statistic)\n\n\nclass RPhiBinnedStatistic(BinnedStatistic2D):\n \"\"\"\n Create a 2-dimensional histogram by binning a 2-dimensional\n image in both radius and phi.\n \"\"\"\n\n def __init__(self, shape, bins=10, range=None,\n origin=None, mask=None, r_map=None, statistic='mean'):\n \"\"\"\n Parameters:\n -----------\n shape : tuple of ints of length 2.\n shape of image.\n bins : int or [int, int] or array_like or [array, array], optional\n The bin specification:\n * number of bins for the two dimensions (nr=nphi=bins),\n * number of bins in each dimension (nr, nphi = bins),\n * bin edges for the two dimensions (r_edges = phi_edges = bins),\n * the bin edges in each dimension (r_edges, phi_edges = bins).\n Phi has a range of -pi to pi and is defined as arctan(row/col)\n (i.e. x is column and y is row, or \"cartesian\" format,\n not \"matrix\")\n range : (2,2) array_like, optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n [[rmin, rmax], [phimin, phimax]]. All values outside of this range\n will be considered outliers and not tallied in the histogram.\n See \"bins\" parameter for definition of phi.\n origin : tuple of floats with length 2, optional\n location (in pixels) of origin (default: image center).\n mask : 2-dimensional np.ndarray of ints, optional\n array of zero/non-zero values, with shape `shape`.\n zero values will be ignored.\n r_map : 2d np.ndarray of floats, optional\n The map of pixel radii for each pixel. For example, r_map can be\n used to define the radius of each pixel relative to the origin in\n reciprocal space (on the Ewald sphere).\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n \"\"\"\n if origin is None:\n origin = (shape[0] - 1) / 2., (shape[1] - 1) / 2.\n\n if r_map is None:\n r_map = radial_grid(origin, shape)\n\n phi_map = angle_grid(origin, shape)\n\n self.expected_shape = tuple(shape)\n if mask is not None:\n if mask.shape != self.expected_shape:\n raise ValueError('\"mask\" has incorrect shape. '\n ' Expected: ' + str(self.expected_shape) +\n ' Received: ' + str(mask.shape))\n mask = mask.reshape(-1)\n\n super(RPhiBinnedStatistic, self).__init__(r_map.reshape(-1),\n phi_map.reshape(-1),\n statistic,\n bins=bins,\n mask=mask,\n range=range)\n\n def __call__(self, values, statistic=None):\n \"\"\"\n Parameters\n ----------\n values : array_like\n The values on which the statistic will be computed. 
This must\n match the ``shape`` that passed in when this object was\n instantiated.\n statistic : string or callable, optional\n The statistic to compute (default is whatever was passed in when\n this object was instantiated).\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n Returns\n -------\n statistic_values : array\n The values of the selected statistic in each bin.\n \"\"\"\n # check for what I believe could be a common error\n if values.shape != self.expected_shape:\n raise ValueError('\"values\" has incorrect shape.'\n ' Expected: ' + str(self.expected_shape) +\n ' Received: ' + str(values.shape))\n return super(RPhiBinnedStatistic, self).__call__(values.reshape(-1),\n statistic)\n\n\nclass RadialBinnedStatistic(BinnedStatistic1D):\n \"\"\"\n Create a 1-dimensional histogram by binning a 2-dimensional\n image in radius.\n \"\"\"\n\n def __init__(self, shape, bins=10, range=None, origin=None, mask=None,\n r_map=None, statistic='mean'):\n \"\"\"\n Parameters:\n -----------\n shape : tuple of ints of length 2.\n shape of image.\n bins : int or sequence of scalars, optional\n If `bins` is an int, it defines the number of equal-width bins in\n the given range (10 by default). If `bins` is a sequence, it\n defines the bin edges, including the rightmost edge, allowing for\n non-uniform bin widths. Values in `x` that are smaller than lowest\n bin edge are assigned to bin number 0, values beyond the highest\n bin are assigned to ``bins[-1]``.\n Phi has a range of -pi to pi and is defined as arctan(row/col)\n (i.e. x is column and y is row, or \"cartesian\" format,\n not \"matrix\")\n range : (float, float) or [(float, float)], optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(x.min(), x.max())``. Values outside the range are\n ignored.\n See \"bins\" parameter for definition of phi.\n origin : tuple of floats with length 2, optional\n location (in pixels) of origin (default: image center).\n mask : 2-dimensional np.ndarray of ints, optional\n array of zero/non-zero values, with shape `shape`.\n zero values will be ignored.\n r_map : the map of pixel radii for each pixel. This is useful when the\n detector has some curvature or is a more complex 2D shape embedded\n in a 3D space (for example, Ewald curvature).\n statistic : string or callable, optional\n The statistic to compute (default is 'mean').\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. 
`values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n \"\"\"\n if origin is None:\n origin = (shape[0] - 1) / 2, (shape[1] - 1) / 2\n\n if r_map is None:\n r_map = radial_grid(origin, shape)\n\n self.expected_shape = tuple(shape)\n if mask is not None:\n if mask.shape != self.expected_shape:\n raise ValueError('\"mask\" has incorrect shape. '\n ' Expected: ' + str(self.expected_shape) +\n ' Received: ' + str(mask.shape))\n mask = mask.reshape(-1)\n\n super(RadialBinnedStatistic, self).__init__(r_map.reshape(-1),\n statistic,\n bins=bins,\n mask=mask,\n range=range)\n\n def __call__(self, values, statistic=None):\n \"\"\"\n Parameters\n ----------\n values : array_like\n The values on which the statistic will be computed. This must\n match the ``shape`` that passed in when this object was\n instantiated.\n statistic : string or callable, optional\n The statistic to compute (default is whatever was passed in when\n this object was instantiated).\n The following statistics are available:\n\n * 'mean' : compute the mean of values for points within each bin.\n Empty bins will be represented by NaN.\n * 'median' : compute the median of values for points within each\n bin. Empty bins will be represented by NaN.\n * 'count' : compute the count of points within each bin. This is\n identical to an unweighted histogram. `values` array is not\n referenced.\n * 'sum' : compute the sum of values for points within each bin.\n This is identical to a weighted histogram.\n * function : a user-defined function which takes a 1D array of\n values, and outputs a single numerical statistic. This function\n will be called on the values in each bin. Empty bins will be\n represented by function([]), or NaN if this returns an error.\n\n Returns\n -------\n statistic_values : array\n The values of the selected statistic in each bin.\n \"\"\"\n # check for what I believe could be a common error\n if values.shape != self.expected_shape:\n raise ValueError('\"values\" has incorrect shape.'\n ' Expected: ' + str(self.expected_shape) +\n ' Received: ' + str(values.shape))\n return super(RadialBinnedStatistic, self).__call__(values.reshape(-1),\n statistic)\n" ]
[ [ "numpy.sqrt", "numpy.bincount", "numpy.empty", "numpy.zeros", "numpy.diff", "numpy.atleast_2d", "numpy.fabs", "numpy.asarray", "numpy.copy", "numpy.arange", "numpy.seterr", "numpy.sort", "numpy.around", "numpy.isscalar", "numpy.linspace", "numpy.digitize" ] ]
Wliubei/Multiple-Resolutionse
[ "3d8a9a725a00e5f9a2ac3b2b9659bc22b3876f75" ]
[ "torch_stft/stft.py" ]
[ "import torch\nimport numpy as np\nimport torch.nn.functional as F\nfrom scipy.signal import get_window\nfrom librosa.util import pad_center, tiny\nfrom .util import window_sumsquare\nimport librosa\nclass STFT(torch.nn.Module):\n def __init__(self, filter_length=1724, hop_length=130, win_length=None,\n window='blackman'):\n \"\"\"\n This module implements an STFT using 1D convolution and 1D transpose convolutions.\n This is a bit tricky so there are some cases that probably won't work as working\n out the same sizes before and after in all overlap add setups is tough. Right now,\n this code should work with hop lengths that are half the filter length (50% overlap\n between frames).\n \n Keyword Arguments:\n filter_length {int} -- Length of filters used (default: {1024})\n hop_length {int} -- Hop length of STFT (restrict to 50% overlap between frames) (default: {512})\n win_length {[type]} -- Length of the window function applied to each frame (if not specified, it\n equals the filter length). (default: {None})\n window {str} -- Type of window to use (options are bartlett, hann, hamming, blackman, blackmanharris) \n (default: {'hann'})\n \"\"\"\n super(STFT, self).__init__()\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length if win_length else filter_length\n self.window = window\n self.forward_transform = None\n self.pad_amount = int(self.filter_length / 2)\n scale = self.filter_length / self.hop_length\n fourier_basis = np.fft.fft(np.eye(self.filter_length))\n\n cutoff = int((self.filter_length / 2 + 1))\n fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),\n np.imag(fourier_basis[:cutoff, :])])\n forward_basis = torch.FloatTensor(fourier_basis[:, None, :])\n inverse_basis = torch.FloatTensor(\n np.linalg.pinv(scale * fourier_basis).T[:, None, :])\n\n assert(filter_length >= self.win_length)\n # get window and zero center pad it to filter_length\n fft_window = get_window(window, self.win_length, fftbins=True)\n fft_window = pad_center(fft_window, filter_length)\n fft_window = torch.from_numpy(fft_window).float()\n\n # window the bases\n forward_basis *= fft_window\n inverse_basis *= fft_window\n\n self.register_buffer('forward_basis', forward_basis.float())\n self.register_buffer('inverse_basis', inverse_basis.float())\n\n def transform(self, input_data):\n \"\"\"Take input data (audio) to STFT domain.\n \n Arguments:\n input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)\n \n Returns:\n magnitude {tensor} -- Magnitude of STFT with shape (num_batch, \n num_frequencies, num_frames)\n phase {tensor} -- Phase of STFT with shape (num_batch, \n num_frequencies, num_frames)\n \"\"\"\n num_batches = input_data.shape[0]\n # x=input_data.shape[1]\n num_samples = input_data.shape[-1]\n\n self.num_samples = num_samples\n # print(input_data.shape)\n # similar to librosa, reflect-pad the input\n input_data = input_data.view(num_batches, 1, num_samples).cpu()\n \n input_data = F.pad(\n input_data.unsqueeze(1),\n (self.pad_amount, self.pad_amount, 0, 0),\n mode='reflect')\n input_data = input_data.squeeze(1)\n\n forward_transform = F.conv1d(\n input_data,\n self.forward_basis,\n stride=self.hop_length,\n padding=0)\n # print(forward_transform.weight)\n\n cutoff = int((self.filter_length / 2) + 1)\n real_part = forward_transform[:, :cutoff, :]\n imag_part = forward_transform[:, cutoff:, :]\n\n magnitude = torch.sqrt(real_part**2 + imag_part**2)\n # print(magnitude.size())\n phase = torch.atan2(imag_part.data, 
real_part.data)\n powspec = np.square(magnitude.cpu())\n logpowspec = torch.cuda.FloatTensor(librosa.power_to_db(powspec, ref=1.0, amin=1e-30, top_db=None))\n return logpowspec, phase\n\n # def inverse(self, magnitude, phase):\n # \"\"\"Call the inverse STFT (iSTFT), given magnitude and phase tensors produced\n # by the ```transform``` function.\n #\n # Arguments:\n # magnitude {tensor} -- Magnitude of STFT with shape (num_batch,\n # num_frequencies, num_frames)\n # phase {tensor} -- Phase of STFT with shape (num_batch,\n # num_frequencies, num_frames)\n #\n # Returns:\n # inverse_transform {tensor} -- Reconstructed audio given magnitude and phase. Of\n # shape (num_batch, num_samples)\n # \"\"\"\n # recombine_magnitude_phase = torch.cat(\n # [magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)\n #\n # inverse_transform = F.conv_transpose1d(\n # recombine_magnitude_phase,\n # self.inverse_basis,\n # stride=self.hop_length,\n # padding=0)\n #\n # if self.window is not None:\n # window_sum = window_sumsquare(\n # self.window, magnitude.size(-1), hop_length=self.hop_length,\n # win_length=self.win_length, n_fft=self.filter_length,\n # dtype=np.float32)\n # # remove modulation effects\n # approx_nonzero_indices = torch.from_numpy(\n # np.where(window_sum > tiny(window_sum))[0])\n # window_sum = torch.from_numpy(window_sum).to(inverse_transform.device)\n # inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]\n #\n # # scale by hop ratio\n # inverse_transform *= float(self.filter_length) / self.hop_length\n #\n # inverse_transform = inverse_transform[..., self.pad_amount:]\n # inverse_transform = inverse_transform[..., :self.num_samples]\n # inverse_transform = inverse_transform.squeeze(1)\n #\n # return inverse_transform\n\n def forward(self, input_data):\n \"\"\"Take input data (audio) to STFT domain and then back to audio.\n \n Arguments:\n input_data {tensor} -- Tensor of floats, with shape (num_batch, num_samples)\n \n Returns:\n reconstruction {tensor} -- Reconstructed audio given magnitude and phase. Of\n shape (num_batch, num_samples)\n \"\"\"\n num_batches = input_data.shape[0]\n # x=input_data.shape[1]\n num_samples = input_data.shape[-1]\n\n self.num_samples = num_samples\n\n # similar to librosa, reflect-pad the input\n input_data = input_data.view(num_batches, 1, num_samples)\n\n input_data = F.pad(\n input_data.unsqueeze(1),\n (self.pad_amount, self.pad_amount, 0, 0),\n mode='reflect')\n input_data = input_data.squeeze(1)\n\n forward_transform = F.conv1d(\n input_data,\n self.forward_basis,\n stride=self.hop_length,\n padding=0)\n\n cutoff = int((self.filter_length / 2) + 1)\n real_part = forward_transform[:, :cutoff, :]\n imag_part = forward_transform[:, cutoff:, :]\n\n magnitude = torch.sqrt(real_part ** 2 + imag_part ** 2)\n # print(magnitude.size())\n phase = torch.atan2(imag_part.data, real_part.data)\n powspec = np.square(magnitude.cpu())\n logpowspec = torch.cuda.FloatTensor(librosa.power_to_db(powspec, ref=1.0, amin=1e-30, top_db=None))\n return logpowspec, phase" ]
[ [ "numpy.eye", "torch.FloatTensor", "scipy.signal.get_window", "torch.nn.functional.conv1d", "torch.sqrt", "numpy.imag", "torch.from_numpy", "numpy.linalg.pinv", "numpy.real", "torch.atan2" ] ]
fzalkow/music_indexing
[ "b7a0405f229f81d759e3fbe5a870da3532874afc" ]
[ "common/__init__.py" ]
[ "'''\nFile name: common/__init__.py\nAuthor: Frank Zalkow\nDate: 2020\nLicense: MIT\nThis file is part of the following repository:\n https://github.com/fzalkow/music_indexing\n'''\n\nimport numpy as np\nimport librosa\n\nCHROMA_DIMS = 12\n\n\ndef compute_features(fn_audio):\n Fs = 22050\n H = 2205\n smooth = 41\n downsample = 10\n\n x, _ = librosa.load(fn_audio, sr=Fs)\n X_iirt = librosa.spectrum.iirt(x, sr=Fs, win_length=H*2, hop_length=H, center=True)\n fmin = librosa.midi_to_hz(24)\n X_cens = librosa.feature.chroma_cens(sr=Fs, C=X_iirt, fmin=fmin, bins_per_octave=CHROMA_DIMS, n_octaves=7,\n win_len_smooth=smooth, norm=2)[:, ::downsample]\n\n return X_cens\n\n\ndef generate_shingles(X_cens, L=20):\n num_shingles = X_cens.shape[1] - L + 1\n shingles = np.empty((num_shingles, L * CHROMA_DIMS))\n for idx in range(num_shingles):\n shingles[idx, :] = X_cens[:, idx:idx + L].ravel()\n return shingles\n" ]
[ [ "numpy.empty" ] ]
seinecke/aict-tools
[ "87601eba47d77385ac35145fd0b85d7aa7d66c71" ]
[ "aict_tools/scripts/plot_direction_performance.py" ]
[ "import click\nimport logging\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_pdf import PdfPages\nimport numpy as np\nfrom sklearn.externals import joblib\nfrom ..configuration import AICTConfig\nimport fact.io\n\nfrom ..plotting import (\n plot_roc,\n plot_probabilities,\n plot_regressor_confusion,\n plot_bias_resolution,\n plot_bias,\n plot_resolution,\n plot_feature_importances,\n)\n\n\[email protected]()\[email protected]('configuration_path', type=click.Path(exists=True, dir_okay=False))\[email protected]('performance_path', type=click.Path(exists=True, dir_okay=False))\[email protected]('model_path_sign', type=click.Path(exists=True, dir_okay=False))\[email protected]('model_path_disp', type=click.Path(exists=True, dir_okay=False))\[email protected]('-o', '--output', type=click.Path(exists=False, dir_okay=False))\[email protected]('-k', '--key', help='HDF5 key for hdf5', default='data')\[email protected]('-p', '--parameter', type=click.Choice(['energy', 'disp']), \n default='energy', help='Parameter to be estimated')\ndef main(configuration_path, performance_path, model_path_sign, model_path_disp, \n output, key, parameter):\n ''' Create some performance evaluation plots for the separator '''\n logging.basicConfig(level=logging.INFO)\n log = logging.getLogger()\n\n log.info('Loading perfomance data')\n df = fact.io.read_data(performance_path, key=key)\n\n log.info('Loading model sign')\n model_sign = joblib.load(model_path_sign)\n\n log.info('Loading model disp')\n model_disp = joblib.load(model_path_disp)\n\n config = AICTConfig.from_yaml(configuration_path)\n model_config = config.disp\n figures = []\n\n # Plot ROC\n figures.append(plt.figure())\n ax = figures[-1].add_subplot(1, 1, 1)\n plot_roc(df, model_sign, ax=ax, label='sign', pred='sign_probabilities')\n\n # Plot hists of probas\n figures.append(plt.figure())\n ax = figures[-1].add_subplot(1, 1, 1)\n ax = plot_probabilities(df, model_sign, ax=ax, \n label='sign', pred='sign_probabilities', \n classnames=('Minus', 'Plus'))\n\n # Plot confusion\n figures.append(plt.figure())\n ax = figures[-1].add_subplot(1, 1, 1)\n ax = plot_regressor_confusion(df, ax=ax, log_xy=False,\n label='disp', pred='disp_prediction')\n ax.plot([0,500], [0,500], color='#D03A3B', alpha=0.5)\n ax.set_xlabel(r'$disp_{\\mathrm{true}} \\,\\, / \\,\\, \\mathrm{m}$')\n ax.set_ylabel(r'$disp_{\\mathrm{rec}} \\,\\, / \\,\\, \\mathrm{m}$')\n\n\n # Plot confusion for different energies\n max_disp = max(\n np.nanmax(df['disp']), \n np.nanmax(df['disp_prediction'])\n )\n energies_min = np.nanmin(df[config.energy.target_column])\n energies_max = np.nanmax(df[config.energy.target_column])\n energies = np.logspace(np.log10(energies_min),np.log10(energies_max),5)\n\n figures.append(plt.figure())\n ax = figures[-1].add_subplot(2, 2, 1)\n ax = plot_regressor_confusion(df[df[config.energy.target_column]<energies[1]], \n ax=ax, log_xy=False, label='disp', pred='disp_prediction')\n ax.set_ylabel(r'$disp_{\\mathrm{rec}} \\,\\, / \\,\\, \\mathrm{m}$')\n ax.set_xlim([0,max_disp])\n ax.set_ylim([0,max_disp])\n ax.plot([0,max_disp], [0,max_disp], color='#D03A3B', alpha=0.5)\n ax.text(0.1,0.9,'< {:1.0f} TeV'.format(energies[1]), fontsize=8,\n transform=ax.transAxes, horizontalalignment='left')\n\n ax = figures[-1].add_subplot(2, 2, 2)\n ax = plot_regressor_confusion(df[(df[config.energy.target_column]>energies[1]) \n & (df[config.energy.target_column]<energies[2])], \n ax=ax, log_xy=False, label='disp', pred='disp_prediction')\n ax.set_xlim([0,max_disp])\n 
ax.set_ylim([0,max_disp])\n ax.plot([0,max_disp], [0,max_disp], color='#D03A3B', alpha=0.5)\n ax.text(0.1,0.9,'{:1.0f} - {:1.0f} TeV'.format(energies[1], energies[2]), fontsize=8,\n transform=ax.transAxes, horizontalalignment='left')\n\n ax = figures[-1].add_subplot(2, 2, 3)\n ax = plot_regressor_confusion(df[(df[config.energy.target_column]>energies[2]) \n & (df[config.energy.target_column]<energies[3])], \n ax=ax, log_xy=False, label='disp', pred='disp_prediction')\n ax.set_xlabel(r'$disp_{\\mathrm{true}} \\,\\, / \\,\\, \\mathrm{m}$')\n ax.set_ylabel(r'$disp_{\\mathrm{rec}} \\,\\, / \\,\\, \\mathrm{m}$')\n ax.set_xlim([0,max_disp])\n ax.set_ylim([0,max_disp])\n ax.plot([0,max_disp], [0,max_disp], color='#D03A3B', alpha=0.5)\n ax.text(0.1,0.9,'{:1.0f} - {:1.0f} TeV'.format(energies[2], energies[3]), fontsize=8,\n transform=ax.transAxes, horizontalalignment='left')\n\n ax = figures[-1].add_subplot(2, 2, 4)\n ax = plot_regressor_confusion(df[df[config.energy.target_column]>energies[3]], \n ax=ax, log_xy=False, label='disp', pred='disp_prediction')\n ax.set_xlabel(r'$disp_{\\mathrm{true}} \\,\\, / \\,\\, \\mathrm{m}$')\n ax.set_xlim([0,max_disp])\n ax.set_ylim([0,max_disp])\n ax.plot([0,max_disp], [0,max_disp], color='#D03A3B', alpha=0.5)\n ax.text(0.1,0.9,'> {:1.0f} TeV'.format(energies[3]), fontsize=8,\n transform=ax.transAxes, horizontalalignment='left')\n\n\n # Plot bias\n figures.append(plt.figure())\n ax = figures[-1].add_subplot(1, 1, 1)\n ax = plot_bias(df, bins=15, ax=ax, log_x=False,\n label='disp', pred='disp_prediction')\n ax.set_xlabel(r'$disp_{\\mathrm{true}} \\,\\, / \\,\\, \\mathrm{m}$')\n ax.set_ylabel('Bias')\n\n # Plot resolution\n figures.append(plt.figure())\n ax = figures[-1].add_subplot(1, 1, 1)\n ax = plot_resolution(df, bins=15, ax=ax, log_x=False,\n label='disp', pred='disp_prediction')\n ax.set_xlabel(r'$disp_{\\mathrm{true}} \\,\\, / \\,\\, \\mathrm{m}$')\n ax.set_ylabel('Resolution')\n\n # Plot feature importances\n figures.append(plt.figure())\n ax = figures[-1].add_subplot(1, 1, 1)\n features = model_config.features\n ax = plot_feature_importances(model_disp, features, ax=ax)\n ax.text(0.95, 0.05, 'Disp Regression',\n transform=ax.transAxes, horizontalalignment='right')\n\n # Plot feature importances\n figures.append(plt.figure())\n ax = figures[-1].add_subplot(1, 1, 1)\n features = model_config.features\n ax = plot_feature_importances(model_sign, features, ax=ax)\n ax.text(0.95, 0.05, 'Sign Classification',\n transform=ax.transAxes, horizontalalignment='right')\n\n\n if output is None:\n plt.show()\n else:\n with PdfPages(output) as pdf:\n for fig in figures:\n pdf.savefig(fig)\n" ]
[ [ "numpy.nanmax", "matplotlib.pyplot.figure", "matplotlib.backends.backend_pdf.PdfPages", "sklearn.externals.joblib.load", "numpy.nanmin", "matplotlib.pyplot.show", "numpy.log10" ] ]
MoonBlvd/TFSegmentation
[ "baa874237289227c99163fe119448579904a231f" ]
[ "models/encoders/resnet_18.py" ]
[ "import tensorflow as tf\nimport numpy as np\n\nfrom layers.utils import variable_summaries, variable_with_weight_decay\nfrom utils.misc import timeit\nfrom utils.misc import _debug\n# import torchfile\nimport pickle\nimport pdb\n\nclass RESNET18:\n \"\"\"\n RESNET 18 Encoder class\n \"\"\"\n\n def __init__(self, x_input,\n num_classes,\n pretrained_path,\n train_flag,\n bias=-1,\n weight_decay=5e-4,\n test_classification=False):\n \"\"\"\n\n :param x_input: Input Images to the RESNET Encoder\n :param num_classes:\n :param pretrained_path:\n :param train_flag:\n :param weight_decay:\n \"\"\"\n\n # Load pretrained path\n if pretrained_path.split('.')[-1]=='npy':\n self.pretrained_weights = np.load(pretrained_path)\n elif pretrained_path.split('.')[-1]=='pkl':\n with open(pretrained_path, 'rb') as ff:\n self.pretrained_weights = pickle.load(ff, encoding='latin1')\n\n print('pretrained weights dictionary loaded from disk')\n\n # init parameters and input\n self.x_input = x_input\n self.num_classes = num_classes\n self.train_flag = train_flag\n self.wd = weight_decay\n self.bias = bias\n self.use_bias = True\n if self.bias == -1:\n self.use_bias = False\n\n self.test_classification = test_classification\n\n # All layers\n self.resnet_mean = None\n self.resnet_std = None\n self.x_preprocessed = None\n self.conv1 = None\n self.conv2 = None\n self.conv3 = None\n self.conv4 = None\n self.conv5 = None\n self.score = None\n\n # These feed layers are for the decoder\n self.feed1 = None\n self.feed2 = None\n self.encoder_1 = None\n self.encoder_2 = None\n self.encoder_3 = None\n self.encoder_4 = None\n\n def build(self):\n \"\"\"\n Build the RESNET model using loaded weights\n \"\"\"\n\n print(\"Building the RESNET..\")\n\n # Convert RGB to BGR\n with tf.name_scope('Pre_Processing'):\n self.x_preprocessed = self.x_input * (1.0 / 255.0)\n# self.x_preprocessed= self.x_input\n stat= torchfile.load('stat.t7')\n self.resnet_mean= stat.transpose(1,2,0)\n# self.resnet_mean = tf.constant([0.2869, 0.3251, 0.2839], dtype=tf.float32)\n self.x_preprocessed = (self.x_preprocessed - self.resnet_mean) #/ self.resnet_std\n# red, green, blue = tf.split(self.x_preprocessed, num_or_size_splits=3, axis=3)\n# self.x_preprocessed = tf.concat([blue,green,red], 3)\n\n # These variables to keep track of what i do\n # filters = [64, 64, 128, 256, 512]\n # kernels = [7, 3, 3, 3, 3]\n # strides = [2, 0, 2, 2, 2]\n tf.add_to_collection('debug_layers', self.x_preprocessed)\n\n with tf.variable_scope('conv1_x'):\n print('Building unit: conv1')\n self.conv1 = self._conv('conv1', self.x_preprocessed, padding= [[0,0],[3,3],[3,3],[0,0]],\n num_filters=64, kernel_size=(7, 7), stride=(2, 2), l2_strength=self.wd,\n bias=self.bias)\n\n self.conv1 = self._bn('bn1', self.conv1)\n\n self.conv1 = self._relu('relu1', self.conv1)\n _debug(self.conv1)\n self.conv1= tf.pad(self.conv1, tf.constant([[0,0],[1,1],[1,1],[0,0]]), \"CONSTANT\")\n self.conv1 = tf.nn.max_pool(self.conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID',\n name='max_pool1')\n _debug(self.conv1)\n print('conv1-shape: ' + str(self.conv1.shape.as_list()))\n\n with tf.variable_scope('conv2_x'):\n self.conv2 = self._residual_block('conv2_1', self.conv1, 64)\n _debug(self.conv2)\n self.conv2 = self._residual_block('conv2_2', self.conv2, 64)\n _debug(self.conv2)\n\n with tf.variable_scope('conv3_x'):\n self.conv3 = self._residual_block('conv3_1', self.conv2, 128, pool_first=True, strides=2)\n _debug(self.conv3)\n self.conv3 = self._residual_block('conv3_2', self.conv3, 
128)\n _debug(self.conv3)\n\n with tf.variable_scope('conv4_x'):\n self.conv4 = self._residual_block('conv4_1', self.conv3, 256, pool_first=True, strides=2)\n _debug(self.conv4)\n self.conv4 = self._residual_block('conv4_2', self.conv4, 256)\n _debug(self.conv4)\n\n with tf.variable_scope('conv5_x'):\n self.conv5 = self._residual_block('conv5_1', self.conv4, 512, pool_first=True, strides=2)\n _debug(self.conv5)\n self.conv5 = self._residual_block('conv5_2', self.conv5, 512)\n _debug(self.conv5)\n\n if self.test_classification:\n with tf.variable_scope('logits'):\n print('Building unit: logits')\n self.score = tf.reduce_mean(self.conv5, axis=[1, 2])\n self.score = self._fc('logits_dense', self.score, output_dim=self.num_classes, l2_strength=self.wd)\n print('logits-shape: ' + str(self.score.shape.as_list()))\n\n self.feed1 = self.conv4\n self.feed2 = self.conv3\n\n self.encoder_1 = self.conv2\n self.encoder_2 = self.conv3\n self.encoder_3 = self.conv4\n self.encoder_4 = self.conv5\n print(\"\\nEncoder RESNET is built successfully\\n\\n\")\n\n @timeit\n def load_pretrained_weights(self, sess):\n print(\"Loading pretrained weights of resnet18\")\n all_vars = tf.trainable_variables()\n all_vars += tf.get_collection('mu_sigma_bn')\n for v in all_vars:\n if v.op.name in self.pretrained_weights.keys():\n assign_op = v.assign(self.pretrained_weights[v.op.name])\n sess.run(assign_op)\n print(v.op.name + \" - loaded successfully\")\n print(\"All pretrained weights of resnet18 is loaded\")\n\n def _residual_block(self, name, x, filters, pool_first=False, strides=1, dilation=1):\n print('Building residual unit: %s' % name)\n with tf.variable_scope(name):\n # get input channels\n in_channel = x.shape.as_list()[-1]\n\n # Shortcut connection\n shortcut = tf.identity(x)\n\n if pool_first:\n if in_channel == filters:\n if strides == 1:\n shortcut = tf.identity(x)\n else:\n shortcut= tf.pad(x, tf.constant([[0,0],[1,1],[1,1],[0,0]]), \"CONSTANT\")\n shortcut = tf.nn.max_pool(shortcut, [1, strides, strides, 1], [1, strides, strides, 1], 'VALID')\n else:\n shortcut = self._conv('shortcut_conv', x, padding='VALID',\n num_filters=filters, kernel_size=(1, 1), stride=(strides, strides),\n bias=self.bias)\n else:\n if dilation != 1:\n shortcut = self._conv('shortcut_conv', x, padding='VALID',\n num_filters=filters, kernel_size=(1, 1), dilation=dilation, bias=self.bias)\n\n # Residual\n x = self._conv('conv_1', x, padding=[[0,0],[1,1],[1,1],[0,0]],\n num_filters=filters, kernel_size=(3, 3), stride=(strides, strides), bias=self.bias)\n x = self._bn('bn_1', x)\n x = self._relu('relu_1', x)\n x = self._conv('conv_2', x, padding=[[0,0],[1,1],[1,1],[0,0]],\n num_filters=filters, kernel_size=(3, 3), bias=self.bias)\n x = self._bn('bn_2', x)\n\n # Merge\n x = x + shortcut\n x = self._relu('relu_2', x)\n\n print('residual-unit-%s-shape: ' % name + str(x.shape.as_list()))\n\n return x\n\n @staticmethod\n def _conv(name, x, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),\n initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, dilation=1.0, bias=-1):\n\n with tf.variable_scope(name):\n stride = [1, stride[0], stride[1], 1]\n kernel_shape = [kernel_size[0], kernel_size[1], x.shape[-1], num_filters]\n\n w = variable_with_weight_decay(kernel_shape, initializer, l2_strength)\n\n variable_summaries(w)\n if dilation > 1:\n conv = tf.nn.atrous_conv2d(x, w, dilation, padding)\n else:\n if type(padding)==type(''):\n conv = tf.nn.conv2d(x, w, stride, padding)\n else:\n conv = tf.pad(x, padding, 
\"CONSTANT\")\n conv = tf.nn.conv2d(conv, w, stride, padding='VALID')\n\n if bias != -1:\n bias = tf.get_variable('biases', [num_filters], initializer=tf.constant_initializer(bias))\n\n variable_summaries(bias)\n conv = tf.nn.bias_add(conv, bias)\n\n tf.add_to_collection('debug_layers', conv)\n\n return conv\n\n @staticmethod\n def _relu(name, x):\n with tf.variable_scope(name):\n return tf.nn.relu(x)\n\n @staticmethod\n def _fc(name, x, output_dim=128,\n initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0):\n\n with tf.variable_scope(name):\n n_in = x.get_shape()[-1].value\n\n w = variable_with_weight_decay([n_in, output_dim], initializer, l2_strength)\n\n variable_summaries(w)\n\n if isinstance(bias, float):\n bias = tf.get_variable(\"biases\", [output_dim], tf.float32, tf.constant_initializer(bias))\n\n variable_summaries(bias)\n\n output = tf.nn.bias_add(tf.matmul(x, w), bias)\n\n return output\n\n def _bn(self, name, x):\n with tf.variable_scope(name):\n moving_average_decay = 0.9\n decay = moving_average_decay\n\n batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2])\n\n mu = tf.get_variable('mu', batch_mean.shape, dtype=tf.float32,\n initializer=tf.zeros_initializer(), trainable=False)\n tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, mu)\n tf.add_to_collection('mu_sigma_bn', mu)\n sigma = tf.get_variable('sigma', batch_var.shape, dtype=tf.float32,\n initializer=tf.ones_initializer(), trainable=False)\n tf.add_to_collection(tf.GraphKeys.GLOBAL_VARIABLES, sigma)\n tf.add_to_collection('mu_sigma_bn', sigma)\n beta = tf.get_variable('beta', batch_mean.shape, dtype=tf.float32,\n initializer=tf.zeros_initializer())\n gamma = tf.get_variable('gamma', batch_var.shape, dtype=tf.float32,\n initializer=tf.ones_initializer())\n\n # BN when training\n update = 1.0 - decay\n update_mu = mu.assign_sub(update * (mu - batch_mean))\n update_sigma = sigma.assign_sub(update * (sigma - batch_var))\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mu)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_sigma)\n\n mean, var = tf.cond(self.train_flag, lambda: (batch_mean, batch_var), lambda: (mu, sigma))\n bn = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-5)\n\n tf.add_to_collection('debug_layers', bn)\n\n return bn\n" ]
[ [ "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.name_scope", "tensorflow.identity", "tensorflow.nn.max_pool", "tensorflow.nn.atrous_conv2d", "tensorflow.ones_initializer", "tensorflow.cond", "tensorflow.constant", "tensorflow.add_to_collection", "numpy.load", "tensorflow.constant_initializer", "tensorflow.nn.batch_normalization", "tensorflow.get_collection", "tensorflow.zeros_initializer", "tensorflow.contrib.layers.xavier_initializer", "tensorflow.pad", "tensorflow.nn.moments", "tensorflow.nn.bias_add", "tensorflow.reduce_mean", "tensorflow.trainable_variables", "tensorflow.nn.conv2d", "tensorflow.nn.relu" ] ]
daniel03c1/audio_augment
[ "ee73bb0844e22c57c9cbeb129560da4a3853f77d" ]
[ "transforms_test.py" ]
[ "import PIL.Image\r\nimport PIL.ImageEnhance\r\nimport PIL.ImageOps\r\nimport numpy as np\r\nimport os\r\nimport random\r\nimport torch\r\nimport unittest\r\nfrom transforms import *\r\n\r\n\r\nclass TransformsTest(unittest.TestCase):\r\n def setUp(self):\r\n self.pil_img = PIL.Image.open('test_img.jpeg')\r\n self.np_img = np.array(self.pil_img).transpose(2, 0, 1) # to C, H, W\r\n self.torch_img = torch.Tensor(self.np_img) / 255.\r\n\r\n def test_autocontrast(self):\r\n target = PIL.ImageOps.autocontrast(self.pil_img)\r\n pred = AutoContrast(1.)(self.torch_img)\r\n self.compare(target, pred)\r\n\r\n def test_invert(self):\r\n target = PIL.ImageOps.invert(self.pil_img)\r\n pred = Invert(1.)(self.torch_img)\r\n self.compare(target, pred)\r\n\r\n def test_equalize(self):\r\n target = PIL.ImageOps.equalize(self.pil_img)\r\n pred = Equalize(1.)(self.torch_img)\r\n self.compare(target, pred)\r\n\r\n def test_solarize(self):\r\n target = PIL.ImageOps.solarize(self.pil_img, 110)\r\n pred = Solarize(1.)(self.torch_img)\r\n self.compare(target, pred)\r\n\r\n def test_posterize(self):\r\n target = PIL.ImageOps.posterize(self.pil_img, 2)\r\n pred = Posterize(0.5)(self.torch_img)\r\n self.compare(target, pred)\r\n\r\n def test_sharpness(self):\r\n target = PIL.ImageEnhance.Sharpness(self.pil_img).enhance(0.1)\r\n pred = Sharpness(.0)(self.torch_img)\r\n print((np.array(target)/255.).transpose(2, 0, 1))\r\n print(pred.numpy())\r\n self.compare(target, pred)\r\n\r\n def test_identity(self):\r\n target = self.pil_img\r\n pred = Identity(1.)(self.torch_img)\r\n self.compare(target, pred)\r\n\r\n def compare(self, pil_img, torch_img):\r\n self.assertTrue(np.allclose((np.array(pil_img)/255.).transpose(2, 0, 1),\r\n torch_img.numpy()))\r\n\r\n\r\nif __name__ == '__main__':\r\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'\r\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1'\r\n unittest.main()\r\n\r\n" ]
[ [ "numpy.array", "torch.Tensor" ] ]
MFadhilArkan/gym-pybullet-drones
[ "cc2e14da194682b2a5fb2f1a6ca7d047bf2dafae" ]
[ "gym_pybullet_drones/control/DSLPIDControl.py" ]
[ "import math\nimport numpy as np\nimport pybullet as p\nfrom scipy.spatial.transform import Rotation\n\nfrom gym_pybullet_drones.control.BaseControl import BaseControl\nfrom gym_pybullet_drones.envs.BaseAviary import DroneModel, BaseAviary\n\nclass DSLPIDControl(BaseControl):\n \"\"\"PID control class for Crazyflies.\n\n Based on work conducted at UTIAS' DSL by SiQi Zhou and James Xu.\n\n \"\"\"\n\n ################################################################################\n\n def __init__(self,\n drone_model: DroneModel,\n g: float=9.8\n ):\n \"\"\"Common control classes __init__ method.\n\n Parameters\n ----------\n drone_model : DroneModel\n The type of drone to control (detailed in an .urdf file in folder `assets`).\n g : float, optional\n The gravitational acceleration in m/s^2.\n\n \"\"\"\n super().__init__(drone_model=drone_model, g=g)\n if self.DRONE_MODEL != DroneModel.CF2X and self.DRONE_MODEL != DroneModel.CF2P:\n print(\"[ERROR] in DSLPIDControl.__init__(), DSLPIDControl requires DroneModel.CF2X or DroneModel.CF2P\")\n exit()\n self.P_COEFF_FOR = np.array([.4, .4, 1.25])\n self.I_COEFF_FOR = np.array([.05, .05, .05])\n self.D_COEFF_FOR = np.array([.2, .2, .5])\n self.P_COEFF_TOR = np.array([70000., 70000., 60000.])\n self.I_COEFF_TOR = np.array([.0, .0, 500.])\n self.D_COEFF_TOR = np.array([20000., 20000., 12000.])\n self.PWM2RPM_SCALE = 0.2685\n if self.DRONE_MODEL == DroneModel.ARDRONE2:\n self.PWM2RPM_SCALE = 1.25\n self.PWM2RPM_CONST = 4070.3\n self.MIN_PWM = 20000\n self.MAX_PWM = 65535\n if self.DRONE_MODEL in [DroneModel.CF2X, DroneModel.ARDRONE2]:\n self.MIXER_MATRIX = np.array([ [.5, -.5, -1], [.5, .5, 1], [-.5, .5, -1], [-.5, -.5, 1] ])\n elif self.DRONE_MODEL == DroneModel.CF2P:\n self.MIXER_MATRIX = np.array([ [0, -1, -1], [+1, 0, 1], [0, 1, -1], [-1, 0, 1] ])\n self.reset()\n\n ################################################################################\n\n def reset(self):\n \"\"\"Resets the control classes.\n\n The previous step's and integral errors for both position and attitude are set to zero.\n\n \"\"\"\n super().reset()\n #### Store the last roll, pitch, and yaw ###################\n self.last_rpy = np.zeros(3)\n #### Initialized PID control variables #####################\n self.last_pos_e = np.zeros(3)\n self.integral_pos_e = np.zeros(3)\n self.last_rpy_e = np.zeros(3)\n self.integral_rpy_e = np.zeros(3)\n\n ################################################################################\n \n def computeControl(self,\n control_timestep,\n cur_pos,\n cur_quat,\n cur_vel,\n cur_ang_vel,\n target_pos,\n target_rpy=np.zeros(3),\n target_vel=np.zeros(3),\n target_rpy_rates=np.zeros(3)\n ):\n \"\"\"Computes the PID control action (as RPMs) for a single drone.\n\n This methods sequentially calls `_dslPIDPositionControl()` and `_dslPIDAttitudeControl()`.\n Parameter `cur_ang_vel` is unused.\n\n Parameters\n ----------\n control_timestep : float\n The time step at which control is computed.\n cur_pos : ndarray\n (3,1)-shaped array of floats containing the current position.\n cur_quat : ndarray\n (4,1)-shaped array of floats containing the current orientation as a quaternion.\n cur_vel : ndarray\n (3,1)-shaped array of floats containing the current velocity.\n cur_ang_vel : ndarray\n (3,1)-shaped array of floats containing the current angular velocity.\n target_pos : ndarray\n (3,1)-shaped array of floats containing the desired position.\n target_rpy : ndarray, optional\n (3,1)-shaped array of floats containing the desired orientation as roll, pitch, 
yaw.\n target_vel : ndarray, optional\n (3,1)-shaped array of floats containing the desired velocity.\n target_rpy_rates : ndarray, optional\n (3,1)-shaped array of floats containing the desired roll, pitch, and yaw rates.\n\n Returns\n -------\n ndarray\n (4,1)-shaped array of integers containing the RPMs to apply to each of the 4 motors.\n ndarray\n (3,1)-shaped array of floats containing the current XYZ position error.\n float\n The current yaw error.\n\n \"\"\"\n self.control_counter += 1\n thrust, computed_target_rpy, pos_e = self._dslPIDPositionControl(control_timestep,\n cur_pos,\n cur_quat,\n cur_vel,\n target_pos,\n target_rpy,\n target_vel\n )\n rpm = self._dslPIDAttitudeControl(control_timestep,\n thrust,\n cur_quat,\n computed_target_rpy,\n target_rpy_rates\n )\n cur_rpy = p.getEulerFromQuaternion(cur_quat)\n return rpm, pos_e, computed_target_rpy[2] - cur_rpy[2]\n \n ################################################################################\n\n def _dslPIDPositionControl(self,\n control_timestep,\n cur_pos,\n cur_quat,\n cur_vel,\n target_pos,\n target_rpy,\n target_vel\n ):\n \"\"\"DSL's CF2.x PID position control.\n\n Parameters\n ----------\n control_timestep : float\n The time step at which control is computed.\n cur_pos : ndarray\n (3,1)-shaped array of floats containing the current position.\n cur_quat : ndarray\n (4,1)-shaped array of floats containing the current orientation as a quaternion.\n cur_vel : ndarray\n (3,1)-shaped array of floats containing the current velocity.\n target_pos : ndarray\n (3,1)-shaped array of floats containing the desired position.\n target_rpy : ndarray\n (3,1)-shaped array of floats containing the desired orientation as roll, pitch, yaw.\n target_vel : ndarray\n (3,1)-shaped array of floats containing the desired velocity.\n\n Returns\n -------\n float\n The target thrust along the drone z-axis.\n ndarray\n (3,1)-shaped array of floats containing the target roll, pitch, and yaw.\n float\n The current position error.\n\n \"\"\"\n cur_rotation = np.array(p.getMatrixFromQuaternion(cur_quat)).reshape(3, 3)\n pos_e = target_pos - cur_pos\n vel_e = target_vel - cur_vel\n self.integral_pos_e = self.integral_pos_e + pos_e*control_timestep\n self.integral_pos_e = np.clip(self.integral_pos_e, -2., 2.)\n self.integral_pos_e[2] = np.clip(self.integral_pos_e[2], -0.15, .15)\n #### PID target thrust #####################################\n target_thrust = np.multiply(self.P_COEFF_FOR, pos_e) \\\n + np.multiply(self.I_COEFF_FOR, self.integral_pos_e) \\\n + np.multiply(self.D_COEFF_FOR, vel_e) + np.array([0, 0, self.GRAVITY])\n scalar_thrust = max(0., np.dot(target_thrust, cur_rotation[:,2]))\n thrust = (math.sqrt(scalar_thrust / (4*self.KF)) - self.PWM2RPM_CONST) / self.PWM2RPM_SCALE\n target_z_ax = target_thrust / np.linalg.norm(target_thrust)\n target_x_c = np.array([math.cos(target_rpy[2]), math.sin(target_rpy[2]), 0])\n target_y_ax = np.cross(target_z_ax, target_x_c) / np.linalg.norm(np.cross(target_z_ax, target_x_c))\n target_x_ax = np.cross(target_y_ax, target_z_ax)\n target_rotation = (np.vstack([target_x_ax, target_y_ax, target_z_ax])).transpose()\n #### Target rotation #######################################\n target_euler = (Rotation.from_matrix(target_rotation)).as_euler('XYZ', degrees=False)\n if np.any(np.abs(target_euler) > math.pi):\n print(\"\\n[ERROR] ctrl it\", self.control_counter, \"in Control._dslPIDPositionControl(), values outside range [-pi,pi]\")\n return thrust, target_euler, pos_e\n \n 
################################################################################\n\n def _dslPIDAttitudeControl(self,\n control_timestep,\n thrust,\n cur_quat,\n target_euler,\n target_rpy_rates\n ):\n \"\"\"DSL's CF2.x PID attitude control.\n\n Parameters\n ----------\n control_timestep : float\n The time step at which control is computed.\n thrust : float\n The target thrust along the drone z-axis.\n cur_quat : ndarray\n (4,1)-shaped array of floats containing the current orientation as a quaternion.\n target_euler : ndarray\n (3,1)-shaped array of floats containing the computed target Euler angles.\n target_rpy_rates : ndarray\n (3,1)-shaped array of floats containing the desired roll, pitch, and yaw rates.\n\n Returns\n -------\n ndarray\n (4,1)-shaped array of integers containing the RPMs to apply to each of the 4 motors.\n\n \"\"\"\n cur_rotation = np.array(p.getMatrixFromQuaternion(cur_quat)).reshape(3, 3)\n cur_rpy = np.array(p.getEulerFromQuaternion(cur_quat))\n target_quat = (Rotation.from_euler('XYZ', target_euler, degrees=False)).as_quat()\n w,x,y,z = target_quat\n target_rotation = (Rotation.from_quat([w, x, y, z])).as_matrix()\n rot_matrix_e = np.dot((target_rotation.transpose()),cur_rotation) - np.dot(cur_rotation.transpose(),target_rotation)\n rot_e = np.array([rot_matrix_e[2, 1], rot_matrix_e[0, 2], rot_matrix_e[1, 0]]) \n rpy_rates_e = target_rpy_rates - (cur_rpy - self.last_rpy)/control_timestep\n self.last_rpy = cur_rpy\n self.integral_rpy_e = self.integral_rpy_e - rot_e*control_timestep\n self.integral_rpy_e = np.clip(self.integral_rpy_e, -1500., 1500.)\n self.integral_rpy_e[0:2] = np.clip(self.integral_rpy_e[0:2], -1., 1.)\n #### PID target torques ####################################\n target_torques = - np.multiply(self.P_COEFF_TOR, rot_e) \\\n + np.multiply(self.D_COEFF_TOR, rpy_rates_e) \\\n + np.multiply(self.I_COEFF_TOR, self.integral_rpy_e)\n target_torques = np.clip(target_torques, -3200, 3200)\n pwm = thrust + np.dot(self.MIXER_MATRIX, target_torques)\n pwm = np.clip(pwm, self.MIN_PWM, self.MAX_PWM)\n return self.PWM2RPM_SCALE * pwm + self.PWM2RPM_CONST\n \n ################################################################################\n\n def _one23DInterface(thrust):\n \"\"\"Utility function interfacing 1, 2, or 3D use cases.\n\n Parameters\n ----------\n thrust : ndarray\n Array of floats of length 1, 2, or 4 containing a desired thrust input.\n\n Returns\n -------\n ndarray\n (4,1)-shaped array of integers containing the PWM (not RPMs) to apply to each of the 4 motors.\n\n \"\"\"\n DIM = len(np.array(thrust))\n pwm = np.clip((np.sqrt(np.array(thrust)/(self.KF*(4/DIM)))-self.PWM2RPM_CONST)/self.PWM2RPM_SCALE, self.MIN_PWM, self.MAX_PWM)\n if DIM in [1, 4]:\n return np.repeat(pwm, 4/DIM)\n elif DIM==2:\n return np.hstack([pwm, np.flip(pwm)])\n else:\n print(\"[ERROR] in DSLPIDControl._one23DInterface()\")\n exit()\n" ]
[ [ "numpy.vstack", "numpy.multiply", "numpy.zeros", "scipy.spatial.transform.Rotation.from_quat", "numpy.cross", "numpy.repeat", "numpy.abs", "scipy.spatial.transform.Rotation.from_euler", "numpy.clip", "scipy.spatial.transform.Rotation.from_matrix", "numpy.flip", "numpy.array", "numpy.dot", "numpy.linalg.norm" ] ]
mariana1412/FEUP-PRI
[ "9c72e8f41d073230cf91d27cfb3e55adb9718ed2" ]
[ "review_scrape/review_scrape.py" ]
[ "import pandas as pd\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nfrom timeit import default_timer\nimport asyncio\nfrom concurrent.futures import ThreadPoolExecutor\nfrom threading import Thread\nfrom queue import Empty, Queue\nimport signal\nimport sys\n\nSTART_TIME = default_timer()\nCLEANR = re.compile('<.*?>')\n\nqueue = Queue()\n\nf = open(\"reviews2.csv\", \"w\")\n\ndef signal_handler(sig, frame):\n print('You pressed Ctrl+C!')\n f.flush()\n f.close()\n sys.exit(0)\n\nsignal.signal(signal.SIGINT, signal_handler)\n\n\ndef consume():\n f.write('Id, Reviews' + '\\n')\n while True:\n if not queue.empty():\n bookId, rows = queue.get()\n # Row comes out of queue; CSV writing goes here\n for row in rows:\n try:\n f.write(row)\n except:\n continue\n\n\n\ndef parseReviews(bookId, data):\n soup = BeautifulSoup(data, \"html.parser\")\n\n reviews = soup.find_all(\"div\", class_=\"ReviewsList\")\n\n soup = BeautifulSoup(str(reviews[0]), \"html.parser\")\n\n reviews = soup.find_all(\"span\", class_=\"Formatted\")\n csv_reviews = []\n\n for review in reviews:\n rr = re.findall('(?<=<span class=\"Formatted\">).+?(?=<\\/span>)',str(review))\n if rr and re.match('.[a-zA-Z0-9-()]', rr[0]):\n new_rr = re.sub(r'<.*?>', '', str(rr))\n new_rr = re.sub(r'^\\[', '', str(new_rr))\n new_rr = re.sub(r'\\]$', '', str(new_rr))\n new_rr = re.sub(r'$', '', str(new_rr))\n csv_reviews.append(str(bookId) + ',' + '\\\"' + str(new_rr) + '\\\"' + '\\n') \n return bookId, csv_reviews\n\n \n\n \n \n\n\ndef fetch(session, index, link):\n #if bookId > 763255: #remove comment if script breaks for some reason -> continue on specified id\n\n\n with session.get(link) as response:\n data = response.text\n if response.status_code != 200:\n print(\"FAILURE::\" + link)\n\n #select genres or reviews - comment/uncomment\n\n queue.put(parseReviews(index,data))\n\n elapsed = default_timer() - START_TIME\n time_completed_at = \"{:5.2f}s\".format(elapsed)\n print(\"{0:<30} {1:>20}\".format(link, time_completed_at))\n\n return\n\n\nasync def get_data_asynchronous(books):\n print(\"{0:<30} {1:>20}\".format(\"Book\", \"Completed at\"))\n\n with ThreadPoolExecutor(max_workers=35) as executor:\n with requests.Session() as session:\n \n # Set any session parameters here before calling `fetch`\n session.cookies.set(\"srb_1\", \"1_wl\")\n\n # Initialize the event loop \n loop = asyncio.get_event_loop()\n\n # Set the START_TIME for the `fetch` function\n START_TIME = default_timer()\n\n # Use list comprehension to create a list of\n # tasks to complete. The executor will run the `fetch`\n # function for each csv in the csvs_to_fetch list\n \n tasks = [\n loop.run_in_executor(\n executor,\n fetch,\n *(session, index, row.link) # Allows us to pass in multiple arguments to `fetch`\n )\n for index, row in books.iterrows()\n ]\n\n # Initializes the tasks to run and awaits their results\n \n\nconsumer = Thread(target=consume)\nconsumer.setDaemon(True)\nconsumer.start()\n\ndef main(): \n\n #input file to get ids\n books = pd.read_csv(\"./GoodReads_100k_books.csv\")\n \n loop = asyncio.get_event_loop()\n future = asyncio.ensure_future(get_data_asynchronous(books))\n loop.run_until_complete(future)\n\n consumer.join()\n\nmain()" ]
[ [ "pandas.read_csv" ] ]
cindyxinyiwang/MoEM
[ "1d37f96cec1a97856224793f15a90170b8266a1b" ]
[ "model_v2.py" ]
[ "import math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom embed_regularize import embedded_dropout\nfrom locked_dropout import LockedDropout\nfrom weight_drop import WeightDrop\n\nclass RNNModel_v2(nn.Module):\n \"\"\"Container module with an encoder, a recurrent module, and a decoder.\"\"\"\n\n def __init__(self, rnn_type, ntoken, ninp, nhid, nhidlast, nlayers, \n dropout=0.5, dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0, \n tie_weights=False, ldropout=0.5, n_experts=10):\n super(RNNModel_v2, self).__init__()\n self.lockdrop = LockedDropout()\n self.encoder = nn.Embedding(ntoken*n_experts, ninp)\n\n self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid, nhid if l != nlayers - 1 else nhidlast, 1, dropout=0) for l in range(nlayers)]\n if wdrop:\n self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop) for rnn in self.rnns]\n self.rnns = torch.nn.ModuleList(self.rnns)\n\n self.prior = nn.Linear(ninp, 1, bias=False)\n #self.latent = nn.Sequential(nn.Linear(nhidlast, ninp), nn.Tanh())\n self.decoder = nn.Linear(ninp, ntoken*n_experts)\n #self.proj_expert = nn.Linear(ntoken*n_experts, ntoken)\n #print(ninp, ntoken)\n # Optionally tie weights as in:\n # \"Using the Output Embedding to Improve Language Models\" (Press & Wolf 2016)\n # https://arxiv.org/abs/1608.05859\n # and\n # \"Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling\" (Inan et al. 2016)\n # https://arxiv.org/abs/1611.01462\n if tie_weights:\n #if nhid != ninp:\n # raise ValueError('When using the tied flag, nhid must be equal to emsize')\n self.decoder.weight = self.encoder.weight\n\n self.init_weights()\n\n self.rnn_type = rnn_type\n self.ninp = ninp\n self.nhid = nhid\n self.nhidlast = nhidlast\n self.nlayers = nlayers\n self.dropout = dropout\n self.dropouti = dropouti\n self.dropouth = dropouth\n self.dropoute = dropoute\n self.ldropout = ldropout\n self.dropoutl = ldropout\n self.n_experts = n_experts\n self.ntoken = ntoken\n\n size = 0\n for p in self.parameters():\n size += p.nelement()\n print('param size: {}'.format(size))\n\n def init_weights(self):\n initrange = 0.1\n self.encoder.weight.data.uniform_(-initrange, initrange)\n self.decoder.bias.data.fill_(0)\n self.decoder.weight.data.uniform_(-initrange, initrange)\n\n def forward(self, input, hidden, return_h=False, return_prob=False):\n length, batch_size = input.size()\n #print(input)\n input = input * self.n_experts\n # (length, batch_size * n_experts)\n input_experts = torch.cat([input+i for i in range(self.n_experts)], dim=1)\n # (length, batch_size * n_experts, emb_dim)\n #print(input_experts)\n emb = embedded_dropout(self.encoder, input_experts, dropout=self.dropoute if self.training else 0)\n # (length, batch_size * n_experts, emb_dim)\n emb = self.lockdrop(emb, self.dropouti)\n # (length, batch_size, n_experts, emb_dim)\n emb = emb.view(length, self.n_experts, batch_size, -1).permute(0, 2, 1, 3).contiguous()\n # (length, batch_size, n_experts, 1)\n prior_logit = self.prior(emb).squeeze(3)\n # (length, batch_size, n_experts)\n prior = nn.functional.softmax(prior_logit.view(-1, self.n_experts)).view(-1, batch_size, self.n_experts)\n emb = (emb * prior.unsqueeze(3)).sum(2)\n\n raw_output = emb\n new_hidden = []\n #raw_output, hidden = self.rnn(emb, hidden)\n raw_outputs = []\n outputs = []\n for l, rnn in enumerate(self.rnns):\n current_input = raw_output\n raw_output, new_h = rnn(raw_output, hidden[l])\n new_hidden.append(new_h)\n 
raw_outputs.append(raw_output)\n if l != self.nlayers - 1:\n #self.hdrop(raw_output)\n raw_output = self.lockdrop(raw_output, self.dropouth)\n outputs.append(raw_output)\n hidden = new_hidden\n\n output = self.lockdrop(raw_output, self.dropout) # (length, batch_size, dim_last)\n outputs.append(output)\n\n #print(output)\n logit = self.decoder(output.view(-1, self.ninp)) # (length*batch_size, ntok*n_expert)\n\n prob = nn.functional.softmax(logit).view(-1, batch_size, self.ntoken * self.n_experts)\n #prob = (prob * prior.unsqueeze(3).permute(0, 2, 1, 3)).sum(1) # (length, batch_size, ntoken*n_experts)\n prob = prob.view(-1, self.n_experts, self.ntoken).sum(1)\n if return_prob:\n model_output = prob\n else:\n log_prob = torch.log(prob.add_(1e-8))\n model_output = log_prob\n\n model_output = model_output.view(-1, batch_size, self.ntoken)\n\n if return_h:\n return model_output, hidden, raw_outputs, outputs\n return model_output, hidden\n\n def init_hidden(self, bsz):\n weight = next(self.parameters()).data\n return [(Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.nhidlast).zero_()),\n Variable(weight.new(1, bsz, self.nhid if l != self.nlayers - 1 else self.nhidlast).zero_()))\n for l in range(self.nlayers)]\n\nif __name__ == '__main__':\n model = RNNModel('LSTM', 10, 12, 12, 12, 2)\n input = Variable(torch.LongTensor(13, 9).random_(0, 10))\n hidden = model.init_hidden(9)\n model(input, hidden)\n\n # input = Variable(torch.LongTensor(13, 9).random_(0, 10))\n # hidden = model.init_hidden(9)\n # print(model.sample(input, hidden, 5, 6, 1, 2, sample_latent=True).size())\n\n" ]
[ [ "torch.nn.LSTM", "torch.nn.Linear", "torch.nn.functional.softmax", "torch.nn.Embedding", "torch.nn.ModuleList", "torch.LongTensor" ] ]
hilltailor/22037-Camera-1
[ "4a236bcb236071b3cc051dcce0756d3d9804ec0f" ]
[ "examples/capture_savehdf5_display.py" ]
[ "##########################################################################\n# Testing of display and capture & storage thread combined.\n# Scan for camera\n# Aquire 14 images\n# Convert to b/w\n# Save hdf5 files\n##########################################################################\n# Results\n#\n#\n##########################################################################\n\n# System\nimport logging, time, platform\nfrom datetime import datetime\n\n# Matrix Algebra\nimport numpy as np\nfrom numba import vectorize\n\n# OpenCV\nimport cv2\n\nconfigs = {\n 'camera_res' : (1280, 720 ), # width & height\n 'exposure' : -6, # -1,0 = auto, 1...max=frame interval, \n 'autoexposure' : 1.0, # depends on camera: 0.25 or 0.75(auto), -1,0,1\n 'fps' : 30, # 15, 30, 40, 90, 120, 180\n 'fourcc' : -1, # n.a.\n 'buffersize' : -1, # n.a.\n 'output_res' : (-1, -1), # Output resolution, -1,-1 no change\n 'flip' : 0, # 0=norotation \n # 1=ccw90deg \n # 2=rotation180 \n # 3=cw90 \n # 4=horizontal \n # 5=upright diagonal flip \n # 6=vertical \n # 7=uperleft diagonal flip\n 'displayfps' : 30 # frame rate for display server\n}\n\nif configs['displayfps'] >= configs['fps']: \n display_interval = 0\nelse:\n display_interval = 1.0/configs['displayfps']\n\nres = configs['camera_res']\nheight = res[1]\nwidth = res[0]\nmeasure_time = 5.0 # average measurements over 5 secs\ncamera_index = 0 # default camera starts at 0 by operating system\n\n\n# Processing\ndata_cube = np.zeros((14, height, width), 'uint8')\nbackground = np.zeros((height, width), 'uint8') # where we keep bg\nflatfield = np.cast['uint16'](2**8.*np.random.random((height, width))) # flatfield correction scaled so that 255=100%\ninten = np.zeros(14, 'uint16') # helper to find background image\ndata_cube_corr = np.zeros((14, height, width), 'uint16') # resulting data cube on CPU\n# Numpy Vectorized\n@vectorize(['uint16(uint8, uint16, uint8)'], nopython=True, fastmath=True)\ndef vector_np(data_cube, background, flatfield):\n return np.multiply(np.subtract(data_cube, background), flatfield) # 16bit multiplication\n\n# Setting up logging\nlogging.basicConfig(level=logging.DEBUG) # options are: DEBUG, INFO, ERROR, WARNING\nlogger = logging.getLogger(\"Main\")\n\n# Setting up storage\nfrom camera.streamer.h5storageserver import h5Server\nnow = datetime.now()\nfilename = now.strftime(\"%Y%m%d%H%M%S\") + \".hdf5\"\nhdf5 = h5Server(\"C:\\\\temp\\\\\" + filename)\nlogger.log(logging.INFO, \"Starting Storage Server\")\nhdf5.start()\n\n# Create camera interface\nlogger.log(logging.INFO, \"Starting Capture\")\n# Computer OS and platform dependent\nplat = platform.system()\nif plat == 'Linux' and platform.machine() == \"aarch64\": # this is jetson nano for me\n from camera.capture.nanocapture import nanoCapture\n camera = nanoCapture(configs, camera_index)\nelse:\n from camera.capture.cv2capture_process import cv2Capture\n camera = cv2Capture(configs, camera_index)\nlogger.log(logging.INFO, \"Getting Images\")\ncamera.start()\n\n# Display\nwindow_name = 'Main'\nfont = cv2.FONT_HERSHEY_SIMPLEX\ntextLocation0 = (10,height-40)\ntextLocation1 = (10,height-20)\nfontScale = 1\nfontColor = (255,255,255)\nlineType = 2\ncv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE) # or WINDOW_NORMAL\n\n# Initialize Variables\nframe_idx = 0 # index to create data cube out of individual frames\nnum_cubes_stored = 0 # keep track of data cubes sent to storage\nnum_cubes_generated = 0 # keep track of data cubes generated\nlast_time = time.perf_counter() # keep track of time to dispay 
performance\nlast_display = time.perf_counter() # keeo track of time to display images\nnum_frames_received = 0 # keep track of how many captured frames reach the main program\nnum_frames_displayed = 0 # keep track of how many frames are displayed\nmeasured_dps = 0 # computed in main thread, number of frames displayed per second\n\n# Main Loop\nstop = False\nwhile(not stop):\n current_time = time.perf_counter()\n\n # Camera\n (frame_time, frame) = camera.capture.get(block=True, timeout=None)\n data_cube[frame_idx,:,:] = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_idx += 1\n while not camera.log.empty():\n (level, msg)=camera.log.get_nowait()\n logger.log(level, \"Status:{}\".format(msg))\n\n # When we have a complete dataset:\n if frame_idx >= 14: # 0...13 is populated\n frame_idx = 0\n num_cubes_generated += 1\n\n # HDF5 \n try: \n hdf5.queue.put_nowait((frame_time, data_cube)) \n num_cubes_stored += 1\n except:\n logger.log(logging.DEBUG, \"Status:Storage Queue is full!\")\n\n # Background and Field Correction\n # Where is my background?\n _ = np.sum(data_cube[:,::64,::64], axis=(1,2), out = inten)\n frame_idx_bg = np.argmin(inten) # minimum intensity is in this frame\n background = data_cube[frame_idx_bg, :, :]\n # Correct the data\n vector_np(data_cube, flatfield, background, out = data_cube_corr)\n\n while not hdf5.log.empty():\n (level, msg)=hdf5.log.get_nowait()\n logger.log(level, \"Status:{}\".format(msg))\n\n # Display performance in main loop\n if current_time - last_time >= measure_time:\n # how many data cubes did we create\n measured_cps_generated = num_cubes_generated/measure_time\n logger.log(logging.DEBUG, \"Status:captured cubes generated per second:{}\".format(measured_cps_generated))\n num_cubes_generated = 0\n # how many data cubes did we send to storage\n measured_cps_stored = num_cubes_stored/measure_time\n logger.log(logging.DEBUG, \"Status:cubes sent to storage per second:{}\".format(measured_cps_stored))\n num_cubes_sent = 0\n # how many frames did we display\n measured_dps = num_frames_displayed/measure_time\n logger.log(logging.DEBUG, \"Status:frames displayed per second:{}\".format(measured_dps))\n num_frames_displayed = 0\n last_time = current_time\n\n if (current_time - last_display) >= display_interval:\n display_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n cv2.putText(display_frame,\"Capture FPS:{} [Hz]\".format(camera.measured_fps), textLocation0, font, fontScale, fontColor, lineType)\n cv2.imshow(window_name, display_frame)\n if cv2.waitKey(1) & 0xFF == ord('q'): stop = True\n last_display = current_time\n num_frames_displayed += 1\n\n# Cleanup\ncamera.stop()\nhdf5.stop()\ncv2.destroyAllWindows()\n" ]
[ [ "numpy.sum", "numpy.zeros", "numpy.subtract", "numpy.argmin", "numpy.random.random" ] ]
akirayou/cmdstanpy
[ "f6a526807ea4d248844ac2f1a44e4c53bfca197c" ]
[ "test/test_generate_quantities.py" ]
[ "\"\"\"CmdStan method generate_quantities tests\"\"\"\n\nimport os\nimport unittest\nfrom pandas.testing import assert_frame_equal\n\nfrom cmdstanpy.cmdstan_args import Method\nfrom cmdstanpy.model import CmdStanModel\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nDATAFILES_PATH = os.path.join(HERE, 'data')\n\n\nclass GenerateQuantitiesTest(unittest.TestCase):\n def test_gen_quantities_csv_files(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli_ppc.stan')\n model = CmdStanModel(stan_file=stan)\n\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n\n # synthesize list of filenames\n goodfiles_path = os.path.join(DATAFILES_PATH, 'runset-good', 'bern')\n csv_files = []\n for i in range(4):\n csv_files.append('{}-{}.csv'.format(goodfiles_path, i + 1))\n\n bern_gqs = model.generate_quantities(data=jdata, mcmc_sample=csv_files)\n self.assertEqual(\n bern_gqs.runset._args.method, Method.GENERATE_QUANTITIES\n )\n self.assertIn('CmdStanGQ: model=bernoulli_ppc', bern_gqs.__repr__())\n self.assertIn('method=generate_quantities', bern_gqs.__repr__())\n\n # check results - ouput files, quantities of interest, draws\n self.assertEqual(bern_gqs.runset.chains, 4)\n for i in range(bern_gqs.runset.chains):\n self.assertEqual(bern_gqs.runset._retcode(i), 0)\n csv_file = bern_gqs.runset.csv_files[i]\n self.assertTrue(os.path.exists(csv_file))\n column_names = [\n 'y_rep[1]',\n 'y_rep[2]',\n 'y_rep[3]',\n 'y_rep[4]',\n 'y_rep[5]',\n 'y_rep[6]',\n 'y_rep[7]',\n 'y_rep[8]',\n 'y_rep[9]',\n 'y_rep[10]',\n ]\n self.assertEqual(bern_gqs.column_names, tuple(column_names))\n self.assertEqual(\n bern_gqs.sample_plus_quantities.shape[1],\n bern_gqs.mcmc_sample.shape[1]\n + bern_gqs.generated_quantities_pd.shape[1],\n )\n\n def test_gen_quantities_csv_files_bad(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli_ppc.stan')\n model = CmdStanModel(stan_file=stan)\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n\n with self.assertRaises(ValueError):\n model.generate_quantities(data=jdata, mcmc_sample=[])\n\n # synthesize list of filenames\n goodfiles_path = os.path.join(\n DATAFILES_PATH, 'runset-bad', 'bad-draws-bern'\n )\n csv_files = []\n for i in range(4):\n csv_files.append('{}-{}.csv'.format(goodfiles_path, i + 1))\n\n with self.assertRaisesRegex(Exception, 'Invalid mcmc_sample'):\n model.generate_quantities(data=jdata, mcmc_sample=csv_files)\n\n def test_gen_quanties_mcmc_sample(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli.stan')\n bern_model = CmdStanModel(stan_file=stan)\n\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_fit = bern_model.sample(\n data=jdata,\n chains=4,\n parallel_chains=2,\n seed=12345,\n iter_sampling=100,\n )\n\n stan = os.path.join(DATAFILES_PATH, 'bernoulli_ppc.stan')\n model = CmdStanModel(stan_file=stan)\n\n bern_gqs = model.generate_quantities(data=jdata, mcmc_sample=bern_fit)\n self.assertEqual(\n bern_gqs.runset._args.method, Method.GENERATE_QUANTITIES\n )\n self.assertIn('CmdStanGQ: model=bernoulli_ppc', bern_gqs.__repr__())\n self.assertIn('method=generate_quantities', bern_gqs.__repr__())\n\n # check results - ouput files, quantities of interest, draws\n self.assertEqual(bern_gqs.runset.chains, 4)\n for i in range(bern_gqs.runset.chains):\n self.assertEqual(bern_gqs.runset._retcode(i), 0)\n csv_file = bern_gqs.runset.csv_files[i]\n self.assertTrue(os.path.exists(csv_file))\n column_names = [\n 'y_rep[1]',\n 'y_rep[2]',\n 'y_rep[3]',\n 'y_rep[4]',\n 'y_rep[5]',\n 'y_rep[6]',\n 'y_rep[7]',\n 'y_rep[8]',\n 
'y_rep[9]',\n 'y_rep[10]',\n ]\n self.assertEqual(bern_gqs.column_names, tuple(column_names))\n self.assertEqual(bern_fit.draws_pd().shape, bern_gqs.mcmc_sample.shape)\n self.assertEqual(\n bern_gqs.sample_plus_quantities.shape[1],\n bern_gqs.mcmc_sample.shape[1]\n + bern_gqs.generated_quantities_pd.shape[1],\n )\n\n def test_sample_plus_quantities_dedup(self):\n stan = os.path.join(DATAFILES_PATH, 'bernoulli_ppc_dup.stan')\n model = CmdStanModel(stan_file=stan)\n\n jdata = os.path.join(DATAFILES_PATH, 'bernoulli.data.json')\n bern_fit = model.sample(\n data=jdata,\n chains=4,\n parallel_chains=2,\n seed=12345,\n iter_sampling=100,\n )\n bern_gqs = model.generate_quantities(data=jdata, mcmc_sample=bern_fit)\n self.assertEqual(\n bern_gqs.sample_plus_quantities.shape[1],\n bern_gqs.mcmc_sample.shape[1],\n )\n\n column_names = [\n 'y_rep[1]',\n 'y_rep[2]',\n 'y_rep[3]',\n 'y_rep[4]',\n 'y_rep[5]',\n 'y_rep[6]',\n 'y_rep[7]',\n 'y_rep[8]',\n 'y_rep[9]',\n 'y_rep[10]',\n ]\n\n assert_frame_equal(\n bern_gqs.generated_quantities_pd[column_names],\n bern_gqs.mcmc_sample[column_names],\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "pandas.testing.assert_frame_equal" ] ]
jmlazaro25/vissig
[ "370262b0546959bd2936cfd1ffa16de5b85a3dee" ]
[ "dev/mathematica/get_ldmx_acceptance_to_share/detector_hit_conditions.py" ]
[ "import numpy as np\nimport collections\n\ndef _xv_from_uni(xi,zmax,gct):\n \"\"\"\n Generate a z vertex displacement from a uniform random variable \n both zmax and gct should be in cm\n \"\"\"\n if xi > 0.:\n return gct*np.log(xi) + zmax\n else:\n return -100.*zmax\n \nxv_from_uni = np.vectorize(_xv_from_uni)\n\ndef _det_hit_condition(_pa, _pr, det_rad, zmax, xv, Ethr=1.):\n \"\"\"\n returns true if lepton hits a circular detector of radius det_rad, \n if it originates from a vector that decays a distance xv from the detector\n pa = relativistic 4 vector momentum of the axion \n pr = relativistic 4 lepton momentum of the recoil electron\n det_rad = detector radius in cm\n xv = z distance of the vector decay vertex from the detector in cm\n \"\"\"\n \n #Ethr = 1. # E137 Ecal detector threshold energy\n \n pa = np.array(_pa)\n pr = np.array(_pr)\n # xv < 0 corresponds to decays beyond the detector\n if xv < 0:\n return False\n \n \n #return (pl[0] >= Ethr) and (np.dot(rvec,rvec) < (det_rad)**2.)\n return (pr[0] <= Ethr)\n \n# Vectorized version of the above\ndef det_hit_condition(pv, pl, det_rad, zmax, xvs, Ethr=1.):\n if not isinstance(xvs, collections.Iterable):\n xvs = np.array([xvs])\n \n return np.array([_det_hit_condition(pv, pl, det_rad, zmax, xv, Ethr) for xv in xvs])\n" ]
[ [ "numpy.array", "numpy.log", "numpy.vectorize" ] ]
Jazys/docker-airflow
[ "347619d857f5d19a6955c51a23f381e2406f7d22" ]
[ "dags/treino04.py" ]
[ "from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.python_operator import PythonOperator, BranchPythonOperator\nfrom datetime import datetime, timedelta\nimport zipfile\nimport random\nimport pandas as pd\n\ndefault_args = {\n 'owner': 'Neylson Crepalde',\n \"depends_on_past\": False,\n \"start_date\": datetime(2020, 12, 30, 18, 10),\n \"email\": [\"[email protected]\"],\n \"email_on_failure\": False,\n \"email_on_retry\": False\n #\"retries\": 1,\n #\"retry_delay\": timedelta(minutes=1),\n}\n\ndag = DAG(\n \"treino-04\", \n description=\"Uma dag com condicionais\",\n default_args=default_args, \n schedule_interval=timedelta(minutes=2)\n)\n\nget_data = BashOperator(\n task_id=\"get-data\",\n bash_command='curl https://download.inep.gov.br/microdados/Enade_Microdados/microdados_enade_2019.zip -o /usr/local/airflow/data/microdados_enade_2019.zip',\n trigger_rule=\"all_done\",\n dag=dag\n)\n\n\ndef unzip_file():\n with zipfile.ZipFile(\"/usr/local/airflow/data/microdados_enade_2019.zip\", 'r') as zipped:\n zipped.extractall(\"/usr/local/airflow/data\")\n\nunzip_data = PythonOperator(\n task_id='unzip-data',\n python_callable=unzip_file,\n dag=dag\n)\n\n\ndef select_student():\n df = pd.read_csv('/usr/local/airflow/data/microdados_enade_2019/2019/3.DADOS/microdados_enade_2019.txt', sep=';', decimal=',')\n escolha = random.randint(0, df.shape[0]-1)\n aluno = df.iloc[escolha]\n return aluno.TP_SEXO\n\npick_student = PythonOperator(\n task_id=\"pick-student\",\n python_callable=select_student,\n dag=dag\n)\n\ndef MouF(**context):\n value = context['task_instance'].xcom_pull(task_ids='pick-student')\n if value == 'M':\n return 'male_branch'\n elif value == 'F':\n return 'female_branch'\n\nmale_of_female = BranchPythonOperator(\n task_id='condition-male_or_female',\n python_callable=MouF,\n provide_context=True,\n dag=dag\n)\n\n\nmale_branch = BashOperator(\n task_id=\"male_branch\",\n bash_command='echo \"Estudante escolhido foi do sexo Masculino\"',\n dag=dag\n)\n\nfemale_branch = BashOperator(\n task_id=\"female_branch\",\n bash_command='echo \"Estudante escolhido foi do sexo Feminino\"',\n dag=dag\n)\n\nget_data >> unzip_data >> pick_student >> male_of_female >> [male_branch, female_branch]" ]
[ [ "pandas.read_csv" ] ]
NAnnamalai/gramex
[ "6a0845aed2f7423da09eaa84678d3a0519fe1ff1" ]
[ "gramex/handlers/modelhandler.py" ]
[ "import os\nimport json\nimport gramex.ml\nimport pandas as pd\nimport gramex.cache\nimport gramex.data\nfrom gramex.handlers import BaseHandler\nimport tornado.escape\n\n\nclass ModelHandler(BaseHandler):\n '''\n Allows users to create API endpoints to train/test models exposed through Scikit-Learn.\n TODO: support Scikit-Learn Pipelines for data transformations.\n '''\n @classmethod\n def setup(cls, path, **kwargs):\n super(ModelHandler, cls).setup(**kwargs)\n cls.path = path\n\n def prepare(self):\n '''\n Gets called automatically at the beginning of every request.\n takes model name from request path and creates the pickle file path.\n Also merges the request body and the url query args.\n url query args have precedence over request body in case both exist.\n Expects multi-row paramets to be formatted as the output of handler.argparse.\n '''\n self.set_header('Content-Type', 'application/json; charset=utf-8')\n self.pickle_file_path = os.path.join(\n self.path, self.path_args[0] + '.pkl')\n self.request_body = {}\n if self.request.body:\n self.request_body = tornado.escape.json_decode(self.request.body)\n if self.args:\n self.request_body.update(self.args)\n url = self.request_body.get('url', '')\n if url and gramex.data.get_engine(url) == 'file':\n self.request_body['url'] = os.path.join(self.path, os.path.split(url)[-1])\n\n def get_data_flag(self):\n '''\n Return a True if the request is made to /model/name/data.\n '''\n if len(self.path_args) > 1 and self.path_args[1] == 'data':\n return True\n\n def get(self, *path_args):\n '''\n Request sent to model/name with no args returns model information,\n (that can be changed via PUT/POST).\n Request to model/name with args will accept model input and produce predictions.\n Request to model/name/data will return the training data specified in model.url,\n this should accept most formhandler flags and filters as well.\n '''\n model = gramex.cache.open(self.pickle_file_path, gramex.ml.load)\n if self.get_data_flag():\n file_kwargs = self.listify(['engine', 'url', 'ext', 'table', 'query', 'id'])\n _format = file_kwargs.pop('_format', ['json'])[0]\n # TODO: Add Support for formhandler filters/limit/sorting/groupby\n data = gramex.data.filter(model.url, **file_kwargs)\n self.write(gramex.data.download(data, format=_format, **file_kwargs))\n return\n # If no model columns are passed, return model info\n if not vars(model).get('input', '') or not any(col in self.args for col in model.input):\n model_info = {k: v for k, v in vars(model).items()\n if k not in ('model', 'scaler')}\n self.write(json.dumps(model_info, indent=4))\n return\n self._predict(model)\n\n def put(self, *path_args, **path_kwargs):\n '''\n Request to /model/name/ with no params will create a blank model.\n Request to /model/name/ with args will interpret as model paramters.\n Set Model-Retrain: true in headers to either train a model from scratch or extend it.\n To Extend a trained model, don't update the parameters and send Model-Retrain in headers.\n Request to /model/name/data with args will update the training data,\n doesn't currently work on DF's thanks to the gramex.data bug.\n '''\n try:\n model = gramex.cache.open(self.pickle_file_path, gramex.ml.load)\n except EnvironmentError: # noqa\n model = gramex.ml.Classifier(**self.request_body)\n if self.get_data_flag():\n file_kwargs = self.listify(model.input + [model.output] + ['id'])\n gramex.data.update(model.url, args=file_kwargs, id=file_kwargs['id'])\n else:\n if not self._train(model):\n 
model.save(self.pickle_file_path)\n\n def _predict(self, model):\n '''Helper function for model.train.'''\n params = self.listify(model.input)\n if hasattr(model, 'model') and model.trained:\n data = pd.DataFrame(params)\n data = data[model.input]\n data['result'] = model.predict(data)\n self.write(data.to_json(orient='records'))\n elif params:\n raise AttributeError('model not trained')\n else:\n return\n\n def post(self, *path_args, **path_kwargs):\n '''\n Request to /model/name/ with Model-Retrain: true in the headers will,\n attempt to update model parameters and retrain/extend the model.\n Request to /model/name/ with model input as body/query args and no Model-Retrain,\n in headers will return predictions.\n Request to /model/name/data lets people add rows the test data.\n '''\n # load model object - if it doesn't exist, send a response asking to create the model\n try:\n model = gramex.cache.open(self.pickle_file_path, gramex.ml.load)\n except EnvironmentError: # noqa\n # Log error\n self.write({'Error': 'Please Send PUT Request, model does not exist'})\n raise EnvironmentError # noqa\n if self.get_data_flag():\n file_kwargs = self.listify(model.input + [model.output])\n gramex.data.insert(model.url, args=file_kwargs)\n else:\n # If /data/ is not path_args[1] then post is sending a predict request\n if self._train(model):\n return\n self._predict(model)\n\n def delete(self, *path_args):\n '''\n Request to /model/name/ will delete the trained model.\n Request to /model/name/data needs id and will delete rows from the training data.\n '''\n if self.get_data_flag():\n file_kwargs = self.listify(['id'])\n try:\n model = gramex.cache.open(self.pickle_file_path, gramex.ml.load)\n except EnvironmentError: # noqa\n self.write(\n {'Error': 'Please Send PUT Request, model does not exist'})\n raise EnvironmentError # noqa\n gramex.data.delete(model.url, args=file_kwargs, id=file_kwargs['id'])\n return\n if os.path.exists(self.pickle_file_path):\n os.unlink(self.pickle_file_path)\n\n def _train(self, model):\n ''' Looks for Model-Retrain in Request Headers,\n trains a model and pickles it.\n '''\n # Update model parameters\n model.update_params(self.request_body)\n if 'Model-Retrain' in self.request.headers:\n # Pass non model kwargs to gramex.data.filter\n try:\n data = gramex.data.filter(\n model.url,\n args=self.listify(['engine', 'url', 'ext', 'table', 'query', 'id']))\n except AttributeError:\n raise AttributeError('Model does not have a url')\n # Train the model.\n model.train(data)\n model.trained = True\n model.save(self.pickle_file_path)\n return True\n\n def listify(self, checklst):\n ''' Some functions in data.py expect list values, so creates them.\n checklst is list-like which contains the selected values to be returned.\n '''\n return {\n k: [v] if not isinstance(v, list) else v\n for k, v in self.request_body.items()\n if k in checklst\n }\n" ]
[ [ "pandas.DataFrame" ] ]
infomon/meta_nas
[ "b81b7de86d26ae1ec0d6646b4277f3c918e5e35d" ]
[ "starter_kit/dev_meta_features/competition/data_io.py" ]
[ "# Functions performing various input/output operations for the ChaLearn AutoML challenge\n\n# Main contributors: Arthur Pesah and Isabelle Guyon, August-October 2014\n\n# ALL INFORMATION, SOFTWARE, DOCUMENTATION, AND DATA ARE PROVIDED \"AS-IS\".\n# ISABELLE GUYON, CHALEARN, AND/OR OTHER ORGANIZERS OR CODE AUTHORS DISCLAIM\n# ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ANY PARTICULAR PURPOSE, AND THE\n# WARRANTY OF NON-INFRIGEMENT OF ANY THIRD PARTY'S INTELLECTUAL PROPERTY RIGHTS.\n# IN NO EVENT SHALL ISABELLE GUYON AND/OR OTHER ORGANIZERS BE LIABLE FOR ANY SPECIAL,\n# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING OUT OF OR IN\n# CONNECTION WITH THE USE OR PERFORMANCE OF SOFTWARE, DOCUMENTS, MATERIALS,\n# PUBLICATIONS, OR INFORMATION MADE AVAILABLE FOR THE CHALLENGE.\n\nfrom __future__ import print_function\n\nimport csv\nimport os\nimport platform\nimport shutil\nfrom collections import deque\nfrom contextlib import closing\nfrom glob import glob as ls\nfrom itertools import chain\nfrom os import getcwd as pwd\nfrom os.path import isfile\nfrom shutil import copy2\nfrom sys import getsizeof, stderr, version\nfrom zipfile import ZIP_DEFLATED, ZipFile\n\nimport numpy as np\nimport pandas as pd\nimport psutil\nimport yaml\nfrom scipy.sparse import * # used in data_binary_sparse\n\nfrom competition import data_converter\n\ntry:\n from reprlib import repr\nexcept ImportError:\n pass\n\n# get_installed_distributions has gone from pip v10\ntry:\n from pip._internal.utils.misc import get_installed_distributions as lib\nexcept ImportError: # pip < 10\n from pip import get_installed_distributions as lib\n\n# ================ Small auxiliary functions =================\n\n\ndef read_as_df(basename, type=\"train\"):\n \"\"\" Function to read the AutoML format and return a Panda Data Frame \"\"\"\n csvfile = basename + \"_\" + type + \".csv\"\n if isfile(csvfile):\n print(\"Reading \" + basename + \"_\" + type + \" from CSV\")\n XY = pd.read_csv(csvfile)\n return XY\n\n print(\"Reading \" + basename + \"_\" + type + \" from AutoML format\")\n feat_name = pd.read_csv(basename + \"_feat.name\", header=None)\n label_name = pd.read_csv(basename + \"_label.name\", header=None)\n X = pd.read_csv(basename + \"_\" + type + \".data\", sep=\" \", names=np.ravel(feat_name))\n [patnum, featnum] = X.shape\n print(\"Number of examples = %d\" % patnum)\n print(\"Number of features = %d\" % featnum)\n\n XY = X\n Y = []\n solution_file = basename + \"_\" + type + \".solution\"\n if isfile(solution_file):\n Y = pd.read_csv(solution_file, sep=\" \", names=np.ravel(label_name))\n [patnum2, classnum] = Y.shape\n assert patnum == patnum2\n print(\"Number of classes = %d\" % classnum)\n # Here we add the target values as a last column, this is convenient to use seaborn\n # Look at http://seaborn.pydata.org/tutorial/axis_grids.html for other ideas\n label_range = np.arange(classnum).transpose(\n ) # This is just a column vector [[0], [1], [2]]\n numerical_target = Y.dot(\n label_range\n ) # This is a column vector of dim patnum with numerical categories\n nominal_target = pd.Series(\n np.array(label_name)[numerical_target].ravel()\n ) # Same with nominal categories\n XY = X.assign(target=nominal_target.values) # Add the last column\n\n return XY\n\n\n# ================ Small auxiliary functions =================\n\nswrite = stderr.write\n\nif os.name == \"nt\":\n filesep = \"\\\\\"\nelse:\n filesep = \"/\"\n\n\ndef 
write_list(lst):\n \"\"\" Write a list of items to stderr (for debug purposes)\"\"\"\n for item in lst:\n swrite(item + \"\\n\")\n\n\ndef print_dict(verbose, dct):\n \"\"\" Write a dict to stderr (for debug purposes)\"\"\"\n if verbose:\n for item in dct:\n print(item + \" = \" + str(dct[item]))\n\n\ndef mkdir(d):\n \"\"\" Create a new directory\"\"\"\n if not os.path.exists(d):\n os.makedirs(d)\n\n\ndef mvdir(source, dest):\n \"\"\" Move a directory\"\"\"\n if os.path.exists(source):\n os.rename(source, dest)\n\n\ndef rmdir(d):\n \"\"\" Remove an existingdirectory\"\"\"\n if os.path.exists(d):\n shutil.rmtree(d)\n\n\ndef vprint(mode, t):\n \"\"\" Print to stdout, only if in verbose mode\"\"\"\n if mode:\n print(t)\n\n\n# ================ Output prediction results and prepare code submission =================\n\n\ndef write(filename, predictions):\n \"\"\" Write prediction scores in prescribed format\"\"\"\n filename_temp = \"temp_prediction_file_\" + str(np.random.randint(10000))\n filename_temp = os.path.join(os.path.dirname(filename), filename_temp)\n with open(filename_temp, \"w\") as output_file:\n for row in predictions:\n if type(row) is not np.ndarray and type(row) is not list:\n row = [row]\n output_file.write(\" \".join([\"{0:g}\".format(float(val)) for val in row]))\n output_file.write(\"\\n\")\n os.rename(filename_temp, filename)\n\n\ndef zipdir(archivename, basedir):\n \"\"\"Zip directory, from J.F. Sebastian http://stackoverflow.com/\"\"\"\n assert os.path.isdir(basedir)\n with closing(ZipFile(archivename, \"w\", ZIP_DEFLATED)) as z:\n for root, dirs, files in os.walk(basedir):\n # NOTE: ignore empty directories\n for fn in files:\n if not fn.endswith(\".zip\"):\n absfn = os.path.join(root, fn)\n zfn = absfn[len(basedir):] # XXX: relative path\n assert absfn[:len(basedir)] == basedir\n if zfn[0] == os.sep:\n zfn = zfn[1:]\n z.write(absfn, zfn)\n\n\n# ================ Inventory input data and create data structure =================\n\n\ndef inventory_data(input_dir):\n \"\"\" Inventory the datasets in the input directory and return them in alphabetical order\"\"\"\n # Assume first that there is a hierarchy dataname/dataname_train.data\n training_names = ls(os.path.join(input_dir, \"*.data\"))\n training_names = [name.split(\"/\")[-1] for name in training_names]\n\n ntr = len(training_names)\n if ntr == 0:\n print(\"WARNING: Inventory data - No data file found\")\n training_names = []\n training_names.sort()\n # check_dataset\n return training_names\n\n\ndef check_dataset(dirname, name):\n \"\"\" Check the test and valid files are in the directory, as well as the solution\"\"\"\n valid_file = os.path.join(dirname, name + \"_valid.data\")\n if not os.path.isfile(valid_file):\n print(\"No validation file for \" + name)\n exit(1)\n test_file = os.path.join(dirname, name + \"_test.data\")\n if not os.path.isfile(test_file):\n print(\"No test file for \" + name)\n exit(1)\n # Check the training labels are there\n training_solution = os.path.join(dirname, name + \"_train.solution\")\n if not os.path.isfile(training_solution):\n print(\"No training labels for \" + name)\n exit(1)\n return True\n\n\ndef data(filename, nbr_features=None, verbose=False):\n \"\"\" The 2nd parameter makes possible a using of the 3 functions of data reading (data, data_sparse, data_binary_sparse) without changing parameters\"\"\"\n if verbose:\n print(np.array(data_converter.file_to_array(filename)))\n return np.array(data_converter.file_to_array(filename), dtype=float)\n\n\ndef data_sparse(filename, 
nbr_features):\n \"\"\" This function takes as argument a file representing a sparse matrix\n sparse_matrix[i][j] = \"a:b\" means matrix[i][a] = basename and load it with the loadsvm load_svmlight_file\n \"\"\"\n return data_converter.file_to_libsvm(\n filename=filename, data_binary=False, n_features=nbr_features\n )\n\n\ndef data_binary_sparse(filename, nbr_features):\n \"\"\" This fuction takes as argument a file representing a sparse binary matrix\n sparse_binary_matrix[i][j] = \"a\"and transforms it temporarily into file svmlibs format( <index2>:<value2>)\n to load it with the loadsvm load_svmlight_file\n \"\"\"\n return data_converter.file_to_libsvm(\n filename=filename, data_binary=True, n_features=nbr_features\n )\n\n\n# ================ Copy results from input to output ==========================\n\n\ndef copy_results(datanames, result_dir, output_dir, verbose):\n \"\"\" This function copies all the [dataname.predict] results from result_dir to output_dir\"\"\"\n missing_files = []\n for basename in datanames:\n try:\n missing = False\n test_files = ls(result_dir + \"/\" + basename + \"*_test*.predict\")\n if len(test_files) == 0:\n vprint(verbose, \"[-] Missing 'test' result files for \" + basename)\n missing = True\n valid_files = ls(result_dir + \"/\" + basename + \"*_valid*.predict\")\n if len(valid_files) == 0:\n vprint(verbose, \"[-] Missing 'valid' result files for \" + basename)\n missing = True\n if missing == False:\n for f in test_files:\n copy2(f, output_dir)\n for f in valid_files:\n copy2(f, output_dir)\n vprint(verbose, \"[+] \" + basename.capitalize() + \" copied\")\n else:\n missing_files.append(basename)\n except:\n vprint(verbose, \"[-] Missing result files\")\n return datanames\n return missing_files\n\n\n# ================ Display directory structure and code version (for debug purposes) =================\n\n\ndef show_dir(run_dir):\n print(\"\\n=== Listing run dir ===\")\n write_list(ls(run_dir))\n write_list(ls(run_dir + \"/*\"))\n write_list(ls(run_dir + \"/*/*\"))\n write_list(ls(run_dir + \"/*/*/*\"))\n write_list(ls(run_dir + \"/*/*/*/*\"))\n\n\ndef show_io(input_dir, output_dir):\n swrite(\"\\n=== DIRECTORIES ===\\n\\n\")\n # Show this directory\n swrite(\"-- Current directory \" + pwd() + \":\\n\")\n write_list(ls(\".\"))\n write_list(ls(\"./*\"))\n write_list(ls(\"./*/*\"))\n swrite(\"\\n\")\n\n # List input and output directories\n swrite(\"-- Input directory \" + input_dir + \":\\n\")\n write_list(ls(input_dir))\n write_list(ls(input_dir + \"/*\"))\n write_list(ls(input_dir + \"/*/*\"))\n write_list(ls(input_dir + \"/*/*/*\"))\n swrite(\"\\n\")\n swrite(\"-- Output directory \" + output_dir + \":\\n\")\n write_list(ls(output_dir))\n write_list(ls(output_dir + \"/*\"))\n swrite(\"\\n\")\n\n # write meta data to sdterr\n swrite(\"\\n=== METADATA ===\\n\\n\")\n swrite(\"-- Current directory \" + pwd() + \":\\n\")\n try:\n metadata = yaml.load(open(\"metadata\", \"r\"))\n for key, value in metadata.items():\n swrite(key + \": \")\n swrite(str(value) + \"\\n\")\n except:\n swrite(\"none\\n\")\n swrite(\"-- Input directory \" + input_dir + \":\\n\")\n try:\n metadata = yaml.load(open(os.path.join(input_dir, \"metadata\"), \"r\"))\n for key, value in metadata.items():\n swrite(key + \": \")\n swrite(str(value) + \"\\n\")\n swrite(\"\\n\")\n except:\n swrite(\"none\\n\")\n\n\ndef show_version():\n # Python version and library versions\n swrite(\"\\n=== VERSIONS ===\\n\\n\")\n # Python version\n swrite(\"Python version: \" + version + \"\\n\\n\")\n # Give 
information on the version installed\n swrite(\"Versions of libraries installed:\\n\")\n map(swrite, sorted([\"%s==%s\\n\" % (i.key, i.version) for i in lib()]))\n\n\n# Compute the total memory size of an object in bytes\n\n\ndef total_size(o, handlers={}, verbose=False):\n \"\"\" Returns the approximate memory footprint an object and all of its contents.\n\n Automatically finds the contents of the following builtin containers and\n their subclasses: tuple, list, deque, dict, set and frozenset.\n To search other containers, add handlers to iterate over their contents:\n\n handlers = {SomeContainerClass: iter,\n OtherContainerClass: OtherContainerClass.get_elements}\n\n \"\"\"\n dict_handler = lambda d: chain.from_iterable(d.items())\n all_handlers = {\n tuple: iter,\n list: iter,\n deque: iter,\n dict: dict_handler,\n set: iter,\n frozenset: iter,\n }\n all_handlers.update(handlers) # user handlers take precedence\n seen = set() # track which object id's have already been seen\n default_size = getsizeof(0) # estimate sizeof object without __sizeof__\n\n def sizeof(o):\n if id(o) in seen: # do not double count the same object\n return 0\n seen.add(id(o))\n s = getsizeof(o, default_size)\n\n if verbose:\n print(s, type(o), repr(o), file=stderr)\n\n for typ, handler in all_handlers.items():\n if isinstance(o, typ):\n s += sum(map(sizeof, handler(o)))\n break\n return s\n\n return sizeof(o)\n\n # write the results in a csv file\n\n\ndef platform_score(basename, mem_used, n_estimators, time_spent, time_budget):\n # write the results and platform information in a csv file (performance.csv)\n with open(\"performance.csv\", \"a\") as fp:\n a = csv.writer(fp, delimiter=\",\")\n # ['Data name','Nb estimators','System', 'Machine' , 'Platform' ,'memory used (Mb)' , 'number of CPU' ,' time spent (sec)' , 'time budget (sec)'],\n data = [\n [\n basename,\n n_estimators,\n platform.system(),\n platform.machine(),\n platform.platform(),\n float(\"{0:.2f}\".format(mem_used / 1048576.0)),\n str(psutil.cpu_count()),\n float(\"{0:.2f}\".format(time_spent)),\n time_budget,\n ]\n ]\n a.writerows(data)\n" ]
[ [ "pandas.read_csv", "numpy.ravel", "numpy.arange", "numpy.array", "numpy.random.randint" ] ]
dumpmemory/nlpatl
[ "59209242d1ac26714b11b86261070ac50cc90432" ]
[ "tests/sampling/certainty/test_most_confidence.py" ]
[ "import unittest\r\nimport numpy as np\r\n\r\nfrom nlpatl.sampling.certainty import MostConfidenceSampling\r\n\r\n\r\nclass TestSamplingConfidence(unittest.TestCase):\r\n @classmethod\r\n def setUpClass(cls):\r\n cls.data = np.array(\r\n [\r\n [0.01689184, 0.02989921, 0.92348951, 0.0158317, 0.01388775],\r\n [0.03950194, 0.06295594, 0.75774054, 0.08782477, 0.0519768],\r\n [0.00507078, 0.06731898, 0.850905, 0.06388271, 0.01282254],\r\n [0.03204307, 0.01932809, 0.91326549, 0.01549605, 0.0198673],\r\n [0.01181161, 0.00393428, 0.04627477, 0.92171903, 0.0162603],\r\n [0.02010514, 0.00241422, 0.03849712, 0.92863317, 0.01035035],\r\n [0.04326279, 0.01329769, 0.02284383, 0.88952749, 0.0310682],\r\n [0.15085014, 0.0128402, 0.05903652, 0.74374557, 0.03352757],\r\n [0.04319251, 0.02102466, 0.10190563, 0.75316733, 0.08070987],\r\n [0.03870851, 0.70293962, 0.1727936, 0.04652781, 0.03903046],\r\n [0.00521765, 0.89092692, 0.06196143, 0.03363766, 0.00825634],\r\n [0.72885295, 0.02342087, 0.06129882, 0.14188246, 0.04454489],\r\n [0.71795835, 0.02464577, 0.07842602, 0.1400593, 0.03891056],\r\n ]\r\n )\r\n\r\n def test_sample(self):\r\n threshold = 0.9\r\n num_sample = 3\r\n\r\n sampling = MostConfidenceSampling(threshold=threshold)\r\n\r\n indices, values = sampling.sample(self.data, num_sample=num_sample)\r\n\r\n assert indices is not None, \"No output\"\r\n assert len(indices) == len(values), \"Sample size of return\"\r\n assert len(np.where(values >= threshold)[0]) == len(\r\n values\r\n ), \"Filtering incorrect result\"\r\n" ]
[ [ "numpy.array", "numpy.where" ] ]
haider4445/MultipleAdvAttacks
[ "a5e420a39b6d0eaf38b300ae5d74d4732e5eb6ec" ]
[ "a2c_ppo_acktr/envs.py" ]
[ "import os\nimport sys\nimport gym\nimport numpy as np\nimport torch\nfrom gym.spaces.box import Box\n\nfrom baselines import bench\nfrom baselines.common.atari_wrappers import make_atari, wrap_deepmind\nfrom baselines.common.vec_env import VecEnvWrapper\nfrom baselines.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom baselines.common.vec_env.shmem_vec_env import ShmemVecEnv\nfrom baselines.common.vec_env.vec_normalize import \\\n VecNormalize as VecNormalize_\n\ntry:\n import dm_control2gym\nexcept ImportError:\n pass\n\ntry:\n import roboschool\nexcept ImportError:\n pass\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pass\n\n\ndef make_env(env_id, seed, rank, log_dir, allow_early_resets):\n def _thunk():\n if env_id.startswith(\"dm\"):\n _, domain, task = env_id.split('.')\n env = dm_control2gym.make(domain_name=domain, task_name=task)\n else:\n env = gym.make(env_id)\n\n is_atari = hasattr(gym.envs, 'atari') and isinstance(\n env.unwrapped, gym.envs.atari.atari_env.AtariEnv)\n if is_atari:\n env = make_atari(env_id)\n\n env.seed(seed + rank)\n\n if str(env.__class__.__name__).find('TimeLimit') >= 0:\n env = TimeLimitMask(env)\n\n if log_dir is not None:\n env = bench.Monitor(\n env,\n os.path.join(log_dir, str(rank)),\n allow_early_resets=allow_early_resets)\n\n if is_atari:\n if len(env.observation_space.shape) == 3:\n env = wrap_deepmind(env)\n elif len(env.observation_space.shape) == 3:\n raise NotImplementedError(\n \"CNN models work only for atari,\\n\"\n \"please use a custom wrapper for a custom pixel input env.\\n\"\n \"See wrap_deepmind for an example.\")\n\n # If the input has shape (W,H,3), wrap for PyTorch convolutions\n obs_shape = env.observation_space.shape\n if len(obs_shape) == 3 and obs_shape[2] in [1, 3]:\n env = TransposeImage(env, op=[2, 0, 1])\n\n return env\n\n return _thunk\n\n\ndef make_vec_envs(env_name,\n seed,\n num_processes,\n gamma,\n log_dir,\n device,\n allow_early_resets,\n num_frame_stack=None):\n envs = [\n make_env(env_name, seed, i, log_dir, allow_early_resets)\n for i in range(num_processes)\n ]\n\n if len(envs) > 1:\n envs = ShmemVecEnv(envs, context='fork')\n else:\n envs = DummyVecEnv(envs)\n\n if len(envs.observation_space.shape) == 1:\n if gamma is None:\n envs = VecNormalize(envs, ret=False)\n else:\n envs = VecNormalize(envs, gamma=gamma)\n\n envs = VecPyTorch(envs, device)\n\n if num_frame_stack is not None:\n envs = VecPyTorchFrameStack(envs, num_frame_stack, device)\n elif len(envs.observation_space.shape) == 3:\n envs = VecPyTorchFrameStack(envs, 4, device)\n\n return envs\n\n\n# Checks whether done was caused my timit limits or not\nclass TimeLimitMask(gym.Wrapper):\n def step(self, action):\n obs, rew, done, info = self.env.step(action)\n if done and self.env._max_episode_steps == self.env._elapsed_steps:\n info['bad_transition'] = True\n\n return obs, rew, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\n\n# Can be used to test recurrent policies for Reacher-v2\nclass MaskGoal(gym.ObservationWrapper):\n def observation(self, observation):\n if self.env._elapsed_steps > 0:\n observation[-2:] = 0\n return observation\n\n\nclass TransposeObs(gym.ObservationWrapper):\n def __init__(self, env=None):\n \"\"\"\n Transpose observation space (base class)\n \"\"\"\n super(TransposeObs, self).__init__(env)\n\n\nclass TransposeImage(TransposeObs):\n def __init__(self, env=None, op=[2, 0, 1]):\n \"\"\"\n Transpose observation space for images\n \"\"\"\n super(TransposeImage, self).__init__(env)\n assert 
len(op) == 3, \"Error: Operation, \" + str(op) + \", must be dim3\"\n self.op = op\n obs_shape = self.observation_space.shape\n self.observation_space = Box(\n self.observation_space.low[0, 0, 0],\n self.observation_space.high[0, 0, 0], [\n obs_shape[self.op[0]], obs_shape[self.op[1]],\n obs_shape[self.op[2]]\n ],\n dtype=self.observation_space.dtype)\n\n def observation(self, ob):\n return ob.transpose(self.op[0], self.op[1], self.op[2])\n\n\nclass VecPyTorch(VecEnvWrapper):\n def __init__(self, venv, device):\n \"\"\"Return only every `skip`-th frame\"\"\"\n super(VecPyTorch, self).__init__(venv)\n self.device = device\n # TODO: Fix data types\n\n def reset(self):\n obs = self.venv.reset()\n obs = torch.from_numpy(obs).float().to(self.device)\n return obs\n\n def step_async(self, actions):\n if isinstance(actions, torch.LongTensor):\n # Squeeze the dimension for discrete actions\n actions = actions.squeeze(1)\n actions = actions.cpu().numpy()\n self.venv.step_async(actions)\n\n def step_wait(self):\n obs, reward, done, info = self.venv.step_wait()\n obs = torch.from_numpy(obs).float().to(self.device)\n reward = torch.from_numpy(reward).unsqueeze(dim=1).float()\n return obs, reward, done, info\n\n\nclass VecNormalize(VecNormalize_):\n def __init__(self, *args, **kwargs):\n super(VecNormalize, self).__init__(*args, **kwargs)\n self.training = True\n\n def _obfilt(self, obs, update=True):\n if self.ob_rms:\n if self.training and update:\n self.ob_rms.update(obs)\n obs = np.clip((obs - self.ob_rms.mean) /\n np.sqrt(self.ob_rms.var + self.epsilon),\n -self.clipob, self.clipob)\n return obs\n else:\n return obs\n\n def train(self):\n self.training = True\n\n def eval(self):\n self.training = False\n\n\n# Derived from\n# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_frame_stack.py\nclass VecPyTorchFrameStack(VecEnvWrapper):\n def __init__(self, venv, nstack, device=None):\n self.venv = venv\n self.nstack = nstack\n\n wos = venv.observation_space # wrapped ob space\n self.shape_dim0 = wos.shape[0]\n\n low = np.repeat(wos.low, self.nstack, axis=0)\n high = np.repeat(wos.high, self.nstack, axis=0)\n\n if device is None:\n device = torch.device('cpu')\n self.stacked_obs = torch.zeros((venv.num_envs, ) +\n low.shape).to(device)\n\n observation_space = gym.spaces.Box(\n low=low, high=high, dtype=venv.observation_space.dtype)\n VecEnvWrapper.__init__(self, venv, observation_space=observation_space)\n\n def step_wait(self):\n obs, rews, news, infos = self.venv.step_wait()\n self.stacked_obs[:, :-self.shape_dim0] = \\\n self.stacked_obs[:, self.shape_dim0:].clone()\n for (i, new) in enumerate(news):\n if new:\n self.stacked_obs[i] = 0\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs, rews, news, infos\n\n def reset(self):\n obs = self.venv.reset()\n if torch.backends.cudnn.deterministic:\n self.stacked_obs = torch.zeros(self.stacked_obs.shape)\n else:\n self.stacked_obs.zero_()\n self.stacked_obs[:, -self.shape_dim0:] = obs\n return self.stacked_obs\n\n def close(self):\n self.venv.close()\n" ]
[ [ "numpy.sqrt", "numpy.repeat", "torch.from_numpy", "torch.zeros", "torch.device" ] ]
AutumnSun1996/GameTools
[ "05ed69c09e69e284092cfaffd9eb6313f654c729" ]
[ "Autodroid/label_al.py" ]
[ "import sys\nimport copy\nfrom enum import IntEnum, auto\nimport tkinter as tk\nfrom tkinter import simpledialog, Label\nimport itertools\n\nimport cv2.cv2 as cv\nimport numpy as np\nfrom PIL import Image, ImageTk\nfrom shapely import geometry\n\nfrom simulator import image_tools, win32_tools\nfrom notebook.azurlane import *\n\nimport itertools\n\ntrans_matrix = np.mat(\n [\n [0.9433189178530791, 0.2679732964804766, -158.43695741776074],\n [1.3417181644390942e-05, 1.5656008796635157, -92.70256198000683],\n [7.711117850185767e-07, 0.0005944962831996344, 1.0],\n ]\n)\nfilter_kernel = np.array([[-4, -2, -4], [-2, 24, -2], [-4, -2, -4]])\ntarget_size = (980, 725)\n_, inv_trans = cv.invert(trans_matrix)\ninv_trans = inv_trans / inv_trans[2, 2]\n\n\ncrop_set = {\n \"CropOffset\": [-30, -30],\n \"CropSize\": [60, 60],\n}\n\nanchor_template = {\n \"CropOffset\": [-40, -40],\n \"CropSize\": [80, 80],\n \"Size\": crop_set[\"CropSize\"],\n \"Type\": \"Anchor\",\n \"MainSize\": [1280, 720],\n}\n\n\ndef get_cadidates(screen):\n warped = cv.warpPerspective(screen, trans_matrix, target_size)\n filtered_map = cv.filter2D(warped, 0, filter_kernel)\n # show(res)\n _, poses = s.search_resource(\"Corner\", image=filtered_map)\n if len(poses) < 4:\n raise ValueError(\"Less than 4 anchors found. Stop.\")\n poses = np.array(poses)\n poses += s.resources[\"Corner\"][\"Offset\"]\n diff = poses % 100\n dx = np.argmax(np.bincount(diff[:, 0]))\n dy = np.argmax(np.bincount(diff[:, 1]))\n\n res = itertools.product(\n range(dx, target_size[0], 100), range(dy, target_size[1], 100)\n )\n res = (np.array(list(res), dtype=\"float\") + 50).reshape(1, -1, 2)\n\n pos_in_screen = cv.perspectiveTransform(res, inv_trans).reshape(-1, 2).astype(\"int\")\n return res.reshape(-1, 2).astype(\"int\"), pos_in_screen\n\n\ndef crop_anchor(screen, x, y):\n offset = crop_set[\"CropOffset\"]\n size = crop_set[\"CropSize\"]\n wh = np.array(list(reversed(s.screen.shape[:2])))\n coef = 0.0005907301142274507\n\n diff_s = []\n results = []\n r = coef * y + 1\n lt = np.asarray(offset) * r + [x, y]\n rb = lt + np.asarray(size) * r\n if lt.min() < 0:\n return None\n if np.any(rb > wh):\n return None\n part = cv_crop(screen, (*lt.astype(\"int\"), *rb.astype(\"int\")))\n part = cv.resize(part, tuple(size))\n return part\n\n\ndef extract_anchors(anchors):\n res = {}\n for anchor in anchors:\n name = \"{}/{}\".format(map_name, anchor[\"Name\"])\n x, y = anchor[\"Pos\"]\n cropped = crop_anchor(s.screen, x, y)\n path = \"%s/resources/%s.png\" % (section, name)\n image_tools.cv_save(path, cropped)\n info = copy.deepcopy(anchor_template)\n info.update(\n {\"Name\": name, \"OnMap\": anchor[\"Name\"], \"Image\": name + \".png\",}\n )\n res[name] = info\n return res\n\n\ndef key(event):\n global anchors\n print(\"pressed\", event.keycode)\n if event.keycode == 8 or event.keycode == 46:\n # delete\n print(\"Remove from\", anchors)\n if anchors:\n anchors.remove(anchors[-1])\n elif event.char == \"s\":\n result = extract_anchors(anchors)\n text = hocon.dump(result)\n print(text)\n set_clip(text)\n\n\ndef get_nearest(point, candidates):\n near = None\n near_dist = np.inf\n for p in candidates:\n dist = np.linalg.norm(np.subtract(p, point))\n if dist < near_dist:\n near = p\n near_dist = dist\n return near\n\n\ndef get_name(compare, pos):\n old_ = cv.perspectiveTransform(\n np.array(compare[\"Pos\"]).reshape(1, -1, 2).astype(\"float32\"), trans_matrix\n )\n new_ = cv.perspectiveTransform(\n np.array(pos).reshape(1, -1, 2).astype(\"float32\"), trans_matrix\n )\n 
diff = np.round((new_ - old_).reshape(2) / 100).astype(\"int\")\n print(old_, new_, (new_ - old_), diff)\n name = np.add(diff, [ord(i) for i in compare[\"Name\"]])\n name = \"\".join([chr(i) for i in name])\n return name\n\n\ndef on_click(event):\n global anchors, points\n print(\"on_click\", event.x, event.y)\n pos = get_nearest((event.x, event.y), points)\n print(\"Nearest\", pos)\n if not anchors:\n name = simpledialog.askstring(\"Input\", \"OnMapName\")\n else:\n name = get_name(anchors[0], pos)\n anchors.append(\n {\"Name\": name, \"Pos\": tuple(pos),}\n )\n\n\ndef render():\n img = s.screen.copy()\n for pos in points:\n cv.circle(img, tuple(pos), 3, (255, 255, 255), -1)\n for anchor in anchors:\n x, y = anchor[\"Pos\"]\n cv.putText(img, anchor[\"Name\"], (x - 20, y), 0, 1, (255, 255, 0), 2)\n cv2image = cv.cvtColor(img, cv.COLOR_BGR2RGBA) # Convert color from BGR to RGBA\n current_image = Image.fromarray(cv2image)\n imgtk = ImageTk.PhotoImage(image=current_image)\n panel.imgtk = imgtk\n panel.config(image=imgtk)\n root.after(20, render)\n\n\nanchors = []\n\nsection = \"AzurLane\"\n\nmap_name = sys.argv[1]\ns = init_map(\"通用地图\")\n_, points = get_cadidates(s.screen)\nprint(\"Init Points\", points)\nroot = tk.Tk()\nroot.title(\"opencv + tkinter\")\nroot.bind(\"<Key>\", key)\n\npanel = Label(root) # initialize image panel\npanel.bind(\"<Button-1>\", on_click)\npanel.pack(padx=10, pady=10)\n\nrender()\n\nroot.mainloop()\ncv.destroyAllWindows()\n" ]
[ [ "numpy.bincount", "numpy.subtract", "numpy.any", "numpy.asarray", "numpy.array", "numpy.mat" ] ]
vks/mordecai
[ "87c1aee4864975cc672323d043e5fbf9d6f554f7" ]
[ "mordecai/utilities.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nimport os\nimport sys\nimport json\nimport numpy\nimport pandas as pd\nfrom elasticsearch_dsl import Search, Q\nfrom elasticsearch import Elasticsearch\n\nimport spacy\n\ntry:\n nlp\nexcept NameError:\n nlp = spacy.load('en_core_web_lg')\n\n\ndef country_list_maker():\n \"\"\"\n Helper function to return dictionary of countries in {\"country\" : \"iso\"} form.\n \"\"\"\n cts = {\"Afghanistan\":\"AFG\", \"Åland Islands\":\"ALA\", \"Albania\":\"ALB\", \"Algeria\":\"DZA\",\n \"American Samoa\":\"ASM\", \"Andorra\":\"AND\", \"Angola\":\"AGO\", \"Anguilla\":\"AIA\",\n \"Antarctica\":\"ATA\", \"Antigua Barbuda\":\"ATG\", \"Argentina\":\"ARG\",\n \"Armenia\":\"ARM\", \"Aruba\":\"ABW\", \"Ascension Island\":\"NA\", \"Australia\":\"AUS\",\n \"Austria\":\"AUT\", \"Azerbaijan\":\"AZE\", \"Bahamas\":\"BHS\", \"Bahrain\":\"BHR\",\n \"Bangladesh\":\"BGD\", \"Barbados\":\"BRB\", \"Belarus\":\"BLR\", \"Belgium\":\"BEL\",\n \"Belize\":\"BLZ\", \"Benin\":\"BEN\", \"Bermuda\":\"BMU\", \"Bhutan\":\"BTN\",\n \"Bolivia\":\"BOL\", \"Bosnia Herzegovina\":\"BIH\",\n \"Botswana\":\"BWA\", \"Bouvet Island\":\"BVT\", \"Brazil\":\"BRA\",\n \"Britain\":\"GBR\", \"Great Britain\":\"GBR\",\n \"British Virgin Islands\":\"VGB\", \"Brunei\":\"BRN\", \"Bulgaria\":\"BGR\", \"Burkina Faso\":\"BFA\",\n \"Burundi\":\"BDI\", \"Cambodia\":\"KHM\", \"Cameroon\":\"CMR\",\n \"Canada\":\"CAN\",\"Cape Verde\":\"CPV\", \"Cayman Islands\":\"CYM\",\n \"Central African Republic\":\"CAF\", \"Chad\":\"TCD\", \"Chile\":\"CHL\", \"China\":\"CHN\",\n \"Cocos Islands\":\"CCK\", \"Colombia\":\"COL\",\n \"Comoros\":\"COM\", \"Republic of Congo\":\"COG\", \"Cook Islands\":\"COK\",\n \"Costa Rica\":\"CRI\", \"Cote Ivoire\":\"CIV\", \"Ivory Coast\":\"CIV\",\"Croatia\":\"HRV\", \"Cuba\":\"CUB\",\n \"Curaçao\":\"CUW\", \"Cyprus\":\"CYP\", \"Czech Republic\":\"CZE\", \"Denmark\":\"DNK\",\n \"Djibouti\":\"DJI\", \"Dominica\":\"DMA\", \"Dominican Republic\":\"DOM\", \"Democratic Republic of Congo\" : \"COD\",\n \"Ecuador\":\"ECU\", \"Egypt\":\"EGY\", \"El Salvador\":\"SLV\", \"England\" : \"GBR\",\n \"Equatorial Guinea\":\"GNQ\", \"Eritrea\":\"ERI\", \"Estonia\":\"EST\", \"Ethiopia\":\"ETH\",\n \"Falkland Islands\":\"FLK\", \"Faroe Islands\":\"FRO\",\n \"Fiji\":\"FJI\", \"Finland\":\"FIN\", \"France\":\"FRA\", \"French Guiana\":\"GUF\",\n \"French Polynesia\":\"PYF\",\"Gabon\":\"GAB\",\n \"Gambia\":\"GMB\", \"Georgia\":\"GEO\", \"Germany\":\"DEU\", \"Ghana\":\"GHA\",\n \"Gibraltar\":\"GIB\", \"Greece\":\"GRC\", \"Greenland\":\"GRL\", \"Grenada\":\"GRD\",\n \"Guadeloupe\":\"GLP\", \"Guam\":\"GUM\", \"Guatemala\":\"GTM\", \"Guernsey\":\"GGY\",\n \"Guinea\":\"GIN\", \"Guinea Bissau\":\"GNB\", \"Guyana\":\"GUY\", \"Haiti\":\"HTI\",\"Honduras\":\"HND\",\n \"Hong Kong\":\"HKG\", \"Hungary\":\"HUN\", \"Iceland\":\"ISL\",\n \"India\":\"IND\", \"Indonesia\":\"IDN\", \"Iran\":\"IRN\", \"Iraq\":\"IRQ\", \"Ireland\":\"IRL\",\n \"Israel\":\"ISR\", \"Italy\":\"ITA\", \"Jamaica\":\"JAM\", \"Japan\":\"JPN\",\n \"Jordan\":\"JOR\", \"Kazakhstan\":\"KAZ\", \"Kenya\":\"KEN\",\n \"Kiribati\":\"KIR\", \"Kosovo\": \"XKX\", \"Kuwait\":\"KWT\", \"Kyrgyzstan\":\"KGZ\", \"Laos\":\"LAO\",\n \"Latvia\":\"LVA\", \"Lebanon\":\"LBN\", \"Lesotho\":\"LSO\", \"Liberia\":\"LBR\",\n \"Libya\":\"LBY\", \"Liechtenstein\":\"LIE\", \"Lithuania\":\"LTU\", \"Luxembourg\":\"LUX\",\n \"Macau\":\"MAC\", \"Macedonia\":\"MKD\", \"Madagascar\":\"MDG\", \"Malawi\":\"MWI\",\n \"Malaysia\":\"MYS\", 
\"Maldives\":\"MDV\", \"Mali\":\"MLI\", \"Malta\":\"MLT\", \"Marshall Islands\":\"MHL\",\n \"Martinique\":\"MTQ\", \"Mauritania\":\"MRT\", \"Mauritius\":\"MUS\",\n \"Mayotte\":\"MYT\", \"Mexico\":\"MEX\", \"Micronesia\":\"FSM\", \"Moldova\":\"MDA\",\n \"Monaco\":\"MCO\", \"Mongolia\":\"MNG\", \"Montenegro\":\"MNE\", \"Montserrat\":\"MSR\",\n \"Morocco\":\"MAR\", \"Mozambique\":\"MOZ\", \"Myanmar\":\"MMR\", \"Burma\":\"MMR\", \"Namibia\":\"NAM\",\n \"Nauru\":\"NRU\", \"Nepal\":\"NPL\", \"Netherlands\":\"NLD\", \"Netherlands Antilles\":\"ANT\",\n \"New Caledonia\":\"NCL\", \"New Zealand\":\"NZL\", \"Nicaragua\":\"NIC\",\n \"Niger\":\"NER\", \"Nigeria\":\"NGA\", \"Niue\":\"NIU\", \"North Korea\":\"PRK\",\n \"Northern Ireland\":\"IRL\", \"Northern Mariana Islands\":\"MNP\",\n \"Norway\":\"NOR\", \"Oman\":\"OMN\", \"Pakistan\":\"PAK\",\n \"Palau\":\"PLW\", \"Palestine\":\"PSE\",\"Panama\":\"PAN\", \"Papua New Guinea\":\"PNG\",\n \"Paraguay\":\"PRY\", \"Peru\":\"PER\", \"Philippines\":\"PHL\", \"Pitcairn Islands\":\"PCN\",\n \"Poland\":\"POL\", \"Portugal\":\"PRT\", \"Puerto Rico\":\"PRI\",\n \"Qatar\":\"QAT\", \"Réunion\":\"REU\", \"Romania\":\"ROU\", \"Russia\":\"RUS\",\n \"Rwanda\":\"RWA\", \"Saint Barthélemy\":\"BLM\", \"Saint Helena\":\"SHN\",\n \"Saint Kitts Nevis\":\"KNA\", \"Saint Lucia\":\"LCA\",\n \"Saint Pierre Miquelon\":\"SPM\", \"Saint Vincent Grenadines\":\"VCT\",\n \"Samoa\":\"WSM\", \"San Marino\":\"SMR\", \"São Tomé Príncipe\":\"STP\", \"Saudi Arabia\":\"SAU\",\n \"Senegal\":\"SEN\", \"Serbia\":\"SRB\",\n \"Seychelles\":\"SYC\", \"Sierra Leone\":\"SLE\", \"Singapore\":\"SGP\", \"Sint Maarten\":\"SXM\",\n \"Slovakia\":\"SVK\", \"Slovenia\":\"SVN\", \"Solomon Islands\":\"SLB\",\n \"Somalia\":\"SOM\", \"South Africa\":\"ZAF\",\n \"South Korea\":\"KOR\", \"South Sudan\":\"SSD\", \"Spain\":\"ESP\", \"Sri Lanka\":\"LKA\", \"Sudan\":\"SDN\",\n \"Suriname\":\"SUR\", \"Svalbard Jan Mayen\":\"SJM\",\n \"Swaziland\":\"SWZ\", \"Sweden\":\"SWE\", \"Switzerland\":\"CHE\", \"Syria\":\"SYR\",\n \"Taiwan\":\"TWN\", \"Tajikistan\":\"TJK\", \"Tanzania\":\"TZA\", \"Thailand\":\"THA\",\n \"Timor Leste\":\"TLS\", \"East Timor\":\"TLS\",\"Togo\":\"TGO\", \"Tokelau\":\"TKL\", \"Tonga\":\"TON\", \"Trinidad Tobago\":\"TTO\",\n \"Tunisia\":\"TUN\", \"Turkey\":\"TUR\",\n \"Turkmenistan\":\"TKM\", \"Turks Caicos Islands\":\"TCA\", \"Tuvalu\":\"TUV\", \"U.S. Minor Outlying Islands\":\"UMI\",\n \"Virgin Islands\":\"VIR\", \"Uganda\":\"UGA\",\n \"Ukraine\":\"UKR\", \"United Arab Emirates\":\"ARE\", \"United Kingdom\":\"GBR\",\n \"United States\":\"USA\", \"Uruguay\":\"URY\", \"Uzbekistan\":\"UZB\", \"Vanuatu\":\"VUT\", \"Vatican\":\"VAT\",\n \"Venezuela\":\"VEN\",\n \"Vietnam\":\"VNM\", \"Wallis Futuna\":\"WLF\",\n \"Western Sahara\":\"ESH\", \"Yemen\":\"YEM\", \"Zambia\":\"ZMB\", \"Zimbabwe\":\"ZWE\",\n \"UK\":\"GBR\", \"United States\":\"USA\", \"USA\":\"USA\", \"America\":\"USA\", \"Palestinian Territories\":\"PSE\",\n \"Congo Brazzaville\":\"COG\", \"DRC\":\"COD\", \"Congo Kinshasa\":\"COD\", \"Wales\" : \"GBR\",\n \"Scotland\" : \"GBR\", \"Britain\" : \"GBR\",}\n\n return cts\n\n\ndef other_vectors():\n \"\"\"\n Define more {placename : iso} mappings to improve performance of vector-based\n country picking. 
An easy hack to force a placename to resolve to a defined country\n would be to add it to this list.\n \"\"\"\n # We want the advantage of having more defined vector terms to help\n # matching, but we also want to make sure that when we invert the\n # dictionary for labeling, each ISO code gets resolved to a single country\n # name, as opposed to an alternative name, city, or state.\n other_vecs = {\n # alt. country names\n # US states\n \"Alabama\" : \"USA\", \"Alaska\" : \"USA\", \"Arizona\" : \"USA\", \"Arkansas\" : \"USA\",\n \"California\" : \"USA\", \"Colorado\" : \"USA\", \"Connecticut\" : \"USA\", \"Delaware\" : \"USA\",\n \"Florida\" : \"USA\",\n # \"Georgia\" : \"USA\", <----- hmmmm\n \"Hawaii\" : \"USA\", \"Idaho\" : \"USA\",\n \"Illinois\" : \"USA\", \"Indiana\" : \"USA\", \"Iowa\" : \"USA\", \"Kansas\" : \"USA\",\n \"Kentucky\" : \"USA\", \"Louisiana\" : \"USA\", \"Maine\" : \"USA\",\n \"Maryland\" : \"USA\", \"Massachusetts\" : \"USA\", \"Michigan\" : \"USA\",\n \"Minnesota\" : \"USA\", \"Mississippi\" : \"USA\", \"Missouri\" : \"USA\",\n \"Montana\" : \"USA\", \"Nebraska\" : \"USA\", \"Nevada\" : \"USA\", \"New Hampshire\" : \"USA\",\n \"New Jersey\" : \"USA\", \"New Mexico\" : \"USA\", \"New York\" : \"USA\",\n \"North Carolina\" : \"USA\", \"North Dakota\" : \"USA\", \"Ohio\" : \"USA\",\n \"Oklahoma\" : \"USA\", \"Oregon\" : \"USA\", \"Pennsylvania\" : \"USA\",\n \"Rhode Island\" : \"USA\", \"South Carolina\" : \"USA\", \"South Dakota\" : \"USA\",\n \"Tennessee\" : \"USA\", \"Texas\" : \"USA\", \"Utah\" : \"USA\",\n \"Vermont\" : \"USA\", \"Virginia\" : \"USA\", \"Washington\" : \"USA\",\n \"West Virginia\" : \"USA\", \"Wisconsin\" : \"USA\", \"Wyoming\" : \"USA\",\n # cities\n \"Beijing\" : \"CHN\", \"Chicago\" : \"USA\",\n \"Tbilisi\" : \"GEO\", \"Gaza\":\"PSE\"}\n return other_vecs\n\n\ndef make_skip_list(cts):\n \"\"\"\n Return hand-defined list of place names to skip and not attempt to geolocate. If users would like to exclude\n country names, this would be the function to do it with.\n \"\"\"\n # maybe make these non-country searches but don't discard, at least for\n # some (esp. 
bodies of water)\n special_terms = [\"Europe\", \"West\", \"the West\", \"South Pacific\", \"Gulf of Mexico\", \"Atlantic\",\n \"the Black Sea\", \"Black Sea\", \"North America\", \"Mideast\", \"Middle East\",\n \"the Middle East\", \"Asia\", \"the Caucasus\", \"Africa\",\n \"Central Asia\", \"Balkans\", \"Eastern Europe\", \"Arctic\", \"Ottoman Empire\",\n \"Asia-Pacific\", \"East Asia\", \"Horn of Africa\", \"Americas\",\n \"North Africa\", \"the Strait of Hormuz\", \"Mediterranean\", \"East\", \"North\",\n \"South\", \"Latin America\", \"Southeast Asia\", \"Western Pacific\", \"South Asia\",\n \"Persian Gulf\", \"Central Europe\", \"Western Hemisphere\", \"Western Europe\",\n \"European Union (E.U.)\", \"EU\", \"European Union\", \"E.U.\", \"Asia-Pacific\",\n \"Europe\", \"Caribbean\", \"US\", \"U.S.\", \"Persian Gulf\", \"West Africa\", \"North\", \"East\",\n \"South\", \"West\", \"Western Countries\"\n ]\n\n # Some words are recurring spacy problems...\n spacy_problems = [\"Kurd\", \"Qur'an\"]\n\n #skip_list = list(cts.keys()) + special_terms\n skip_list = special_terms + spacy_problems\n skip_list = set(skip_list)\n return skip_list\n\n\ndef country_list_nlp(cts):\n \"\"\"NLP countries so we can use for vector comparisons\"\"\"\n ct_nlp = []\n for i in cts.keys():\n nlped = nlp(i)\n ct_nlp.append(nlped)\n return ct_nlp\n\n\ndef make_country_nationality_list(cts, ct_file):\n \"\"\"Combine list of countries and list of nationalities\"\"\"\n countries = pd.read_csv(ct_file)\n nationality = dict(zip(countries.nationality,countries.alpha_3_code))\n both_codes = {**nationality, **cts}\n return both_codes\n\n\ndef make_inv_cts(cts):\n \"\"\"\n cts is e.g. {\"Germany\" : \"DEU\"}. inv_cts is the inverse: {\"DEU\" : \"Germany\"}\n \"\"\"\n inv_ct = {}\n for old_k, old_v in cts.items():\n if old_v not in inv_ct.keys():\n inv_ct.update({old_v : old_k})\n return inv_ct\n\n\ndef read_in_admin1(filepath):\n \"\"\"\n Small helper function to read in a admin1 code <--> admin1 name document.\n\n Parameters\n ----------\n filepath: string\n path to the admin1 mapping JSON. 
This file is usually\n mordecai/resources/data/admin1CodesASCII.json\n\n Returns\n -------\n admin1_dict: dictionary\n keys are country + admin1codes, values are names\n Example: \"US.OK\" : \"Oklahoma\"\n Example: \"SE.21\": \"Uppsala\"\n \"\"\"\n with open(filepath) as admin1file:\n admin1_dict = json.loads(admin1file.read())\n return admin1_dict\n\n\n\ndef structure_results(res):\n \"\"\"Format Elasticsearch result as Python dictionary\"\"\"\n out = {'hits': {'hits': []}}\n keys = ['admin1_code', 'admin2_code', 'admin3_code', 'admin4_code',\n 'alternativenames', 'asciiname', 'coordinates',\n 'country_code2', 'country_code3', \n 'feature_class', 'feature_code', 'geonameid',\n 'modification_date', 'name', 'population']\n for i in res:\n i_out = {}\n for k in keys:\n i_out[k] = i[k]\n out['hits']['hits'].append(i_out)\n return out\n\ndef setup_es(hosts, port, use_ssl=False, auth=None):\n \"\"\"\n Setup an Elasticsearch connection\n\n Parameters\n ----------\n hosts: list\n Hostnames / IP addresses for elasticsearch cluster\n port: string\n Port for elasticsearch cluster\n use_ssl: boolean\n Whether to use SSL for the elasticsearch connection\n auth: tuple\n (username, password) to use with HTTP auth\n Returns\n -------\n es_conn: an elasticsearch_dsl Search connection object.\n \"\"\"\n kwargs = dict(\n hosts=hosts or ['localhost'],\n port=port or 9200,\n use_ssl=use_ssl,\n )\n if auth:\n kwargs.update(http_auth=auth)\n\n CLIENT = Elasticsearch(**kwargs)\n S = Search(using=CLIENT, index=\"geonames\")\n return S\n\ndef check_geonames_date(conn):\n r = Q(\"match\", geonameid='5128581') # New York City\n result = conn.query(r).execute()\n output = structure_results(result)\n return output['hits']['hits'][0]['modification_date']\n" ]
[ [ "pandas.read_csv" ] ]
yz-mao/jbdl
[ "a5380233b3795c8aaa9acd9e5c07fa44f8a5dadb" ]
[ "src/jbdl/experimental/contact/solve_contact_simple_lcp.py" ]
[ "import numpy as np\nfrom jbdl.experimental.contact import calc_contact_jacobian_core\nfrom jbdl.experimental.contact.calc_contact_jacobian import calc_contact_jacobian_core_jit_flag\nfrom jbdl.experimental.contact.calc_contact_jdot_qdot import calc_contact_jdot_qdot_core\nfrom jbdl.experimental.contact.calc_contact_jdot_qdot import calc_contact_jdot_qdot_core_jit_flag\nimport jax.numpy as jnp\nfrom jbdl.experimental.contact import get_contact_force\nfrom jbdl.rbdl.utils import xyz2int\n\n\ndef quad_loss(mm, d, lam):\n aa = 0.5 * (mm + jnp.transpose(mm))\n qdloss = 0.5 * jnp.matmul(jnp.transpose(lam), jnp.matmul(aa, lam)) + jnp.dot(jnp.transpose(d), lam)\n qdloss = jnp.squeeze(qdloss)\n return qdloss\n\n\ndef non_negative_z_projector(x, nf):\n x = x.at[nf-1::nf].set(jnp.maximum(x[nf-1::nf], 0))\n return x\n\n\ndef solve_contact_simple_lcp_core_jit_flag(\n x_tree, q, qdot, contactpoint, hh, tau, cc, flag_contact, idcontact,\n parent, jtype, jaxis, nb, nc, nf):\n\n jc = calc_contact_jacobian_core_jit_flag(\n x_tree, q, contactpoint, flag_contact, idcontact,\n parent, jtype, jaxis, nb, nc, nf)\n jcdot_qdot = calc_contact_jdot_qdot_core_jit_flag(\n x_tree, q, qdot, contactpoint, flag_contact, idcontact,\n parent, jtype, jaxis, nb, nc, nf)\n\n tau = jnp.reshape(tau, (-1, 1))\n cc = jnp.reshape(cc, (-1, 1))\n mm = jnp.matmul(jc, jnp.linalg.solve(hh, jnp.transpose(jc)))\n d0 = jnp.matmul(jc, jnp.linalg.solve(hh, tau - cc))\n d = jnp.add(d0, jcdot_qdot)\n #Todo: Fast differentiable QP solver.\n lam = -jnp.linalg.solve(mm, d)\n lam = non_negative_z_projector(lam, nf)\n\n fqp = lam\n flcp = jnp.matmul(jnp.transpose(jc), fqp)\n\n flcp = jnp.reshape(flcp, (-1,))\n return flcp, fqp\n\n\ndef solve_contact_simple_lcp_core(\n x_tree, q, qdot, contactpoint, hh, tau, cc, idcontact, flag_contact,\n parent, jtype, jaxis, nb, nc, nf):\n\n jc = calc_contact_jacobian_core(\n x_tree, q, contactpoint, idcontact, flag_contact,\n parent, jtype, jaxis, nb, nc, nf)\n jcdot_qdot = calc_contact_jdot_qdot_core(\n x_tree, q, qdot, contactpoint, idcontact, flag_contact,\n parent, jtype, jaxis, nb, nc, nf)\n\n tau = jnp.reshape(tau, (-1, 1))\n cc = jnp.reshape(cc, (-1, 1))\n mm = jnp.matmul(jc, jnp.linalg.solve(hh, jnp.transpose(jc)))\n d0 = jnp.matmul(jc, jnp.linalg.solve(hh, tau - cc))\n d = jnp.add(d0, jcdot_qdot)\n #Todo: Fast differentiable QP solver.\n lam = -jnp.linalg.solve(mm, d)\n lam = non_negative_z_projector(lam, nf)\n\n fqp = lam\n flcp = jnp.matmul(jnp.transpose(jc), fqp)\n\n flcp = jnp.reshape(flcp, (-1,))\n return flcp, fqp\n\n\ndef solve_contact_simple_lcp(model: dict, q: np.ndarray, qdot: np.ndarray, tau: np.ndarray, flag_contact: np.ndarray):\n nc = int(model[\"nc\"])\n nb = int(model[\"nb\"])\n nf = int(model[\"nf\"])\n x_tree = model[\"x_tree\"]\n contactpoint = model[\"contactpoint\"]\n idcontact = tuple(model[\"idcontact\"])\n parent = tuple(model[\"parent\"])\n jtype = tuple(model[\"jtype\"])\n jaxis = xyz2int(model[\"jaxis\"])\n contactpoint = model[\"contactpoint\"]\n flag_contact = flag_contact\n hh = model[\"hh\"]\n cc = model[\"cc\"]\n\n flcp, fqp = solve_contact_simple_lcp_core(x_tree, q, qdot, contactpoint, hh, tau, cc, \\\n idcontact, flag_contact, parent, jtype, jaxis, nb, nc, nf)\n\n fpd = np.zeros((3*nc, 1))\n fc, fcqp, fcpd = get_contact_force(model, fqp, fpd, flag_contact)\n return flcp, fqp, fc, fcqp, fcpd\n" ]
[ [ "numpy.zeros" ] ]
allnes/open_model_zoo
[ "693ba31b3b7671f5fb8ecf8f9b8d670cfec21bc3" ]
[ "tools/accuracy_checker/accuracy_checker/adapters/attributes_recognition.py" ]
[ "\"\"\"\nCopyright (c) 2019 Intel Corporation\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport numpy as np\n\nfrom ..adapters import Adapter\nfrom ..config import ConfigValidator, StringField\nfrom ..representation import (\n ContainerPrediction,\n RegressionPrediction,\n ClassificationPrediction,\n FacialLandmarksPrediction,\n MultiLabelRecognitionPrediction,\n GazeVectorPrediction\n)\n\nclass HeadPoseEstimatorAdapter(Adapter):\n \"\"\"\n Class for converting output of HeadPoseEstimator to HeadPosePrediction representation\n \"\"\"\n __provider__ = 'head_pose'\n prediction_types = (RegressionPrediction, )\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'angle_yaw': StringField(description=\"Output layer name for yaw angle.\"),\n 'angle_pitch': StringField(description=\"Output layer name for pitch angle.\"),\n 'angle_roll': StringField(description=\"Output layer name for roll angle.\")\n })\n return parameters\n\n def validate_config(self):\n super().validate_config(on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT)\n\n def configure(self):\n \"\"\"\n Specifies parameters of config entry\n \"\"\"\n self.angle_yaw = self.get_value_from_config('angle_yaw')\n self.angle_pitch = self.get_value_from_config('angle_pitch')\n self.angle_roll = self.get_value_from_config('angle_roll')\n\n def process(self, raw, identifiers=None, frame_meta=None):\n \"\"\"\n Args:\n identifiers: list of input data identifiers\n raw: output of model\n frame_meta: list of meta information about each frame\n Returns:\n list of ContainerPrediction objects\n \"\"\"\n result = []\n raw_output = self._extract_predictions(raw, frame_meta)\n for identifier, yaw, pitch, roll in zip(\n identifiers,\n raw_output[self.angle_yaw],\n raw_output[self.angle_pitch],\n raw_output[self.angle_roll]\n ):\n prediction = ContainerPrediction({'angle_yaw' : RegressionPrediction(identifier, yaw[0]),\n 'angle_pitch': RegressionPrediction(identifier, pitch[0]),\n 'angle_roll' : RegressionPrediction(identifier, roll[0])})\n result.append(prediction)\n\n return result\n\nclass VehicleAttributesRecognitionAdapter(Adapter):\n __provider__ = 'vehicle_attributes'\n prediction_types = (ClassificationPrediction,)\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'color_out' : StringField(description=\"Vehicle color attribute output layer name.\"),\n 'type_out' : StringField(description=\"Vehicle type attribute output layer name.\")\n })\n return parameters\n\n def validate_config(self):\n super().validate_config(on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT)\n\n def configure(self):\n \"\"\"\n Specifies parameters of config entry\n \"\"\"\n self.color_out = self.get_value_from_config('color_out')\n self.type_out = self.get_value_from_config('type_out')\n\n def process(self, raw, identifiers=None, frame_meta=None):\n res = []\n raw_output = self._extract_predictions(raw, frame_meta)\n for identifier, colors, types in zip(identifiers, 
raw_output[self.color_out], raw_output[self.type_out]):\n res.append(ContainerPrediction({'color': ClassificationPrediction(identifier, colors.reshape(-1)),\n 'type': ClassificationPrediction(identifier, types.reshape(-1))}))\n return res\n\nclass AgeGenderAdapter(Adapter):\n __provider__ = 'age_gender'\n prediction_types = (ClassificationPrediction, RegressionPrediction, )\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'age_out' : StringField(description=\"Output layer name for age recognition.\"),\n 'gender_out' : StringField(description=\"Output layer name for gender recognition.\")\n })\n return parameters\n\n def configure(self):\n self.age_out = self.get_value_from_config('age_out')\n self.gender_out = self.get_value_from_config('gender_out')\n\n def validate_config(self):\n super().validate_config(on_extra_argument=ConfigValidator.ERROR_ON_EXTRA_ARGUMENT)\n\n @staticmethod\n def get_age_scores(age):\n age_scores = np.zeros(4)\n if age < 19:\n age_scores[0] = 1\n return age_scores\n if age < 36:\n age_scores[1] = 1\n return age_scores\n if age < 66:\n age_scores[2] = 1\n return age_scores\n age_scores[3] = 1\n return age_scores\n\n def process(self, raw, identifiers=None, frame_meta=None):\n result = []\n raw_output = self._extract_predictions(raw, frame_meta)\n for identifier, age, gender in zip(identifiers, raw_output[self.age_out], raw_output[self.gender_out]):\n gender = gender.reshape(-1)\n age = age.reshape(-1)[0]*100\n gender_rep = ClassificationPrediction(identifier, gender)\n age_class_rep = ClassificationPrediction(identifier, self.get_age_scores(age))\n age_error_rep = RegressionPrediction(identifier, age)\n result.append(ContainerPrediction({'gender': gender_rep, 'age_classification': age_class_rep,\n 'age_error': age_error_rep}))\n return result\n\n\nclass LandmarksRegressionAdapter(Adapter):\n __provider__ = 'landmarks_regression'\n prediction_types = (FacialLandmarksPrediction, )\n\n def process(self, raw, identifiers=None, frame_meta=None):\n res = []\n raw_output = self._extract_predictions(raw, frame_meta)\n for identifier, values in zip(identifiers, raw_output[self.output_blob]):\n x_values, y_values = values[::2], values[1::2]\n res.append(FacialLandmarksPrediction(identifier, x_values.reshape(-1), y_values.reshape(-1)))\n return res\n\nclass PersonAttributesAdapter(Adapter):\n __provider__ = 'person_attributes'\n prediction_types = (MultiLabelRecognitionPrediction, )\n\n @classmethod\n def parameters(cls):\n parameters = super().parameters()\n parameters.update({\n 'attributes_recognition_out' : StringField(description=\"Output layer name for attributes recognition.\")\n })\n return parameters\n\n def validate_config(self):\n super().validate_config(on_extra_argument=ConfigValidator.IGNORE_ON_EXTRA_ARGUMENT)\n\n def configure(self):\n self.attributes_recognition_out = self.launcher_config.get('attributes_recognition_out', self.output_blob)\n\n def process(self, raw, identifiers=None, frame_meta=None):\n result = []\n raw_output = self._extract_predictions(raw, frame_meta)\n self.attributes_recognition_out = self.attributes_recognition_out or self.output_blob\n for identifier, multi_label in zip(identifiers, raw_output[self.attributes_recognition_out]):\n multi_label[multi_label > 0.5] = 1.\n multi_label[multi_label <= 0.5] = 0.\n\n result.append(MultiLabelRecognitionPrediction(identifier, multi_label.reshape(-1)))\n\n return result\n\n\nclass GazeEstimationAdapter(Adapter):\n __provider__ = 'gaze_estimation'\n 
prediction_types = (GazeVectorPrediction, )\n\n def process(self, raw, identifiers=None, frame_meta=None):\n result = []\n raw_output = self._extract_predictions(raw, frame_meta)\n for identifier, output in zip(identifiers, raw_output[self.output_blob]):\n result.append(GazeVectorPrediction(identifier, output))\n\n return result\n" ]
[ [ "numpy.zeros" ] ]
SubhamSingh285/greyatom-python-for-data-science
[ "16a70eaa2732f1cd30bcc013847150b8caf33f02" ]
[ "Loan-Approval-Analysis/code.py" ]
[ "# --------------\n# Import packages\nimport numpy as np\nimport pandas as pd\nfrom scipy.stats import mode \n\n\n\n# code starts here\n#Load Dataset\n\nbank = pd.read_csv(path)\n \n# Display categorical variable\n\n\ncategorical_var=bank.select_dtypes(include='object')\n\n\n#print(\"Categorical variables : \",categorical_var)\n\n\n \n#Code for numerical variable\n\nnumerical_var=bank.select_dtypes(include='number')\n\n#print(\"Numerical Variables : \",numerical_var)\n\n\n\n# code ends here\n\n\n# --------------\n# code starts here\n\n# load the dataset and drop the Loan_ID\nbanks= bank.drop(columns='Loan_ID')\n\n\n# check all the missing values filled.\n\nprint(banks.isnull().sum())\n\n# apply mode \n\nbank_mode = banks.mode().iloc[0]\n\n# Fill the missing values with \n\nbanks.fillna(bank_mode, inplace=True)\n\n# check again all the missing values filled.\n\nprint(banks.isnull().sum())\n\n\n\n\n\n#code ends here\n\n\n# --------------\n# code starts here\n\n# check the avg_loan_amount\navg_loan_amount = banks.pivot_table(values=[\"LoanAmount\"], index=[\"Gender\",\"Married\",\"Self_Employed\"], aggfunc=np.mean)\n\n\nprint (avg_loan_amount)\n# code ends here\n\n\n# --------------\n# code starts here\n\n# code for loan aprroved for self employed\nloan_approved_se = banks.loc[(banks[\"Self_Employed\"]==\"Yes\") & (banks[\"Loan_Status\"]==\"Y\"), [\"Loan_Status\"]].count()\nprint(loan_approved_se)\n\n# code for loan approved for non self employed\nloan_approved_nse = banks.loc[(banks[\"Self_Employed\"]==\"No\") & (banks[\"Loan_Status\"]==\"Y\"), [\"Loan_Status\"]].count()\nprint(loan_approved_nse)\n\n# percentage of loan approved for self employed\npercentage_se = (loan_approved_se * 100 / 614)\npercentage_se=percentage_se[0]\n# print percentage of loan approved for self employed\nprint(percentage_se)\n\n#percentage of loan for non self employed\npercentage_nse = (loan_approved_nse * 100 / 614)\npercentage_nse=percentage_nse[0]\n#print percentage of loan for non self employed\nprint (percentage_nse)\n\n# code ends here\n\n\n# --------------\n# code starts here\n\n\n# loan amount term \n\nloan_term = banks['Loan_Amount_Term'].apply(lambda x: int(x)/12 )\n\n\nbig_loan_term=len(loan_term[loan_term>=25])\n\nprint(big_loan_term)\n\n# code ends here\n\n\n# --------------\n# code starts here\n\ncolumns_to_show = ['ApplicantIncome', 'Credit_History']\n \nloan_groupby=banks.groupby(['Loan_Status'])\n\nloan_groupby=loan_groupby[columns_to_show]\n\n# Check the mean value \nmean_values=loan_groupby.agg([np.mean])\n\nprint(mean_values)\n\n# code ends here\n\n\n" ]
[ [ "pandas.read_csv" ] ]
gxxu-ml/Natural-Disaster-Image-Generation-to-raise-Environmental-Awareness
[ "930e30fb2b3eca0fa3aeeeb05462538669405d54" ]
[ "dalle-mini-custom/tools/inference/inference.py" ]
[ "import jax\nimport jax.numpy as jnp\nimport os\n# Load models & tokenizer\nfrom dalle_mini.model import DalleBart, DalleBartTokenizer\nfrom vqgan_jax.modeling_flax_vqgan import VQModel\nfrom transformers import CLIPProcessor, FlaxCLIPModel\nimport wandb\nfrom transformers import CLIPProcessor, CLIPModel\nfrom dalle_mini.model import DalleBart, DalleBartTokenizer\nfrom functools import partial\nfrom flax.jax_utils import replicate\nfrom flax.training.common_utils import shard_prng_key\nimport numpy as np\nfrom PIL import Image\nfrom tqdm.notebook import trange\nfrom flax.training.common_utils import shard\nimport random\nimport torch\nimport pandas as pd\nimport random\nfrom dalle_mini.text import TextNormalizer\n\n\n###################### HYPERPARAMS TO SET\n\n\nos.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" # see issue #152\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1,2,3,4,5,6,7\"\n#whether to evaluate on the augmented validations set, if false, on 5k dataet.\nVAL_ON_AUG = False\n#whether to use the original pretrained model?\nORIGINAL = False\n\nfolders = [ \"aug_finetuned_model1_lr2_adafactor\",\n \"aug_finetuned_model1_lr3_5k\",\"aug_finetuned_model1_lr3_adafactor\",\n \"aug_finetuned_model1_lr4_5k\",\"aug_finetuned_model1_lr4_adafactor\",\n \"aug_finetuned_model1_lr5_5k\", \"aug_finetuned_model1_lr5_adafactor\"]\nfolders = [\"ori\"]\nfolders = [\"subset_finetuned_model_lr3_adafactor\",\"subset_finetuned_model_lr4_adafactor\",\"subset_finetuned_model_lr5_adafactor\"]\nfolders = [\"aug_finetuned_model1_lr5_5k\"]\n\n##################\n\n\n\n\n# check how many devices are available\njax.local_device_count()\n\n# type used for computation - use bfloat16 on TPU's\ndtype = jnp.bfloat16 if jax.local_device_count() == 8 else jnp.float32\n\n# TODO: fix issue with bfloat16\ndtype = jnp.float32\n\nDALLE_TOKENIZER = \"dalle-mini/dalle-mini/model-1reghx5l:latest\" \nDALLE_COMMIT_ID = None\n\n# VQGAN model\nVQGAN_REPO = \"dalle-mini/vqgan_imagenet_f16_16384\"\nVQGAN_COMMIT_ID = \"e93a26e7707683d349bf5d5c41c5b0ef69b677a9\"\n\n# CLIP model\nCLIP_REPO = \"openai/clip-vit-base-patch16\"\nCLIP_COMMIT_ID = None\n\n# number of predictions\nn_predictions = 32\n\n# We can customize top_k/top_p used for generating samples\ngen_top_k = None\ngen_top_p = None\n\n\n \n\n# Model references\n#set the model folder here; it determines model to load and output folder name\n\ndef load_model(DALLE_MODEL, config, ori):\n \n if ori:\n DALLE_MODEL = \"dalle-mini/dalle-mini/model-1reghx5l:latest\" \n config = \"../aug_finetuned_model1_lr5_adafactor/config.json\"\n # Load models & tokenizer\n model = DalleBart.from_pretrained(\n DALLE_MODEL,\n config=config,\n #seed=training_args.seed_model,\n dtype=dtype,\n abstract_init=True,\n #load_on_cpu=True,\n # initializing params with gradient checkpointing creates issues\n # we correctly set it later per training_args\n gradient_checkpointing=False,\n )\n\n tokenizer = DalleBartTokenizer.from_pretrained(DALLE_TOKENIZER, revision=DALLE_COMMIT_ID)\n\n # Load VQGAN\n vqgan = VQModel.from_pretrained(VQGAN_REPO, revision=VQGAN_COMMIT_ID)\n\n clip = CLIPModel.from_pretrained(\"openai/clip-vit-base-patch32\")\n processor = CLIPProcessor.from_pretrained(\"openai/clip-vit-base-patch32\")\n return model, tokenizer, vqgan, clip, processor\n\n\n\n# def generate(prompt,model,tokenizer,vqgan,clip,processor,text_normalizer,model_params,vqgan_params,p_generate,p_decode,key=key):\n\n# #takes the prompt, and generate image list and scores list: images and logits\n# processed_prompt = 
text_normalizer(prompt) if model.config.normalize_text else prompt\n# tokenized_prompt = tokenizer(\n# processed_prompt,\n# return_tensors=\"jax\",\n# padding=\"max_length\",\n# truncation=True,\n# max_length=128,\n# ).data\n# tokenized_prompt = replicate(tokenized_prompt)\n \n \n# # generate images\n# images = []\n# for i in trange(n_predictions // jax.device_count()):\n# # get a new key\n# key, subkey = jax.random.split(key)\n# # generate images\n# encoded_images = p_generate(\n# tokenized_prompt, shard_prng_key(subkey), model_params, gen_top_k, gen_top_p\n# )\n# # remove BOS\n# encoded_images = encoded_images.sequences[..., 1:]\n# #print(\"the length of the generated encoded image is: \",encoded_images.shape)\n# # decode images\n# decoded_images = p_decode(encoded_images, vqgan_params)\n# decoded_images = decoded_images.clip(0.0, 1.0).reshape((-1, 256, 256, 3))\n# #print(\"the shape of the decoded image is: \",decoded_images.shape)\n# for img in decoded_images:\n# images.append(Image.fromarray(np.asarray(img * 255, dtype=np.uint8)))\n# #print(\"the shape of the decoded image after post-processing is: \",len(images),images[0])\n \n# with torch.no_grad():\n# inputs = processor(text=[processed_prompt], images=images, \n# return_tensors=\"pt\", padding='max_length',\n# max_length=77, truncation=True)\n# outputs = clip(**inputs)\n# logits_per_image = outputs.logits_per_image\n# logits = logits_per_image.cpu().numpy().flatten()\n# return images, logits\n\n\n\n\ndef print_save(model_folder, val_on_aug=VAL_ON_AUG):\n\n DALLE_MODEL = os.path.join(\"..\", model_folder, \"flax_model.msgpack\")\n \n config = os.path.join(\"..\", model_folder, \"config.json\")\n\n #loading validation folder\n if val_on_aug:\n val_dir = \"/home/gxu21/dalle-mini/cs269/data40k\"\n else:\n val_dir = \"/home/gxu21/dalle-mini/cs269/data\"\n val_path = os.path.join(val_dir,\"validation.txt\")\n avg_max, avg_mean = 0,0\n df = pd.read_csv(val_path, sep=' ')\n #load model\n model, tokenizer, vqgan, clip, processor = load_model(DALLE_MODEL, config, ORIGINAL)\n \n # convert model parameters for inference if requested\n if dtype == jnp.bfloat16:\n model.params = model.to_bf16(model.params)\n\n model_params = replicate(model.params)\n vqgan_params = replicate(vqgan.params)\n #clip_params = replicate(clip.params)\n\n\n # model inference\n @partial(jax.pmap, axis_name=\"batch\", static_broadcasted_argnums=(3, 4))\n def p_generate(tokenized_prompt, key, params, top_k, top_p):\n return model.generate(\n **tokenized_prompt,\n do_sample=True,\n num_beams=1,\n prng_key=key,\n params=params,\n top_k=top_k,\n top_p=top_p,\n max_length=257\n )\n\n # decode images\n @partial(jax.pmap, axis_name=\"batch\")\n def p_decode(indices, params):\n return vqgan.decode_code(indices, params=params)\n\n text_normalizer = TextNormalizer() if model.config.normalize_text else None\n\n for i in range(200):\n prompt_path = os.path.join(val_dir, df[\"captions\"].iloc[i])\n with open(prompt_path) as f:\n prompt = f.readlines()[0]\n if val_on_aug:\n dir_path = os.path.join(model_folder,str(i))\n else:\n dir_path = os.path.join(model_folder+\"_5kval\",str(i))\n\n #generate images and logits\n# images,logits = generate(prompt=prompt,model=model, tokenizer=tokenizer, vqgan=vqgan, clip=clip, processor=processor,text_normalizer=text_normalizer,model_params=model_params,vqgan_params=vqgan_params,\n# p_generate=p_generate,p_decode=p_decode)\n #===========================================================================================\n # create a random key\n seed = 
random.randint(0, 2**32 - 1)\n key = jax.random.PRNGKey(seed)\n #takes the prompt, and generate image list and scores list: images and logits\n processed_prompt = text_normalizer(prompt) if model.config.normalize_text else prompt\n tokenized_prompt = tokenizer(\n processed_prompt,\n return_tensors=\"jax\",\n padding=\"max_length\",\n truncation=True,\n max_length=128,\n ).data\n tokenized_prompt = replicate(tokenized_prompt)\n\n\n # generate images\n images = []\n for i in trange(n_predictions // jax.device_count()):\n # get a new key\n key, subkey = jax.random.split(key)\n # generate images\n encoded_images = p_generate(\n tokenized_prompt, shard_prng_key(subkey), model_params, gen_top_k, gen_top_p\n )\n # remove BOS\n encoded_images = encoded_images.sequences[..., 1:]\n #print(\"the length of the generated encoded image is: \",encoded_images.shape)\n # decode images\n decoded_images = p_decode(encoded_images, vqgan_params)\n decoded_images = decoded_images.clip(0.0, 1.0).reshape((-1, 256, 256, 3))\n #print(\"the shape of the decoded image is: \",decoded_images.shape)\n for img in decoded_images:\n images.append(Image.fromarray(np.asarray(img * 255, dtype=np.uint8)))\n #print(\"the shape of the decoded image after post-processing is: \",len(images),images[0])\n\n with torch.no_grad():\n inputs = processor(text=[processed_prompt], images=images, \n return_tensors=\"pt\", padding='max_length',\n max_length=77, truncation=True)\n outputs = clip(**inputs)\n logits_per_image = outputs.logits_per_image\n logits = logits_per_image.cpu().numpy().flatten()\n\n \n \n \n #===========================================================================================\n #max_mean and avg_mean\n maxing = np.max(logits).item()\n mean = np.mean(logits).item()\n\n avg_max+=maxing\n avg_mean += mean\n\n # Check whether the specified path exists or not\n isExist = os.path.exists(dir_path)\n if not isExist:\n # Create a new directory because it does not exist \n os.makedirs(dir_path)\n\n for idx,img in enumerate(images):\n score = logits[idx].item()\n img.save(os.path.join(dir_path, str(score)+\"_\"+str(idx)+\".jpg\"))\n\n text_file = open(os.path.join(dir_path,\"prompt_content.txt\"), \"w\")\n text_file.write(prompt + \"\\n\")\n text_file.write(\"the max clip score is: \" + str(max) +\"\\n\")\n text_file.write(\"the mean clip score is: \" + str(mean) +\"\\n\")\n text_file.close()\n print(avg_max/float(i+1),avg_mean/float(i+1))\n\n \n if val_on_aug:\n text_file = open(os.path.join(model_folder,\"final_scores.txt\"), \"w\")\n else:\n text_file = open(os.path.join(model_folder+\"_5kval\",\"final_scores.txt\"), \"w\") \n \n text_file.write(\"the final average max score is: \"+ str(avg_max/200.0)+\"\\n\")\n text_file.write(\"the final average mean score is: \"+ str(avg_mean/200.0)+\"\\n\")\n text_file.close() \n\n print(\"##################\")\n print(\"the scoring for \", model_folder)\n print(\"the final average max score is: \", avg_max/200.0)\n print(\"the final average mean score is: \", avg_mean/200.0)\n\n \n\ndef main(folders=folders):\n for model_folder in folders:\n print_save(model_folder)\n \nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "torch.no_grad", "numpy.asarray", "numpy.max", "numpy.mean" ] ]
nmasse/Meta-Networks
[ "1ac4c233e71221ee6ce9acd96220b313f3560160" ]
[ "motifs.py" ]
[ "import numpy as np\nfrom itertools import product\nfrom itertools import permutations\nimport matplotlib.pyplot as plt\nimport pickle\nimport os\nimport stimulus\nimport parameters\nimport analysis\n\n\nclass Motifs:\n\n def __init__(self, data_dir, file_prefix, N = None):\n\n self.motifs = {}\n self.motif_sizes = [2,3,4]\n data_files = os.listdir(data_dir)\n\n for f in data_files:\n if f.startswith(file_prefix):\n print('Processing ', f)\n self.current_filename = f\n W, v = self.make_matrix(data_dir + f, 'elim_lesion', N)\n print(type(W))\n if type(W) is list:\n for i,w1 in enumerate(W):\n self.find_motifs(w1, v)\n else:\n self.find_motifs(W, v)\n\n self.print_motif_list()\n\n\n def make_matrix(self, filename, method, N):\n x = pickle.load(open(filename, 'rb'))\n beh_threshold = 0.1\n val_th = 0.1\n ind_accurate = np.where(np.array(x['accuracy_hist']) > 0.98)[0]\n #N = np.argmax(ind_accurate)\n #N = 6\n print('N = ', N)\n\n if method == 'elim_lesion' or method == 'elim':\n parameters.update_parameters(x['par'])\n s = stimulus.Stimulus()\n trial_info = s.generate_trial()\n\n\n if method == 'lesion':\n significant_weights_rnn = x['model_performance']['accuracy'][-1] - x['lesion_accuracy_rnn'][0,:,:] > beh_threshold\n significant_weights_out = x['model_performance']['accuracy'][-1] - x['lesion_accuracy_out'][0,:,:] > beh_threshold\n v = np.array([0]*x['parameters']['num_exc_units'] + [1]*x['parameters']['num_inh_units'] \\\n + [2]*x['parameters']['n_output'])\n W = np.vstack((significant_weights_rnn, significant_weights_out))\n d = W.shape[0] - W.shape[1]\n W = np.hstack((W, np.zeros((W.shape[0], d))))\n\n elif method == 'elim':\n num_units = 50 - N\n w1 = np.zeros((num_units, num_units))\n w2 = np.zeros((3, num_units))\n ind = np.where(x['gate_hist'][N]>0)[0]\n for i in range(num_units):\n for j in range(num_units):\n w1[i,j] = x['weights_hist'][N]['w_rnn'][ind[i], ind[j]] > val_th\n for j in range(3):\n w2[j,i] = x['weights_hist'][N]['w_out'][j, ind[i]] > val_th\n n_exc = int(np.sum(x['gate_hist'][N][:x['par']['num_exc']]))\n n_inh = int(np.sum(x['gate_hist'][N][x['par']['num_exc']:]))\n v = np.array([0]*n_exc + [1]*n_inh + [2]*x['par']['n_output'])\n W = np.vstack((w1, w2))\n d = W.shape[0] - W.shape[1]\n W = np.hstack((W, np.zeros((W.shape[0], d))))\n\n\n\n elif method == 'elim_lesion':\n num_units = 50 - N\n r = analysis.lesion_weights(trial_info, x['par']['h_init'], x['par']['syn_x_init'], x['par']['syn_u_init'], \\\n x['weights_hist'][N], x['gate_hist'][N])\n #plt.imshow(np.squeeze(r['lesion_accuracy_rnn']), aspect='auto', interpolation = 'none')\n #plt.colorbar()\n #plt.show()\n w1_full = np.tile(x['accuracy_hist'][N],(x['par']['n_hidden'],x['par']['n_hidden'])) - np.squeeze(r['lesion_accuracy_rnn']) > beh_threshold\n w2_full = np.tile(x['accuracy_hist'][N],(x['par']['n_output'],x['par']['n_hidden'])) - np.squeeze(r['lesion_accuracy_out']) > beh_threshold\n w1 = np.zeros((num_units, num_units))\n w2 = np.zeros((3, num_units))\n ind = np.where(x['gate_hist'][N]>0)[0]\n for i in range(num_units):\n for j in range(num_units):\n w1[i,j] = w1_full[ind[i], ind[j]]\n for j in range(3):\n w2[j,i] = w2_full[j, ind[i]]\n #plt.imshow(w1, aspect='auto', interpolation = 'none')\n #plt.colorbar()\n #plt.show()\n print('accuracy ', x['accuracy_hist'][N])\n n_exc = int(np.sum(x['gate_hist'][N][:x['par']['num_exc']]))\n n_inh = int(np.sum(x['gate_hist'][N][x['par']['num_exc']:]))\n v = np.array([0]*n_exc + [1]*n_inh + [2]*x['par']['n_output'])\n W = np.vstack((w1, w2))\n d = W.shape[0] - 
W.shape[1]\n W = np.hstack((W, np.zeros((W.shape[0], d))))\n plt.imshow(W, aspect='auto', interpolation = 'none')\n plt.colorbar()\n plt.show()\n print(v)\n\n elif method == 'stacked':\n W = []\n for i in range(x['W_rnn'].shape[0]):\n w1 = np.reshape(x['W_rnn'][i,:], (50,50))>0.2\n w2 = np.reshape(x['W_out'][i,:], (3,50))>0.2\n v = np.array([0]*40 + [1]*10 + [2]*3)\n W1 = np.vstack((w1, w2))\n d = W1.shape[0] - W1.shape[1]\n W1 = np.hstack((W1, np.zeros((W1.shape[0], d))))\n W.append(W1)\n\n return W, v\n\n\n def connection_probs(self):\n\n unique_labels = np.unique(self.v).tolist() # [Inhibitory, Excitatory, Output]\n N = len(unique_labels)\n\n total = np.zeros([N,N], dtype=np.float32)\n connection = np.zeros([N,N], dtype=np.float32)\n for (i, v_in), (j, v_out) in product(enumerate(input_labels), enumerate(output_labels)):\n l_in = unique_labels.index(v_in)\n l_out = unique_labels.index(v_out)\n if i != j:\n total[l_in, l_out] += 1\n if self.W[j,i] > 0:\n connection[l_in, l_out] += 1\n\n self.p_connection = np.zeros((N,N), dtype = np.float32)\n for n1, n2 in product(range(N), range(N)):\n self.p_connection[n1, n2] = connection[n1, n2]/total[n1,n2] if total[n1,n2] != 0 else -1\n\n\n def find_motifs(self, W ,v):\n\n W, v = self.prune_network(W, v)\n for i in self.motif_sizes:\n self.find_motif_set_size(W, v, i)\n\n def return_motifs(self):\n\n return self.motifs\n\n\n def find_motif_set_size(self,W, v, c):\n\n\n N = W.shape[0]\n for i0 in range(N):\n ind0 = np.where((W[:, i0] > 0) + (W[i0, :] > 0))[0]\n for i1 in np.setdiff1d(ind0, i0):\n if c == 2:\n self.motif_properties(W, v, [i0, i1])\n else:\n ind1 = np.where((W[:, i1] > 0) + (W[i1, :] > 0))[0]\n for i2 in np.setdiff1d(ind1,[i0,i1]):\n if c == 3:\n self.motif_properties(W, v, [i0, i1, i2])\n else:\n ind2 = np.where((W[:, i2] > 0) + (W[i2, :] > 0))[0]\n for i3 in np.setdiff1d(ind2,[i0,i1,i2]):\n if c == 4:\n self.motif_properties(W, v, [i0, i1, i2, i3])\n else:\n ind3 = np.where((W[:, i3] > 0) + (W[i3, :] > 0))[0]\n for i4 in np.setdiff1d(ind3,[i0,i1,i2,i3]):\n if c == 5:\n self.motif_properties(W, v, [i0, i1, i2, i3, i4])\n else:\n ind4 = np.where((W[:, i4] > 0) + (W[i4, :] > 0))[0]\n for i5 in np.setdiff1d(ind4,[i0,i1,i2,i3,i4]):\n if c == 6:\n self.motif_properties(W, v, [i0, i1, i2, i3, i4, i5])\n\n\n def motif_properties(self, W, v, u):\n\n u = sorted(u)\n W1 = W[:, u]\n W1 = W1[u, :]\n v1 = v[u]\n\n if np.sum(W1) < len(u):\n return\n\n # check for loops\n #for i in range(len(v)):\n\n\n s = [str(int(i)) for i in v1]\n id0 = ''.join(s)\n\n s = [str(int(i)) for i in np.reshape(np.where(W1 > 0, 1, 0), (len(v1)**2), order='F')]\n id1 = ''.join(s)\n\n s = [str(int(i)) for i in np.sort(u)]\n location = [''.join(s)]\n\n #print(id0, id1, W1)\n\n if id0 not in self.motifs.keys():\n self.motifs[id0] = {id1: {}}\n self.motifs[id0][id1]['count'] = 1\n self.motifs[id0][id1]['W'] = W1\n self.motifs[id0][id1]['v'] = v1\n self.motifs[id0][id1]['location'] = {self.current_filename : [location]}\n\n else:\n if id1 not in self.motifs[id0].keys():\n for key, val in self.motifs[id0].items():\n if self.is_isomorphic(W1, v1, val['W'], val['v']):\n for k, v in self.motifs[id0][key]['location'].items():\n if self.current_filename == k and location in v:\n return\n self.motifs[id0][key]['count'] += 1\n if self.current_filename in self.motifs[id0][key]['location'].keys():\n self.motifs[id0][key]['location'][self.current_filename].append(location)\n else:\n self.motifs[id0][key]['location'][self.current_filename] = location\n return\n\n self.motifs[id0][id1] 
= {}\n self.motifs[id0][id1]['count'] = 1\n self.motifs[id0][id1]['W'] = W1\n self.motifs[id0][id1]['v'] = v1\n self.motifs[id0][id1]['location'] = {self.current_filename : [location]}\n else:\n for k, v in self.motifs[id0][id1]['location'].items():\n if self.current_filename == k and location in v:\n return\n self.motifs[id0][id1]['count'] += 1\n if self.current_filename in self.motifs[id0][id1]['location'].keys():\n self.motifs[id0][id1]['location'][self.current_filename].append(location)\n else:\n self.motifs[id0][id1]['location'][self.current_filename] = location\n\n def is_isomorphic(self, W1, v1, W2, v2):\n\n N = len(v1)\n if not np.sum(W1) == np.sum(W2):\n return False\n\n perms = list(permutations(range(N)))\n for ind in perms:\n #print(ind, v1.shape, v1, type(v1), type(ind))\n ind = np.array(ind)\n #print(v1[ind])\n v_test = v1[ind]\n if not (v_test == v2).all():\n continue\n W_test = W1[:,ind]\n W_test = W_test[ind,:]\n if (W_test == W2).all():\n return True\n #print('compare ', W1, W2)\n\n return False\n\n\n def prune_network(self,W,v):\n\n inputs = np.sum(W, axis = 0)\n outputs = np.sum(W, axis = 1)\n connections = inputs + outputs\n neurons_with_connections = np.where(connections > 0)[0]\n W = W[:, neurons_with_connections]\n W = W[neurons_with_connections, :]\n v = v[neurons_with_connections]\n print('neurons_with_connections', len(neurons_with_connections))\n return W, v\n\n\n def print_motif_list(self):\n\n short_ids = sorted(list(self.motifs.keys()))\n long_ids = []\n for s in short_ids:\n long_ids.append(self.motifs[s].keys())\n\n for s, l in zip(short_ids, long_ids):\n print('\\nShort ID:', s, '\\t(Neuron types: Inh=0, Exc=1, Out=2)')\n print('Long ID:\\t(Rounded weights: if w > 0, is 1)')\n for lid in sorted(l, key=lambda k : -self.motifs[s][k]['count']):\n print('-->', lid, '| c =', self.motifs[s][lid]['count'])\n" ]
[ [ "numpy.vstack", "numpy.sum", "numpy.tile", "numpy.sort", "numpy.zeros", "numpy.squeeze", "numpy.setdiff1d", "numpy.reshape", "matplotlib.pyplot.imshow", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.colorbar", "numpy.where", "numpy.unique" ] ]
D-Sokol/lazy-pattern-classifier
[ "a0cc5e7a5db8177b6e12cc9a0f0e4acf61233603" ]
[ "datasets/__init__.py" ]
[ "from functools import wraps\nimport os\nimport pandas as pd\n\n\ndef _read_csv(name, **kw):\n path = os.path.join(os.path.dirname(__file__), 'data', name)\n return pd.read_csv(path, **kw)\n\n\ndef apply_one_hot(fun):\n @wraps(fun)\n def wrapper(*args, **kw):\n X, y = fun(*args, **kw)\n X = pd.get_dummies(X, dtype=float)\n return X, y\n return wrapper\n\n\n@apply_one_hot\ndef get_breast_cancer():\n df = _read_csv('breast-cancer-wisconsin.zip', index_col=0)\n df.drop('Unnamed: 32', axis=1, inplace=True)\n X, y = df.drop('diagnosis', axis=1), df['diagnosis'] == 'M'\n return X, y\n\n\n@apply_one_hot\ndef get_heart_disease():\n df = _read_csv('heart-disease-uci.zip')\n X, y = df.drop('target', axis=1), df['target']\n X = X.astype(float, copy=False)\n return X, y\n\n\n@apply_one_hot\ndef get_mammographic_mass():\n df = _read_csv('mammographic-mass.zip')\n X, y = df.drop('Severity', axis=1), df['Severity']\n X = X.astype(float, copy=False)\n return X, y\n\n\n@apply_one_hot\ndef get_seismic_bumps():\n df = _read_csv('seismic-bumps.zip')\n X, y = df.drop('class', axis=1), df['class']\n X.drop(['nbumps6', 'nbumps7', 'nbumps89'], axis=1, inplace=True)\n X['seismic'] = (X['seismic'] == 'a')\n X['shift'] = (X['shift'] == 'N')\n X = X.astype({col: float for col in X if col not in ('seismoacoustic', 'hazard')}, copy=False)\n return X, y\n\n\n@apply_one_hot\ndef get_titanic():\n df = _read_csv('titanic.zip', index_col=0)\n X, y = df.drop('Survived', axis=1), df['Survived']\n X.drop(['Name', 'Ticket', 'Cabin'], axis=1, inplace=True)\n X['Sex'] = (X['Sex'] == 'male')\n X.fillna({'Age': X['Age'].median(), 'Embarked': 'X'}, inplace=True)\n X = X.astype({col: float for col in X if col not in ('Embarked',)}, copy=False)\n return X, y\n\ndef make_GSE_getter(name):\n def getter():\n df = _read_csv(name, index_col=0)\n X, y = df.drop('type', axis=1), df['type']\n y = (y == 'normal')\n assert X.values.dtype.kind == 'f'\n return X, y\n return getter\n\n\nget_breast_GSE = make_GSE_getter('Breast_GSE70947.csv')\nget_liver_GSE = make_GSE_getter('Liver_GSE14520_U133A.csv')\nget_prostate_GSE = make_GSE_getter('Prostate_GSE6919_U95B.csv')\n" ]
[ [ "pandas.read_csv", "pandas.get_dummies" ] ]
hienerd/molecool
[ "35817f23c0f2a64cbaffa7c8ed3b421bf4774130" ]
[ "molecool/measure.py" ]
[ "import numpy as np\n\ndef calculate_distance(rA, rB):\n \"\"\"\n This function calculates the distance between two points.\n\n Parameters\n ----------\n rA, rB : np.ndarray\n The coordinates of each point.\n\n Returns\n -------\n distance : float\n The distance between two points.\n\n Examples\n --------\n >>> r1 = np.array([0, 0, 0])\n >>> r2 = np.array([3.0, 0, 0])\n >>> calculate_distance(r1, r2)\n 3.0\n \"\"\"\n\n dist_vec = (rA - rB)\n distance = np.linalg.norm(dist_vec)\n return distance\n\ndef calculate_angle(rA, rB, rC, degrees=False):\n # Calculate the angle between three points. Answer is given in radians by default, but can be given in degrees\n # by setting degrees=True\n AB = rB - rA\n BC = rB - rC\n theta=np.arccos(np.dot(AB, BC)/(np.linalg.norm(AB)*np.linalg.norm(BC)))\n\n if degrees:\n return np.degrees(theta)\n else:\n return theta\n" ]
[ [ "numpy.degrees", "numpy.dot", "numpy.linalg.norm" ] ]
agkphysics/emotion
[ "36bb9265f9439b10676fb539d5334cce645e49ef" ]
[ "src/ertk/sklearn/utils.py" ]
[ "import warnings\n\nimport numpy as np\nfrom joblib import Parallel, delayed\nfrom sklearn.base import MetaEstimatorMixin, clone\nfrom sklearn.multiclass import OneVsRestClassifier as _SKOvR\nfrom sklearn.multiclass import _ConstantPredictor\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelBinarizer\n\n\n# Workaround for OneVsRest\n# https://github.com/scikit-learn/scikit-learn/issues/10882#issuecomment-376912896\n# https://stackoverflow.com/a/49535681/10044861\ndef _fit_binary(estimator, X, y, classes=None, **kwargs):\n unique_y = np.unique(y)\n if len(unique_y) == 1:\n if classes is not None:\n if y[0] == -1:\n c = 0\n else:\n c = y[0]\n warnings.warn(\n \"Label %s is present in all training examples.\" % str(classes[c])\n )\n estimator = _ConstantPredictor().fit(X, unique_y)\n else:\n estimator = clone(estimator)\n estimator.fit(X, y, **kwargs)\n return estimator\n\n\nclass OneVsRestClassifier(_SKOvR):\n def fit(self, X, y, **kwargs):\n self.label_binarizer_ = LabelBinarizer(sparse_output=True)\n Y = self.label_binarizer_.fit_transform(y)\n Y = Y.tocsc()\n self.classes_ = self.label_binarizer_.classes_\n columns = (col.toarray().ravel() for col in Y.T)\n self.estimators_ = Parallel(n_jobs=self.n_jobs)(\n delayed(_fit_binary)(\n self.estimator,\n X,\n column,\n classes=[\n \"not %s\" % self.label_binarizer_.classes_[i],\n self.label_binarizer_.classes_[i],\n ],\n **kwargs,\n )\n for i, column in enumerate(columns)\n )\n return self\n\n\ndef get_base_estimator(clf):\n \"\"\"Gets the base estimator of a pipeline or meta-estimator, assuming\n there is only one.\n\n Parameters\n ----------\n clf: estimator\n An estimator.\n\n Returns\n -------\n estimator\n The base estimator of `clf`.\n \"\"\"\n if isinstance(clf, Pipeline):\n if clf._final_estimator == \"passthrough\":\n raise ValueError(\"Pipeline final estimator is 'passthrough'.\")\n return get_base_estimator(clf._final_estimator)\n if isinstance(clf, MetaEstimatorMixin):\n for attr in [\"base_estimator\", \"estimator\"]:\n if hasattr(clf, attr):\n return get_base_estimator(getattr(clf, attr))\n raise RuntimeError(\"Couldn't get base estimator from meta estimator\")\n return clf\n" ]
[ [ "sklearn.multiclass._ConstantPredictor", "sklearn.preprocessing.LabelBinarizer", "sklearn.base.clone", "numpy.unique" ] ]
shizukanaskytree/tfx
[ "6490e102850e91489722a49b20efef9ef64ef2d2" ]
[ "tfx/orchestration/kubeflow/kubeflow_gcp_integration_test.py" ]
[ "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Integration tests for Kubeflow-based orchestrator and GCP backend.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nimport os\nimport subprocess\nimport sys\n\nimport absl\nimport tensorflow as tf\n\nfrom tfx.components.base import executor_spec\nfrom tfx.components.common_nodes.importer_node import ImporterNode\nfrom tfx.components.evaluator.component import Evaluator\nfrom tfx.components.example_gen.csv_example_gen.component import CsvExampleGen\nfrom tfx.components.model_validator.component import ModelValidator\nfrom tfx.components.pusher.component import Pusher\nfrom tfx.components.statistics_gen.component import StatisticsGen\nfrom tfx.components.trainer.component import Trainer\nfrom tfx.components.transform.component import Transform\nfrom tfx.extensions.google_cloud_ai_platform.pusher import executor as ai_platform_pusher_executor\nfrom tfx.extensions.google_cloud_ai_platform.trainer import executor as ai_platform_trainer_executor\nfrom tfx.orchestration.kubeflow import test_utils\nfrom tfx.proto import evaluator_pb2\nfrom tfx.proto import trainer_pb2\nfrom tfx.types import standard_artifacts\nfrom tfx.utils import dsl_utils\n\n\nclass KubeflowGCPIntegrationTest(test_utils.BaseKubeflowTest):\n\n def _delete_ai_platform_model(self, model_name):\n \"\"\"Delete pushed model in AI Platform.\"\"\"\n # In order to delete model, all versions in the model must be deleted first.\n versions_command = [\n 'gcloud', 'ai-platform', 'versions', 'list',\n '--model=%s' % model_name\n ]\n versions = subprocess.run(versions_command, stdout=subprocess.PIPE)\n\n if versions.returncode == 0:\n absl.logging.info('Model %s has versions %s' %\n (model_name, versions.stdout))\n\n # First line of the output is the header: [NAME] [DEPLOYMENT_URI] [STATE]\n # By specification of test case, the latest version is the default model,\n # which needs to be deleted last.\n for version in versions.stdout.decode('utf-8').strip('\\n').split(\n '\\n')[1:]:\n version = version.split()[0]\n absl.logging.info('Deleting version %s of model %s' %\n (version, model_name))\n version_delete_command = [\n 'gcloud', '--quiet', 'ai-platform', 'versions', 'delete', version,\n '--model=%s' % model_name\n ]\n subprocess.run(version_delete_command, check=True)\n\n absl.logging.info('Deleting model %s' % model_name)\n subprocess.run(\n ['gcloud', '--quiet', 'ai-platform', 'models', 'delete', model_name],\n check=True)\n\n def setUp(self):\n super(KubeflowGCPIntegrationTest, self).setUp()\n\n # Example artifacts for testing.\n self.raw_examples_importer = ImporterNode(\n instance_name='raw_examples',\n source_uri=os.path.join(self._testdata_root, 'csv_example_gen'),\n artifact_type=standard_artifacts.Examples,\n reimport=True,\n properties={'split_names': '[\"train\", \"eval\"]'})\n\n # Transformed Example 
artifacts for testing.\n self.transformed_examples_importer = ImporterNode(\n instance_name='transformed_examples',\n source_uri=os.path.join(self._testdata_root, 'transform',\n 'transformed_examples'),\n artifact_type=standard_artifacts.Examples,\n reimport=True,\n properties={'split_names': '[\"train\", \"eval\"]'})\n\n # Schema artifact for testing.\n self.schema_importer = ImporterNode(\n instance_name='schema',\n source_uri=os.path.join(self._testdata_root, 'schema_gen'),\n artifact_type=standard_artifacts.Schema,\n reimport=True)\n\n # TransformGraph artifact for testing.\n self.transform_graph_importer = ImporterNode(\n instance_name='transform_graph',\n source_uri=os.path.join(self._testdata_root, 'transform',\n 'transform_output'),\n artifact_type=standard_artifacts.TransformGraph,\n reimport=True)\n\n # Model artifact for testing.\n self.model_1_importer = ImporterNode(\n instance_name='model_1',\n source_uri=os.path.join(self._testdata_root, 'trainer', 'previous'),\n artifact_type=standard_artifacts.Model,\n reimport=True)\n\n self.model_2_importer = ImporterNode(\n instance_name='model_2',\n source_uri=os.path.join(self._testdata_root, 'trainer', 'current'),\n artifact_type=standard_artifacts.Model,\n reimport=True)\n\n # ModelBlessing artifact for testing.\n self.model_blessing_importer = ImporterNode(\n instance_name='model_blessing',\n source_uri=os.path.join(self._testdata_root, 'model_validator',\n 'blessed'),\n artifact_type=standard_artifacts.ModelBlessing,\n reimport=True,\n custom_properties={'blessed': 1})\n\n def testCsvExampleGenOnDataflowRunner(self):\n \"\"\"CsvExampleGen-only test pipeline on DataflowRunner invocation.\"\"\"\n pipeline_name = 'kubeflow-csv-example-gen-dataflow-test-{}'.format(\n self._random_id())\n pipeline = self._create_dataflow_pipeline(pipeline_name, [\n CsvExampleGen(input=dsl_utils.csv_input(self._data_root)),\n ])\n self._compile_and_run_pipeline(pipeline)\n\n def testStatisticsGenOnDataflowRunner(self):\n \"\"\"StatisticsGen-only test pipeline on DataflowRunner.\"\"\"\n pipeline_name = 'kubeflow-statistics-gen-dataflow-test-{}'.format(\n self._random_id())\n pipeline = self._create_dataflow_pipeline(pipeline_name, [\n self.raw_examples_importer,\n StatisticsGen(examples=self.raw_examples_importer.outputs['result'])\n ])\n self._compile_and_run_pipeline(pipeline)\n\n def testTransformOnDataflowRunner(self):\n \"\"\"Transform-only test pipeline on DataflowRunner.\"\"\"\n pipeline_name = 'kubeflow-transform-dataflow-test-{}'.format(\n self._random_id())\n pipeline = self._create_dataflow_pipeline(pipeline_name, [\n self.raw_examples_importer, self.schema_importer,\n Transform(\n examples=self.raw_examples_importer.outputs['result'],\n schema=self.schema_importer.outputs['result'],\n module_file=self._transform_module)\n ])\n self._compile_and_run_pipeline(pipeline)\n\n def testEvaluatorOnDataflowRunner(self):\n \"\"\"Evaluator-only test pipeline on DataflowRunner.\"\"\"\n pipeline_name = 'kubeflow-evaluator-dataflow-test-{}'.format(\n self._random_id())\n pipeline = self._create_dataflow_pipeline(pipeline_name, [\n self.raw_examples_importer, self.model_1_importer,\n Evaluator(\n examples=self.raw_examples_importer.outputs['result'],\n model_exports=self.model_1_importer.outputs['result'],\n feature_slicing_spec=evaluator_pb2.FeatureSlicingSpec(specs=[\n evaluator_pb2.SingleSlicingSpec(\n column_for_slicing=['trip_start_hour'])\n ]))\n ])\n self._compile_and_run_pipeline(pipeline)\n\n def testModelValidatorOnDataflowRunner(self):\n 
\"\"\"ModelValidator-only test pipeline on DataflowRunner.\"\"\"\n pipeline_name = 'kubeflow-evaluator-dataflow-test-{}'.format(\n self._random_id())\n pipeline = self._create_dataflow_pipeline(pipeline_name, [\n self.raw_examples_importer, self.model_1_importer,\n ModelValidator(\n examples=self.raw_examples_importer.outputs['result'],\n model=self.model_1_importer.outputs['result'])\n ])\n self._compile_and_run_pipeline(pipeline)\n\n def testAIPlatformTrainerPipeline(self):\n \"\"\"Trainer-only test pipeline on AI Platform Training.\"\"\"\n pipeline_name = 'kubeflow-aip-trainer-test-{}'.format(self._random_id())\n pipeline = self._create_pipeline(pipeline_name, [\n self.schema_importer,\n self.transformed_examples_importer,\n self.transform_graph_importer,\n Trainer(\n custom_executor_spec=executor_spec.ExecutorClassSpec(\n ai_platform_trainer_executor.Executor),\n module_file=self._trainer_module,\n transformed_examples=self.transformed_examples_importer\n .outputs['result'],\n schema=self.schema_importer.outputs['result'],\n transform_graph=self.transform_graph_importer.outputs['result'],\n train_args=trainer_pb2.TrainArgs(num_steps=10),\n eval_args=trainer_pb2.EvalArgs(num_steps=5),\n custom_config={\n ai_platform_trainer_executor.TRAINING_ARGS_KEY: {\n 'project':\n self._gcp_project_id,\n 'region':\n self._gcp_region,\n 'jobDir':\n os.path.join(self._pipeline_root(pipeline_name), 'tmp'),\n 'masterConfig': {\n 'imageUri': self._container_image,\n }\n }\n }),\n ])\n self._compile_and_run_pipeline(pipeline)\n\n # TODO(muchida): Identify more model types to ensure models trained in TF 2\n # works with CAIP prediction service.\n def testAIPlatformPusherPipeline(self):\n \"\"\"Pusher-only test pipeline to AI Platform Prediction.\"\"\"\n pipeline_name_base = 'kubeflow-aip-pusher-test-{}'.format(self._random_id())\n # AI Platform does not accept '-' in the model name.\n model_name = ('%s_model' % pipeline_name_base).replace('-', '_')\n self.addCleanup(self._delete_ai_platform_model, model_name)\n\n def _pusher(model_importer, model_blessing_importer):\n return Pusher(\n custom_executor_spec=executor_spec.ExecutorClassSpec(\n ai_platform_pusher_executor.Executor),\n model=model_importer.outputs['result'],\n model_blessing=model_blessing_importer.outputs['result'],\n custom_config={\n ai_platform_pusher_executor.SERVING_ARGS_KEY: {\n 'model_name': model_name,\n 'project_id': self._gcp_project_id,\n }\n },\n )\n\n # Test creation of multiple versions under the same model_name.\n pipeline_name_1 = '%s-1' % pipeline_name_base\n pipeline_1 = self._create_pipeline(pipeline_name_1, [\n self.model_1_importer,\n self.model_blessing_importer,\n _pusher(self.model_1_importer, self.model_blessing_importer),\n ])\n self._compile_and_run_pipeline(pipeline_1)\n\n pipeline_name_2 = '%s-2' % pipeline_name_base\n pipeline_2 = self._create_pipeline(pipeline_name_2, [\n self.model_2_importer,\n self.model_blessing_importer,\n _pusher(self.model_2_importer, self.model_blessing_importer),\n ])\n self._compile_and_run_pipeline(pipeline_2)\n\n\nif __name__ == '__main__':\n logging.basicConfig(stream=sys.stdout, level=logging.INFO)\n tf.test.main()\n" ]
[ [ "tensorflow.test.main" ] ]
Trustworthy-Software/BATS
[ "eb122150dff61543bd8c88ac7e08987a0a3e47e0" ]
[ "experiment/evaluate.py" ]
[ "import pickle\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport Levenshtein\nimport math\nimport pandas as pd\nfrom sklearn.metrics import silhouette_score ,calinski_harabasz_score,davies_bouldin_score\nfrom scipy.spatial import distance\nfrom sklearn.preprocessing import StandardScaler, Normalizer, MinMaxScaler\nimport os\nfrom representation.word2vector import Word2vector\nimport json\nimport sklearn.metrics as metrics\nfrom sklearn.metrics import roc_curve, auc, accuracy_score, recall_score, precision_score\nfrom sklearn.metrics import confusion_matrix, average_precision_score\nfrom experiment.ML4prediction import MlPrediction\nfrom tqdm import tqdm\nimport seaborn as sns\nfrom matplotlib.patches import PathPatch\nimport scipy.stats as stats\nfrom sklearn.model_selection import KFold, StratifiedKFold\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom representation.CC2Vec import lmg_cc2ftr_interface\nimport time\n\nnotRecognizedByBert = ['Correct-Lang-6-patch1', 'Correct-Lang-6-patch1', 'Correct-Lang-6-patch1', 'Correct-Lang-6-patch1', 'Correct-Lang-6-patch1', 'Correct-Lang-6-patch1', 'Correct-Lang-6-patch1', 'Correct-Lang-6-patch1', 'Correct-Lang-6-patch1', 'Correct-Lang-25-patch1', 'Correct-Lang-53-patch1', 'Incorrect-Math-6-patch2', 'Incorrect-Math-6-patch2', 'Incorrect-Math-6-patch1', 'Correct-Math-56-patch1', 'Incorrect-Math-80-patch1', 'Incorrect-Math-104-patch1']\nnotRecognizedByCC2Vec = ['Correct-Lang-25-patch1', 'Correct-Lang-53-patch1', 'Correct-Math-56-patch1', 'Incorrect-Math-80-patch1']\nnotRecognized = notRecognizedByBert + notRecognizedByCC2Vec\n\nMODEL_MODEL_LOAD_PATH = '/Users/haoye.tian/Documents/University/model/java14_model/saved_model_iter8.release'\nMODEL_CC2Vec = '../representation/CC2Vec/'\n\n# Root_ODS_feature = '/Users/haoye.tian/Documents/University/data/PatchCollectingTOSEMYe'\n\nclass evaluation:\n def __init__(self, patch_w2v, test_data, test_name, test_vector, patch_vector, exception_type):\n self.patch_w2v = patch_w2v\n\n self.test_data = test_data\n\n self.test_name = test_name\n # self.patch_name = None\n self.test_vector = test_vector\n self.patch_vector = patch_vector\n self.exception_type = exception_type\n\n def sigmoid(self, x):\n return 1 / (1 + math.exp(-x))\n\n def find_path_patch(self, path_patch_sliced, project_id):\n available_path_patch = []\n\n project = project_id.split('_')[0]\n id = project_id.split('_')[1]\n\n tools = os.listdir(path_patch_sliced)\n for label in ['Correct', 'Incorrect']:\n for tool in tools:\n path_bugid = os.path.join(path_patch_sliced, tool, label, project, id)\n if os.path.exists(path_bugid):\n patches = os.listdir(path_bugid)\n for p in patches:\n path_patch = os.path.join(path_bugid, p)\n if os.path.isdir(path_patch):\n available_path_patch.append(path_patch)\n return available_path_patch\n\n def engineered_features(self, path_json):\n other_vector = []\n P4J_vector = []\n repair_patterns = []\n repair_patterns2 = []\n try:\n with open(path_json, 'r') as f:\n feature_json = json.load(f)\n features_list = feature_json['files'][0]['features']\n P4J = features_list[-3]\n RP = features_list[-2]\n RP2 = features_list[-1]\n\n '''\n # other\n for k,v in other.items():\n # if k.startswith('FEATURES_BINARYOPERATOR'):\n # for k2,v2 in other[k].items():\n # for k3,v3 in other[k][k2].items():\n # if v3 == 'true':\n # other_vector.append('1')\n # elif v3 == 'false':\n # other_vector.append('0')\n # 
else:\n # other_vector.append('0.5')\n if k.startswith('S'):\n if k.startswith('S6'):\n continue\n other_vector.append(v)\n else:\n continue\n '''\n\n # P4J\n if not list(P4J.keys())[100].startswith('P4J'):\n raise\n for k, v in P4J.items():\n # dict = P4J[i]\n # value = list(dict.values())[0]\n P4J_vector.append(int(v))\n\n # repair pattern\n for k, v in RP['repairPatterns'].items():\n repair_patterns.append(v)\n\n # repair pattern 2\n for k, v in RP2.items():\n repair_patterns2.append(v)\n\n # for i in range(len(features_list)):\n # dict_fea = features_list[i]\n # if 'repairPatterns' in dict_fea.keys():\n # # continue\n # for k,v in dict_fea['repairPatterns'].items():\n # repair_patterns.append(int(v))\n # else:\n # value = list(dict_fea.values())[0]\n # engineered_vector.append(value)\n except Exception as e:\n print('name: {}, exception: {}'.format(path_json, e))\n return []\n\n if len(P4J_vector) != 156 or len(repair_patterns) != 26 or len(repair_patterns2) != 13:\n print('name: {}, exception: {}'.format(path_json, 'null feature or shape error'))\n return []\n\n # return engineered_vector\n # return P4J_vector + repair_patterns + repair_patterns2\n return P4J_vector + repair_patterns\n\n # def getODSFeature(self, p):\n # tool = p.split('/')[-5]\n # label = p.split('/')[-4]\n # project = p.split('/')[-3]\n # id = p.split('/')[-2]\n #\n # name = p.split('/')[-1]\n # folder1, folder2 = '-'.join([name, project, id]), tool\n # all_root = '/'.join(\n # [Root_ODS_feature, tool, label, project, id, folder1 + '_' + folder2, folder1, folder2])\n # all_path_feature = all_root + '/features_' + name + '-' + project + '-' + id + '.json'\n # ods_feature_all = []\n #\n # if os.path.exists(all_path_feature):\n # ods_feature_all = self.engineered_features(all_path_feature)\n # else:\n # tmp = []\n # for i in range(9):\n # name_concatenated = name.split('-')\n # name_concatenated[0] += '#' + str(i)\n # name_concatenated = '-'.join(name_concatenated)\n # folder1, folder2 = '-'.join([name_concatenated, project, id]), tool\n # all_root = '/'.join(\n # [Root_ODS_feature, tool, label, project, id, folder1 + '_' + folder2, folder1, folder2])\n # all_path_feature = all_root + '/features_' + name_concatenated + '-' + project + '-' + id + '.json'\n # if os.path.exists(all_path_feature):\n # ods_feature = self.engineered_features(all_path_feature)\n # if ods_feature != []:\n # tmp.append(ods_feature)\n # else:\n # pass\n # if tmp != []:\n # ods_feature_all = np.sum(tmp, axis=0).tolist()\n # if ods_feature_all == []:\n # print('NoODS')\n # return ods_feature_all if ods_feature_all != [] else [0 for i in range(182)]\n\n def vector4patch(self, available_path_patch, compare=True,):\n vector_list = []\n vector_ML_list = []\n label_list = []\n name_list = []\n for p in available_path_patch:\n recogName = '-'.join([p.split('/')[-4], p.split('/')[-3], p.split('/')[-2], p.split('/')[-1]])\n if recogName in notRecognized: # some specific patches can not be recognized\n continue\n\n # vector\n json_key = p + '_.json' # pre-saved bert vector of BATs\n json_key_cross = p + '_cross.json' # pre-saved bert feature in Haoye's ASE2020\n if self.patch_w2v == 'bert':\n if os.path.exists(json_key):\n with open(json_key, 'r+') as f:\n vector_str = json.load(f)\n vector = np.array(list(map(float, vector_str)))\n else:\n w2v = Word2vector(patch_w2v='bert', )\n vector, vector_ML = w2v.convert_single_patch(p)\n vector_json = list(map(str, list(vector)))\n vector_json_cross = list(map(str, list(vector)))\n with open(json_key, 'w+') as f:\n 
jsonstr = json.dumps(vector_json, )\n f.write(jsonstr)\n # with open(json_key_cross, 'w+') as f:\n # jsonstr = json.dumps(vector_json_cross, )\n # f.write(jsonstr)\n elif self.patch_w2v == 'cc2vec':\n w2v = Word2vector(patch_w2v=self.patch_w2v, )\n vector, _ = w2v.convert_single_patch(p)\n elif self.patch_w2v == 'string':\n w2v = Word2vector(patch_w2v=self.patch_w2v, )\n vector, _ = w2v.convert_single_patch(p)\n else:\n raise\n # if list(vector.astype(float)) == list(np.zeros(240).astype(float)) or list(vector.astype(float)) == list(np.zeros(1024).astype(float)):\n # ttt = '-'.join([p.split('/')[-4], p.split('/')[-3], p.split('/')[-2], p.split('/')[-1]])\n # notRecognized.append(ttt)\n vector_list.append(vector)\n\n # compared with Haoye's ASE2020\n if compare:\n # Bert\n with open(json_key_cross, 'r+') as f:\n vector_str = json.load(f)\n vector_ML = np.array(list(map(float, vector_str)))\n vector_ML_list.append(vector_ML)\n\n # label\n if 'Correct' in p:\n label_list.append(1)\n label = 'Correct'\n elif 'Incorrect' in p:\n label_list.append(0)\n label = 'Incorrect'\n else:\n raise Exception('wrong label')\n\n # name\n tool = p.split('/')[-5]\n patchid = p.split('/')[-1]\n # name = tool + '-' + label + '-' + patchid\n # name = tool[:3] + patchid.replace('patch','')\n name = tool + patchid.replace('patch','')\n name_list.append(name)\n\n return name_list, label_list, vector_list, vector_ML_list\n\n def vector4patch_patchsim(self, available_path_patch, compare=True,):\n vector_list = []\n vector_ML_list = []\n label_list = []\n name_list = []\n for p in available_path_patch:\n\n # vector\n json_key = p + '_.json'\n json_key_cross = p + '_cross.json'\n if self.patch_w2v == 'bert':\n if os.path.exists(json_key):\n with open(json_key, 'r+') as f:\n vector_str = json.load(f)\n vector = np.array(list(map(float, vector_str)))\n else:\n w2v = Word2vector(patch_w2v='bert', )\n vector, vector_ML = w2v.convert_single_patch(p)\n vector_json = list(map(str, list(vector)))\n vector_json_cross = list(map(str, list(vector)))\n with open(json_key, 'w+') as f:\n jsonstr = json.dumps(vector_json, )\n f.write(jsonstr)\n # with open(json_key_cross, 'w+') as f:\n # jsonstr = json.dumps(vector_json_cross, )\n # f.write(jsonstr)\n elif self.patch_w2v == 'cc2vec':\n w2v = Word2vector(patch_w2v=self.patch_w2v, )\n vector, _ = w2v.convert_single_patch(p)\n elif self.patch_w2v == 'string':\n w2v = Word2vector(patch_w2v=self.patch_w2v, )\n vector, _ = w2v.convert_single_patch(p)\n else:\n raise\n # if list(vector.astype(float)) == list(np.zeros(240).astype(float)) or list(vector.astype(float)) == list(np.zeros(1024).astype(float)):\n # ttt = '-'.join([p.split('/')[-4], p.split('/')[-3], p.split('/')[-2], p.split('/')[-1]])\n # notRecognized.append(ttt)\n vector_list.append(vector)\n if compare:\n with open(json_key_cross, 'r+') as f:\n vector_str = json.load(f)\n vector_ML = np.array(list(map(float, vector_str)))\n vector_ML_list.append(vector_ML)\n\n # label\n if 'Correct' in p:\n label_list.append(1)\n label = 'Correct'\n elif 'Incorrect' in p:\n label_list.append(0)\n label = 'Incorrect'\n else:\n raise Exception('wrong label')\n\n # name\n tool = p.split('/')[-5]\n patchid = p.split('/')[-1]\n # name = tool + '-' + label + '-' + patchid\n # name = tool[:3] + patchid.replace('patch','')\n name = '-'.join([tool[:3], p.split('/')[-4], p.split('/')[-3], p.split('/')[-2], patchid])\n name_list.append(name)\n\n return name_list, label_list, vector_list, vector_ML_list\n\n # baseline\n def get_correct_patch_list(self, 
failed_test_index, model=None):\n scaler = Normalizer()\n all_test_vector = scaler.fit_transform(self.test_vector)\n\n scaler_patch = None\n if model == 'string':\n all_patch_vector = self.patch_vector\n else:\n scaler_patch = scaler.fit(self.patch_vector)\n all_patch_vector = scaler_patch.transform(self.patch_vector)\n\n # construct new test and patch dataset(repository) by excluding the current failed test cases being predicted\n dataset_test = np.delete(all_test_vector, failed_test_index, axis=0)\n dataset_patch = np.delete(all_patch_vector, failed_test_index, axis=0)\n dataset_name = np.delete(self.test_name, failed_test_index, axis=0)\n dataset_func = np.delete(self.test_data[3], failed_test_index, axis=0)\n dataset_exp = np.delete(self.exception_type, failed_test_index, axis=0)\n\n return dataset_patch\n def get_correct_patch_list2(self, project_id, model=None):\n scaler = Normalizer()\n\n scaler_patch = None\n # if model == 'string':\n # all_patch_vector = self.patch_vector\n # else:\n # scaler_patch = scaler.fit(self.patch_vector)\n # all_patch_vector = scaler_patch.transform(self.patch_vector)\n\n all_patch_vector = []\n with open('../cc2vec_correct_patch.pickle', 'rb') as f:\n dict = pickle.load(f)\n for k in dict.keys():\n project = k.split('-')[0]\n id = k.split('-')[1]\n if project_id != (project+'_'+id):\n patch_vector = dict[k]\n all_patch_vector.append(patch_vector)\n\n return np.array(all_patch_vector)\n\n def get_associated_patch_list(self, failed_test_index, k=5, cut_off=0.0, model=None):\n scaler = Normalizer()\n all_test_vector = scaler.fit_transform(self.test_vector)\n\n scaler_patch = None\n if model == 'string':\n all_patch_vector = self.patch_vector\n else:\n scaler_patch = scaler.fit(self.patch_vector)\n all_patch_vector = scaler_patch.transform(self.patch_vector)\n\n # construct new test and patch dataset(repository) by excluding the current failed test cases being predicted\n dataset_test = np.delete(all_test_vector, failed_test_index, axis=0)\n dataset_patch = np.delete(all_patch_vector, failed_test_index, axis=0)\n dataset_name = np.delete(self.test_name, failed_test_index, axis=0)\n dataset_func = np.delete(self.test_data[3], failed_test_index, axis=0)\n dataset_exp = np.delete(self.exception_type, failed_test_index, axis=0)\n\n patch_list = [] # the associated patches of similar test cases\n cut_off_list = []\n closest_score = []\n for i in failed_test_index:\n failed_test_vector = all_test_vector[i]\n\n # Deprecated. exception type of current bug id.\n exp_type = self.exception_type[i]\n if ':' in exp_type:\n exp_type = exp_type.split(':')[0]\n\n score_test = []\n # find the k most closest test vector from other bug-id\n for j in range(len(dataset_test)):\n simi_test_vec = dataset_test[j]\n\n # Deprecated. 
exception type from other bug-id.\n simi_exp_type = dataset_exp[j]\n if ':' in simi_exp_type:\n simi_exp_type = simi_exp_type.split(':')[0]\n flag = 1 if exp_type == simi_exp_type else 0\n\n dist = distance.euclidean(simi_test_vec, failed_test_vector) / (1 + distance.euclidean(simi_test_vec, failed_test_vector))\n score_test.append([j, 1-dist, flag]) # we use similarity instead of distance\n k_index_list = sorted(score_test, key=lambda x: float(x[1]), reverse=True)[:k]\n closest_score.append(k_index_list[0][1])\n # print('the closest test score is {}'.format(k_index_list[0][1]))\n\n # keep the test case with simi score >= 0.8 or *\n k_index = np.array([v[0] for v in k_index_list if v[1] >= cut_off])\n cut_offs = np.array([v[1] for v in k_index_list if v[1] >= cut_off])\n\n if k_index.size == 0:\n continue\n\n # exhibit the similar test case\n print('******')\n print('{}'.format(self.test_name[i]))\n print('the similar test cases:')\n k_simi_test = dataset_name[k_index]\n func = dataset_func[k_index]\n for t in range(len(k_simi_test)):\n print('{}'.format(k_simi_test[t]))\n # print('{}'.format(func[t]))\n\n k_patch_vector = dataset_patch[k_index]\n patch_list.append(k_patch_vector)\n # cut_off_list.append(cut_offs)\n\n # print('exception type: {}'.format(exp_type.split('.')[-1]))\n return patch_list, scaler_patch, closest_score,\n\n # deprecated\n def evaluate_collected_projects(self, path_collected_patch):\n projects = {'Chart': 26, 'Lang': 65, 'Math': 106, 'Time': 27}\n # projects = {'Math': 106}\n all_closest_score = []\n similarity_correct_minimum = 1\n similarity_incorrect = []\n for project, number in projects.items():\n recommend_list_project = []\n print('Testing {}'.format(project))\n for id in range(1, number + 1):\n recommend_list = []\n print('{}_{} ------'.format(project, id))\n # extract failed test index according to bug_id\n project_id = '_'.join([project, str(id)])\n failed_test_index = [i for i in range(len(self.test_name)) if self.test_name[i].startswith(project_id+'-')]\n if failed_test_index == []:\n print('failed tests of this bugid not found:{}'.format(project_id))\n continue\n # find corresponding patches generated by tools\n available_path_patch = self.find_path_patch(path_collected_patch, project_id)\n if available_path_patch == []:\n print('No tool patches found:{}'.format(project_id))\n continue\n\n correct = incorrect = 0\n for p in available_path_patch:\n if 'Correct' in p:\n correct += 1\n elif 'Incorrect' in p:\n incorrect += 1\n\n # get patch list for failed test case\n patch_list, scaler_patch, closest_score = self.get_patch_list(failed_test_index, k=1, cut_off=0.7, model=self.patch_w2v)\n all_closest_score += closest_score\n if patch_list == []:\n print('no closest test case found')\n continue\n\n # return vector for path patch\n name_list, label_list, vector_list, vector_ML_list = self.vector4patch(available_path_patch, 'False')\n # if not 0 in label_list or not 1 in label_list:\n # print('all same')\n # continue\n\n for i in range(len(name_list)):\n name = name_list[i]\n label = label_list[i]\n vector_new_patch = vector_list[i]\n dist = self.predict(patch_list, vector_new_patch, scaler_patch)\n if self.patch_w2v == 'string':\n score = 2800 - dist\n else:\n score = 1 - dist\n if math.isnan(score):\n continue\n # record\n recommend_list.append([name, label, score])\n recommend_list_project.append([name, label, score])\n if recommend_list == []:\n continue\n print('{} recommend list:'.format(project))\n recommend_list = pd.DataFrame(sorted(recommend_list, 
key=lambda x: x[2], reverse=True))\n Correct = recommend_list[recommend_list[1] == 1]\n Incorrect = recommend_list[recommend_list[1] == 0]\n plt.figure(figsize=(10, 4))\n plt.bar(Correct[:].index.tolist(), Correct[:][2], color=\"red\")\n plt.bar(Incorrect[:].index.tolist(), Incorrect[:][2], color=\"lightgrey\",)\n plt.xticks(recommend_list[:].index.tolist(), recommend_list[:][0].tolist())\n plt.xlabel('patchid by tool')\n plt.ylabel('Score of patch')\n plt.savefig('../fig/RQ3/recommend_{}'.format(project_id))\n plt.cla()\n plt.close()\n # plt.show()\n\n # print('{} recommend project:'.format(project))\n if recommend_list_project == []:\n continue\n recommend_list_project = pd.DataFrame(sorted(recommend_list_project, key=lambda x: x[2], reverse=True))\n Correct = recommend_list_project[recommend_list_project[1] == 1]\n Incorrect = recommend_list_project[recommend_list_project[1] == 0]\n print('{}: {}'.format(project, recommend_list_project.shape[0]), end=' ')\n if Incorrect.shape[0] != 0 and Correct.shape[0] != 0:\n filter_out_incorrect = recommend_list_project.shape[0] - Correct[:].index.tolist()[-1] - 1\n print('Incorrect filter rate: {}'.format(filter_out_incorrect/Incorrect.shape[0]))\n # print('The minimum similarity score of the correct patch: {}'.format(np.array(Correct)[-1][2]))\n if np.array(Correct)[-1][2] < similarity_correct_minimum:\n similarity_correct_minimum = np.array(Correct)[-1][2]\n similarity_incorrect.append(list(Incorrect[:][2]))\n plt.bar(Correct[:].index.tolist(), Correct[:][2], color=\"red\")\n plt.bar(Incorrect[:].index.tolist(), Incorrect[:][2], color=\"lightgrey\")\n # plt.xticks(recommend_list_project[:].index.tolist(), recommend_list_project[:][0].tolist())\n plt.xlabel('patchid by tool')\n plt.ylabel('Score of patch')\n plt.title('recommend for {}'.format(project))\n plt.savefig('../fig/RQ3/{}_recommend.png'.format(project))\n plt.cla()\n plt.close()\n print('The minimum similarity score of the correct patch: {}'.format(similarity_correct_minimum))\n for i in range(len(similarity_incorrect)):\n print('The number of incorrect patch: {}'.format(np.where(np.array(similarity_incorrect[i]) < similarity_correct_minimum)[0].size))\n plt.bar(range(len(all_closest_score)), sorted(all_closest_score, reverse=True),)\n plt.xlabel('the closest test case')\n plt.ylabel('Similarity Score of the closest test case')\n plt.title('Similarity of test case')\n plt.savefig('../fig/RQ3/Similarity_Test.png')\n\n def predict_collected_projects(self, path_collected_patch=None, cut_off=0.8, distance_method = distance.cosine, ASE2020=False, patchsim=False,):\n print('Research Question 2')\n projects = {'Chart': 26, 'Lang': 65, 'Math': 106, 'Time': 27}\n y_preds, y_trues = [], []\n y_preds_baseline, y_trues = [], []\n MAP, MRR, number_patch_MAP = [], [], 0\n MAP_baseline, MRR_baseline, number_patch_MAP_baseline = [], [], 0\n recommend_list_project = []\n x_train, y_train, x_test, y_test = [], [], [], []\n box_projecs_co, box_projecs_inco, projects_name = [], [], []\n mean_stand_dict = {0.0: [443, 816], 0.5: ['', ''], 0.6: [273, 246], 0.7: [231, 273], 0.8: [180, 235], 0.9: [130, 130]}\n print('test case similarity cut-off: {}'.format(cut_off))\n test_case_similarity_list, patch1278_list_short = [], []\n patch_available_distribution = {}\n patch1278_list = []\n for project, number in projects.items():\n print('Testing {}'.format(project))\n for id in range(1, number + 1):\n print('----------------')\n print('{}_{}'.format(project, id))\n project_id = '_'.join([project, str(id)])\n # if 
project_id != 'Chart_26':\n # continue\n # extract failed test index according to bug_id\n failed_test_index = [i for i in range(len(self.test_name)) if self.test_name[i].startswith(project_id+'-')]\n if failed_test_index == []:\n print('Couldnt find any failed test case for this bugid: {}'.format(project_id))\n # print('{} patches skipped'.format(len(available_path_patch)))\n continue\n\n # find paths of patches generated by tools\n available_path_patch = self.find_path_patch(path_collected_patch, project_id)\n if available_path_patch == []:\n print('No generated patches of APR tools found:{}'.format(project_id))\n continue\n\n # return vector according to available_path_patch\n # if patchsim:\n # name_list, label_list, generated_patch_list, vector_ML_list = self.vector4patch_patchsim(available_path_patch, compare=ASE2020,)\n # else:\n name_list, label_list, generated_patch_list, vector_ML_list = self.vector4patch(available_path_patch, compare=ASE2020,)\n\n # # depulicate\n # index_to_delete = []\n # for i in range(len(vector_ML_list)):\n # if list(vector_ML_list[i]) in unique:\n # index_to_delete.append(i)\n # else:\n # unique.append(list(vector_ML_list[i]))\n # for counter, index in enumerate(index_to_delete):\n # index = index - counter\n # name_list.pop(index)\n # label_list.pop(index)\n # generated_patch_list.pop(index)\n # vector_ML_list.pop(index)\n\n if name_list == []:\n print('all the patches can not be recognized')\n continue\n\n # plot distribution of correct and incorrect patches\n co = label_list.count(1)\n inco = label_list.count(0)\n box_projecs_co.append(co)\n box_projecs_inco.append(inco)\n projects_name.append(project)\n\n # access the associated patch list(patch search space) of similar failed test cases\n associated_patch_list, scaler_patch, closest_score = self.get_associated_patch_list(failed_test_index, k=5, cut_off=cut_off, model=self.patch_w2v)\n # baseline\n correct_patches_baseline = self.get_correct_patch_list(failed_test_index, model=self.patch_w2v)\n if associated_patch_list == []:\n print('No closest test case that satisfied with the condition of cut-off similarity')\n print('save train data for ML model of ASE2020')\n # comparison with ML prediction in ASE2020\n if ASE2020 and vector_ML_list != []:\n for i in range(len(label_list)):\n # if list(vector_list[i].astype(float)) != list(np.zeros(240).astype(float)):\n x_train.append(vector_ML_list[i])\n y_train.append(label_list[i])\n continue\n\n recommend_list, recommend_list_baseline = [], []\n # calculate the center of associated patches(repository)\n centers = self.dynamic_threshold2(associated_patch_list, distance_method=distance_method, sumup='mean')\n centers_baseline = [correct_patches_baseline.mean(axis=0)]\n for i in range(len(name_list)):\n name = name_list[i]\n tested_patch = generated_patch_list[i]\n y_true = label_list[i]\n # y_pred = self.predict_label(centers, threshold_list, vector_new_patch, scaler_patch)\n # y_pred_prob = self.predict_prob(centers, threshold_list, vector_new_patch, scaler_patch)\n y_pred_prob, y_pred = self.predict_recom(centers, tested_patch, scaler_patch, mean_stand_dict[cut_off], distance_method=distance_method,)\n y_pred_prob_baseline, y_pred_baseline = self.predict_recom(centers_baseline, tested_patch, scaler_patch, mean_stand_dict[cut_off], distance_method=distance_method,)\n\n if not math.isnan(y_pred_prob):\n recommend_list.append([name, y_pred, y_true, y_pred_prob])\n recommend_list_baseline.append([name, y_pred_baseline, y_true, y_pred_prob_baseline])\n\n 
y_preds.append(y_pred_prob)\n y_preds_baseline.append(y_pred_prob_baseline)\n y_trues.append(y_true)\n\n # the current patches addressing this bug. The highest test case similarity we can find for test case of this bug is in 'test_case_similarity_list'\n test_case_similarity_list.append(max(closest_score))\n\n # ML prediction for comparison\n if ASE2020:\n x_test.append(vector_ML_list[i])\n y_test.append(y_true)\n\n # ML prediction for comparison\n\n # record distribution of available patches\n key = name[:3]+str(y_true)\n if key not in patch_available_distribution:\n patch_available_distribution[key] = 1\n else:\n patch_available_distribution[key] += 1\n\n # save the name of 1278 patches for evaluating\n path = available_path_patch[i]\n patchname = path.split('/')[-1]\n tool = path.split('/')[-5]\n patchname_complete = '-'.join([patchname, project, str(id), tool, str(y_true)])\n patch1278_list.append(patchname_complete)\n\n if not (not 1 in label_list or not 0 in label_list) and recommend_list != []: # ensure there are correct and incorrect patches in recommended list\n AP, RR = self.evaluate_recommend_list(recommend_list)\n if AP != None and RR != None:\n MAP.append(AP)\n MRR.append(RR)\n number_patch_MAP += len(recommend_list)\n if not (not 1 in label_list or not 0 in label_list) and recommend_list_baseline != []: # ensure there are correct and incorrect patches in recommended list\n AP, RR = self.evaluate_recommend_list(recommend_list_baseline)\n if AP != None and RR != None:\n MAP_baseline.append(AP)\n MRR_baseline.append(RR)\n number_patch_MAP_baseline += len(recommend_list_baseline)\n\n recommend_list_project += recommend_list\n\n # patch distribution\n # print(patch_available_distribution)\n\n if y_trues == [] or not 1 in y_trues or not 0 in y_trues:\n return\n\n # evaluation based on a few metrics\n\n # 1. independently\n if not ASE2020 and not patchsim:\n ## baseline\n if cut_off == 0.0:\n print('\\nBaseline: ')\n self.evaluation_metrics(y_trues, y_preds_baseline)\n self.MAP_MRR_Mean(MAP_baseline, MRR_baseline, number_patch_MAP_baseline)\n ## BATS\n print('\\nBATS: ')\n recall_p, recall_n, acc, prc, rc, f1, auc_, result_APR = self.evaluation_metrics(y_trues, y_preds)\n self.MAP_MRR_Mean(MAP, MRR, number_patch_MAP)\n\n # 2. 
Compare and Combine\n if ASE2020 and cut_off > 0.0:\n print('------')\n print('Evaluating ASE2020 Performance')\n MlPrediction(x_train, y_train, x_test, y_test, y_pred_bats=y_preds, test_case_similarity_list=test_case_similarity_list, algorithm='lr', comparison=ASE2020, cutoff=cut_off).predict()\n MlPrediction(x_train, y_train, x_test, y_test, y_pred_bats=y_preds, test_case_similarity_list=test_case_similarity_list, algorithm='rf', comparison=ASE2020, cutoff=cut_off).predict()\n\n with open('./patch'+str(len(patch1278_list))+'.txt', 'w+') as f:\n for p in patch1278_list:\n f.write(p + '\\n')\n\n if patchsim:\n print('------')\n print('Evaluating PatchSim improvement')\n y_combine, y_combine_trues = [], []\n y_patchsim = []\n BATs_cnt = 0\n with open('patch325_result.txt', 'r+') as f_patchsim:\n for line in f_patchsim:\n line = line.strip()\n name_ps, prediction_ps = line.split(',')[0], line.split(',')[1]\n i = patch1278_list.index(name_ps)\n y_combine_trues.append(y_trues[i])\n y_patchsim.append(float(prediction_ps))\n if test_case_similarity_list[i] >= 0.8:\n y_combine.append(y_preds[i])\n BATs_cnt += 1\n else:\n y_combine.append(float(prediction_ps))\n print('BATs_cnt: {}, PatchSim_cnt: {}'.format(BATs_cnt, len(y_combine)-BATs_cnt))\n self.evaluation_metrics(y_combine_trues, y_patchsim)\n print('----------')\n self.evaluation_metrics(y_combine_trues, y_combine)\n\n '''\n if patchsim:\n print('------')\n print('Evaluating Incorrect Excluded on PatchSim')\n # [name, y_pred, y_true, y_pred_prob]\n recommend_list_project = pd.DataFrame(sorted(recommend_list_project, key=lambda x: x[3], reverse=True))\n Correct = recommend_list_project[recommend_list_project[2]==1]\n filter_out_incorrect = recommend_list_project.shape[0] - Correct[:].index.tolist()[-1] - 1\n\n print('Test data size: {}, Incorrect: {}, Correct: {}'.format(recommend_list_project.shape[0], recommend_list_project.shape[0]-Correct.shape[0],\n Correct.shape[0]))\n # print('Exclude incorrect: {}'.format(filter_out_incorrect))\n # print('Exclude rate: {}'.format(filter_out_incorrect/(recommend_list_project.shape[0]-Correct.shape[0])))\n # print('Excluded name: {}'.format(recommend_list_project.iloc[Correct[:].index.tolist()[-1]+1:][0].values))\n\n # topHalf = recommend_list_project.iloc[:Correct[:].index.tolist()[-1] + 1]\n # topHalfIncorrect = topHalf[topHalf[2] == 0][0].values\n # print('Noe excluded name: {}'.format(topHalfIncorrect))\n '''\n self.statistics_box(box_projecs_co, box_projecs_inco, projects_name)\n\n # def improve_ML(self, path_collected_patch=None, cut_off=0.8, distance_method = distance.cosine, kfold=10, algorithm='lr', method='combine'):\n # print('Research Question 3: Improvement')\n # projects = {'Chart': 26, 'Lang': 65, 'Math': 106, 'Time': 27}\n # y_preds_bats, y_preds_prob_bats, y_trues = [], [], []\n # x_all, y_all, x_test, y_test = [], [], [], []\n # # comparison = 'ASE2020' # will make comparison if the value equals to 'ASE2020'\n # mean_stand_dict = {0.0: [443, 816], 0.6: [273, 246], 0.7: [231, 273], 0.8: [180, 235], 0.9: [130, 130]}\n # print('test case similarity cut-off: {}'.format(cut_off))\n # unique_dict = []\n # for project, number in projects.items():\n # print('Testing {}'.format(project))\n # for id in range(1, number + 1):\n # print('----------------')\n # print('{}_{}'.format(project, id))\n # project_id = '_'.join([project, str(id)])\n #\n # # extract failed test index according to bug_id\n # failed_test_index = [i for i in range(len(self.test_name)) if 
self.test_name[i].startswith(project_id+'-')]\n # if failed_test_index == []:\n # print('Couldnt find any failed test case for this bugid: {}'.format(project_id))\n # # print('{} patches skipped'.format(len(available_path_patch)))\n # continue\n #\n # # find paths of patches generated by tools\n # available_path_patch = self.find_path_patch(path_collected_patch, project_id)\n # if available_path_patch == []:\n # print('No generated patches of APR tools found:{}'.format(project_id))\n # continue\n #\n # # return vector according to available_path_patch\n # name_list, label_list, generated_patch_list, vector_ML_list, vector_ODS_list = self.vector4patch(available_path_patch, compare=ASE2020,)\n # if name_list == []:\n # print('all the patches can not be recognized')\n # continue\n #\n # # access the associated patch list(patch repository) of similar failed test cases\n # associated_patch_list, scaler_patch, closest_score = self.get_associated_patch_list(failed_test_index, k=5, cut_off=cut_off, model=self.patch_w2v)\n #\n # # print('save train data for ML model of ASE2020')\n # if ASE2020 and vector_ML_list != []:\n # for i in range(len(vector_ML_list)):\n # # if list(vector_list[i].astype(float)) != list(np.zeros(240).astype(float)):\n # if vector_ML_list[i] in unique_dict:\n # continue\n # else:\n # x_all.append(vector_ML_list[i])\n # y_all.append(label_list[i])\n #\n # # calculate the center of associated patches(repository)\n # if associated_patch_list == []:\n # # fill value for the prediction of BATS to keep it the same length as ML prediction\n # y_preds_bats += [-999 for i in range(len(vector_ML_list))]\n # y_preds_prob_bats += [-999 for i in range(len(vector_ML_list))]\n # y_trues += [i for i in label_list]\n # else:\n # centers = self.dynamic_threshold2(associated_patch_list, distance_method=distance_method, sumup='mean')\n # for i in range(len(vector_ML_list)):\n # name = name_list[i]\n # tested_patch = generated_patch_list[i]\n # y_true = label_list[i]\n # # y_pred = self.predict_label(centers, threshold_list, vector_new_patch, scaler_patch)\n # # y_pred_prob = self.predict_prob(centers, threshold_list, vector_new_patch, scaler_patch)\n # y_pred_prob, y_pred = self.predict_recom(centers, tested_patch, scaler_patch, mean_stand_dict[cut_off], distance_method=distance_method,)\n #\n # if math.isnan(y_pred_prob):\n # y_preds_bats.append(-999)\n # y_preds_prob_bats.append(-999)\n # y_trues.append(y_true)\n # else:\n # y_preds_bats.append(y_pred)\n # y_preds_prob_bats.append(y_pred_prob)\n # y_trues.append(y_true)\n #\n # # run cross validation for ML-based approach in ASE2020\n # x_all_unique, y_all_unique, y_preds_prob_bats_unique = [], [], []\n # for i in range(len(x_all)):\n # if list(x_all[i]) in unique_dict:\n # continue\n # else:\n # unique_dict.append(list(x_all[i]))\n # x_all_unique.append(x_all[i])\n # y_all_unique.append(y_all[i])\n # y_preds_prob_bats_unique.append(y_preds_prob_bats[i])\n # x_all_unique = np.array(x_all_unique)\n # y_all_unique = np.array(y_all_unique)\n # y_preds_prob_bats_unique = np.array(y_preds_prob_bats_unique)\n # skf = StratifiedKFold(n_splits=kfold, shuffle=True)\n # accs, prcs, rcs, f1s, aucs = list(), list(), list(), list(), list()\n # rcs_p, rcs_n = list(), list()\n # for train_index, test_index in skf.split(x_all_unique, y_all_unique):\n # x_train, y_train = x_all_unique[train_index], y_all_unique[train_index]\n # x_test, y_test = x_all_unique[test_index], y_all_unique[test_index]\n #\n # # prediction by BATs\n # # y_pred_bats = 
y_preds_bats[test_index]\n # y_test_pred_prob_bats = y_preds_prob_bats_unique[test_index]\n #\n # # standard data\n # scaler = StandardScaler().fit(x_train)\n # # scaler = MinMaxScaler().fit(x_train)\n # x_train = scaler.transform(x_train)\n # x_test = scaler.transform(x_test)\n #\n # print('\\ntrain data: {}, test data: {}'.format(len(x_train), len(x_test)), end='')\n #\n # clf = None\n # if algorithm == 'lr':\n # clf = LogisticRegression(solver='lbfgs', class_weight={1: 1},).fit(X=x_train, y=y_train)\n # elif algorithm == 'dt':\n # clf = DecisionTreeClassifier().fit(X=x_train, y=y_train, sample_weight=None)\n # elif algorithm == 'rf':\n # clf = RandomForestClassifier(class_weight={1: 1}, ).fit(X=x_train, y=y_train)\n #\n # if method == 'combine':\n # # combine both\n # number_bats = 0\n # number_ML = 0\n # y_pred_final = []\n # for i in range(len(y_test)):\n #\n # # apply BATs first\n # if y_test_pred_prob_bats[i] != -999:\n # number_bats += 1\n # y_pred_final.append(y_test_pred_prob_bats[i])\n # else:\n # number_ML += 1\n # y_test_pred_prob_ML = clf.predict_proba(x_test[i].reshape(1,-1))[:, 1]\n # y_pred_final.append(y_test_pred_prob_ML)\n #\n # # y_pred_final.append((y_test_pred_prob_bats[i] + clf.predict_proba(x_test[i].reshape(1,-1))[:, 1])/2.0)\n # print('\\nNumber of BATs and ML: {} {}'.format(number_bats, number_ML))\n # else:\n # y_pred_final = clf.predict_proba(x_test)[:, 1]\n #\n # # print('{}: '.format(algorithm))\n # recall_p, recall_n, acc, prc, rc, f1, auc_, _ = self.evaluation_metrics(list(y_test), y_pred_final)\n #\n # accs.append(acc)\n # prcs.append(prc)\n # rcs.append(rc)\n # f1s.append(f1)\n #\n # aucs.append(auc_)\n # rcs_p.append(recall_p)\n # rcs_n.append(recall_n)\n #\n # print('\\n{}-fold cross validation mean: '.format(kfold))\n # print('Accuracy: {:.1f} -- Precision: {:.1f} -- +Recall: {:.1f} -- F1: {:.1f} -- AUC: {:.3f}'.format(np.array(accs).mean() * 100, np.array(prcs).mean() * 100, np.array(rcs).mean() * 100, np.array(f1s).mean() * 100, np.array(aucs).mean()))\n # print('AUC: {:.3f}, +Recall: {:.3f}, -Recall: {:.3f}'.format(np.array(aucs).mean(), np.array(rcs_p).mean(), np.array(rcs_n).mean()))\n #\n\n def predict(self, patch_list, new_patch, scaler_patch):\n if self.patch_w2v != 'string':\n new_patch = scaler_patch.transform(new_patch.reshape((1, -1)))\n dist_final = []\n # patch list includes multiple patches for multi failed test cases\n for y in range(len(patch_list)):\n patches = patch_list[y]\n dist_k = []\n for z in range(len(patches)):\n vec = patches[z]\n # dist = np.linalg.norm(vec - new_patch)\n if self.patch_w2v == 'string':\n dist = Levenshtein.distance(vec[0], new_patch[0])\n dist_k.append(dist)\n else:\n # choose method to calculate distance\n dist = distance.cosine(vec, new_patch)\n # dist = distance.euclidean(vec, new_patch)/(1 + distance.euclidean(vec, new_patch))\n dist_k.append(dist)\n\n dist_mean = np.array(dist_k).mean()\n dist_min = np.array(dist_k).min()\n\n # print('mean:{} min:{}'.format(dist_mean, dist_min))\n dist_final.append(dist_min)\n\n dist_final = np.array(dist_final).mean()\n return dist_final\n\n def dynamic_threshold(self, patch_list):\n centers = []\n threshold_list = []\n # patch list includes multiple patches for multi failed test cases\n for y in range(len(patch_list)):\n patches = patch_list[y]\n\n # threshold 1: center of patch list\n center = np.array(patches).mean(axis=0)\n dist_mean = np.array([distance.cosine(p, center) for p in patches]).mean()\n # dist_mean = np.array([distance.cosine(p, center) for p in 
patches]).max()\n score_mean = 1-dist_mean\n\n centers.append(center)\n threshold_list.append(score_mean)\n return centers, threshold_list\n\n def dynamic_threshold2(self, patch_list, distance_method=distance.euclidean, sumup='mean'):\n # patch_list: [[top-5 patches for failed test case 1], [top-5 patches failed test case 2], [top-5 patches failed test case 3]]\n if self.patch_w2v != 'string':\n if len(patch_list) == 1:\n center = patch_list[0].mean(axis=0)\n # if sumup == 'mean':\n # dist_mean = np.array([distance_method(p, center) for p in patch_list[0]]).mean()\n # elif sumup == 'max':\n # dist_mean = np.array([distance_method(p, center) for p in patch_list[0]]).max()\n else:\n # calculate center\n patches = patch_list[0]\n for i in range(1, len(patch_list)):\n patches = np.concatenate((patches, patch_list[i]), axis=0)\n center = patches.mean(axis=0)\n # if sumup == 'mean':\n # dist_mean = np.array([distance_method(p, center) for p in patches]).mean()\n # elif sumup == 'max':\n # dist_mean = np.array([distance_method(p, center) for p in patches]).max()\n else:\n return patch_list\n\n return [center]\n\n def predict_label(self, centers, threshold_list, new_patch, scaler_patch, ):\n if self.patch_w2v != 'string':\n new_patch = scaler_patch.transform(new_patch.reshape((1, -1)))\n\n vote_list = []\n # patch list includes multiple patches for multi failed test cases\n for y in range(len(centers)):\n center = centers[y]\n score_mean = threshold_list[y]\n\n # choose method to calculate distance\n dist_new = distance.cosine(new_patch, center)\n # dist_new = distance.euclidean(vec, new_patch)/(1 + distance.euclidean(vec, new_patch))\n\n score_new = 1 - dist_new\n\n vote_list.append(1 if score_new >= score_mean else 0)\n if vote_list.count(1) >= len(centers) / 2.0:\n return 1\n else:\n return 0\n\n def predict_prob(self, centers, threshold_list, new_patch, scaler_patch, distance_method=distance.euclidean):\n if self.patch_w2v != 'string':\n new_patch = scaler_patch.transform(new_patch.reshape((1, -1)))\n\n center = centers[0]\n\n dist_new = distance_method(new_patch, center)\n\n # normalize range\n if distance_method == distance.euclidean:\n dist_new = dist_new / (1+dist_new)\n score_prob_new = 1 - dist_new\n\n elif distance_method == distance.cosine:\n dist_new = dist_new / (1 + dist_new)\n score_prob_new = 1 - dist_new\n\n return score_prob_new\n\n def predict_recom(self, centers, new_patch, scaler_patch, mean_stand=None, distance_method=distance.euclidean):\n if self.patch_w2v != 'string':\n new_patch = scaler_patch.transform(new_patch.reshape((1, -1)))\n\n center = centers[0]\n dist_new = distance_method(new_patch, center)\n # dist_new = 1-distance_method(new_patch, center)[0][1] # pearson\n\n # normalize range\n # score_prob_new = self.sigmoid(1 - dist_new)\n dist_new = dist_new / (1+dist_new)\n score_prob_new = 1 - dist_new\n\n # if score_prob_new >= score_mean:\n if score_prob_new >= 0.5:\n y_pred = 1\n else:\n y_pred = 0\n return score_prob_new, y_pred\n\n else:\n new_patch = new_patch[0]\n dist_new = []\n # mean distance to every patch\n for i in range(len(centers)):\n patches_top5 = centers[i]\n for p in patches_top5:\n dist_new.append(Levenshtein.distance(new_patch, str(p)))\n dist_new = np.array(dist_new).mean()\n\n # (dist_new-mean)/stand\n dist_new = (dist_new-mean_stand[0])/mean_stand[1]\n try:\n score_prob_new = self.sigmoid(-dist_new)\n except:\n print(dist_new)\n\n if score_prob_new >= 0.5:\n y_pred = 1\n else:\n y_pred = 0\n\n return score_prob_new, y_pred\n\n def 
evaluation_metrics(self, y_trues, y_pred_probs):\n fpr, tpr, thresholds = roc_curve(y_true=y_trues, y_score=y_pred_probs, pos_label=1)\n auc_ = auc(fpr, tpr)\n\n y_preds = [1 if p >= 0.5 else 0 for p in y_pred_probs]\n\n acc = accuracy_score(y_true=y_trues, y_pred=y_preds)\n prc = precision_score(y_true=y_trues, y_pred=y_preds)\n rc = recall_score(y_true=y_trues, y_pred=y_preds)\n f1 = 2 * prc * rc / (prc + rc)\n\n tn, fp, fn, tp = confusion_matrix(y_trues, y_preds).ravel()\n recall_p = tp / (tp + fn)\n recall_n = tn / (tn + fp)\n\n result = '***------------***\\n'\n result += 'Evaluating AUC, F1, +Recall, -Recall\\n'\n result += 'Test data size: {}, Correct: {}, Incorrect: {}\\n'.format(len(y_trues), y_trues.count(1), y_trues.count(0))\n result += 'Accuracy: %f -- Precision: %f -- +Recall: %f -- F1: %f \\n' % (acc, prc, rc, f1)\n result += 'AUC: {:.3f}, +Recall: {:.3f}, -Recall: {:.3f}'.format(auc_, recall_p, recall_n)\n # return , auc_\n\n print(result)\n # print('AP: {}'.format(average_precision_score(y_trues, y_pred_probs)))\n return recall_p, recall_n, acc, prc, rc, f1, auc_, result\n\n def evaluate_defects4j_projects(self, option1=True, option2=0.6):\n print('Research Question 1.2')\n scaler = Normalizer()\n all_test_vector = scaler.fit_transform(self.test_vector)\n scaler_patch = scaler.fit(self.patch_vector)\n all_patch_vector = scaler_patch.transform(self.patch_vector)\n\n projects = {'Chart': 26, 'Lang': 65, 'Time': 27, 'Closure': 176, 'Math': 106, 'Cli': 40, 'Codec': 18, 'Compress': 47, 'Collections': 28, 'JacksonCore': 26, 'JacksonDatabind': 112, 'JacksonXml': 6, 'Jsoup': 93, 'Csv': 16, 'Gson': 18, 'JxPath': 22, 'Mockito': 38}\n # projects = {'Chart': 26, 'Lang': 65, 'Time': 27, 'Math': 106, }\n all_closest_score = []\n box_plot = []\n for project, number in projects.items():\n print('Testing {}'.format(project))\n\n # go through all test cases\n cnt = 0\n for i in range(len(self.test_name)):\n # skip other projects while testing one project\n if not self.test_name[i].startswith(project):\n continue\n # project = self.test_name[i].split('-')[0].split('_')[0]\n id = self.test_name[i].split('-')[0].split('_')[1]\n print('{}'.format(self.test_name[i]))\n this_test = all_test_vector[i]\n this_patch = all_patch_vector[i]\n\n # find the closest test case\n dist_min_index = None\n dist_min = np.inf\n for j in range(len(all_test_vector)):\n # skip itself\n if j == i:\n continue\n # option 1: whether skip current project-id\n if option1 and self.test_name[j].startswith(project+'_'+id+'-'):\n continue\n dist = distance.euclidean(this_test, all_test_vector[j])/(1 + distance.euclidean(this_test, all_test_vector[j]))\n if dist < dist_min:\n dist_min = dist\n dist_min_index = j\n sim_test = 1 - dist_min\n all_closest_score.append(sim_test)\n # option 2: threshold for test cases similarity\n if sim_test >= option2:\n # find associated patches similarity\n print('the closest test: {}'.format(self.test_name[dist_min_index]))\n closest_patch = all_patch_vector[dist_min_index]\n # distance_patch = distance.euclidean(closest_patch, this_patch)/(1 + distance.euclidean(closest_patch, this_patch))\n distance_patch = distance.cosine(closest_patch, this_patch)/(1 + distance.cosine(closest_patch, this_patch))\n score_patch = 1 - distance_patch\n if math.isnan(score_patch):\n continue\n box_plot.append([project, 'H', score_patch])\n\n # find average patch similarity\n simi_patch_average = []\n for p in range(len(all_patch_vector)):\n if p == i:\n continue\n # dist = distance.euclidean(this_patch, 
all_patch_vector[p]) / (1 + distance.euclidean(this_patch, all_patch_vector[p]))\n dist = distance.cosine(this_patch, all_patch_vector[p]) / (1 + distance.cosine(this_patch, all_patch_vector[p]))\n simi_patch = 1 - dist\n if math.isnan(simi_patch):\n continue\n simi_patch_average.append(simi_patch)\n box_plot.append([project, 'N', np.array(simi_patch_average).mean()])\n\n # project_list.append([self.test_name[i], score_patch])\n\n\n # if project_list == []:\n # print('{} no found'.format(project))\n # continue\n # recommend_list_project = pd.DataFrame(sorted(project_list, key=lambda x: x[1], reverse=True))\n # plt.bar(recommend_list_project.index.tolist(), recommend_list_project[:][1], color='chocolate')\n # # plt.bar(recommend_list_project.index.tolist(), recommend_list_project[:][1], color='steelblue')\n # plt.xlabel('Failed test cases', fontsize=14)\n # plt.ylabel('Similarity of the associated patches', fontsize=14)\n # # plt.title('Similarity distribution of {}'.format(project))\n # plt.savefig('../fig/RQ2/distance_patch_{}'.format(project))\n # plt.cla()\n\n # Distribution of similarities between closest pairs of test cases.\n plt.figure(figsize=(10, 5))\n plt.xticks(fontsize=15, )\n plt.yticks(fontsize=15, )\n plt.bar(range(len(all_closest_score)), sorted(all_closest_score, reverse=True),)\n plt.xlabel('Distribution on the similarities between each failing test case of each bug and its closest similar test case.', fontsize=20)\n plt.ylabel('Similarity score', fontsize=20)\n # plt.title('Similarity of test case')\n plt.savefig('../fig/RQ1/distribution_test_similarity.png')\n plt.close()\n\n dfl = pd.DataFrame(box_plot)\n dfl.columns = ['Project', 'Label', 'Similarity of patch']\n # put H on left side in plot\n if dfl.iloc[0]['Label'] != 'H':\n b, c = dfl.iloc[0].copy(), dfl[dfl['Label']=='H'].iloc[0].copy()\n dfl.iloc[0], dfl[dfl['Label']=='H'].iloc[0] = c, b\n colors = {'H': 'white', 'N': 'grey'}\n fig = plt.figure(figsize=(15, 8))\n plt.xticks(fontsize=28, )\n plt.yticks(fontsize=28, )\n bp = sns.boxplot(x='Project', y='Similarity of patch', data=dfl, showfliers=False, palette=colors, hue='Label', width=0.7, )\n bp.set_xticklabels(bp.get_xticklabels(), rotation=320)\n # bp.set_xticklabels(bp.get_xticklabels(), fontsize=28)\n # bp.set_yticklabels(bp.get_yticklabels(), fontsize=28)\n plt.xlabel('Project', size=31)\n plt.ylabel('Similarity score', size=30)\n plt.legend(bbox_to_anchor=(0, 1.02, 1, 0.2), loc=\"lower left\",\n borderaxespad=0, ncol=3, fontsize=30, )\n self.adjust_box_widths(fig, 0.8)\n plt.tight_layout()\n # plt.show()\n plt.savefig('../fig/RQ1/distribution_pairwise_patches.png')\n\n # # MMW\n # H_stat = dfl[dfl['Label'] == 'H'].iloc[:, 2].tolist()\n # N_stat = dfl[dfl['Label'] == 'N'].iloc[:, 2].tolist()\n # hypo = stats.mannwhitneyu(H_stat, N_stat, alternative='two-sided')\n # print(hypo)\n\n\n def evaluate_recommend_list(self, recommend_list):\n # recommend_list: [name, y_pred, y_true, y_pred_prob]\n recommend_list = pd.DataFrame(sorted(recommend_list, key=lambda x: x[3], reverse=True), columns=['name', 'y_pred', 'y_true', 'y_pred_prob']) # rank by prediction probability\n\n\n # # plot example for Chart-26\n # Correct = recommend_list[recommend_list['y_true'] == 1]\n # Incorrect = recommend_list[recommend_list[ 'y_true'] == 0]\n # plt.figure(figsize=(10, 5))\n # plt.bar(Correct[:].index.tolist(), Correct[:]['y_pred_prob'], color=\"grey\", edgecolor=\"black\")\n # plt.bar(Incorrect[:].index.tolist(), Incorrect[:]['y_pred_prob'], color=\"white\", edgecolor=\"black\")\n # 
plt.xticks(recommend_list.index.tolist(), list(recommend_list.iloc[:]['name']), rotation=320)\n\n # plt.xticks(recommend_list_project[:].index.tolist(), recommend_list_project[:][0].tolist())\n # fontsize = 22\n # plt.xlabel('Patches', fontsize=fontsize)\n # plt.ylabel('Similarity of patch', fontsize=fontsize)\n # plt.xticks(fontsize=18, )\n # plt.yticks(fontsize=20, )\n # plt.legend(['Correct', 'Incorrect'], fontsize=20, )\n\n number_correct = 0.0\n precision_all = 0.0\n for i in range(recommend_list.shape[0]):\n if recommend_list.loc[i]['y_true'] == 1:\n number_correct += 1.0\n precision_all += (number_correct / (i + 1))\n\n if number_correct == 0.0:\n print('No correct patch found on the recommended list')\n return None, None\n else:\n AP = precision_all / number_correct\n RR = 1.0 / (list(recommend_list[:]['y_true']).index(1) + 1)\n\n print('AP: {}'.format(AP))\n print('RR: {}'.format(RR))\n\n return AP, RR\n\n def MAP_MRR_Mean(self, MAP, MRR, number_patch_MAP):\n print('------')\n print('Evaluating MAP, MRR on Recommended List')\n print('Patch size: {}'.format(number_patch_MAP))\n print('Bug project size: {}'.format(len(MAP)))\n print('MAP: {}, MRR: {}'.format(np.array(MAP).mean(), np.array(MRR).mean()))\n\n def statistics_box(self, box_projecs_co, box_projecs_inco, projects_name):\n data = {\n 'Correct': box_projecs_co,\n 'Incorrect': box_projecs_inco,\n 'Project': projects_name\n }\n df = pd.DataFrame(data)\n dfl = pd.melt(df, id_vars='Project', value_vars=['Correct', 'Incorrect'], )\n dfl.columns = ['Project', 'Label', 'Number of Patches']\n colors = {'Correct': 'white', 'Incorrect': 'darkgrey'}\n\n fig = plt.figure(figsize=(10, 5))\n plt.xticks(fontsize=20, )\n plt.yticks(fontsize=20, )\n plt.legend(fontsize=20)\n bp = sns.boxplot(x='Project', y='Number of Patches', data=dfl, showfliers=False, palette=colors, hue='Label', width=0.6, )\n # plt.xlabel('Project', fontsize=17)\n plt.xlabel('')\n plt.ylabel('Number of Patches', fontsize=20)\n self.adjust_box_widths(fig, 0.8)\n # plt.show()\n plt.savefig('../fig/RQ2/boxplot.png')\n\n def adjust_box_widths(self, g, fac):\n \"\"\"\n Adjust the widths of a seaborn-generated boxplot.\n \"\"\"\n\n # iterating through Axes instances\n for ax in g.axes:\n # iterating through axes artists:\n for c in ax.get_children():\n\n # searching for PathPatches\n if isinstance(c, PathPatch):\n # getting current width of box:\n p = c.get_path()\n verts = p.vertices\n verts_sub = verts[:-1]\n xmin = np.min(verts_sub[:, 0])\n xmax = np.max(verts_sub[:, 0])\n xmid = 0.5 * (xmin + xmax)\n xhalf = 0.5 * (xmax - xmin)\n\n # setting new width of box\n xmin_new = xmid - fac * xhalf\n xmax_new = xmid + fac * xhalf\n verts_sub[verts_sub[:, 0] == xmin, 0] = xmin_new\n verts_sub[verts_sub[:, 0] == xmax, 0] = xmax_new\n\n # setting new width of median line\n for l in ax.lines:\n if np.all(l.get_xdata() == [xmin, xmax]):\n l.set_xdata([xmin_new, xmax_new])\n" ]
[ [ "matplotlib.pyplot.cla", "matplotlib.pyplot.tight_layout", "sklearn.metrics.precision_score", "matplotlib.pyplot.ylabel", "sklearn.preprocessing.Normalizer", "matplotlib.pyplot.xticks", "sklearn.metrics.roc_curve", "matplotlib.pyplot.savefig", "sklearn.metrics.auc", "matplotlib.pyplot.figure", "matplotlib.pyplot.title", "numpy.delete", "sklearn.metrics.recall_score", "scipy.spatial.distance.cosine", "scipy.spatial.distance.euclidean", "sklearn.metrics.confusion_matrix", "sklearn.metrics.accuracy_score", "numpy.max", "numpy.min", "matplotlib.pyplot.close", "matplotlib.pyplot.legend", "pandas.DataFrame", "pandas.melt", "numpy.array", "numpy.concatenate", "matplotlib.pyplot.yticks", "matplotlib.pyplot.xlabel" ] ]
monk1337/tensorflow
[ "885577afd802204096cde524ed2a3acd9e29a75c" ]
[ "tensorflow/python/tools/saved_model_cli.py" ]
[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Command-line interface to inspect and execute a graph in a SavedModel.\n\nFor detailed usages and examples, please refer to:\nhttps://www.tensorflow.org/guide/saved_model#cli_to_inspect_and_execute_savedmodel\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport os\nimport re\nimport sys\nimport warnings\n\nimport numpy as np\nimport six\n\nfrom tensorflow.core.example import example_pb2\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.debug.wrappers import local_cli_wrapper\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.eager import function as defun\nfrom tensorflow.python.framework import meta_graph as meta_graph_lib\nfrom tensorflow.python.framework import ops as ops_lib\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.platform import app # pylint: disable=unused-import\nfrom tensorflow.python.saved_model import load\nfrom tensorflow.python.saved_model import loader\nfrom tensorflow.python.saved_model import save\nfrom tensorflow.python.tools import saved_model_utils\n\n# Set of ops to blacklist.\n_OP_BLACKLIST = set(['WriteFile', 'ReadFile', 'PrintV2'])\n\n\ndef _show_tag_sets(saved_model_dir):\n \"\"\"Prints the tag-sets stored in SavedModel directory.\n\n Prints all the tag-sets for MetaGraphs stored in SavedModel directory.\n\n Args:\n saved_model_dir: Directory containing the SavedModel to inspect.\n \"\"\"\n tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)\n print('The given SavedModel contains the following tag-sets:')\n for tag_set in sorted(tag_sets):\n print('%r' % ', '.join(sorted(tag_set)))\n\n\ndef _show_signature_def_map_keys(saved_model_dir, tag_set):\n \"\"\"Prints the keys for each SignatureDef in the SignatureDef map.\n\n Prints the list of SignatureDef keys from the SignatureDef map specified by\n the given tag-set and SavedModel directory.\n\n Args:\n saved_model_dir: Directory containing the SavedModel to inspect.\n tag_set: Group of tag(s) of the MetaGraphDef to get SignatureDef map from,\n in string format, separated by ','. 
For tag-set contains multiple tags,\n all tags must be passed in.\n \"\"\"\n signature_def_map = get_signature_def_map(saved_model_dir, tag_set)\n print('The given SavedModel MetaGraphDef contains SignatureDefs with the '\n 'following keys:')\n for signature_def_key in sorted(signature_def_map.keys()):\n print('SignatureDef key: \\\"%s\\\"' % signature_def_key)\n\n\ndef _get_inputs_tensor_info_from_meta_graph_def(meta_graph_def,\n signature_def_key):\n \"\"\"Gets TensorInfo for all inputs of the SignatureDef.\n\n Returns a dictionary that maps each input key to its TensorInfo for the given\n signature_def_key in the meta_graph_def\n\n Args:\n meta_graph_def: MetaGraphDef protocol buffer with the SignatureDef map to\n look up SignatureDef key.\n signature_def_key: A SignatureDef key string.\n\n Returns:\n A dictionary that maps input tensor keys to TensorInfos.\n \"\"\"\n return meta_graph_def.signature_def[signature_def_key].inputs\n\n\ndef _get_outputs_tensor_info_from_meta_graph_def(meta_graph_def,\n signature_def_key):\n \"\"\"Gets TensorInfos for all outputs of the SignatureDef.\n\n Returns a dictionary that maps each output key to its TensorInfo for the given\n signature_def_key in the meta_graph_def.\n\n Args:\n meta_graph_def: MetaGraphDef protocol buffer with the SignatureDefmap to\n look up signature_def_key.\n signature_def_key: A SignatureDef key string.\n\n Returns:\n A dictionary that maps output tensor keys to TensorInfos.\n \"\"\"\n return meta_graph_def.signature_def[signature_def_key].outputs\n\n\ndef _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key, indent=0):\n \"\"\"Prints input and output TensorInfos.\n\n Prints the details of input and output TensorInfos for the SignatureDef mapped\n by the given signature_def_key.\n\n Args:\n saved_model_dir: Directory containing the SavedModel to inspect.\n tag_set: Group of tag(s) of the MetaGraphDef, in string format, separated by\n ','. 
For tag-set contains multiple tags, all tags must be passed in.\n signature_def_key: A SignatureDef key string.\n indent: How far (in increments of 2 spaces) to indent each line of output.\n \"\"\"\n meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,\n tag_set)\n inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(\n meta_graph_def, signature_def_key)\n outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(\n meta_graph_def, signature_def_key)\n\n indent_str = ' ' * indent\n def in_print(s):\n print(indent_str + s)\n\n in_print('The given SavedModel SignatureDef contains the following input(s):')\n for input_key, input_tensor in sorted(inputs_tensor_info.items()):\n in_print(' inputs[\\'%s\\'] tensor_info:' % input_key)\n _print_tensor_info(input_tensor, indent+1)\n\n in_print('The given SavedModel SignatureDef contains the following '\n 'output(s):')\n for output_key, output_tensor in sorted(outputs_tensor_info.items()):\n in_print(' outputs[\\'%s\\'] tensor_info:' % output_key)\n _print_tensor_info(output_tensor, indent+1)\n\n in_print('Method name is: %s' %\n meta_graph_def.signature_def[signature_def_key].method_name)\n\n\ndef _show_defined_functions(saved_model_dir):\n \"\"\"Prints the callable concrete and polymorphic functions of the Saved Model.\n\n Args:\n saved_model_dir: Directory containing the SavedModel to inspect.\n \"\"\"\n meta_graphs = saved_model_utils.read_saved_model(saved_model_dir).meta_graphs\n has_object_graph_def = False\n\n for meta_graph_def in meta_graphs:\n has_object_graph_def |= meta_graph_def.HasField('object_graph_def')\n if not has_object_graph_def:\n return\n with ops_lib.Graph().as_default():\n trackable_object = load.load(saved_model_dir)\n\n print('\\nDefined Functions:', end='')\n functions = (\n save._AugmentedGraphView(trackable_object) # pylint: disable=protected-access\n .list_functions(trackable_object))\n functions = sorted(functions.items(), key=lambda x: x[0])\n for name, function in functions:\n print('\\n Function Name: \\'%s\\'' % name)\n concrete_functions = []\n if isinstance(function, defun.ConcreteFunction):\n concrete_functions.append(function)\n if isinstance(function, def_function.Function):\n concrete_functions.extend(\n function._list_all_concrete_functions_for_serialization()) # pylint: disable=protected-access\n concrete_functions = sorted(concrete_functions, key=lambda x: x.name)\n for index, concrete_function in enumerate(concrete_functions, 1):\n args, kwargs = None, None\n if concrete_function.structured_input_signature:\n args, kwargs = concrete_function.structured_input_signature\n elif concrete_function._arg_keywords: # pylint: disable=protected-access\n # For pure ConcreteFunctions we might have nothing better than\n # _arg_keywords.\n args = concrete_function._arg_keywords # pylint: disable=protected-access\n if args:\n print(' Option #%d' % index)\n print(' Callable with:')\n _print_args(args, indent=4)\n if kwargs:\n _print_args(kwargs, 'Named Argument', indent=4)\n\n\ndef _print_args(arguments, argument_type='Argument', indent=0):\n \"\"\"Formats and prints the argument of the concrete functions defined in the model.\n\n Args:\n arguments: Arguments to format print.\n argument_type: Type of arguments.\n indent: How far (in increments of 2 spaces) to indent each line of\n output.\n \"\"\"\n indent_str = ' ' * indent\n\n def _maybe_add_quotes(value):\n is_quotes = '\\'' * isinstance(value, str)\n return is_quotes + str(value) + is_quotes\n\n def in_print(s, end='\\n'):\n 
print(indent_str + s, end=end)\n\n for index, element in enumerate(arguments, 1):\n if indent == 4:\n in_print('%s #%d' % (argument_type, index))\n if isinstance(element, six.string_types):\n in_print(' %s' % element)\n elif isinstance(element, tensor_spec.TensorSpec):\n print((indent + 1) * ' ' + '%s: %s' % (element.name, repr(element)))\n elif (isinstance(element, collections.Iterable) and\n not isinstance(element, dict)):\n in_print(' DType: %s' % type(element).__name__)\n in_print(' Value: [', end='')\n for value in element:\n print('%s' % _maybe_add_quotes(value), end=', ')\n print('\\b\\b]')\n elif isinstance(element, dict):\n in_print(' DType: %s' % type(element).__name__)\n in_print(' Value: {', end='')\n for (key, value) in element.items():\n print('\\'%s\\': %s' % (str(key), _maybe_add_quotes(value)), end=', ')\n print('\\b\\b}')\n else:\n in_print(' DType: %s' % type(element).__name__)\n in_print(' Value: %s' % str(element))\n\n\ndef _print_tensor_info(tensor_info, indent=0):\n \"\"\"Prints details of the given tensor_info.\n\n Args:\n tensor_info: TensorInfo object to be printed.\n indent: How far (in increments of 2 spaces) to indent each line output\n \"\"\"\n indent_str = ' ' * indent\n def in_print(s):\n print(indent_str + s)\n\n in_print(' dtype: ' +\n {value: key\n for (key, value) in types_pb2.DataType.items()}[tensor_info.dtype])\n # Display shape as tuple.\n if tensor_info.tensor_shape.unknown_rank:\n shape = 'unknown_rank'\n else:\n dims = [str(dim.size) for dim in tensor_info.tensor_shape.dim]\n shape = ', '.join(dims)\n shape = '(' + shape + ')'\n in_print(' shape: ' + shape)\n in_print(' name: ' + tensor_info.name)\n\n\ndef _show_all(saved_model_dir):\n \"\"\"Prints tag-set, SignatureDef and Inputs/Outputs information in SavedModel.\n\n Prints all tag-set, SignatureDef and Inputs/Outputs information stored in\n SavedModel directory.\n\n Args:\n saved_model_dir: Directory containing the SavedModel to inspect.\n \"\"\"\n tag_sets = saved_model_utils.get_saved_model_tag_sets(saved_model_dir)\n for tag_set in sorted(tag_sets):\n print(\"\\nMetaGraphDef with tag-set: '%s' \"\n \"contains the following SignatureDefs:\" % ', '.join(tag_set))\n\n tag_set = ','.join(tag_set)\n signature_def_map = get_signature_def_map(saved_model_dir, tag_set)\n for signature_def_key in sorted(signature_def_map.keys()):\n print('\\nsignature_def[\\'' + signature_def_key + '\\']:')\n _show_inputs_outputs(saved_model_dir, tag_set, signature_def_key,\n indent=1)\n _show_defined_functions(saved_model_dir)\n\n\ndef get_meta_graph_def(saved_model_dir, tag_set):\n \"\"\"DEPRECATED: Use saved_model_utils.get_meta_graph_def instead.\n\n Gets MetaGraphDef from SavedModel. Returns the MetaGraphDef for the given\n tag-set and SavedModel directory.\n\n Args:\n saved_model_dir: Directory containing the SavedModel to inspect or execute.\n tag_set: Group of tag(s) of the MetaGraphDef to load, in string format,\n separated by ','. 
For tag-set contains multiple tags, all tags must be\n passed in.\n\n Raises:\n RuntimeError: An error when the given tag-set does not exist in the\n SavedModel.\n\n Returns:\n A MetaGraphDef corresponding to the tag-set.\n \"\"\"\n return saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n\n\ndef get_signature_def_map(saved_model_dir, tag_set):\n \"\"\"Gets SignatureDef map from a MetaGraphDef in a SavedModel.\n\n Returns the SignatureDef map for the given tag-set in the SavedModel\n directory.\n\n Args:\n saved_model_dir: Directory containing the SavedModel to inspect or execute.\n tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in\n string format, separated by ','. For tag-set contains multiple tags, all\n tags must be passed in.\n\n Returns:\n A SignatureDef map that maps from string keys to SignatureDefs.\n \"\"\"\n meta_graph = saved_model_utils.get_meta_graph_def(saved_model_dir, tag_set)\n return meta_graph.signature_def\n\n\ndef scan_meta_graph_def(meta_graph_def):\n \"\"\"Scans meta_graph_def and reports if there are ops on blacklist.\n\n Print ops if they are on black list, or print success if no blacklisted ops\n found.\n\n Args:\n meta_graph_def: MetaGraphDef protocol buffer.\n \"\"\"\n all_ops_set = set(\n meta_graph_lib.ops_used_by_graph_def(meta_graph_def.graph_def))\n blacklisted_ops = _OP_BLACKLIST & all_ops_set\n if blacklisted_ops:\n # TODO(yifeif): print more warnings\n print('MetaGraph with tag set %s contains the following blacklisted ops:' %\n meta_graph_def.meta_info_def.tags, blacklisted_ops)\n else:\n print('MetaGraph with tag set %s does not contain blacklisted ops.' %\n meta_graph_def.meta_info_def.tags)\n\n\ndef run_saved_model_with_feed_dict(saved_model_dir, tag_set, signature_def_key,\n input_tensor_key_feed_dict, outdir,\n overwrite_flag, worker=None, init_tpu=False,\n tf_debug=False):\n \"\"\"Runs SavedModel and fetch all outputs.\n\n Runs the input dictionary through the MetaGraphDef within a SavedModel\n specified by the given tag_set and SignatureDef. Also save the outputs to file\n if outdir is not None.\n\n Args:\n saved_model_dir: Directory containing the SavedModel to execute.\n tag_set: Group of tag(s) of the MetaGraphDef with the SignatureDef map, in\n string format, separated by ','. For tag-set contains multiple tags, all\n tags must be passed in.\n signature_def_key: A SignatureDef key string.\n input_tensor_key_feed_dict: A dictionary maps input keys to numpy ndarrays.\n outdir: A directory to save the outputs to. If the directory doesn't exist,\n it will be created.\n overwrite_flag: A boolean flag to allow overwrite output file if file with\n the same name exists.\n worker: If provided, the session will be run on the worker. 
Valid worker\n specification is a bns or gRPC path.\n init_tpu: If true, the TPU system will be initialized after the session\n is created.\n tf_debug: A boolean flag to use TensorFlow Debugger (TFDBG) to observe the\n intermediate Tensor values and runtime GraphDefs while running the\n SavedModel.\n\n Raises:\n ValueError: When any of the input tensor keys is not valid.\n RuntimeError: An error when output file already exists and overwrite is not\n enabled.\n \"\"\"\n # Get a list of output tensor names.\n meta_graph_def = saved_model_utils.get_meta_graph_def(saved_model_dir,\n tag_set)\n\n # Re-create feed_dict based on input tensor name instead of key as session.run\n # uses tensor name.\n inputs_tensor_info = _get_inputs_tensor_info_from_meta_graph_def(\n meta_graph_def, signature_def_key)\n\n # Check if input tensor keys are valid.\n for input_key_name in input_tensor_key_feed_dict.keys():\n if input_key_name not in inputs_tensor_info:\n raise ValueError(\n '\"%s\" is not a valid input key. Please choose from %s, or use '\n '--show option.' %\n (input_key_name, '\"' + '\", \"'.join(inputs_tensor_info.keys()) + '\"'))\n\n inputs_feed_dict = {\n inputs_tensor_info[key].name: tensor\n for key, tensor in input_tensor_key_feed_dict.items()\n }\n # Get outputs\n outputs_tensor_info = _get_outputs_tensor_info_from_meta_graph_def(\n meta_graph_def, signature_def_key)\n # Sort to preserve order because we need to go from value to key later.\n output_tensor_keys_sorted = sorted(outputs_tensor_info.keys())\n output_tensor_names_sorted = [\n outputs_tensor_info[tensor_key].name\n for tensor_key in output_tensor_keys_sorted\n ]\n\n with session.Session(worker, graph=ops_lib.Graph()) as sess:\n if init_tpu:\n print('Initializing TPU System ...')\n # This is needed for freshly started worker, or if the job\n # restarts after a preemption.\n sess.run(tf.contrib.tpu.initialize_system())\n\n loader.load(sess, tag_set.split(','), saved_model_dir)\n\n if tf_debug:\n sess = local_cli_wrapper.LocalCLIDebugWrapperSession(sess)\n\n outputs = sess.run(output_tensor_names_sorted, feed_dict=inputs_feed_dict)\n\n for i, output in enumerate(outputs):\n output_tensor_key = output_tensor_keys_sorted[i]\n print('Result for output key %s:\\n%s' % (output_tensor_key, output))\n\n # Only save if outdir is specified.\n if outdir:\n # Create directory if outdir does not exist\n if not os.path.isdir(outdir):\n os.makedirs(outdir)\n output_full_path = os.path.join(outdir, output_tensor_key + '.npy')\n\n # If overwrite not enabled and file already exist, error out\n if not overwrite_flag and os.path.exists(output_full_path):\n raise RuntimeError(\n 'Output file %s already exists. Add \\\"--overwrite\\\" to overwrite'\n ' the existing output files.' % output_full_path)\n\n np.save(output_full_path, output)\n print('Output %s is saved to %s' % (output_tensor_key,\n output_full_path))\n\n\ndef preprocess_inputs_arg_string(inputs_str):\n \"\"\"Parses input arg into dictionary that maps input to file/variable tuple.\n\n Parses input string in the format of, for example,\n \"input1=filename1[variable_name1],input2=filename2\" into a\n dictionary looks like\n {'input_key1': (filename1, variable_name1),\n 'input_key2': (file2, None)}\n , which maps input keys to a tuple of file name and variable name(None if\n empty).\n\n Args:\n inputs_str: A string that specified where to load inputs. 
Inputs are\n separated by semicolons.\n * For each input key:\n '<input_key>=<filename>' or\n '<input_key>=<filename>[<variable_name>]'\n * The optional 'variable_name' key will be set to None if not specified.\n\n Returns:\n A dictionary that maps input keys to a tuple of file name and variable name.\n\n Raises:\n RuntimeError: An error when the given input string is in a bad format.\n \"\"\"\n input_dict = {}\n inputs_raw = inputs_str.split(';')\n for input_raw in filter(bool, inputs_raw): # skip empty strings\n # Format of input=filename[variable_name]'\n match = re.match(r'([^=]+)=([^\\[\\]]+)\\[([^\\[\\]]+)\\]$', input_raw)\n\n if match:\n input_dict[match.group(1)] = match.group(2), match.group(3)\n else:\n # Format of input=filename'\n match = re.match(r'([^=]+)=([^\\[\\]]+)$', input_raw)\n if match:\n input_dict[match.group(1)] = match.group(2), None\n else:\n raise RuntimeError(\n '--inputs \"%s\" format is incorrect. Please follow'\n '\"<input_key>=<filename>\", or'\n '\"<input_key>=<filename>[<variable_name>]\"' % input_raw)\n\n return input_dict\n\n\ndef preprocess_input_exprs_arg_string(input_exprs_str):\n \"\"\"Parses input arg into dictionary that maps input key to python expression.\n\n Parses input string in the format of 'input_key=<python expression>' into a\n dictionary that maps each input_key to its python expression.\n\n Args:\n input_exprs_str: A string that specifies python expression for input keys.\n Each input is separated by semicolon. For each input key:\n 'input_key=<python expression>'\n\n Returns:\n A dictionary that maps input keys to their values.\n\n Raises:\n RuntimeError: An error when the given input string is in a bad format.\n \"\"\"\n input_dict = {}\n\n for input_raw in filter(bool, input_exprs_str.split(';')):\n if '=' not in input_exprs_str:\n raise RuntimeError('--input_exprs \"%s\" format is incorrect. Please follow'\n '\"<input_key>=<python expression>\"' % input_exprs_str)\n input_key, expr = input_raw.split('=', 1)\n # ast.literal_eval does not work with numpy expressions\n input_dict[input_key] = eval(expr) # pylint: disable=eval-used\n return input_dict\n\n\ndef preprocess_input_examples_arg_string(input_examples_str):\n \"\"\"Parses input into dict that maps input keys to lists of tf.Example.\n\n Parses input string in the format of 'input_key1=[{feature_name:\n feature_list}];input_key2=[{feature_name:feature_list}];' into a dictionary\n that maps each input_key to its list of serialized tf.Example.\n\n Args:\n input_examples_str: A string that specifies a list of dictionaries of\n feature_names and their feature_lists for each input.\n Each input is separated by semicolon. 
For each input key:\n 'input=[{feature_name1: feature_list1, feature_name2:feature_list2}]'\n items in feature_list can be the type of float, int, long or str.\n\n Returns:\n A dictionary that maps input keys to lists of serialized tf.Example.\n\n Raises:\n ValueError: An error when the given tf.Example is not a list.\n \"\"\"\n input_dict = preprocess_input_exprs_arg_string(input_examples_str)\n for input_key, example_list in input_dict.items():\n if not isinstance(example_list, list):\n raise ValueError(\n 'tf.Example input must be a list of dictionaries, but \"%s\" is %s' %\n (example_list, type(example_list)))\n input_dict[input_key] = [\n _create_example_string(example) for example in example_list\n ]\n return input_dict\n\n\ndef _create_example_string(example_dict):\n \"\"\"Create a serialized tf.example from feature dictionary.\"\"\"\n example = example_pb2.Example()\n for feature_name, feature_list in example_dict.items():\n if not isinstance(feature_list, list):\n raise ValueError('feature value must be a list, but %s: \"%s\" is %s' %\n (feature_name, feature_list, type(feature_list)))\n if isinstance(feature_list[0], float):\n example.features.feature[feature_name].float_list.value.extend(\n feature_list)\n elif isinstance(feature_list[0], str):\n example.features.feature[feature_name].bytes_list.value.extend(\n feature_list)\n elif isinstance(feature_list[0], six.integer_types):\n example.features.feature[feature_name].int64_list.value.extend(\n feature_list)\n else:\n raise ValueError(\n 'Type %s for value %s is not supported for tf.train.Feature.' %\n (type(feature_list[0]), feature_list[0]))\n return example.SerializeToString()\n\n\ndef load_inputs_from_input_arg_string(inputs_str, input_exprs_str,\n input_examples_str):\n \"\"\"Parses input arg strings and create inputs feed_dict.\n\n Parses '--inputs' string for inputs to be loaded from file, and parses\n '--input_exprs' string for inputs to be evaluated from python expression.\n '--input_examples' string for inputs to be created from tf.example feature\n dictionary list.\n\n Args:\n inputs_str: A string that specified where to load inputs. Each input is\n separated by semicolon.\n * For each input key:\n '<input_key>=<filename>' or\n '<input_key>=<filename>[<variable_name>]'\n * The optional 'variable_name' key will be set to None if not specified.\n * File specified by 'filename' will be loaded using numpy.load. Inputs\n can be loaded from only .npy, .npz or pickle files.\n * The \"[variable_name]\" key is optional depending on the input file type\n as descripted in more details below.\n When loading from a npy file, which always contains a numpy ndarray, the\n content will be directly assigned to the specified input tensor. If a\n variable_name is specified, it will be ignored and a warning will be\n issued.\n When loading from a npz zip file, user can specify which variable within\n the zip file to load for the input tensor inside the square brackets. 
If\n nothing is specified, this function will check that only one file is\n included in the zip and load it for the specified input tensor.\n When loading from a pickle file, if no variable_name is specified in the\n square brackets, whatever that is inside the pickle file will be passed\n to the specified input tensor, else SavedModel CLI will assume a\n dictionary is stored in the pickle file and the value corresponding to\n the variable_name will be used.\n input_exprs_str: A string that specifies python expressions for inputs.\n * In the format of: '<input_key>=<python expression>'.\n * numpy module is available as np.\n input_examples_str: A string that specifies tf.Example with dictionary.\n * In the format of: '<input_key>=<[{feature:value list}]>'\n\n Returns:\n A dictionary that maps input tensor keys to numpy ndarrays.\n\n Raises:\n RuntimeError: An error when a key is specified, but the input file contains\n multiple numpy ndarrays, none of which matches the given key.\n RuntimeError: An error when no key is specified, but the input file contains\n more than one numpy ndarrays.\n \"\"\"\n tensor_key_feed_dict = {}\n\n inputs = preprocess_inputs_arg_string(inputs_str)\n input_exprs = preprocess_input_exprs_arg_string(input_exprs_str)\n input_examples = preprocess_input_examples_arg_string(input_examples_str)\n\n for input_tensor_key, (filename, variable_name) in inputs.items():\n data = np.load(file_io.FileIO(filename, mode='rb'), allow_pickle=True)\n\n # When a variable_name key is specified for the input file\n if variable_name:\n # if file contains a single ndarray, ignore the input name\n if isinstance(data, np.ndarray):\n warnings.warn(\n 'Input file %s contains a single ndarray. Name key \\\"%s\\\" ignored.'\n % (filename, variable_name))\n tensor_key_feed_dict[input_tensor_key] = data\n else:\n if variable_name in data:\n tensor_key_feed_dict[input_tensor_key] = data[variable_name]\n else:\n raise RuntimeError(\n 'Input file %s does not contain variable with name \\\"%s\\\".' %\n (filename, variable_name))\n # When no key is specified for the input file.\n else:\n # Check if npz file only contains a single numpy ndarray.\n if isinstance(data, np.lib.npyio.NpzFile):\n variable_name_list = data.files\n if len(variable_name_list) != 1:\n raise RuntimeError(\n 'Input file %s contains more than one ndarrays. Please specify '\n 'the name of ndarray to use.' % filename)\n tensor_key_feed_dict[input_tensor_key] = data[variable_name_list[0]]\n else:\n tensor_key_feed_dict[input_tensor_key] = data\n\n # When input is a python expression:\n for input_tensor_key, py_expr_evaluated in input_exprs.items():\n if input_tensor_key in tensor_key_feed_dict:\n warnings.warn(\n 'input_key %s has been specified with both --inputs and --input_exprs'\n ' options. Value in --input_exprs will be used.' % input_tensor_key)\n tensor_key_feed_dict[input_tensor_key] = py_expr_evaluated\n\n # When input is a tf.Example:\n for input_tensor_key, example in input_examples.items():\n if input_tensor_key in tensor_key_feed_dict:\n warnings.warn(\n 'input_key %s has been specified in multiple options. Value in '\n '--input_examples will be used.' 
% input_tensor_key)\n tensor_key_feed_dict[input_tensor_key] = example\n return tensor_key_feed_dict\n\n\ndef show(args):\n \"\"\"Function triggered by show command.\n\n Args:\n args: A namespace parsed from command line.\n \"\"\"\n # If all tag is specified, display all information.\n if args.all:\n _show_all(args.dir)\n else:\n # If no tag is specified, display all tag_set, if no signaure_def key is\n # specified, display all SignatureDef keys, else show input output tensor\n # information corresponding to the given SignatureDef key\n if args.tag_set is None:\n _show_tag_sets(args.dir)\n else:\n if args.signature_def is None:\n _show_signature_def_map_keys(args.dir, args.tag_set)\n else:\n _show_inputs_outputs(args.dir, args.tag_set, args.signature_def)\n\n\ndef run(args):\n \"\"\"Function triggered by run command.\n\n Args:\n args: A namespace parsed from command line.\n\n Raises:\n AttributeError: An error when neither --inputs nor --input_exprs is passed\n to run command.\n \"\"\"\n if not args.inputs and not args.input_exprs and not args.input_examples:\n raise AttributeError(\n 'At least one of --inputs, --input_exprs or --input_examples must be '\n 'required')\n tensor_key_feed_dict = load_inputs_from_input_arg_string(\n args.inputs, args.input_exprs, args.input_examples)\n run_saved_model_with_feed_dict(args.dir, args.tag_set, args.signature_def,\n tensor_key_feed_dict, args.outdir,\n args.overwrite, worker=args.worker,\n init_tpu=args.init_tpu, tf_debug=args.tf_debug)\n\n\ndef scan(args):\n \"\"\"Function triggered by scan command.\n\n Args:\n args: A namespace parsed from command line.\n \"\"\"\n if args.tag_set:\n scan_meta_graph_def(\n saved_model_utils.get_meta_graph_def(args.dir, args.tag_set))\n else:\n saved_model = saved_model_utils.read_saved_model(args.dir)\n for meta_graph_def in saved_model.meta_graphs:\n scan_meta_graph_def(meta_graph_def)\n\n\ndef convert_with_tensorrt(args):\n \"\"\"Function triggered by 'convert tensorrt' command.\n\n Args:\n args: A namespace parsed from command line.\n \"\"\"\n # Import here instead of at top, because this will crash if TensorRT is\n # not installed\n from tensorflow.python.compiler.tensorrt import trt_convert as trt # pylint: disable=g-import-not-at-top\n\n params = trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(\n max_workspace_size_bytes=args.max_workspace_size_bytes,\n precision_mode=args.precision_mode,\n minimum_segment_size=args.minimum_segment_size)\n converter = trt.TrtGraphConverterV2(\n input_saved_model_dir=args.dir,\n input_saved_model_tags=args.tag_set.split(','),\n conversion_params=params)\n converter.convert()\n converter.save(output_saved_model_dir=args.output_dir)\n\n\ndef create_parser():\n \"\"\"Creates a parser that parse the command line arguments.\n\n Returns:\n A namespace parsed from command line arguments.\n \"\"\"\n parser = argparse.ArgumentParser(\n description='saved_model_cli: Command-line interface for SavedModel')\n parser.add_argument('-v', '--version', action='version', version='0.1.0')\n\n subparsers = parser.add_subparsers(\n title='commands', description='valid commands', help='additional help')\n\n # show command\n show_msg = (\n 'Usage examples:\\n'\n 'To show all tag-sets in a SavedModel:\\n'\n '$saved_model_cli show --dir /tmp/saved_model\\n\\n'\n 'To show all available SignatureDef keys in a '\n 'MetaGraphDef specified by its tag-set:\\n'\n '$saved_model_cli show --dir /tmp/saved_model --tag_set serve\\n\\n'\n 'For a MetaGraphDef with multiple tags in the tag-set, all tags must be '\n 
'passed in, separated by \\';\\':\\n'\n '$saved_model_cli show --dir /tmp/saved_model --tag_set serve,gpu\\n\\n'\n 'To show all inputs and outputs TensorInfo for a specific'\n ' SignatureDef specified by the SignatureDef key in a'\n ' MetaGraph.\\n'\n '$saved_model_cli show --dir /tmp/saved_model --tag_set serve'\n ' --signature_def serving_default\\n\\n'\n 'To show all available information in the SavedModel:\\n'\n '$saved_model_cli show --dir /tmp/saved_model --all')\n parser_show = subparsers.add_parser(\n 'show',\n description=show_msg,\n formatter_class=argparse.RawTextHelpFormatter)\n parser_show.add_argument(\n '--dir',\n type=str,\n required=True,\n help='directory containing the SavedModel to inspect')\n parser_show.add_argument(\n '--all',\n action='store_true',\n help='if set, will output all information in given SavedModel')\n parser_show.add_argument(\n '--tag_set',\n type=str,\n default=None,\n help='tag-set of graph in SavedModel to show, separated by \\',\\'')\n parser_show.add_argument(\n '--signature_def',\n type=str,\n default=None,\n metavar='SIGNATURE_DEF_KEY',\n help='key of SignatureDef to display input(s) and output(s) for')\n parser_show.set_defaults(func=show)\n\n # run command\n run_msg = ('Usage example:\\n'\n 'To run input tensors from files through a MetaGraphDef and save'\n ' the output tensors to files:\\n'\n '$saved_model_cli show --dir /tmp/saved_model --tag_set serve \\\\\\n'\n ' --signature_def serving_default \\\\\\n'\n ' --inputs input1_key=/tmp/124.npz[x],input2_key=/tmp/123.npy '\n '\\\\\\n'\n ' --input_exprs \\'input3_key=np.ones(2)\\' \\\\\\n'\n ' --input_examples '\n '\\'input4_key=[{\"id\":[26],\"weights\":[0.5, 0.5]}]\\' \\\\\\n'\n ' --outdir=/out\\n\\n'\n 'For more information about input file format, please see:\\n'\n 'https://www.tensorflow.org/guide/saved_model_cli\\n')\n parser_run = subparsers.add_parser(\n 'run', description=run_msg, formatter_class=argparse.RawTextHelpFormatter)\n parser_run.add_argument(\n '--dir',\n type=str,\n required=True,\n help='directory containing the SavedModel to execute')\n parser_run.add_argument(\n '--tag_set',\n type=str,\n required=True,\n help='tag-set of graph in SavedModel to load, separated by \\',\\'')\n parser_run.add_argument(\n '--signature_def',\n type=str,\n required=True,\n metavar='SIGNATURE_DEF_KEY',\n help='key of SignatureDef to run')\n msg = ('Loading inputs from files, in the format of \\'<input_key>=<filename>,'\n ' or \\'<input_key>=<filename>[<variable_name>]\\', separated by \\';\\'.'\n ' The file format can only be from .npy, .npz or pickle.')\n parser_run.add_argument('--inputs', type=str, default='', help=msg)\n msg = ('Specifying inputs by python expressions, in the format of'\n ' \"<input_key>=\\'<python expression>\\'\", separated by \\';\\'. '\n 'numpy module is available as \\'np\\'. '\n 'Will override duplicate input keys from --inputs option.')\n parser_run.add_argument('--input_exprs', type=str, default='', help=msg)\n msg = (\n 'Specifying tf.Example inputs as list of dictionaries. For example: '\n '<input_key>=[{feature0:value_list,feature1:value_list}]. Use \";\" to '\n 'separate input keys. 
Will override duplicate input keys from --inputs '\n 'and --input_exprs option.')\n parser_run.add_argument('--input_examples', type=str, default='', help=msg)\n parser_run.add_argument(\n '--outdir',\n type=str,\n default=None,\n help='if specified, output tensor(s) will be saved to given directory')\n parser_run.add_argument(\n '--overwrite',\n action='store_true',\n help='if set, output file will be overwritten if it already exists.')\n parser_run.add_argument(\n '--tf_debug',\n action='store_true',\n help='if set, will use TensorFlow Debugger (tfdbg) to watch the '\n 'intermediate Tensors and runtime GraphDefs while running the '\n 'SavedModel.')\n parser_run.add_argument(\n '--worker',\n type=str,\n default=None,\n help='if specified, a Session will be run on the worker. '\n 'Valid worker specification is a bns or gRPC path.')\n parser_run.add_argument(\n '--init_tpu',\n action='store_true',\n default=None,\n help='if specified, tpu.initialize_system will be called on the Session. '\n 'This option should be only used if the worker is a TPU job.')\n parser_run.set_defaults(func=run)\n\n # scan command\n scan_msg = ('Usage example:\\n'\n 'To scan for blacklisted ops in SavedModel:\\n'\n '$saved_model_cli scan --dir /tmp/saved_model\\n'\n 'To scan a specific MetaGraph, pass in --tag_set\\n')\n parser_scan = subparsers.add_parser(\n 'scan',\n description=scan_msg,\n formatter_class=argparse.RawTextHelpFormatter)\n parser_scan.add_argument(\n '--dir',\n type=str,\n required=True,\n help='directory containing the SavedModel to execute')\n parser_scan.add_argument(\n '--tag_set',\n type=str,\n help='tag-set of graph in SavedModel to scan, separated by \\',\\'')\n parser_scan.set_defaults(func=scan)\n\n # convert command\n convert_msg = ('Usage example:\\n'\n 'To convert the SavedModel to one that have TensorRT ops:\\n'\n '$saved_model_cli convert \\\\\\n'\n ' --dir /tmp/saved_model \\\\\\n'\n ' --tag_set serve \\\\\\n'\n ' --output_dir /tmp/saved_model_trt \\\\\\n'\n ' tensorrt \\n')\n parser_convert = subparsers.add_parser(\n 'convert',\n description=convert_msg,\n formatter_class=argparse.RawTextHelpFormatter)\n parser_convert.add_argument(\n '--dir',\n type=str,\n required=True,\n help='directory containing the SavedModel to convert')\n parser_convert.add_argument(\n '--output_dir',\n type=str,\n required=True,\n help='output directory for the converted SavedModel')\n parser_convert.add_argument(\n '--tag_set',\n type=str,\n required=True,\n help='tag-set of graph in SavedModel to convert, separated by \\',\\'')\n convert_subparsers = parser_convert.add_subparsers(\n title='conversion methods',\n description='valid conversion methods',\n help='the conversion to run with the SavedModel')\n parser_convert_with_tensorrt = convert_subparsers.add_parser(\n 'tensorrt',\n description='Convert the SavedModel with Tensorflow-TensorRT integration',\n formatter_class=argparse.RawTextHelpFormatter)\n parser_convert_with_tensorrt.add_argument(\n '--max_workspace_size_bytes',\n type=int,\n default=2 << 20,\n help=('the maximum GPU temporary memory which the TRT engine can use at '\n 'execution time'))\n parser_convert_with_tensorrt.add_argument(\n '--precision_mode',\n type=str,\n default='FP32',\n help='one of FP32, FP16 and INT8')\n parser_convert_with_tensorrt.add_argument(\n '--minimum_segment_size',\n type=int,\n default=3,\n help=('the minimum number of nodes required for a subgraph to be replaced'\n 'in a TensorRT node'))\n 
parser_convert_with_tensorrt.set_defaults(func=convert_with_tensorrt)\n\n return parser\n\n\ndef main():\n parser = create_parser()\n args = parser.parse_args()\n if not hasattr(args, 'func'):\n parser.error('too few arguments')\n args.func(args)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n" ]
[ [ "tensorflow.python.debug.wrappers.local_cli_wrapper.LocalCLIDebugWrapperSession", "tensorflow.python.saved_model.load.load", "tensorflow.python.compiler.tensorrt.trt_convert.DEFAULT_TRT_CONVERSION_PARAMS._replace", "numpy.save", "tensorflow.python.lib.io.file_io.FileIO", "tensorflow.python.framework.ops.Graph", "tensorflow.python.tools.saved_model_utils.get_meta_graph_def", "tensorflow.python.saved_model.save._AugmentedGraphView", "tensorflow.python.framework.meta_graph.ops_used_by_graph_def", "tensorflow.core.framework.types_pb2.DataType.items", "tensorflow.python.tools.saved_model_utils.read_saved_model", "tensorflow.core.example.example_pb2.Example", "tensorflow.python.tools.saved_model_utils.get_saved_model_tag_sets" ] ]
miraaitsaada/cclust_package
[ "4256693b424350ba3332861c7e937100f7a432c5" ]
[ "coclust/coclustering/coclust_mod.py" ]
[ "# -*- coding: utf-8 -*-\n\n\"\"\"\nThe :mod:`coclust.coclustering.coclust_mod` module provides an implementation\nof a co-clustering algorithm by direct maximization of graph modularity.\n\"\"\"\n\n# Author: Francois Role <[email protected]>\n# Stanislas Morbieu <[email protected]>\n\n# License: BSD 3 clause\n\nimport numpy as np\nfrom sklearn.utils import check_random_state, check_array\nfrom joblib import Parallel, delayed, effective_n_jobs\n\nfrom ..initialization import random_init\nfrom .base_diagonal_coclust import BaseDiagonalCoclust\n\n\ndef _fit_single(X, n_clusters, random_state, init, max_iter, tol, y=None):\n \"\"\"Perform one run of co-clustering by direct maximization of graph\n modularity.\n\n Parameters\n ----------\n X : numpy array or scipy sparse matrix, shape=(n_samples, n_features)\n Matrix to be analyzed\n \"\"\"\n if init is None:\n W = random_init(n_clusters, X.shape[1], random_state)\n else:\n W = np.matrix(init, dtype=float)\n\n Z = np.zeros((X.shape[0], n_clusters))\n\n # Compute the modularity matrix\n row_sums = np.matrix(X.sum(axis=1))\n col_sums = np.matrix(X.sum(axis=0))\n N = float(X.sum())\n indep = (row_sums.dot(col_sums)) / N\n\n # B is a numpy matrix\n B = X - indep\n\n modularities = []\n\n # Loop\n m_begin = float(\"-inf\")\n change = True\n iteration = 0\n while change:\n change = False\n\n # Reassign rows\n BW = B.dot(W)\n for idx, k in enumerate(np.argmax(BW, axis=1)):\n Z[idx, :] = 0\n Z[idx, k] = 1\n\n # Reassign columns\n BtZ = (B.T).dot(Z)\n for idx, k in enumerate(np.argmax(BtZ, axis=1)):\n W[idx, :] = 0\n W[idx, k] = 1\n\n k_times_k = (Z.T).dot(BW)\n m_end = np.trace(k_times_k)\n iteration += 1\n if (np.abs(m_end - m_begin) > tol and\n iteration < max_iter):\n modularities.append(m_end/N)\n m_begin = m_end\n change = True\n\n row_labels_ = np.argmax(Z, axis=1).tolist()\n column_labels_ = np.argmax(W, axis=1).tolist()\n modularity = m_end / N\n nb_iterations = iteration\n return row_labels_, column_labels_, modularity, modularities, nb_iterations\n\n\nclass CoclustMod(BaseDiagonalCoclust):\n \"\"\"Co-clustering by direct maximization of graph modularity.\n\n Parameters\n ----------\n n_clusters : int, optional, default: 2\n Number of co-clusters to form\n\n init : numpy array or scipy sparse matrix, \\\n shape (n_features, n_clusters), optional, default: None\n Initial column labels\n\n max_iter : int, optional, default: 20\n Maximum number of iterations\n\n n_init : int, optional, default: 1\n Number of time the algorithm will be run with different\n initializations. The final results will be the best output of `n_init`\n consecutive runs in terms of modularity.\n\n random_state : integer or numpy.RandomState, optional\n The generator used to initialize the centers. If an integer is\n given, it fixes the seed. Defaults to the global numpy random\n number generator.\n\n tol : float, default: 1e-9\n Relative tolerance with regards to modularity to declare convergence\n\n Attributes\n ----------\n row_labels_ : array-like, shape (n_rows,)\n Bicluster label of each row\n\n column_labels_ : array-like, shape (n_cols,)\n Bicluster label of each column\n\n modularity : float\n Final value of the modularity\n\n modularities : list\n Record of all computed modularity values for all iterations\n\n References\n ----------\n * Ailem M., Role F., Nadif M., Co-clustering Document-term Matrices by \\\n Direct Maximization of Graph Modularity. 
CIKM 2015: 1807-1810\n \"\"\"\n\n def __init__(self, n_clusters=2, init=None, max_iter=20, n_init=1,\n tol=1e-9, random_state=None, n_jobs=1):\n self.n_clusters = n_clusters\n self.init = init\n self.max_iter = max_iter\n self.n_init = n_init\n self.tol = tol\n self.random_state = random_state\n self.n_jobs = n_jobs\n # to remove except for self.modularity = -np.inf!!!\n self.row_labels_ = None\n self.column_labels_ = None\n self.modularity = -np.inf\n self.modularities = []\n\n\n\n def fit(self, X, y=None):\n \"\"\"Perform co-clustering by direct maximization of graph modularity.\n\n Parameters\n ----------\n X : numpy array or scipy sparse matrix, shape=(n_samples, n_features)\n Matrix to be analyzed\n \"\"\"\n\n random_state = check_random_state(self.random_state)\n\n check_array(X, accept_sparse=True, dtype=\"numeric\", order=None,\n copy=False, force_all_finite=True, ensure_2d=True,\n allow_nd=False, ensure_min_samples=self.n_clusters,\n ensure_min_features=self.n_clusters,\n warn_on_dtype=False, estimator=None)\n\n if type(X) == np.ndarray:\n X = np.matrix(X)\n\n X = X.astype(float)\n\n modularity = self.modularity\n modularities = []\n row_labels = None\n column_labels = None\n seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)\n if effective_n_jobs(self.n_jobs) == 1:\n for seed in seeds:\n new_row_labels, new_column_labels, new_modularity, new_modularities, new_nb_iterations = _fit_single(X, self.n_clusters, seed, self.init, self.max_iter, self.tol, y)\n if np.isnan(new_modularity):\n raise ValueError(\"matrix may contain unexpected NaN values\")\n # remember attributes corresponding to the best modularity\n if (new_modularity > modularity):\n modularity = new_modularity\n modularities = new_modularities\n row_labels = new_row_labels\n column_labels = new_column_labels\n else:\n results = Parallel(n_jobs=self.n_jobs, verbose=0)(\n delayed(_fit_single)(X, self.n_clusters, seed, self.init, self.max_iter, self.tol, y)\n for seed in seeds)\n list_of_row_labels, list_of_column_labels, list_of_modularity, list_of_modularities, list_of_nb_iterations = zip(*results)\n best = np.argmax(list_of_modularity)\n row_labels = list_of_row_labels[best]\n column_labels = list_of_column_labels[best]\n modularity = list_of_modularity[best]\n modularities = list_of_modularities[best]\n n_iter = list_of_nb_iterations[best]\n\n \n\n # update instance variables\n self.modularity = modularity\n self.modularities = modularities\n self.row_labels_ = row_labels\n self.column_labels_ = column_labels\n\n return self\n" ]
[ [ "sklearn.utils.check_random_state", "numpy.zeros", "numpy.matrix", "sklearn.utils.check_array", "numpy.abs", "numpy.argmax", "numpy.trace", "numpy.iinfo", "numpy.isnan" ] ]
FHsong/reinforcement-learning-an-introduction
[ "4eb5b36a5d59487ae1c3ea6cc90010c11bda5897" ]
[ "chapter08/maze.py" ]
[ "#######################################################################\n# Copyright (C) #\n# 2016-2018 Shangtong Zhang([email protected]) #\n# 2016 Kenta Shimada([email protected]) #\n# Permission given to modify the code as long as you keep this #\n# declaration at the top #\n#######################################################################\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nimport heapq\nfrom copy import deepcopy\n\nclass PriorityQueue:\n def __init__(self):\n self.pq = []\n self.entry_finder = {}\n self.REMOVED = '<removed-task>'\n self.counter = 0\n\n def add_item(self, item, priority=0):\n if item in self.entry_finder:\n self.remove_item(item)\n entry = [priority, self.counter, item]\n self.counter += 1\n self.entry_finder[item] = entry\n heapq.heappush(self.pq, entry)\n\n def remove_item(self, item):\n entry = self.entry_finder.pop(item)\n entry[-1] = self.REMOVED\n\n def pop_item(self):\n while self.pq:\n priority, count, item = heapq.heappop(self.pq)\n if item is not self.REMOVED:\n del self.entry_finder[item]\n return item, priority\n raise KeyError('pop from an empty priority queue')\n\n def empty(self):\n return not self.entry_finder\n\n# A wrapper class for a maze, containing all the information about the maze.\n# Basically it's initialized to DynaMaze by default, however it can be easily adapted\n# to other maze\nclass Maze:\n def __init__(self):\n # maze width\n self.WORLD_WIDTH = 9\n\n # maze height\n self.WORLD_HEIGHT = 6\n\n # all possible actions\n self.ACTION_UP = 0\n self.ACTION_DOWN = 1\n self.ACTION_LEFT = 2\n self.ACTION_RIGHT = 3\n self.actions = [self.ACTION_UP, self.ACTION_DOWN, self.ACTION_LEFT, self.ACTION_RIGHT]\n\n # start state\n self.START_STATE = [2, 0]\n\n # goal state\n self.GOAL_STATES = [[0, 8]]\n\n # all obstacles\n self.obstacles = [[1, 2], [2, 2], [3, 2], [0, 7], [1, 7], [2, 7], [4, 5]]\n self.old_obstacles = None\n self.new_obstacles = None\n\n # time to change obstacles\n self.obstacle_switch_time = None\n\n # initial state action pair values\n # self.stateActionValues = np.zeros((self.WORLD_HEIGHT, self.WORLD_WIDTH, len(self.actions)))\n\n # the size of q value\n self.q_size = (self.WORLD_HEIGHT, self.WORLD_WIDTH, len(self.actions))\n\n # max steps\n self.max_steps = float('inf')\n\n # track the resolution for this maze\n self.resolution = 1\n\n # extend a state to a higher resolution maze\n # @state: state in lower resoultion maze\n # @factor: extension factor, one state will become factor^2 states after extension\n def extend_state(self, state, factor):\n new_state = [state[0] * factor, state[1] * factor]\n new_states = []\n for i in range(0, factor):\n for j in range(0, factor):\n new_states.append([new_state[0] + i, new_state[1] + j])\n return new_states\n\n # extend a state into higher resolution\n # one state in original maze will become @factor^2 states in @return new maze\n def extend_maze(self, factor):\n new_maze = Maze()\n new_maze.WORLD_WIDTH = self.WORLD_WIDTH * factor\n new_maze.WORLD_HEIGHT = self.WORLD_HEIGHT * factor\n new_maze.START_STATE = [self.START_STATE[0] * factor, self.START_STATE[1] * factor]\n new_maze.GOAL_STATES = self.extend_state(self.GOAL_STATES[0], factor)\n new_maze.obstacles = []\n for state in self.obstacles:\n new_maze.obstacles.extend(self.extend_state(state, factor))\n new_maze.q_size = (new_maze.WORLD_HEIGHT, new_maze.WORLD_WIDTH, len(new_maze.actions))\n # new_maze.stateActionValues = 
np.zeros((new_maze.WORLD_HEIGHT, new_maze.WORLD_WIDTH, len(new_maze.actions)))\n new_maze.resolution = factor\n return new_maze\n\n # take @action in @state\n # @return: [new state, reward]\n def step(self, state, action):\n x, y = state\n if action == self.ACTION_UP:\n x = max(x - 1, 0)\n elif action == self.ACTION_DOWN:\n x = min(x + 1, self.WORLD_HEIGHT - 1)\n elif action == self.ACTION_LEFT:\n y = max(y - 1, 0)\n elif action == self.ACTION_RIGHT:\n y = min(y + 1, self.WORLD_WIDTH - 1)\n if [x, y] in self.obstacles:\n x, y = state\n if [x, y] in self.GOAL_STATES:\n reward = 1.0\n else:\n reward = 0.0\n return [x, y], reward\n\n# a wrapper class for parameters of dyna algorithms\nclass DynaParams:\n def __init__(self):\n # discount\n self.gamma = 0.95\n\n # probability for exploration\n self.epsilon = 0.1\n\n # step size\n self.alpha = 0.1\n\n # weight for elapsed time\n self.time_weight = 0\n\n # n-step planning\n self.planning_steps = 5\n\n # average over several independent runs\n self.runs = 10\n\n # algorithm names\n self.methods = ['Dyna-Q', 'Dyna-Q+']\n\n # threshold for priority queue\n self.theta = 0\n\n\n# choose an action based on epsilon-greedy algorithm\ndef choose_action(state, q_value, maze, dyna_params):\n if np.random.binomial(1, dyna_params.epsilon) == 1:\n return np.random.choice(maze.actions)\n else:\n values = q_value[state[0], state[1], :]\n return np.random.choice([action for action, value in enumerate(values) if value == np.max(values)])\n\n# Trivial model for planning in Dyna-Q\nclass TrivialModel:\n # @rand: an instance of np.random.RandomState for sampling\n def __init__(self, rand=np.random):\n self.model = dict()\n self.rand = rand\n\n # feed the model with previous experience\n def feed(self, state, action, next_state, reward):\n state = deepcopy(state)\n next_state = deepcopy(next_state)\n if tuple(state) not in self.model.keys():\n self.model[tuple(state)] = dict()\n self.model[tuple(state)][action] = [list(next_state), reward]\n\n # randomly sample from previous experience\n def sample(self):\n state_index = self.rand.choice(range(len(self.model.keys())))\n state = list(self.model)[state_index]\n action_index = self.rand.choice(range(len(self.model[state].keys())))\n action = list(self.model[state])[action_index]\n next_state, reward = self.model[state][action]\n state = deepcopy(state)\n next_state = deepcopy(next_state)\n return list(state), action, list(next_state), reward\n\n# Time-based model for planning in Dyna-Q+\nclass TimeModel:\n # @maze: the maze instance. 
Indeed it's not very reasonable to give access to maze to the model.\n # @timeWeight: also called kappa, the weight for elapsed time in sampling reward, it need to be small\n # @rand: an instance of np.random.RandomState for sampling\n def __init__(self, maze, time_weight=1e-4, rand=np.random):\n self.rand = rand\n self.model = dict()\n\n # track the total time\n self.time = 0\n\n self.time_weight = time_weight\n self.maze = maze\n\n # feed the model with previous experience\n def feed(self, state, action, next_state, reward):\n state = deepcopy(state)\n next_state = deepcopy(next_state)\n self.time += 1\n if tuple(state) not in self.model.keys():\n self.model[tuple(state)] = dict()\n\n # Actions that had never been tried before from a state were allowed to be considered in the planning step\n for action_ in self.maze.actions:\n if action_ != action:\n # Such actions would lead back to the same state with a reward of zero\n # Notice that the minimum time stamp is 1 instead of 0\n self.model[tuple(state)][action_] = [list(state), 0, 1]\n\n self.model[tuple(state)][action] = [list(next_state), reward, self.time]\n\n # randomly sample from previous experience\n def sample(self):\n state_index = self.rand.choice(range(len(self.model.keys())))\n state = list(self.model)[state_index]\n action_index = self.rand.choice(range(len(self.model[state].keys())))\n action = list(self.model[state])[action_index]\n next_state, reward, time = self.model[state][action]\n\n # adjust reward with elapsed time since last vist\n reward += self.time_weight * np.sqrt(self.time - time)\n\n state = deepcopy(state)\n next_state = deepcopy(next_state)\n\n return list(state), action, list(next_state), reward\n\n# Model containing a priority queue for Prioritized Sweeping\nclass PriorityModel(TrivialModel):\n def __init__(self, rand=np.random):\n TrivialModel.__init__(self, rand)\n # maintain a priority queue\n self.priority_queue = PriorityQueue()\n # track predecessors for every state\n self.predecessors = dict()\n\n # add a @state-@action pair into the priority queue with priority @priority\n def insert(self, priority, state, action):\n # note the priority queue is a minimum heap, so we use -priority\n self.priority_queue.add_item((tuple(state), action), -priority)\n\n # @return: whether the priority queue is empty\n def empty(self):\n return self.priority_queue.empty()\n\n # get the first item in the priority queue\n def sample(self):\n (state, action), priority = self.priority_queue.pop_item()\n next_state, reward = self.model[state][action]\n state = deepcopy(state)\n next_state = deepcopy(next_state)\n return -priority, list(state), action, list(next_state), reward\n\n # feed the model with previous experience\n def feed(self, state, action, next_state, reward):\n state = deepcopy(state)\n next_state = deepcopy(next_state)\n TrivialModel.feed(self, state, action, next_state, reward)\n if tuple(next_state) not in self.predecessors.keys():\n self.predecessors[tuple(next_state)] = set()\n self.predecessors[tuple(next_state)].add((tuple(state), action))\n\n # get all seen predecessors of a state @state\n def predecessor(self, state):\n if tuple(state) not in self.predecessors.keys():\n return []\n predecessors = []\n for state_pre, action_pre in list(self.predecessors[tuple(state)]):\n predecessors.append([list(state_pre), action_pre, self.model[state_pre][action_pre][1]])\n return predecessors\n\n\n# play for an episode for Dyna-Q algorithm\n# @q_value: state action pair values, will be updated\n# @model: model 
instance for planning\n# @maze: a maze instance containing all information about the environment\n# @dyna_params: several params for the algorithm\ndef dyna_q(q_value, model, maze, dyna_params):\n state = maze.START_STATE\n steps = 0\n while state not in maze.GOAL_STATES:\n # track the steps\n steps += 1\n\n # get action\n action = choose_action(state, q_value, maze, dyna_params)\n\n # take action\n next_state, reward = maze.step(state, action)\n\n # Q-Learning update\n q_value[state[0], state[1], action] += \\\n dyna_params.alpha * (reward + dyna_params.gamma * np.max(q_value[next_state[0], next_state[1], :]) -\n q_value[state[0], state[1], action])\n\n # feed the model with experience\n model.feed(state, action, next_state, reward)\n\n # sample experience from the model\n for t in range(0, dyna_params.planning_steps):\n state_, action_, next_state_, reward_ = model.sample()\n q_value[state_[0], state_[1], action_] += \\\n dyna_params.alpha * (reward_ + dyna_params.gamma * np.max(q_value[next_state_[0], next_state_[1], :]) -\n q_value[state_[0], state_[1], action_])\n\n state = next_state\n\n # check whether it has exceeded the step limit\n if steps > maze.max_steps:\n break\n\n return steps\n\n# play for an episode for prioritized sweeping algorithm\n# @q_value: state action pair values, will be updated\n# @model: model instance for planning\n# @maze: a maze instance containing all information about the environment\n# @dyna_params: several params for the algorithm\n# @return: # of backups during this episode\ndef prioritized_sweeping(q_value, model, maze, dyna_params):\n state = maze.START_STATE\n\n # track the steps in this episode\n steps = 0\n\n # track the backups in planning phase\n backups = 0\n\n while state not in maze.GOAL_STATES:\n steps += 1\n\n # get action\n action = choose_action(state, q_value, maze, dyna_params)\n\n # take action\n next_state, reward = maze.step(state, action)\n\n # feed the model with experience\n model.feed(state, action, next_state, reward)\n\n # get the priority for current state action pair\n priority = np.abs(reward + dyna_params.gamma * np.max(q_value[next_state[0], next_state[1], :]) -\n q_value[state[0], state[1], action])\n\n if priority > dyna_params.theta:\n model.insert(priority, state, action)\n\n # start planning\n planning_step = 0\n\n # planning for several steps,\n # although keep planning until the priority queue becomes empty will converge much faster\n while planning_step < dyna_params.planning_steps and not model.empty():\n # get a sample with highest priority from the model\n priority, state_, action_, next_state_, reward_ = model.sample()\n\n # update the state action value for the sample\n delta = reward_ + dyna_params.gamma * np.max(q_value[next_state_[0], next_state_[1], :]) - \\\n q_value[state_[0], state_[1], action_]\n q_value[state_[0], state_[1], action_] += dyna_params.alpha * delta\n\n # deal with all the predecessors of the sample state\n for state_pre, action_pre, reward_pre in model.predecessor(state_):\n priority = np.abs(reward_pre + dyna_params.gamma * np.max(q_value[state_[0], state_[1], :]) -\n q_value[state_pre[0], state_pre[1], action_pre])\n if priority > dyna_params.theta:\n model.insert(priority, state_pre, action_pre)\n planning_step += 1\n\n state = next_state\n\n # update the # of backups\n backups += planning_step + 1\n\n return backups\n\n# Figure 8.2, DynaMaze, use 10 runs instead of 30 runs\ndef figure_8_2():\n # set up an instance for DynaMaze\n dyna_maze = Maze()\n dyna_params = DynaParams()\n\n runs 
= 10\n episodes = 50\n planning_steps = [0, 5, 50]\n steps = np.zeros((len(planning_steps), episodes))\n\n for run in tqdm(range(runs)):\n for i, planning_step in enumerate(planning_steps):\n dyna_params.planning_steps = planning_step\n q_value = np.zeros(dyna_maze.q_size)\n\n # generate an instance of Dyna-Q model\n model = TrivialModel()\n for ep in range(episodes):\n # print('run:', run, 'planning step:', planning_step, 'episode:', ep)\n steps[i, ep] += dyna_q(q_value, model, dyna_maze, dyna_params)\n\n # averaging over runs\n steps /= runs\n\n for i in range(len(planning_steps)):\n plt.plot(steps[i, :], label='%d planning steps' % (planning_steps[i]))\n plt.xlabel('episodes')\n plt.ylabel('steps per episode')\n plt.legend()\n\n plt.savefig('../images/figure_8_2.png')\n plt.close()\n\n# wrapper function for changing maze\n# @maze: a maze instance\n# @dynaParams: several parameters for dyna algorithms\ndef changing_maze(maze, dyna_params):\n\n # set up max steps\n max_steps = maze.max_steps\n\n # track the cumulative rewards\n rewards = np.zeros((dyna_params.runs, 2, max_steps))\n\n for run in tqdm(range(dyna_params.runs)):\n # set up models\n models = [TrivialModel(), TimeModel(maze, time_weight=dyna_params.time_weight)]\n\n # initialize state action values\n q_values = [np.zeros(maze.q_size), np.zeros(maze.q_size)]\n\n for i in range(len(dyna_params.methods)):\n # print('run:', run, dyna_params.methods[i])\n\n # set old obstacles for the maze\n maze.obstacles = maze.old_obstacles\n\n steps = 0\n last_steps = steps\n while steps < max_steps:\n # play for an episode\n steps += dyna_q(q_values[i], models[i], maze, dyna_params)\n\n # update cumulative rewards\n rewards[run, i, last_steps: steps] = rewards[run, i, last_steps]\n rewards[run, i, min(steps, max_steps - 1)] = rewards[run, i, last_steps] + 1\n last_steps = steps\n\n if steps > maze.obstacle_switch_time:\n # change the obstacles\n maze.obstacles = maze.new_obstacles\n\n # averaging over runs\n rewards = rewards.mean(axis=0)\n\n return rewards\n\n# Figure 8.4, BlockingMaze\ndef figure_8_4():\n # set up a blocking maze instance\n blocking_maze = Maze()\n blocking_maze.START_STATE = [5, 3]\n blocking_maze.GOAL_STATES = [[0, 8]]\n blocking_maze.old_obstacles = [[3, i] for i in range(0, 8)]\n\n # new obstalces will block the optimal path\n blocking_maze.new_obstacles = [[3, i] for i in range(1, 9)]\n\n # step limit\n blocking_maze.max_steps = 3000\n\n # obstacles will change after 1000 steps\n # the exact step for changing will be different\n # However given that 1000 steps is long enough for both algorithms to converge,\n # the difference is guaranteed to be very small\n blocking_maze.obstacle_switch_time = 1000\n\n # set up parameters\n dyna_params = DynaParams()\n dyna_params.alpha = 1.0\n dyna_params.planning_steps = 10\n dyna_params.runs = 20\n\n # kappa must be small, as the reward for getting the goal is only 1\n dyna_params.time_weight = 1e-4\n\n # play\n rewards = changing_maze(blocking_maze, dyna_params)\n\n for i in range(len(dyna_params.methods)):\n plt.plot(rewards[i, :], label=dyna_params.methods[i])\n plt.xlabel('time steps')\n plt.ylabel('cumulative reward')\n plt.legend()\n\n plt.savefig('../images/figure_8_4.png')\n plt.close()\n\n# Figure 8.5, ShortcutMaze\ndef figure_8_5():\n # set up a shortcut maze instance\n shortcut_maze = Maze()\n shortcut_maze.START_STATE = [5, 3]\n shortcut_maze.GOAL_STATES = [[0, 8]]\n shortcut_maze.old_obstacles = [[3, i] for i in range(1, 9)]\n\n # new obstacles will have a shorter path\n 
shortcut_maze.new_obstacles = [[3, i] for i in range(1, 8)]\n\n # step limit\n shortcut_maze.max_steps = 6000\n\n # obstacles will change after 3000 steps\n # the exact step for changing will be different\n # However given that 3000 steps is long enough for both algorithms to converge,\n # the difference is guaranteed to be very small\n shortcut_maze.obstacle_switch_time = 3000\n\n # set up parameters\n dyna_params = DynaParams()\n\n # 50-step planning\n dyna_params.planning_steps = 50\n dyna_params.runs = 5\n dyna_params.time_weight = 1e-3\n dyna_params.alpha = 1.0\n\n # play\n rewards = changing_maze(shortcut_maze, dyna_params)\n\n for i in range(len(dyna_params.methods)):\n plt.plot( rewards[i, :], label=dyna_params.methods[i])\n plt.xlabel('time steps')\n plt.ylabel('cumulative reward')\n plt.legend()\n\n plt.savefig('../images/figure_8_5.png')\n plt.close()\n\n# Check whether state-action values are already optimal\ndef check_path(q_values, maze):\n # get the length of optimal path\n # 14 is the length of optimal path of the original maze\n # 1.2 means it's a relaxed optifmal path\n max_steps = 14 * maze.resolution * 1.2\n state = maze.START_STATE\n steps = 0\n while state not in maze.GOAL_STATES:\n action = np.argmax(q_values[state[0], state[1], :])\n state, _ = maze.step(state, action)\n steps += 1\n if steps > max_steps:\n return False\n return True\n\n# Example 8.4, mazes with different resolution\ndef example_8_4():\n # get the original 6 * 9 maze\n original_maze = Maze()\n\n # set up the parameters for each algorithm\n params_dyna = DynaParams()\n params_dyna.planning_steps = 5\n params_dyna.alpha = 0.5\n params_dyna.gamma = 0.95\n\n params_prioritized = DynaParams()\n params_prioritized.theta = 0.0001\n params_prioritized.planning_steps = 5\n params_prioritized.alpha = 0.5\n params_prioritized.gamma = 0.95\n\n params = [params_prioritized, params_dyna]\n\n # set up models for planning\n models = [PriorityModel, TrivialModel]\n method_names = ['Prioritized Sweeping', 'Dyna-Q']\n\n # due to limitation of my machine, I can only perform experiments for 5 mazes\n # assuming the 1st maze has w * h states, then k-th maze has w * h * k * k states\n num_of_mazes = 5\n\n # build all the mazes\n mazes = [original_maze.extend_maze(i) for i in range(1, num_of_mazes + 1)]\n methods = [prioritized_sweeping, dyna_q]\n\n # My machine cannot afford too many runs...\n runs = 5\n\n # track the # of backups\n backups = np.zeros((runs, 2, num_of_mazes))\n\n for run in range(0, runs):\n for i in range(0, len(method_names)):\n for mazeIndex, maze in zip(range(0, len(mazes)), mazes):\n print('run %d, %s, maze size %d' % (run, method_names[i], maze.WORLD_HEIGHT * maze.WORLD_WIDTH))\n\n # initialize the state action values\n q_value = np.zeros(maze.q_size)\n\n # track steps / backups for each episode\n steps = []\n\n # generate the model\n model = models[i]()\n\n # play for an episode\n while True:\n steps.append(methods[i](q_value, model, maze, params[i]))\n\n # print best actions w.r.t. 
current state-action values\n # printActions(currentStateActionValues, maze)\n\n # check whether the (relaxed) optimal path is found\n if check_path(q_value, maze):\n break\n\n # update the total steps / backups for this maze\n backups[run, i, mazeIndex] = np.sum(steps)\n\n backups = backups.mean(axis=0)\n\n # Dyna-Q performs several backups per step\n backups[1, :] *= params_dyna.planning_steps + 1\n\n for i in range(0, len(method_names)):\n plt.plot(np.arange(1, num_of_mazes + 1), backups[i, :], label=method_names[i])\n plt.xlabel('maze resolution factor')\n plt.ylabel('backups until optimal solution')\n plt.yscale('log')\n plt.legend()\n\n plt.savefig('../images/example_8_4.png')\n plt.close()\n\nif __name__ == '__main__':\n figure_8_2()\n # figure_8_4()\n # figure_8_5()\n # example_8_4()\n\n" ]
[ [ "numpy.random.binomial", "numpy.sqrt", "numpy.sum", "matplotlib.pyplot.legend", "numpy.zeros", "matplotlib.pyplot.savefig", "matplotlib.pyplot.yscale", "numpy.random.choice", "numpy.argmax", "numpy.arange", "numpy.max", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.use", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlabel" ] ]
BitJetKit/turicreate
[ "4fc93fb8873b90a1a57da499db4ca710e78f1478" ]
[ "src/unity/python/turicreate/test/test_sframe.py" ]
[ "# -*- coding: utf-8 -*-\n# Copyright © 2017 Apple Inc. All rights reserved.\n#\n# Use of this source code is governed by a BSD-3-clause license that can\n# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause\nfrom __future__ import print_function as _\nfrom __future__ import division as _\nfrom __future__ import absolute_import as _\nfrom ..data_structures.sframe import SFrame\nfrom ..data_structures.sarray import SArray\nfrom ..data_structures.image import Image\nfrom ..util import _assert_sframe_equal, generate_random_sframe\nfrom .. import _launch, load_sframe, aggregate\nfrom . import util\n\nimport pandas as pd\nfrom .._cython.cy_flexible_type import GMT\nfrom pandas.util.testing import assert_frame_equal\nimport unittest\nimport datetime as dt\nimport tempfile\nimport os\nimport csv\nimport gzip\nimport string\nimport time\nimport numpy as np\nimport array\nimport math\nimport random\nimport shutil\nimport functools\nimport sys\nimport mock\nimport sqlite3\nfrom .dbapi2_mock import dbapi2_mock\n\n\nclass SFrameTest(unittest.TestCase):\n def setUp(self):\n self.int_data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n self.float_data = [1., 2., 3., 4., 5., 6., 7., 8., 9., 10.]\n self.string_data = [\"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\"]\n self.a_to_z = [str(chr(97 + i)) for i in range(0, 26)]\n self.dataframe = pd.DataFrame({'int_data': self.int_data, 'float_data': self.float_data, 'string_data': self.string_data})\n self.url = \"http://s3-us-west-2.amazonaws.com/testdatasets/a_to_z.txt.gz\"\n\n self.int_data2 = range(50,60)\n self.float_data2 = [1.0 * i for i in range(50,60)]\n self.string_data2 = [str(i) for i in range(50,60)]\n self.dataframe2 = pd.DataFrame({'int_data': self.int_data2, 'float_data': self.float_data2, 'string_data': self.string_data2})\n self.vec_data = [array.array('d', [i, i+1]) for i in self.int_data]\n self.list_data = [[i, str(i), i * 1.0] for i in self.int_data]\n self.dict_data = [{str(i): i, i : float(i)} for i in self.int_data]\n self.datetime_data = [dt.datetime(2013, 5, 7, 10, 4, 10),\n dt.datetime(1902, 10, 21, 10, 34, 10).replace(tzinfo=GMT(0.0))]\n self.all_type_cols = [self.int_data,\n self.float_data,\n self.string_data,\n self.vec_data,\n self.list_data,\n self.dict_data,\n self.datetime_data*5]\n self.sf_all_types = SFrame({\"X\"+str(i[0]):i[1] for i in zip(range(1,8),\n self.all_type_cols)})\n\n # Taken from http://en.wikipedia.org/wiki/Join_(SQL) for fun.\n self.employees_sf = SFrame()\n self.employees_sf.add_column(SArray(['Rafferty','Jones','Heisenberg','Robinson','Smith','John']), 'last_name', inplace=True)\n self.employees_sf.add_column(SArray([31,33,33,34,34,None]), 'dep_id', inplace=True)\n\n # XXX: below are only used by one test!\n self.departments_sf = SFrame()\n self.departments_sf.add_column(SArray([31,33,34,35]), 'dep_id', inplace=True)\n self.departments_sf.add_column(SArray(['Sales','Engineering','Clerical','Marketing']), 'dep_name', inplace=True)\n\n def __assert_sarray_equal(self, sa1, sa2):\n l1 = list(sa1)\n l2 = list(sa2)\n self.assertEqual(len(l1), len(l2))\n for i in range(len(l1)):\n v1 = l1[i]\n v2 = l2[i]\n if v1 is None:\n self.assertEqual(v2, None)\n else:\n if type(v1) == dict:\n self.assertEqual(len(v1), len(v2))\n for key in v1:\n self.assertTrue(key in v1)\n self.assertEqual(v1[key], v2[key])\n\n elif (hasattr(v1, \"__iter__\")):\n self.assertEqual(len(v1), len(v2))\n for j in range(len(v1)):\n t1 = v1[j]; t2 = v2[j]\n if (type(t1) == float):\n if 
(math.isnan(t1)):\n self.assertTrue(math.isnan(t2))\n else:\n self.assertEqual(t1, t2)\n else:\n self.assertEqual(t1, t2)\n else:\n self.assertEqual(v1, v2)\n\n def test_split_datetime(self):\n from_zone = GMT(0)\n to_zone = GMT(4.5)\n utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')\n utc = utc.replace(tzinfo=from_zone)\n central = utc.astimezone(to_zone)\n\n sa = SArray([utc,central])\n\n expected = SFrame()\n expected ['X.year'] = [2011,2011]\n expected ['X.month'] = [1,1]\n expected ['X.day'] = [21,21]\n expected ['X.hour'] = [2,7]\n expected ['X.minute'] = [37,7]\n expected ['X.second'] = [21,21]\n expected ['X.timezone'] = [0.0,4.5]\n result = sa.split_datetime(timezone=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # column names\n expected = SFrame()\n expected ['ttt.year'] = [2011,2011]\n expected ['ttt.minute'] = [37,7]\n expected ['ttt.second'] = [21,21]\n\n result = sa.split_datetime(column_name_prefix='ttt',limit=['year','minute','second'])\n self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n sf = SFrame({'datetime': sa})\n result = sf.split_datetime('datetime', column_name_prefix='ttt',limit=['year','minute','second'])\n self.assertEqual(result.column_names(), ['ttt.year', 'ttt.minute', 'ttt.second'])\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n\n def __test_equal(self, sf, df):\n # asserts two frames are equal, ignoring column ordering.\n self.assertEqual(sf.num_rows(), df.shape[0])\n self.assertEqual(sf.num_columns(), df.shape[1])\n assert_frame_equal(sf.to_dataframe(), df[sf.column_names()])\n\n def __create_test_df(self, size):\n int_data = []\n float_data = []\n string_data = []\n for i in range(0,size):\n int_data.append(i)\n float_data.append(float(i))\n string_data.append(str(i))\n\n return pd.DataFrame({'int_data': int_data,\n 'float_data': float_data,\n 'string_data': string_data})\n\n # Test if the rows are all the same...row order does not matter.\n # (I do expect column order to be the same)\n def __assert_join_results_equal(self, sf, expected_sf):\n _assert_sframe_equal(sf, expected_sf, check_row_order=False)\n\n def test_creation_from_dataframe(self):\n # created from empty dataframe\n sf_empty = SFrame(data=pd.DataFrame())\n self.__test_equal(sf_empty, pd.DataFrame())\n\n sf = SFrame(data=self.dataframe, format='dataframe')\n self.__test_equal(sf, self.dataframe)\n\n sf = SFrame(data=self.dataframe, format='auto')\n self.__test_equal(sf, self.dataframe)\n\n original_p = pd.DataFrame({'a':[1.0, float('nan')]})\n effective_p = pd.DataFrame({'a':[1.0, None]})\n sf = SFrame(data=original_p)\n self.__test_equal(sf, effective_p)\n\n original_p = pd.DataFrame({'a':['a',None,'b']})\n sf = SFrame(data=original_p)\n self.__test_equal(sf, original_p)\n\n def test_auto_parse_csv_with_bom(self):\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:\n df = pd.DataFrame({'float_data': self.float_data,\n 'int_data': self.int_data,\n 'string_data': self.a_to_z[:len(self.int_data)]})\n df.to_csv(csvfile, index=False)\n csvfile.close()\n\n import codecs\n with open(csvfile.name, 'rb') as f:\n content = f.read()\n with open(csvfile.name, 'wb') as f:\n f.write(codecs.BOM_UTF8)\n f.write(content)\n\n sf = SFrame.read_csv(csvfile.name, header=True)\n self.assertEqual(sf.dtype, [float, int, str])\n self.__test_equal(sf, df)\n\n def test_auto_parse_csv(self):\n with 
tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:\n df = pd.DataFrame({'float_data': self.float_data,\n 'int_data': self.int_data,\n 'string_data': self.a_to_z[:len(self.int_data)]})\n df.to_csv(csvfile, index=False)\n csvfile.close()\n\n sf = SFrame.read_csv(csvfile.name, header=True)\n\n self.assertEqual(sf.dtype, [float, int, str])\n self.__test_equal(sf, df)\n\n def test_parse_csv(self):\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:\n self.dataframe.to_csv(csvfile, index=False)\n csvfile.close()\n\n # list type hints\n sf = SFrame.read_csv(csvfile.name,\n column_type_hints=[int, int, str])\n self.assertEqual(sf.dtype, [int, int, str])\n sf['int_data'] = sf['int_data'].astype(int)\n sf['float_data'] = sf['float_data'].astype(float)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, self.dataframe)\n\n # list type hints, incorrect number of columns\n self.assertRaises(RuntimeError,\n lambda: SFrame.read_csv(csvfile.name,\n column_type_hints=[int, float]))\n\n # dictionary type hints\n sf = SFrame.read_csv(csvfile.name,\n column_type_hints={'int_data': int,\n 'float_data': float,\n 'string_data': str})\n self.__test_equal(sf, self.dataframe)\n\n # partial dictionary type hints\n sf = SFrame.read_csv(csvfile.name,\n column_type_hints={'float_data': float,\n 'string_data': str})\n self.__test_equal(sf, self.dataframe)\n\n # single value type hints\n sf = SFrame.read_csv(csvfile.name, column_type_hints=str)\n self.assertEqual(sf.dtype, [str, str, str])\n all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])\n self.__test_equal(sf, all_string_column_df)\n\n # single value type hints row limit\n sf = SFrame.read_csv(csvfile.name, column_type_hints=str, nrows=5)\n self.assertEqual(sf.dtype, [str, str, str])\n all_string_column_df = self.dataframe.apply(lambda x: [str(ele) for ele in x])\n self.assertEqual(len(sf), 5)\n self.__test_equal(sf, all_string_column_df[0:len(sf)])\n\n\n sf = SFrame.read_csv(csvfile.name)\n sf2 = SFrame(csvfile.name, format='csv')\n self.__test_equal(sf2, sf.to_dataframe())\n\n f = open(csvfile.name, \"w\")\n f.write('a,b,c\\n')\n f.write('NA,PIKA,CHU\\n')\n f.write('1.0,2,3\\n')\n f.close()\n sf = SFrame.read_csv(csvfile.name,\n na_values=['NA','PIKA','CHU'],\n column_type_hints={'a':float,'b':int,'c':str})\n t = list(sf['a'])\n self.assertEqual(t[0], None)\n self.assertEqual(t[1], 1.0)\n t = list(sf['b'])\n self.assertEqual(t[0], None)\n self.assertEqual(t[1], 2)\n t = list(sf['c'])\n self.assertEqual(t[0], None)\n self.assertEqual(t[1], \"3\")\n\n def test_parse_csv_non_multi_line_unmatched_quotation(self):\n data = [{'type': 'foo', 'text_string': 'foo foo.'},\n {'type': 'bar', 'text_string': 'bar \" bar.'},\n {'type': 'foo', 'text_string': 'foo\".'}]\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as csvfile:\n with open(csvfile.name, 'w') as f:\n f.write(\"type,text_string\\n\") # header\n for l in data:\n f.write(l['type'] + ',' + l['text_string'] + '\\n')\n\n sf = SFrame.read_csv(csvfile.name, quote_char=None)\n self.assertEqual(len(sf), len(data))\n for i in range(len(sf)):\n self.assertEqual(sf[i], data[i])\n\n def test_save_load_file_cleanup(self):\n # when some file is in use, file should not be deleted\n with util.TempDirectory() as f:\n sf = SFrame()\n sf['a'] = SArray(range(1,1000000))\n sf.save(f)\n\n # many for each sarray, 1 sframe_idx, 1 object.bin, 1 ini\n file_count = len(os.listdir(f))\n self.assertTrue(file_count > 3)\n\n # sf1 now references the 
on disk file\n sf1 = SFrame(f)\n\n # create another SFrame and save to the same location\n sf2 = SFrame()\n sf2['b'] = SArray([str(i) for i in range(1,100000)])\n sf2['c'] = SArray(range(1, 100000))\n sf2.save(f)\n\n file_count = len(os.listdir(f))\n self.assertTrue(file_count > 3)\n\n # now sf1 should still be accessible\n self.__test_equal(sf1, sf.to_dataframe())\n\n # and sf2 is correct too\n sf3 = SFrame(f)\n self.__test_equal(sf3, sf2.to_dataframe())\n\n # when sf1 goes out of scope, the tmp files should be gone\n sf1 = 1\n time.sleep(1) # give time for the files being deleted\n file_count = len(os.listdir(f))\n self.assertTrue(file_count > 3)\n\n def test_save_load(self):\n\n # Check top level load function, with no suffix\n with util.TempDirectory() as f:\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.save(f)\n sf2 = load_sframe(f)\n self.__test_equal(sf2, self.dataframe)\n\n # Check individual formats with the SFrame constructor\n formats = ['.csv']\n\n for suffix in formats:\n f = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.save(f.name)\n sf2 = SFrame(f.name)\n sf2['int_data'] = sf2['int_data'].astype(int)\n sf2['float_data'] = sf2['float_data'].astype(float)\n sf2['string_data'] = sf2['string_data'].astype(str)\n self.__test_equal(sf2, self.dataframe)\n g=SArray([['a','b',3],[{'a':'b'}],[1,2,3]])\n g2=SFrame()\n g2['x']=g\n g2.save(f.name)\n g3=SFrame.read_csv(f.name,column_type_hints=list)\n self.__test_equal(g2, g3.to_dataframe())\n f.close()\n os.unlink(f.name)\n\n # Make sure this file don't exist before testing\n self.assertRaises(IOError, lambda: SFrame(data='__no_such_file__.frame_idx', format='sframe'))\n\n del sf2\n\n\n def test_save_load_reference(self):\n\n # Check top level load function, with no suffix\n with util.TempDirectory() as f:\n sf = SFrame(data=self.dataframe, format='dataframe')\n originallen = len(sf)\n sf.save(f)\n del sf\n\n sf = SFrame(f)\n # make a new column of \"1s and save it back\n int_data2 = sf['int_data'] + 1\n int_data2.__materialize__()\n sf['int_data2'] = int_data2\n sf._save_reference(f)\n del sf\n\n sf = SFrame(f)\n self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())\n\n # try to append and save reference\n expected = sf.to_dataframe()\n sf = sf.append(sf)\n sf._save_reference(f)\n\n sf = SFrame(f)\n self.assertTrue(((sf['int_data2'] - sf['int_data']) == 1).all())\n self.assertEqual(2 * originallen, len(sf))\n assert_frame_equal(sf[originallen:].to_dataframe(), expected)\n assert_frame_equal(sf[:originallen].to_dataframe(), expected)\n\n def test_save_to_csv(self):\n f = tempfile.NamedTemporaryFile(suffix='.csv', delete=False)\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.save(f.name, format='csv')\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str})\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':')\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':')\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n')\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n')\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False)\n sf2 = SFrame.read_csv(f.name, 
column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False)\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n self.__test_equal(sf2, self.dataframe)\n\n import csv\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'', quote_level=csv.QUOTE_MINIMAL)\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n self.__test_equal(sf2, self.dataframe)\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'', quote_level=csv.QUOTE_ALL)\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n self.__test_equal(sf2, self.dataframe)\n\n\n sf.export_csv(f.name, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'', quote_level=csv.QUOTE_NONE)\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, delimiter=':', line_terminator='\\r\\n', double_quote=False, quote_char='\\'')\n self.__test_equal(sf2, self.dataframe)\n\n # Pandas compatibility options\n sf.export_csv(f.name, sep=':', lineterminator='\\r\\n', doublequote=False, quotechar='\\'', quote_level=csv.QUOTE_NONE)\n sf2 = SFrame.read_csv(f.name, column_type_hints={'int_data': int, 'float_data': float, 'string_data': str}, sep=':', lineterminator='\\r\\n', doublequote=False, quotechar='\\'')\n self.__test_equal(sf2, self.dataframe)\n f.close()\n os.unlink(f.name)\n\n def test_save_to_json(self):\n f = tempfile.NamedTemporaryFile(suffix='.json', delete=False)\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.save(f.name, format='json')\n sf2 = SFrame.read_json(f.name)\n # the float column will be parsed as integer\n sf2['float_data'] = sf2['float_data'].astype(float)\n self.__test_equal(sf2, self.dataframe)\n\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.export_json(f.name)\n sf2 = SFrame.read_json(f.name)\n sf2['float_data'] = sf2['float_data'].astype(float)\n self.__test_equal(sf2, self.dataframe)\n\n with open(f.name, 'w') as out:\n out.write('[\\n]')\n sf = SFrame.read_json(f.name)\n self.__test_equal(SFrame(), sf.to_dataframe())\n\n with open(f.name, 'w') as out:\n out.write('')\n sf = SFrame.read_json(f.name, orient='lines')\n self.__test_equal(SFrame(), sf.to_dataframe())\n\n sf = SFrame(data=self.dataframe, format='dataframe')\n sf.export_json(f.name, orient='lines')\n sf2 = SFrame.read_json(f.name, orient='lines')\n sf2['float_data'] = sf2['float_data'].astype(float)\n self.__test_equal(sf2, self.dataframe)\n f.close()\n os.unlink(f.name)\n\n def _remove_sframe_files(self, prefix):\n filelist = [ f for f in os.listdir(\".\") if f.startswith(prefix) ]\n for f in filelist:\n os.remove(f)\n\n def test_creation_from_txt(self):\n f = tempfile.NamedTemporaryFile(suffix='.txt', delete=False)\n df = self.dataframe[['string_data']]\n df.to_csv(f.name, index=False)\n sf = SFrame(f.name)\n self.assertEqual(sf['string_data'].dtype, 
int)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, df)\n\n fgzip = tempfile.NamedTemporaryFile(suffix='.txt.gz', delete=False)\n f_in = open(f.name, 'rb')\n f_out = gzip.open(fgzip.name, 'wb')\n f_out.writelines(f_in)\n f_out.close()\n f_in.close()\n sf = SFrame(fgzip.name)\n self.assertEqual(sf['string_data'].dtype, int)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, df)\n\n fgzip.close()\n os.unlink(fgzip.name)\n f.close()\n os.unlink(f.name)\n\n def test_creation_from_csv_on_local(self):\n if os.path.exists('./foo.csv'):\n os.remove('./foo.csv')\n with open('./foo.csv', 'w') as f:\n url = f.name\n basesf = SFrame(self.dataframe)\n basesf.save(url, format=\"csv\")\n f.close()\n sf = SFrame('./foo.csv')\n self.assertEqual(sf['float_data'].dtype, int)\n sf['float_data'] = sf['float_data'].astype(float)\n self.assertEqual(sf['string_data'].dtype, int)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, self.dataframe)\n sf = SFrame(url)\n self.assertEqual(sf['float_data'].dtype, int)\n sf['float_data'] = sf['float_data'].astype(float)\n self.assertEqual(sf['string_data'].dtype, int)\n sf['string_data'] = sf['string_data'].astype(str)\n self.__test_equal(sf, self.dataframe)\n os.remove(url)\n\n def test_alternate_line_endings(self):\n # test Windows line endings\n if os.path.exists('./windows_lines.csv'):\n os.remove('./windows_lines.csv')\n windows_file_url = None\n with open('./windows_lines.csv', 'w') as f:\n windows_file_url = f.name\n def_writer = csv.writer(f, dialect='excel')\n column_list = ['numbers']\n def_writer.writerow(column_list)\n for i in self.int_data:\n def_writer.writerow([i])\n\n sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':int})\n self.assertEqual(sf.column_names(), column_list)\n self.assertEqual(sf.column_types(), [int])\n self.assertEqual(list(sf['numbers'].head()), self.int_data)\n\n sf = SFrame.read_csv('./windows_lines.csv', column_type_hints={'numbers':list}, error_bad_lines=False)\n self.assertEqual(sf.column_names(), column_list)\n self.assertEqual(sf.num_rows(), 0)\n\n os.remove(windows_file_url)\n\n def test_skip_rows(self):\n # test line skipping\n if os.path.exists('./skip_lines.csv'):\n os.remove('./skip_lines.csv')\n skip_file_url = None\n with open('./skip_lines.csv', 'w') as f:\n f.write(\"trash\\n\")\n f.write(\"junk\\n\")\n skip_file_url = f.name\n def_writer = csv.writer(f, dialect='excel')\n column_list = ['numbers']\n def_writer.writerow(column_list)\n for i in self.int_data:\n def_writer.writerow([i])\n\n sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':int})\n self.assertEqual(sf.column_names(), column_list)\n self.assertEqual(sf.column_types(), [int])\n self.assertEqual(list(sf['numbers'].head()), self.int_data)\n\n sf = SFrame.read_csv('./skip_lines.csv', skiprows=2, column_type_hints={'numbers':list}, error_bad_lines=False)\n self.assertEqual(sf.column_names(), column_list)\n self.assertEqual(sf.num_rows(), 0)\n\n os.remove(skip_file_url)\n\n\n def test_creation_from_csv_on_http(self):\n pass\n # sf = SFrame(data=self.url, use_header=False)\n # self.__test_equal(sf, pd.DataFrame({'1': self.a_to_z}))\n\n def test_creation_from_csv_on_s3(self):\n # Requires s3 account for jenkins\n # sf = SFrame(data='s3://turicreate-testdata/foo.csv')\n # print sf.head(sf.num_rows())\n pass\n\n def test_creation_from_csv_dir_local(self):\n csv_dir = \"./csv_dir\"\n\n if os.path.exists(csv_dir):\n 
shutil.rmtree(csv_dir)\n os.mkdir(csv_dir)\n\n for i in range(0, 100):\n with open(os.path.join(csv_dir, 'foo.%d.csv' % i), 'w') as f:\n url = f.name\n self.dataframe.to_csv(url, index=False)\n f.close()\n\n singleton_sf = SFrame.read_csv(os.path.join(csv_dir, \"foo.0.csv\"))\n self.assertEqual(singleton_sf.num_rows(), 10)\n\n many_sf = SFrame.read_csv(csv_dir)\n self.assertEqual(many_sf.num_rows(), 1000)\n\n glob_sf = SFrame.read_csv(os.path.join(csv_dir, \"foo.*2.csv\"))\n self.assertEqual(glob_sf.num_rows(), 100)\n\n with self.assertRaises(RuntimeError):\n SFrame.read_csv(\"missingdirectory\")\n\n with self.assertRaises(ValueError):\n SFrame.read_csv(\"\")\n\n shutil.rmtree(csv_dir)\n\n def test_creation_from_iterable(self):\n # Normal dict of lists\n the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}\n sf = SFrame(the_dict)\n df = pd.DataFrame(the_dict)\n self.__test_equal(sf, df)\n\n # Test that a missing value does not change the data type\n the_dict['ints'][0] = None\n sf = SFrame(the_dict)\n self.assertEqual(sf['ints'].dtype, int)\n\n # numpy.nan is actually a float, so it should cast the column to float\n the_dict['ints'][0] = np.nan\n sf = SFrame(the_dict)\n self.assertEqual(sf['ints'].dtype, float)\n\n # Just a single list\n sf = SFrame(self.int_data)\n df = pd.DataFrame(self.int_data)\n df.columns = ['X1']\n self.__test_equal(sf, df)\n\n # Normal list of lists\n list_of_lists = [[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]]\n sf = SFrame(list_of_lists)\n cntr = 0\n for i in sf:\n self.assertEqual(list_of_lists[cntr], list(i['X1']))\n cntr += 1\n\n self.assertEqual(sf.num_columns(), 1)\n\n the_dict = {'ints':self.int_data,'floats':self.float_data,'strings':self.string_data}\n sf = SFrame(the_dict)\n sf2 = SFrame({'ints':sf['ints'],'floats':sf['floats'],'strings':sf['strings']})\n df = pd.DataFrame(the_dict)\n self.__test_equal(sf2, df)\n sf2 = SFrame([sf['ints'],sf['floats'],sf['strings']])\n self.assertEqual(['X1','X2','X3'],sf2.column_names())\n sf2.rename({'X1':'ints','X2':'floats','X3':'strings'}, inplace=True)\n sf2=sf2[['floats','ints','strings']]\n self.__test_equal(sf2, df)\n\n sf = SFrame({'text': ('foo', 'bar', 'biz')})\n df = pd.DataFrame({'text': ['foo', 'bar', 'biz']})\n self.__test_equal(sf, df)\n\n def test_head_tail(self):\n sf = SFrame(data=self.dataframe)\n assert_frame_equal(sf.head(4).to_dataframe(), self.dataframe.head(4))\n # Cannot test for equality the same way because of dataframe indices\n taildf = sf.tail(4)\n for i in range(0, 4):\n self.assertEqual(taildf['int_data'][i], self.dataframe['int_data'][i+6])\n self.assertEqual(taildf['float_data'][i], self.dataframe['float_data'][i+6])\n self.assertEqual(taildf['string_data'][i], self.dataframe['string_data'][i+6])\n\n def test_head_tail_edge_case(self):\n sf = SFrame()\n self.assertEqual(sf.head().num_columns(), 0)\n self.assertEqual(sf.tail().num_columns(), 0)\n self.assertEqual(sf.head().num_rows(), 0)\n self.assertEqual(sf.tail().num_rows(), 0)\n sf = SFrame()\n sf['a'] = []\n self.assertEqual(sf.head().num_columns(), 1)\n self.assertEqual(sf.tail().num_columns(), 1)\n self.assertEqual(sf.head().num_rows(), 0)\n self.assertEqual(sf.tail().num_rows(), 0)\n\n def test_transform(self):\n sf = SFrame(data=self.dataframe)\n for i in range(sf.num_columns()):\n colname = sf.column_names()[i]\n sa = sf.apply(lambda x: x[colname], sf.column_types()[i])\n self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])\n\n sa = sf.apply(lambda x: x['int_data'] + x['float_data'], float)\n 
self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)\n\n def test_transform_with_recursion(self):\n sf = SFrame(data={'a':[0,1,2,3,4], 'b':['0','1','2','3','4']})\n # this should be the equivalent to sf.apply(lambda x:x since a is\n # equivalent to range(4)\n sa = sf.apply(lambda x: sf[x['a']])\n sb = sf.apply(lambda x: x)\n self.__assert_sarray_equal(sa, sb)\n\n def test_transform_with_type_inference(self):\n sf = SFrame(data=self.dataframe)\n for i in range(sf.num_columns()):\n colname = sf.column_names()[i]\n sa = sf.apply(lambda x: x[colname])\n self.__assert_sarray_equal(sa, sf[sf.column_names()[i]])\n\n sa = sf.apply(lambda x: x['int_data'] + x['float_data'])\n self.__assert_sarray_equal(sf['int_data'] + sf['float_data'], sa)\n\n # SFrame apply returns list of vector of numeric should be vector, not list\n sa = sf.apply(lambda x: [x['int_data'], x['float_data']])\n self.assertEqual(sa.dtype, array.array)\n\n def test_transform_with_exception(self):\n sf = SFrame(data=self.dataframe)\n self.assertRaises(KeyError, lambda: sf.apply(lambda x: x['some random key'])) # cannot find the key\n self.assertRaises(TypeError, lambda: sf.apply(lambda x: sum(x.values()))) # lambda cannot sum int and str\n self.assertRaises(ZeroDivisionError, lambda: sf.apply(lambda x: x['int_data'] / 0)) # divide by 0 error\n self.assertRaises(IndexError, lambda: sf.apply(lambda x: list(x.values())[10])) # index out of bound error\n\n def test_empty_transform(self):\n sf = SFrame()\n b = sf.apply(lambda x:x)\n self.assertEqual(len(b.head()), 0)\n\n def test_flatmap(self):\n # Correctness of typical usage\n n = 10\n sf = SFrame({'id': range(n)})\n new_sf = sf.flat_map([\"id_range\"], lambda x: [[str(i)] for i in range(x['id'])])\n self.assertEqual(new_sf.column_names(), [\"id_range\"])\n self.assertEqual(new_sf.column_types(), [str])\n expected_col = [str(x) for i in range(n) for x in range(i)]\n self.assertListEqual(list(new_sf['id_range']), expected_col)\n\n # Empty SFrame, without explicit column types\n sf = SFrame()\n with self.assertRaises(TypeError):\n new_sf = sf.flat_map(['id_range'],\n lambda x: [[i] for i in range(x['id'])])\n\n # Empty rows successfully removed\n sf = SFrame({'id': range(15)})\n new_sf = sf.flat_map(['id'],\n lambda x: [[x['id']]] if x['id'] > 8 else [])\n self.assertEqual(new_sf.num_rows(), 6)\n\n # First ten rows are empty raises error\n with self.assertRaises(TypeError):\n new_sf = sf.flat_map(['id'],\n lambda x: [[x['id']]] if x['id'] > 9 else [])\n\n\n\n def test_select_column(self):\n sf = SFrame(data=self.dataframe)\n\n sub_sf = sf.select_columns(['int_data', 'string_data'])\n exp_df = pd.DataFrame({'int_data': self.int_data, 'string_data': self.string_data})\n self.__test_equal(sub_sf, exp_df)\n\n with self.assertRaises(ValueError):\n sf.select_columns(['int_data', 'string_data', 'int_data'])\n\n # test indexing\n sub_col = sf['float_data']\n self.assertEqual(list(sub_col.head(10)), self.float_data)\n\n with self.assertRaises(TypeError):\n sub_sf = sf.select_columns(['duh',1])\n\n with self.assertRaises(TypeError):\n sub_sf = sf.select_columns(0)\n\n with self.assertRaises(RuntimeError):\n sub_sf = sf.select_columns(['not_a_column'])\n\n self.assertEqual(sf.select_columns([int]).column_names(), ['int_data'])\n self.assertEqual(sf.select_columns([int, str]).column_names(), ['int_data', 'string_data'])\n\n self.assertEqual(sf[int].column_names(), ['int_data'])\n self.assertEqual(sf[[int, str]].column_names(), ['int_data', 'string_data'])\n self.assertEqual(sf[int, 
str].column_names(), ['int_data', 'string_data'])\n self.assertEqual(sf['int_data', 'string_data'].column_names(), ['int_data', 'string_data'])\n self.assertEqual(sf['string_data', 'int_data'].column_names(), ['string_data', 'int_data'])\n\n sf = SFrame()\n with self.assertRaises(RuntimeError):\n sf.select_column('x')\n\n with self.assertRaises(RuntimeError):\n sf.select_columns(['x'])\n\n sf.add_column(SArray(), 'x', inplace=True)\n # does not throw\n sf.select_column('x')\n sf.select_columns(['x'])\n with self.assertRaises(RuntimeError):\n sf.select_column('y')\n\n with self.assertRaises(RuntimeError):\n sf.select_columns(['y'])\n\n def test_topk(self):\n sf = SFrame(data=self.dataframe)\n\n # Test that order is preserved\n df2 = sf.topk('int_data').to_dataframe()\n df2_expected = self.dataframe.sort_values('int_data', ascending=False)\n df2_expected.index = range(df2.shape[0])\n assert_frame_equal(df2, df2_expected)\n\n df2 = sf.topk('float_data', 3).to_dataframe()\n df2_expected = self.dataframe.sort_values('float_data', ascending=False).head(3)\n df2_expected.index = range(3)\n assert_frame_equal(df2, df2_expected)\n\n df2 = sf.topk('string_data', 3).to_dataframe()\n for i in range(0, 3):\n self.assertEqual(df2['int_data'][2-i], i + 7)\n\n with self.assertRaises(TypeError):\n sf.topk(2,3)\n\n sf = SFrame()\n sf.add_column(SArray([1,2,3,4,5]), 'a', inplace=True)\n sf.add_column(SArray([1,2,3,4,5]), 'b', inplace=True)\n\n sf.topk('a', 1) # should not fail\n\n\n def test_filter(self):\n sf = SFrame(data=self.dataframe)\n\n filter_sa = SArray([1,1,1,0,0,0,0,1,1,1])\n\n sf2 = sf[filter_sa]\n exp_df = sf.head(3).append(sf.tail(3))\n self.__test_equal(sf2, exp_df.to_dataframe())\n\n # filter by 1s\n sf2 = sf[SArray(self.int_data)]\n exp_df = sf.head(10).to_dataframe()\n self.__test_equal(sf2, exp_df)\n\n # filter by 0s\n sf2 = sf[SArray([0,0,0,0,0,0,0,0,0,0])]\n exp_df = sf.head(0).to_dataframe()\n self.__test_equal(sf2, exp_df)\n\n # wrong size\n with self.assertRaises(IndexError):\n sf2 = sf[SArray([0,1,205])]\n\n # slightly bigger size\n sf = SFrame()\n n = 1000000\n sf['a'] = range(n)\n result = sf[sf['a'] == -1]\n self.assertEqual(len(result), 0)\n\n result = sf[sf['a'] > n - 123]\n self.assertEqual(len(result), 122)\n l = list(result['a'])\n for i in range(len(result)):\n self.assertEqual(i + n - 122, l[i])\n\n result = sf[sf['a'] < 2000]\n self.assertEqual(len(result), 2000)\n l = list(result['a'])\n for i in range(len(result)):\n self.assertEqual(i, l[i])\n\n # map input type\n toy_data = SFrame({'a': range(100)})\n map_result = map(lambda x: x+1, [1, 30])\n result = toy_data.filter_by(map_result, 'a')\n self.assertEqual(len(result), 2)\n self.assertEqual(result[0]['a'], 2)\n self.assertEqual(result[1]['a'], 31)\n\n\n def test_sample_split(self):\n sf = SFrame(data=self.__create_test_df(100))\n entry_list = set()\n for i in sf:\n entry_list.add(str(i))\n\n\n sample_sf = sf.sample(.12, 9)\n sample_sf2 = sf.sample(.12, 9)\n self.assertEqual(len(sample_sf), len(sample_sf2))\n assert_frame_equal(sample_sf.head().to_dataframe(), sample_sf2.head().to_dataframe())\n self.assertEqual(len(sf.sample(0.5,1,exact=True)), 50)\n self.assertEqual(len(sf.sample(0.5,2,exact=True)), 50)\n\n for i in sample_sf:\n self.assertTrue(str(i) in entry_list)\n\n with self.assertRaises(ValueError):\n sf.sample(3)\n\n sample_sf = SFrame().sample(.12, 9)\n self.assertEqual(len(sample_sf), 0)\n\n a_split = sf.random_split(.12, 9)\n\n first_split_entries = set()\n for i in a_split[0]:\n 
first_split_entries.add(str(i))\n\n for i in a_split[1]:\n self.assertTrue(str(i) in entry_list)\n self.assertTrue(str(i) not in first_split_entries)\n\n with self.assertRaises(ValueError):\n sf.random_split(3)\n\n self.assertEqual(len(SFrame().random_split(.4)[0]), 0)\n self.assertEqual(len(SFrame().random_split(.4)[1]), 0)\n\n self.assertEqual(len(sf.random_split(0.5,1,exact=True)[0]), 50)\n self.assertEqual(len(sf.random_split(0.5,2,exact=True)[0]), 50)\n\n # tests add_column, rename\n def test_edit_column_ops(self):\n sf = SFrame()\n\n # typical add column stuff\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.float_data), inplace=True)\n sf.add_column(SArray(self.string_data), inplace=True)\n\n # Make sure auto names work\n names = sf.column_names()\n cntr = 1\n for i in names:\n self.assertEqual(\"X\"+str(cntr), i)\n cntr = cntr + 1\n\n # Remove a column\n del sf['X2']\n\n # names\n names = sf.column_names()\n self.assertEqual(len(names), 2)\n self.assertEqual('X1', names[0])\n self.assertEqual('X3', names[1])\n\n # check content\n self.assertEqual(list(sf['X1'].head(10)), self.int_data)\n self.assertEqual(list(sf['X3'].head(10)), self.string_data)\n\n # check that a new automatically named column will not conflict\n sf.add_column(SArray(self.string_data), inplace=True)\n\n names = sf.column_names()\n self.assertEqual(len(names), 3)\n uniq_set = set()\n for i in names:\n uniq_set.add(i)\n if len(uniq_set) == 1:\n self.assertEqual(list(sf[i].head(10)), self.int_data)\n else:\n self.assertEqual(list(sf[i].head(10)), self.string_data)\n self.assertEqual(len(uniq_set), 3)\n\n # replacing columns preserves order\n names = sf.column_names()\n for n in names:\n sf[n] = sf[n].apply(lambda x: x)\n self.assertEqual(sf.column_names(), names)\n\n # do it again!\n del sf['X1']\n\n sf.add_column(SArray(self.string_data), inplace=True)\n names = sf.column_names()\n self.assertEqual(len(names), 3)\n uniq_set = set()\n for i in names:\n uniq_set.add(i)\n self.assertEqual(list(sf[i].head(10)), self.string_data)\n self.assertEqual(len(uniq_set), len(names))\n\n # standard rename\n rename_dict = {'X3':'data','X3.1':'more_data','X3.2':'even_more'}\n sf.rename(rename_dict, inplace=True)\n self.assertEqual(sf.column_names(), ['data','more_data','even_more'])\n\n # rename a column to a name that's already taken\n with self.assertRaises(RuntimeError):\n sf.rename({'data':'more_data'}, inplace=True)\n\n # try to rename a column that doesn't exist\n with self.assertRaises(ValueError):\n sf.rename({'foo':'bar'}, inplace=True)\n\n # pass something other than a dict\n with self.assertRaises(TypeError):\n sf.rename('foo', inplace=True)\n\n # Setting a column to const preserves order\n names = sf.column_names()\n for n in names:\n sf[n] = 1\n self.assertEqual(sf.column_names(), names)\n\n def test_duplicate_add_column_failure(self):\n sf = SFrame()\n\n # typical add column stuff\n sf.add_column(SArray(self.int_data), \"hello\", inplace=True)\n with self.assertRaises(RuntimeError):\n sf.add_column(SArray(self.float_data), \"hello\", inplace=True)\n\n def test_remove_column(self):\n sf = SFrame()\n\n # typical add column stuff\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.float_data), inplace=True)\n sf.add_column(SArray(self.string_data), inplace=True)\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])\n\n sf2 = 
sf.remove_column('X3', inplace=True)\n\n assert sf is sf2\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X4', 'X5'])\n\n sf2 = sf.remove_columns(['X2', 'X5'], inplace=True)\n\n assert sf is sf2\n\n self.assertEqual(sf.column_names(), ['X1', 'X4'])\n\n # with a generator expression\n sf2 = sf.remove_columns((n for n in ['X1', 'X5'] if n in sf.column_names()), inplace=True)\n\n assert sf is sf2\n\n self.assertEqual(sf.column_names(), ['X4'])\n\n\n def test_remove_bad_column(self):\n sf = SFrame()\n\n # typical add column stuff\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.int_data), inplace=True)\n sf.add_column(SArray(self.float_data), inplace=True)\n sf.add_column(SArray(self.string_data), inplace=True)\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])\n\n self.assertRaises(KeyError, lambda: sf.remove_column('bad', inplace=True))\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])\n\n self.assertRaises(KeyError, lambda: sf.remove_columns(['X1', 'X2', 'X3', 'bad', 'X4'], inplace=True))\n\n self.assertEqual(sf.column_names(), ['X1', 'X2', 'X3', 'X4', 'X5'])\n\n\n def __generate_synthetic_sframe__(self, num_users):\n \"\"\"\n synthetic collaborative data.\n generate 1000 users, user i watched movie 0, ... i-1.\n rating(i, j) = i + j\n length(i, j) = i - j\n \"\"\"\n sf = SFrame()\n sparse_matrix = {}\n for i in range(1, num_users + 1):\n sparse_matrix[i] = [(j, i + j, i - j) for j in range(1, i + 1)]\n user_ids = []\n movie_ids = []\n ratings = []\n length_of_watching = []\n for u in sparse_matrix:\n user_ids += [u] * len(sparse_matrix[u])\n movie_ids += [x[0] for x in sparse_matrix[u]]\n ratings += [x[1] for x in sparse_matrix[u]]\n length_of_watching += [x[2] for x in sparse_matrix[u]]\n # typical add column stuff\n sf['user_id'] = (SArray(user_ids, int))\n sf['movie_id'] = (SArray(movie_ids, str))\n sf['rating'] = (SArray(ratings, float))\n sf['length'] = (SArray(length_of_watching, int))\n return sf\n\n def test_aggregate_ops(self):\n \"\"\"\n Test builtin groupby aggregators\n \"\"\"\n for m in [1, 10, 20, 50, 100]:\n values = range(m)\n vector_values = [[random.randint(1,100) for num in range(10)] \\\n for y in range(m)]\n nd_values = [np.array([float(random.randint(1,100)) for num in range(10)]).reshape(2,5) \\\n for y in range(m)]\n sf = SFrame()\n sf['key'] = [1] * m\n sf['value'] = values\n sf['vector_values'] = vector_values\n sf['nd_values'] = nd_values \n sf.__materialize__()\n built_ins = [aggregate.COUNT(), aggregate.SUM('value'),\n aggregate.AVG('value'), aggregate.MIN('value'),\n aggregate.MAX('value'), aggregate.VAR('value'),\n aggregate.STDV('value'), aggregate.SUM('vector_values'),\n aggregate.MEAN('vector_values'),\n aggregate.COUNT_DISTINCT('value'),\n aggregate.DISTINCT('value'),\n aggregate.FREQ_COUNT('value'),\n aggregate.SUM('nd_values'),\n aggregate.MEAN('nd_values')]\n sf2 = sf.groupby('key', built_ins)\n self.assertEqual(len(sf2), 1)\n self.assertEqual(sf2['Count'][0], m)\n self.assertEqual(sf2['Sum of value'][0], sum(values))\n self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))\n self.assertEqual(sf2['Min of value'][0], min(values))\n self.assertEqual(sf2['Max of value'][0], max(values))\n self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))\n self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))\n np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),\n 
list(np.sum(vector_values, axis=0)))\n np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),\n list(np.mean(vector_values, axis=0)))\n np.testing.assert_almost_equal(list(sf2['Vector Sum of nd_values'][0]),\n list(np.sum(nd_values, axis=0)))\n np.testing.assert_almost_equal(list(sf2['Vector Avg of nd_values'][0]),\n list(np.mean(nd_values, axis=0)))\n self.assertEqual(sf2['Count Distinct of value'][0],\n len(np.unique(values)))\n self.assertEqual(sorted(sf2['Distinct of value'][0]),\n sorted(list(np.unique(values))))\n self.assertEqual(sf2['Frequency Count of value'][0],\n {k:1 for k in np.unique(values)})\n\n # For vectors\n\n\n def test_min_max_with_missing_values(self):\n \"\"\"\n Test builtin groupby aggregators\n \"\"\"\n sf = SFrame()\n sf['key'] = [1,1,1,1,1,1,2,2,2,2]\n sf['value'] = [1,None,None,None,None,None, None,None,None,None]\n built_ins = [aggregate.COUNT(), aggregate.SUM('value'),\n aggregate.AVG('value'), aggregate.MIN('value'),\n aggregate.MAX('value'), aggregate.VAR('value'),\n aggregate.STDV('value'), aggregate.COUNT_DISTINCT('value'),\n aggregate.DISTINCT('value'), aggregate.FREQ_COUNT('value')]\n sf2 = sf.groupby('key', built_ins).sort('key')\n self.assertEqual(list(sf2['Count']), [6,4])\n self.assertEqual(list(sf2['Sum of value']), [1, 0])\n self.assertEqual(list(sf2['Avg of value']), [1, None])\n self.assertEqual(list(sf2['Min of value']), [1, None])\n self.assertEqual(list(sf2['Max of value']), [1, None])\n self.assertEqual(list(sf2['Var of value']), [0, 0])\n self.assertEqual(list(sf2['Stdv of value']), [0, 0])\n self.assertEqual(list(sf2['Count Distinct of value']), [2, 1])\n self.assertEqual(set(sf2['Distinct of value'][0]), set([1, None]))\n self.assertEqual(set(sf2['Distinct of value'][1]), set([None]))\n self.assertEqual(sf2['Frequency Count of value'][0], {1:1, None:5})\n self.assertEqual(sf2['Frequency Count of value'][1], {None:4})\n\n\n def test_aggregate_ops_on_lazy_frame(self):\n \"\"\"\n Test builtin groupby aggregators\n \"\"\"\n for m in [1, 10, 20, 50, 100]:\n values = range(m)\n vector_values = [[random.randint(1,100) for num in range(10)] \\\n for y in range(m)]\n sf = SFrame()\n sf['key'] = [1] * m\n sf['value'] = values\n sf['vector_values'] = vector_values\n sf['value'] = sf['value'] + 0\n built_ins = [aggregate.COUNT(), aggregate.SUM('value'),\n aggregate.AVG('value'), aggregate.MIN('value'),\n aggregate.MAX('value'), aggregate.VAR('value'),\n aggregate.STDV('value'), aggregate.SUM('vector_values'),\n aggregate.MEAN('vector_values'),\n aggregate.COUNT_DISTINCT('value'),\n aggregate.DISTINCT('value')]\n sf2 = sf.groupby('key', built_ins)\n self.assertEqual(len(sf2), 1)\n self.assertEqual(sf2['Count'][0], m)\n self.assertEqual(sf2['Sum of value'][0], sum(values))\n self.assertAlmostEqual(sf2['Avg of value'][0], np.mean(values))\n self.assertEqual(sf2['Min of value'][0], min(values))\n self.assertEqual(sf2['Max of value'][0], max(values))\n self.assertAlmostEqual(sf2['Var of value'][0], np.var(values))\n self.assertAlmostEqual(sf2['Stdv of value'][0], np.std(values))\n np.testing.assert_almost_equal(list(sf2['Vector Sum of vector_values'][0]),\n list(np.sum(vector_values, axis=0)))\n np.testing.assert_almost_equal(list(sf2['Vector Avg of vector_values'][0]),\n list(np.mean(vector_values, axis=0)))\n self.assertEqual(sf2['Count Distinct of value'][0],\n len(np.unique(values)))\n self.assertEqual(sorted(sf2['Distinct of value'][0]),\n sorted(np.unique(values)))\n\n def test_aggregate_ops2(self):\n \"\"\"\n Test builtin 
groupby aggregators using explicit named columns\n \"\"\"\n for m in [1, 10, 20, 50, 100]:\n values = range(m)\n vector_values = [[random.randint(1,100) for num in range(10)] \\\n for y in range(m)]\n sf = SFrame()\n sf['key'] = [1] * m\n sf['value'] = values\n sf['vector_values'] = vector_values\n built_ins = {'count':aggregate.COUNT,\n 'sum':aggregate.SUM('value'),\n 'avg':aggregate.AVG('value'),\n 'avg2':aggregate.MEAN('value'),\n 'min':aggregate.MIN('value'),\n 'max':aggregate.MAX('value'),\n 'var':aggregate.VAR('value'),\n 'var2':aggregate.VARIANCE('value'),\n 'stdv':aggregate.STD('value'),\n 'stdv2':aggregate.STDV('value'),\n 'vector_sum': aggregate.SUM('vector_values'),\n 'vector_mean': aggregate.MEAN('vector_values'),\n 'count_unique':aggregate.COUNT_DISTINCT('value'),\n 'unique':aggregate.DISTINCT('value'),\n 'frequency':aggregate.FREQ_COUNT('value')}\n sf2 = sf.groupby('key', built_ins)\n self.assertEqual(len(sf2), 1)\n self.assertEqual(sf2['count'][0], m)\n self.assertEqual(sf2['sum'][0], sum(values))\n self.assertAlmostEqual(sf2['avg'][0], np.mean(values))\n self.assertAlmostEqual(sf2['avg2'][0], np.mean(values))\n self.assertEqual(sf2['min'][0], min(values))\n self.assertEqual(sf2['max'][0], max(values))\n self.assertAlmostEqual(sf2['var'][0], np.var(values))\n self.assertAlmostEqual(sf2['var2'][0], np.var(values))\n self.assertAlmostEqual(sf2['stdv'][0], np.std(values))\n self.assertAlmostEqual(sf2['stdv2'][0], np.std(values))\n np.testing.assert_almost_equal(sf2['vector_sum'][0], list(np.sum(vector_values, axis=0)))\n np.testing.assert_almost_equal(sf2['vector_mean'][0], list(np.mean(vector_values, axis=0)))\n self.assertEqual(sf2['count_unique'][0], len(np.unique(values)))\n self.assertEqual(sorted(sf2['unique'][0]),\n sorted(np.unique(values)))\n self.assertEqual(sf2['frequency'][0],\n {k:1 for k in np.unique(values)})\n\n def test_groupby(self):\n \"\"\"\n Test builtin groupby and aggregate on different column types\n \"\"\"\n num_users = 500\n sf = self.__generate_synthetic_sframe__(num_users=num_users)\n\n built_ins = [aggregate.COUNT(), aggregate.SUM('rating'),\n aggregate.AVG('rating'), aggregate.MIN('rating'),\n aggregate.MAX('rating'), aggregate.VAR('rating'),\n aggregate.STDV('rating')]\n\n built_in_names = ['Sum', 'Avg', 'Min', 'Max', 'Var', 'Stdv']\n\n \"\"\"\n Test groupby user_id and aggregate on rating\n \"\"\"\n sf_user_rating = sf.groupby('user_id', built_ins)\n actual = sf_user_rating.column_names()\n expected = ['%s of rating' % v for v in built_in_names] \\\n + ['user_id'] + ['Count']\n self.assertSetEqual(set(actual), set(expected))\n for row in sf_user_rating:\n uid = row['user_id']\n mids = range(1, uid + 1)\n ratings = [uid + i for i in mids]\n expected = [len(ratings), sum(ratings), np.mean(ratings),\n min(ratings), max(ratings), np.var(ratings),\n np.sqrt(np.var(ratings))]\n actual = [row['Count']] + [row['%s of rating' % op] \\\n for op in built_in_names]\n for i in range(len(actual)):\n self.assertAlmostEqual(actual[i], expected[i])\n\n \"\"\"\n Test that count can be applied on empty aggregate column.\n \"\"\"\n sf_user_rating = sf.groupby(\"user_id\", {'counter': aggregate.COUNT()})\n actual = {x['user_id']: x['counter'] for x in sf_user_rating}\n expected = {i: i for i in range(1, num_users + 1)}\n self.assertDictEqual(actual, expected)\n\n \"\"\"\n Test groupby movie_id and aggregate on length_of_watching\n \"\"\"\n built_ins = [aggregate.COUNT(), aggregate.SUM('length'),\n aggregate.AVG('length'), aggregate.MIN('length'),\n 
aggregate.MAX('length'), aggregate.VAR('length'),\n aggregate.STDV('length')]\n sf_movie_length = sf.groupby('movie_id', built_ins)\n actual = sf_movie_length.column_names()\n expected = ['%s of length' % v for v in built_in_names] \\\n + ['movie_id'] + ['Count']\n self.assertSetEqual(set(actual), set(expected))\n for row in sf_movie_length:\n mid = row['movie_id']\n uids = range(int(mid), num_users + 1)\n values = [i - int(mid) for i in uids]\n expected = [len(values), sum(values), np.mean(values), min(values),\n max(values), np.var(values), np.std(values)]\n actual = [row['Count']] + [row['%s of length' % op] \\\n for op in built_in_names]\n for i in range(len(actual)):\n self.assertAlmostEqual(actual[i], expected[i])\n\n def test_quantile_groupby(self):\n sf = self.__generate_synthetic_sframe__(num_users=500)\n # max and min rating for each user\n g = sf.groupby('user_id', [aggregate.MIN('rating'),\n aggregate.MAX('rating'),\n aggregate.QUANTILE('rating', 0, 1)])\n self.assertEqual(len(g), 500)\n for row in g:\n minrating = row['Min of rating']\n maxrating = row['Max of rating']\n arr = list(row['Quantiles of rating'])\n self.assertEqual(len(arr), 2)\n self.assertEqual(arr[0], minrating)\n self.assertEqual(arr[1], maxrating)\n\n def test_argmax_argmin_groupby(self):\n sf = self.__generate_synthetic_sframe__(num_users=500)\n sf_ret = sf.groupby('user_id',\n {'movie with max rating' : aggregate.ARGMAX('rating','movie_id'),\n 'movie with min rating' : aggregate.ARGMIN('rating','movie_id')})\n self.assertEqual(len(sf_ret), 500)\n self.assertEqual(sf_ret[\"movie with max rating\"].dtype, str)\n self.assertEqual(sf_ret[\"movie with min rating\"].dtype, str)\n self.assertEqual(sf_ret[\"user_id\"].dtype, int)\n # make sure we have computed correctly.\n max_d = {}\n min_d = {}\n for i in sf:\n key = i['user_id']\n if key not in max_d:\n max_d[key] = (i['movie_id'],i['rating'])\n min_d[key] = (i['movie_id'],i['rating'])\n else:\n if max_d[key][1] < i['rating']:\n max_d[key] = (i['movie_id'],i['rating'])\n if min_d[key][1] > i['rating']:\n min_d[key] = (i['movie_id'],i['rating'])\n for i in sf_ret:\n key = i['user_id']\n self.assertEqual(i[\"movie with max rating\"],max_d[key][0])\n self.assertEqual(i[\"movie with min rating\"],min_d[key][0])\n\n def test_multicolumn_groupby(self):\n sf = self.__generate_synthetic_sframe__(num_users=500)\n sf_um = sf.groupby([\"user_id\", \"movie_id\"], aggregate.COUNT)\n # I can query it\n t = sf_um.to_dataframe()\n self.assertEqual(sf_um[\"user_id\"].dtype, int)\n self.assertEqual(sf_um[\"movie_id\"].dtype, str)\n # make sure we have counted correctly\n d = {}\n for i in sf:\n key = str(i['user_id']) + \",\" + i[\"movie_id\"]\n if key not in d:\n d[key] = 0\n d[key] = d[key] + 1\n\n for i in sf_um:\n key = str(i['user_id']) + \",\" + i[\"movie_id\"]\n self.assertTrue(key in d)\n self.assertEqual(i['Count'], d[key])\n\n sf_um = sf.groupby([\"movie_id\", \"user_id\"], aggregate.COUNT())\n # I can query it\n t = sf_um.to_dataframe()\n self.assertEqual(sf_um[\"user_id\"].dtype, int)\n self.assertEqual(sf_um[\"movie_id\"].dtype, str)\n\n # make sure we have counted correctly\n d = {}\n for i in sf:\n key = str(i['user_id']) + \",\" + i[\"movie_id\"]\n if key not in d:\n d[key] = 0\n d[key] = d[key] + 1\n\n for i in sf_um:\n key = str(i['user_id']) + \",\" + i[\"movie_id\"]\n self.assertTrue(key in d)\n self.assertEqual(i['Count'], d[key])\n\n def __assert_concat_result_equal(self, result, expected, list_columns):\n self.assertEqual(result.num_columns(), 
expected.num_columns())\n for column in result.column_names():\n c1 = result[column]\n c2 = expected[column]\n self.assertEqual(c1.dtype, c2.dtype)\n self.assertEqual(len(c1), len(c2))\n if (column in list_columns):\n for i in range(len(c1)):\n if (c1[i] is None):\n self.assertTrue(c2[i] is None)\n continue\n if (c1.dtype == dict):\n for k in c1[i]:\n self.assertEqual(c2[i][k], c1[i][k])\n else:\n s1 = list(c1[i])\n if s1 is not None: s1.sort()\n s2 = list(c2[i])\n if s2 is not None: s2.sort()\n self.assertEqual(s1, s2)\n else:\n self.assertEqual(list(c1),list(c2))\n\n def test_groupby_dict_key(self):\n t = SFrame({'a':[{1:2},{3:4}]})\n with self.assertRaises(TypeError):\n t.groupby('a', {})\n\n def test_concat(self):\n sf = SFrame()\n sf['a'] = [1,1,1,1, 2,2,2, 3, 4,4, 5]\n sf['b'] = [1,2,1,2, 3,3,1, 4, None, 2, None]\n sf['c'] = ['a','b','a','b', 'e','e', None, 'h', 'i','j', 'k']\n sf['d'] = [1.0,2.0,1.0,2.0, 3.0,3.0,1.0, 4.0, None, 2.0, None]\n sf['e'] = [{'x': 1}] * len(sf['a'])\n\n print(sf['b'].dtype)\n\n result = sf.groupby('a', aggregate.CONCAT('b'))\n expected_result = SFrame({\n 'a': [1,2,3,4, 5],\n 'List of b': [[1.,1.,2.,2.],[1.,3.,3.],[4.],[2.], []]\n })\n expected_result['List of b'] = expected_result['List of b'].astype(list)\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of b'])\n\n\n result = sf.groupby('a', aggregate.CONCAT('d'))\n\n expected_result = SFrame({\n 'a': [1,2,3,4, 5],\n 'List of d': [[1,1,2,2],[1,3,3],[4],[2], []]\n })\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['List of d'])\n\n\n result = sf.groupby('a', {'c_c' :aggregate.CONCAT('c')})\n expected_result = SFrame({\n 'a': [1,2,3,4, 5],\n 'c_c': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]\n })\n\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_c'])\n\n result = sf.groupby('a', aggregate.CONCAT('b','c'))\n expected_result = SFrame({\n 'a': [1,2,3,4,5],\n 'Dict of b_c': [{1:'a',2:'b'},{3:'e', 1: None},{4:'h'},{2:'j'}, {}]\n })\n\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['Dict of b_c'])\n\n result = sf.groupby('a', {'c_b':aggregate.CONCAT('c','b')})\n expected_result = SFrame({\n 'a': [1,2,3,4,5],\n 'c_b': [{'a':1, 'b':2},{'e':3},{'h':4},{'i':None, 'j':2},{'k':None}]\n })\n\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['c_b'])\n\n result = sf.groupby('a', {'cs':aggregate.CONCAT('c'), 'bs':aggregate.CONCAT('b')})\n expected_result = SFrame({\n 'a': [1,2,3,4,5],\n 'bs': [[1,1,2,2],[1,3,3],[4],[2], []],\n 'cs': [['a','b','a','b'],['e','e'],['h'],['i','j'], ['k']]\n })\n expected_result['bs'] = expected_result['bs'].astype(list)\n self.__assert_concat_result_equal(result.sort('a'), expected_result.sort('a'), ['bs','cs'])\n\n #exception fail if there is not column\n with self.assertRaises(TypeError):\n sf.groupby('a', aggregate.CONCAT())\n\n with self.assertRaises(KeyError):\n sf.groupby('a', aggregate.CONCAT('nonexist'))\n\n with self.assertRaises(TypeError):\n sf.groupby('a', aggregate.CONCAT('e', 'a'))\n\n def test_select_one(self):\n sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})\n res = list(sf.groupby('a', {'b':aggregate.SELECT_ONE('b')}))\n self.assertEqual(len(res), 5)\n for i in res:\n self.assertTrue(i['b'] == 2 * i['a'] or i['b'] == 2 * i['a'] - 1)\n\n def test_unique(self):\n sf = SFrame({'a':[1,1,2,2,3,3,4,4,5,5],'b':[1,2,3,4,5,6,7,8,9,10]})\n self.assertEqual(len(sf.unique()), 10)\n\n 
vals = [1,1,2,2,3,3,4,4, None, None]\n sf = SFrame({'a':vals,'b':vals})\n res = sf.unique()\n self.assertEqual(len(res), 5)\n self.assertEqual(set(res['a']), set([1,2,3,4,None]))\n self.assertEqual(set(res['b']), set([1,2,3,4,None]))\n\n def test_append_empty(self):\n sf_with_data = SFrame(data=self.dataframe)\n empty_sf = SFrame()\n self.assertFalse(sf_with_data.append(empty_sf) is sf_with_data)\n self.assertFalse(empty_sf.append(sf_with_data) is sf_with_data)\n self.assertFalse(empty_sf.append(empty_sf) is empty_sf)\n\n def test_append_all_match(self):\n sf1 = SFrame(data=self.dataframe)\n sf2 = SFrame(data=self.dataframe2)\n\n new_sf = sf1.append(sf2)\n assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())\n\n def test_append_lazy(self):\n sf1 = SFrame(data=self.dataframe)\n sf2 = SFrame(data=self.dataframe2)\n\n new_sf = sf1.append(sf2)\n self.assertTrue(new_sf.__is_materialized__())\n\n filter_sf1 = SArray([1 for i in range(sf1.num_rows())] + [0 for i in range(sf2.num_rows())])\n filter_sf2 = SArray([0 for i in range(sf1.num_rows())] + [1 for i in range(sf2.num_rows())])\n new_sf1 = new_sf[filter_sf1]\n new_sf2 = new_sf[filter_sf2]\n assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())\n assert_frame_equal(sf1.to_dataframe(), new_sf1.to_dataframe())\n assert_frame_equal(sf2.to_dataframe(), new_sf2.to_dataframe())\n\n row = sf1.head(1)\n sf = SFrame()\n for i in range(10):\n sf = sf.append(row)\n df = sf.to_dataframe()\n for i in range(10):\n self.assertEqual(list(df.iloc[[i]]), list(sf.head(1).to_dataframe().iloc[[0]]))\n\n def test_recursive_append(self):\n sf = SFrame()\n for i in range(200):\n sf = sf.append(SFrame(data = self.dataframe))\n\n #consume\n sf.__materialize__()\n\n def test_print_sframe(self):\n sf = SFrame()\n\n def _test_print():\n sf.__repr__()\n sf._repr_html_()\n try:\n from StringIO import StringIO\n except ImportError:\n from io import StringIO\n output = StringIO()\n sf.print_rows(output_file=output)\n\n n = 20\n sf['int'] = [i for i in range(n)]\n sf['float'] = [float(i) for i in range(n)]\n sf['str'] = [str(i) for i in range(n)]\n uc = '\\xe5\\xa4\\xa7\\xe5\\xa4\\xb4'\n sf['unicode'] = [uc for i in range(n)]\n sf['array'] = [array.array('d', [i]) for i in range(n)]\n sf['list'] = [[i, float(i), [i]] for i in range(n)]\n utc = dt.datetime.strptime('2011-01-21 02:37:21', '%Y-%m-%d %H:%M:%S')\n sf['dt'] = [utc for i in range(n)]\n sf['img'] = [Image() for i in range(n)]\n sf['long_str'] = [\"\".join([str(i)] * 50) for i in range(n)]\n sf['long_unicode'] = [\"\".join([uc] * 50) for i in range(n)]\n sf['bad_unicode'] = ['\\x9d' + uc for i in range(n)]\n _test_print()\n\n def test_print_lazy_sframe(self):\n sf1 = SFrame(data=self.dataframe)\n self.assertTrue(sf1.__is_materialized__())\n sf2 = sf1[sf1['int_data'] > 3]\n sf2.__repr__()\n sf2.__str__()\n self.assertFalse(sf2.__is_materialized__())\n len(sf2)\n self.assertTrue(sf2.__is_materialized__())\n\n def test_append_order_diff(self):\n # name match but column order not match\n sf1 = SFrame(data=self.dataframe)\n sf2 = SFrame(data=self.dataframe2)\n sf2.swap_columns('int_data', 'string_data', inplace=True)\n\n new_sf = sf1.append(sf2)\n assert_frame_equal(self.dataframe.append(self.dataframe2, ignore_index=True), new_sf.to_dataframe())\n\n def test_append_empty_sframe(self):\n sf = SFrame(data=self.dataframe)\n other = SFrame()\n\n # non empty append empty\n assert_frame_equal(sf.append(other).to_dataframe(), 
self.dataframe)\n\n        # empty append non empty\n        assert_frame_equal(other.append(sf).to_dataframe(), self.dataframe)\n\n        # empty append empty\n        assert_frame_equal(other.append(other).to_dataframe(), pd.DataFrame())\n\n    def test_append_exception(self):\n        sf = SFrame(data=self.dataframe)\n\n        # column count does not match\n        other = SFrame()\n        other.add_column(SArray(), \"test\", inplace=True)\n        self.assertRaises(RuntimeError, lambda: sf.append(other)) # column not the same\n\n        # column name does not match\n        other = SFrame()\n        names = sf.column_names()\n        for name in sf.column_names():\n            other.add_column(SArray(), name, inplace=True)\n        other.rename({names[0]: 'some name not match'}, inplace=True)\n        self.assertRaises(RuntimeError, lambda: sf.append(other))\n\n        # names match but a column type does not match\n        sf1 = SFrame(data=self.dataframe)\n        sf2 = SFrame(data=self.dataframe2)\n\n        # change one column type\n        sf1[\"int_data\"] = sf2.select_column(\"int_data\").astype(float)\n        self.assertRaises(RuntimeError, lambda: sf1.append(sf2))\n\n    def test_simple_joins(self):\n        inner_expected = SFrame()\n        inner_expected.add_column(SArray(['Robinson','Jones','Smith','Heisenberg','Rafferty']), 'last_name', inplace=True)\n        inner_expected.add_column(SArray([34,33,34,33,31]), 'dep_id', inplace=True)\n        inner_expected.add_column(SArray(['Clerical','Engineering','Clerical','Engineering','Sales']), 'dep_name', inplace=True)\n\n        # Tests the \"natural join\" case\n        beg = time.time()\n        res = self.employees_sf.join(self.departments_sf)\n        end = time.time()\n        print(\"Really small join: \" + str(end-beg) + \" s\")\n\n        self.__assert_join_results_equal(res, inner_expected)\n\n        left_join_row = SFrame()\n        left_join_row.add_column(SArray(['John']), 'last_name', inplace=True)\n        left_join_row.add_column(SArray([None], int), 'dep_id', inplace=True)\n        left_join_row.add_column(SArray([None], str), 'dep_name', inplace=True)\n\n        left_expected = inner_expected.append(left_join_row)\n\n        # Left outer join, passing string to 'on'\n        res = self.employees_sf.join(self.departments_sf, how='left', on='dep_id')\n        self.__assert_join_results_equal(res, left_expected)\n\n        right_join_row = SFrame()\n        right_join_row.add_column(SArray([None], str), 'last_name', inplace=True)\n        right_join_row.add_column(SArray([35]), 'dep_id', inplace=True)\n        right_join_row.add_column(SArray(['Marketing']), 'dep_name', inplace=True)\n\n        right_expected = inner_expected.append(right_join_row)\n\n        # Right outer join, passing list to 'on'\n        res = self.employees_sf.join(self.departments_sf, how='right', on=['dep_id'])\n        self.__assert_join_results_equal(res, right_expected)\n\n        outer_expected = left_expected.append(right_join_row)\n\n        # Full outer join, passing dict to 'on'\n        res = self.employees_sf.join(self.departments_sf, how='outer', on={'dep_id':'dep_id'})\n        self.__assert_join_results_equal(res, outer_expected)\n\n        # Test a join on non-matching key\n        res = self.employees_sf.join(self.departments_sf, on={'last_name':'dep_name'})\n        self.assertEqual(res.num_rows(), 0)\n        self.assertEqual(res.num_columns(), 3)\n        self.assertEqual(res.column_names(), ['last_name', 'dep_id', 'dep_id.1'])\n\n        # Test a join on a non-unique key\n        bad_departments = SFrame()\n        bad_departments['dep_id'] = SArray([33,33,31,31])\n        bad_departments['dep_name'] = self.departments_sf['dep_name']\n\n        no_pk_expected = SFrame()\n        no_pk_expected['last_name'] = SArray(['Rafferty','Rafferty','Heisenberg','Jones','Heisenberg','Jones'])\n        no_pk_expected['dep_id'] = SArray([31,31,33,33,33,33])\n        no_pk_expected['dep_name'] = 
SArray(['Clerical','Marketing','Sales','Sales','Engineering','Engineering'])\n res = self.employees_sf.join(bad_departments, on='dep_id')\n self.__assert_join_results_equal(res, no_pk_expected)\n\n # Left join on non-unique key\n bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])\n bad_departments = bad_departments.append(right_join_row[['dep_id', 'dep_name']])\n no_pk_expected = no_pk_expected.append(right_join_row)\n no_pk_expected = no_pk_expected.append(right_join_row)\n no_pk_expected = no_pk_expected[['dep_id', 'dep_name', 'last_name']]\n res = bad_departments.join(self.employees_sf, on='dep_id', how='left')\n self.__assert_join_results_equal(res, no_pk_expected)\n\n def test_big_composite_join(self):\n # Create a semi large SFrame with composite primary key (letter, number)\n letter_keys = []\n number_keys = []\n data = []\n for i in string.ascii_lowercase:\n for j in range(0,100):\n letter_keys.append(i)\n number_keys.append(j)\n which = j % 3\n if which == 0:\n data.append(string.ascii_uppercase)\n elif which == 1:\n data.append(string.digits)\n elif which == 2:\n data.append(string.hexdigits)\n pk_gibberish = SFrame()\n pk_gibberish['letter'] = SArray(letter_keys, str)\n pk_gibberish['number'] = SArray(number_keys, int)\n pk_gibberish['data'] = SArray(data, str)\n\n # Some rows that won't match\n more_data = []\n more_letter_keys = []\n more_number_keys = []\n for i in range(0,40000):\n more_data.append('fish')\n more_letter_keys.append('A')\n more_number_keys.append(200)\n for i in range(0,80):\n for j in range(100,1000):\n more_data.append('waffles')\n more_letter_keys.append(letter_keys[j])\n more_number_keys.append(number_keys[j])\n # Non-matching row in this stretch\n if j == 147:\n more_letter_keys[-1] = 'A'\n for i in range(0,5000):\n more_data.append('pizza')\n more_letter_keys.append('Z')\n more_number_keys.append(400)\n\n join_with_gibberish = SFrame()\n join_with_gibberish['data'] = SArray(more_data, str)\n join_with_gibberish['moredata'] = SArray(more_data, str)\n join_with_gibberish['a_number'] = SArray(more_number_keys, int)\n join_with_gibberish['a_letter'] = SArray(more_letter_keys, str)\n\n expected_answer = SFrame()\n exp_letter = []\n exp_number = []\n exp_data = []\n for i in range(0,80):\n exp_letter.extend(letter_keys[100:147])\n exp_number.extend(number_keys[100:147])\n exp_letter.extend(letter_keys[148:1000])\n exp_number.extend(number_keys[148:1000])\n exp_data.extend(data[100:147])\n exp_data.extend(data[148:1000])\n expected_answer['letter'] = SArray(exp_letter, str)\n expected_answer['number'] = SArray(exp_number, int)\n expected_answer['data'] = SArray(exp_data, str)\n expected_answer['data.1'] = 'waffles'\n expected_answer['moredata'] = 'waffles'\n\n beg = time.time()\n res = pk_gibberish.join(join_with_gibberish, on={'letter':'a_letter','number':'a_number'})\n end = time.time()\n print(\"Join took \" + str(end-beg) + \" seconds\")\n self.__assert_join_results_equal(res, expected_answer)\n\n def test_convert_dataframe_empty(self):\n sf = SFrame()\n sf['a'] = SArray([], int)\n df = sf.to_dataframe()\n self.assertEqual(df['a'].dtype, int)\n sf1 = SFrame(df)\n self.assertEqual(sf1['a'].dtype, int)\n self.assertEqual(sf1.num_rows(), 0)\n\n def test_replace_one_column(self):\n sf = SFrame()\n sf['a'] = [1,2,3]\n self.assertEqual(list(sf['a']), [1,2,3])\n\n # this should succeed as we are replacing a new column\n sf['a'] = [1,2]\n self.assertEqual(list(sf['a']), [1,2])\n\n # failed to add new column should revert original 
sframe\n with self.assertRaises(TypeError):\n sf['a'] = [1,2,'a']\n\n self.assertEqual(list(sf['a']), [1,2])\n\n # add a column with different length should fail if there are more than one column\n sf = SFrame()\n sf['a'] = [1,2,3]\n sf['b'] = ['a', 'b', 'c']\n with self.assertRaises(RuntimeError):\n sf['a'] = [1,2]\n\n def test_filter_by(self):\n # Set up SFrame to filter by\n sf = SFrame()\n sf.add_column(SArray(self.int_data), \"ints\", inplace=True)\n sf.add_column(SArray(self.float_data), \"floats\", inplace=True)\n sf.add_column(SArray(self.string_data), \"strings\", inplace=True)\n\n # Normal cases\n res = sf.filter_by(SArray(self.int_data), \"ints\")\n self.__assert_join_results_equal(res, sf)\n res = sf.filter_by(SArray(self.int_data), \"ints\", exclude=True)\n self.assertEqual(list(res), [])\n\n res = sf.filter_by([5,6], \"ints\")\n exp = SFrame()\n exp.add_column(SArray(self.int_data[4:6]), \"ints\", inplace=True)\n exp.add_column(SArray(self.float_data[4:6]), \"floats\", inplace=True)\n exp.add_column(SArray(self.string_data[4:6]), \"strings\", inplace=True)\n self.__assert_join_results_equal(res, exp)\n exp_opposite = SFrame()\n exp_opposite.add_column(SArray(self.int_data[:4]+self.int_data[6:]), \"ints\", inplace=True)\n exp_opposite.add_column(SArray(self.float_data[:4]+self.float_data[6:]), \"floats\", inplace=True)\n exp_opposite.add_column(SArray(self.string_data[:4]+self.string_data[6:]), \"strings\", inplace=True)\n res = sf.filter_by([5,6], \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_opposite)\n\n exp_one = SFrame()\n exp_one.add_column(SArray(self.int_data[4:5]), \"ints\", inplace=True)\n exp_one.add_column(SArray(self.float_data[4:5]), \"floats\", inplace=True)\n exp_one.add_column(SArray(self.string_data[4:5]), \"strings\", inplace=True)\n exp_all_but_one = SFrame()\n exp_all_but_one.add_column(SArray(self.int_data[:4]+self.int_data[5:]), \"ints\", inplace=True)\n exp_all_but_one.add_column(SArray(self.float_data[:4]+self.float_data[5:]), \"floats\", inplace=True)\n exp_all_but_one.add_column(SArray(self.string_data[:4]+self.string_data[5:]), \"strings\", inplace=True)\n\n res = sf.filter_by(5, \"ints\")\n self.__assert_join_results_equal(res, exp_one)\n res = sf.filter_by(5, \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_all_but_one)\n\n res = sf.filter_by(\"5\", \"strings\")\n self.__assert_join_results_equal(res, exp_one)\n res = sf.filter_by(5, \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_all_but_one)\n\n # Only missing values\n res = sf.filter_by([77,77,88,88], \"ints\")\n # Test against empty SFrame with correct columns/types\n self.__assert_join_results_equal(res, exp_one[exp_one['ints'] == 9000])\n res = sf.filter_by([77,77,88,88], \"ints\", exclude=True)\n self.__assert_join_results_equal(res, sf)\n\n\n # Duplicate values\n res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], \"ints\")\n self.__assert_join_results_equal(res, exp)\n res = sf.filter_by([6,6,5,5,6,5,5,6,5,5,5], \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_opposite)\n\n # Duplicate and missing\n res = sf.filter_by([11,12,46,6,6,55,5,5], \"ints\")\n self.__assert_join_results_equal(res, exp)\n res = sf.filter_by([11,12,46,6,6,55,5,5], \"ints\", exclude=True)\n self.__assert_join_results_equal(res, exp_opposite)\n\n\n # Type mismatch\n with self.assertRaises(TypeError):\n res = sf.filter_by([\"hi\"], \"ints\")\n\n # Column doesn't exist\n with self.assertRaises(KeyError):\n res = sf.filter_by([1,2], \"intssss\")\n\n 
# Something that can't be turned into an SArray\n        with self.assertRaises(Exception):\n            res = sf.filter_by({1:2,3:4}, \"ints\")\n\n        # column_name not given as string\n        with self.assertRaises(TypeError):\n            res = sf.filter_by(1,2)\n\n        # Duplicate column names after join. Should be last because of the\n        # renames.\n        sf.rename({'ints':'id','floats':'id1','strings':'id11'}, inplace=True)\n        exp.rename({'ints':'id','floats':'id1','strings':'id11'}, inplace=True)\n        exp_opposite.rename({'ints':'id','floats':'id1','strings':'id11'}, inplace=True)\n        res = sf.filter_by([5,6], \"id\")\n        self.__assert_join_results_equal(res, exp)\n        res = sf.filter_by([5,6], \"id\", exclude=True)\n        self.__assert_join_results_equal(res, exp_opposite)\n\n    # XXXXXX: should be inner function\n    def __test_to_from_dataframe(self, data, type):\n        sf = SFrame()\n        sf['a'] = data\n        df = sf.to_dataframe()\n        sf1 = SFrame(df)\n        self.assertTrue(sf1.dtype[0] == type)\n\n        df = pd.DataFrame({'val': data})\n        sf1 = SFrame(df)\n        self.assertTrue(sf1.dtype[0] == type)\n\n    def test_to_from_dataframe(self):\n        self.__test_to_from_dataframe([1,2,3], int)\n        self.__test_to_from_dataframe(['a', 'b', 'c'], str)\n        self.__test_to_from_dataframe([1.0, 2.0, 3.0], float)\n        self.__test_to_from_dataframe([[1, 'b', {'a': 1}], [1,2,3]], list)\n        self.__test_to_from_dataframe([{'a':1, 1:None}, {'b':2}], dict)\n        self.__test_to_from_dataframe([[1,2],[1,2],[]], array.array)\n\n    def test_pack_columns_exception(self):\n        sf = SFrame()\n        sf['a'] = [1, 2, 3, None, None]\n        sf['b'] = [None, '2', '3', None, '5']\n        sf['c'] = [None, 2.0, 3.0, None, 5.0]\n\n        # cannot pack a non-array value into an array\n        with self.assertRaises(TypeError):\n            sf.pack_columns(dtype=array.array)\n\n        # cannot give a non-numeric na value to an array\n        with self.assertRaises(ValueError):\n            sf.pack_columns(dtype=array.array, fill_na='c')\n\n        # cannot pack non-existent columns\n        with self.assertRaises(ValueError):\n            sf.pack_columns(['d','a'])\n\n        # dtype has to be dict/array/list\n        with self.assertRaises(ValueError):\n            sf.pack_columns(dtype=str)\n\n        # pack duplicate columns\n        with self.assertRaises(ValueError):\n            sf.pack_columns(['a','a'])\n\n        # packing partial columns to array should fail for columns that are not numeric\n        with self.assertRaises(TypeError):\n            sf.pack_columns(['a','b'], dtype=array.array)\n\n        with self.assertRaises(TypeError):\n            sf.pack_columns(column_name_prefix = 1)\n\n        with self.assertRaises(ValueError):\n            sf.pack_columns(column_name_prefix = '1')\n\n        with self.assertRaises(ValueError):\n            sf.pack_columns(column_name_prefix = 'c', column_names=['a', 'b'])\n\n    def test_pack_columns2(self):\n        sf = SFrame()\n        sf['id'] = [1, 2, 3, 4]\n        sf['category.a'] = [None, '2', '3', None]\n        sf['category.b'] = [None, 2.0, None, 4.0]\n\n        expected = SArray([\n            [None, None],\n            ['2', 2.0],\n            ['3', None],\n            [None, 4.0]])\n        result = sf.pack_columns(column_name_prefix='category')\n        self.assertEqual(result.column_names(), ['id', 'category'])\n        self.__assert_sarray_equal(result['id'], sf['id'])\n        self.__assert_sarray_equal(result['category'], expected)\n\n        result = sf.pack_columns(column_name_prefix='category', new_column_name=\"new name\")\n        self.assertEqual(result.column_names(), ['id', 'new name'])\n        self.__assert_sarray_equal(result['id'], sf['id'])\n        self.__assert_sarray_equal(result['new name'], expected)\n\n        # default dtype is list\n        result = sf.pack_columns(column_name_prefix='category', dtype=list)\n        self.assertEqual(result.column_names(), ['id', 'category'])\n        self.__assert_sarray_equal(result['category'], 
expected)\n\n # remove prefix == True by default\n expected = SArray([\n {},\n {'a':'2', 'b':2.0},\n {'a':'3'},\n {'b':4.0}\n ])\n result = sf.pack_columns(column_name_prefix='category', dtype=dict)\n self.__assert_sarray_equal(result['category'], expected)\n\n # remove prefix == False\n expected = SArray([\n {},\n {'category.a':'2', 'category.b':2.0},\n {'category.a':'3'},\n {'category.b':4.0}\n ])\n result = sf.pack_columns(column_name_prefix='category', dtype=dict, remove_prefix=False)\n self.assertEqual(result.column_names(), ['id', 'category'])\n self.__assert_sarray_equal(result['category'], expected)\n\n # fill_na\n expected = SArray([\n {'a':1, 'b':1},\n {'a':'2', 'b':2.0},\n {'a':'3', 'b':1},\n {'a':1, 'b':4.0}\n ])\n result = sf.pack_columns(column_name_prefix='category', dtype=dict, fill_na = 1)\n self.__assert_sarray_equal(result['category'], expected)\n\n expected = SArray([\n [1],\n [2],\n [3],\n [4]], list)\n result = sf.pack_columns(['id'], new_column_name='id')\n self.assertEqual(sorted(result.column_names()), sorted(['id', 'category.a', 'category.b']))\n self.__assert_sarray_equal(result['id'], expected)\n\n def test_pack_columns(self):\n sf = SFrame()\n sf['id'] = [1, 2, 3, 4, 5]\n sf['b'] = [None, '2', '3', None, '5']\n sf['c'] = [None, 2.0, 3.0, None, 5.0]\n\n expected_all_default = SArray([\n [1, None, None],\n [2, '2', 2.0],\n [3, '3', 3.0],\n [4, None, None],\n [5, '5', 5.0]\n ])\n\n # pack all columns, all default values\n self.__assert_sarray_equal(sf.pack_columns()['X1'], expected_all_default)\n\n expected_ab_default = SArray([\n [1, None],\n [2, '2'],\n [3, '3'],\n [4, None],\n [5, '5']\n ])\n\n expected_all_fillna_1 = SArray([\n [1, -1, -1],\n [2, '2', 2.0],\n [3, '3', 3.0],\n [4, -1, -1],\n [5, '5', 5.0]\n ])\n\n # pack all columns do not drop na and also fill with some value\n result = sf.pack_columns(fill_na=-1)\n self.assertEqual(result.column_names(), ['X1'])\n self.__assert_sarray_equal(result['X1'], expected_all_fillna_1)\n\n # pack partial columns, all default value\n result = sf.pack_columns(['id','b'])\n self.assertEqual(result.column_names(), ['c','X2'])\n self.__assert_sarray_equal(result['c'], sf['c'])\n self.__assert_sarray_equal(result['X2'], expected_ab_default)\n\n expected_sarray_ac_fillna_default = SArray([\n [1, float('NaN')],\n [2, 2.0],\n [3, 3.0],\n [4, float('NaN')],\n [5, 5.0]\n ])\n\n result = sf.pack_columns(['id','c'], dtype=array.array)\n self.assertEqual(result.column_names(), ['b', 'X2'])\n self.__assert_sarray_equal(result['b'], sf['b'])\n self.__assert_sarray_equal(result['X2'], expected_sarray_ac_fillna_default)\n\n expected_dict_default = SArray([\n {'id': 1},\n {'id': 2, 'b':'2', 'c': 2.0},\n {'id': 3, 'b':'3', 'c': 3.0},\n {'id':4 },\n {'id':5, 'b':'5', 'c': 5.0}\n ])\n\n result = sf.pack_columns(dtype=dict)\n self.__assert_sarray_equal(result['X1'], expected_dict_default)\n\n expected_dict_fillna = SArray([\n {'id': 1, 'b':-1, 'c': -1},\n {'id': 2, 'b':'2', 'c': 2.0},\n {'id': 3, 'b':'3', 'c': 3.0},\n {'id': 4, 'b':-1, 'c': -1},\n {'id': 5, 'b':'5', 'c': 5.0}\n ])\n\n result = sf.pack_columns(dtype=dict, fill_na=-1)\n self.__assert_sarray_equal(result['X1'], expected_dict_fillna)\n\n # pack large number of rows\n sf = SFrame()\n num_rows = 100000\n sf['a'] = range(0, num_rows)\n sf['b'] = range(0, num_rows)\n result = sf.pack_columns(['a', 'b'])\n self.assertEqual(len(result), num_rows)\n\n def test_pack_columns_dtype(self):\n a = SFrame({'name':[-140500967,-1405039672],'data':[3,4]})\n b = 
a.pack_columns(['name','data'],dtype=array.array)\n expected = SArray([[-140500967, 3],[-1405039672,4]])\n self.__assert_sarray_equal(b['X1'], expected)\n\n def test_unpack_dict_mixtype(self):\n sf = SFrame({'a':[{'a':[\"haha\", \"hoho\"]}, {'a':array.array('d', [1,2,3])}]})\n sf = sf.unpack('a', column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, list)\n\n sf = SFrame({'a':[{'a':[\"haha\", \"hoho\"]}, {'a':array.array('d', [1,2,3])}]})\n sf = sf.unpack()\n self.assertEqual(sf['a'].dtype, list)\n\n sf = SFrame({'a':[{'a':[\"haha\", \"hoho\"]}, {'a':None}]})\n sf = sf.unpack('a', column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, list)\n\n sf = SFrame({'a':[{'a':[\"haha\", \"hoho\"]}, {'a':None}]})\n sf = sf.unpack('a', column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, list)\n\n sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':None}])\n sf = sa.unpack(column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, array.array)\n\n sa = SArray([{'a':array.array('d', [1,2,3])}, {'a':{'b':1}}])\n sf = sa.unpack(column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, str)\n\n sa = SArray([{'a': 1, 'b': 0.1}, {'a': 0.1, 'b': 1}])\n sf = sa.unpack(column_name_prefix = '')\n self.assertEqual(sf['a'].dtype, float)\n self.assertEqual(sf['b'].dtype, float)\n\n\n def test_unpack_list(self):\n sa = SArray([\n [1, None, None],\n [2, '2', 2.0],\n [3, '3', 3.0],\n [4, None, None],\n [5, '5', 5.0]\n ])\n\n expected = SFrame()\n expected ['a'] = [1, 2, 3, 4, 5]\n expected ['b'] = [None, '2', '3', None, '5']\n expected ['c'] = [None, 2.0, 3.0, None, 5.0]\n\n result = sa.unpack()\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n result = sa.unpack(column_name_prefix='ttt')\n self.assertEqual(result.column_names(), ['ttt.0', 'ttt.1', 'ttt.2'])\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # column types\n result = sa.unpack(column_types=[int, str, float])\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # more column types\n result = sa.unpack(column_types=[int, str, float, int])\n result.rename(dict(zip(result.column_names(), ['a','b','c','d'])), inplace=True)\n e = expected.select_columns(['a','b','c'])\n e.add_column(SArray([None for i in range(5)], int),'d', inplace=True)\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # less column types\n result = sa.unpack(column_types=[int, str])\n result.rename(dict(zip(result.column_names(), ['a','b'])), inplace=True)\n e = expected.select_columns(['a','b'])\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # fill na_value\n e = SFrame()\n e['a'] = [1, 2, None, 4, 5]\n e['b'] = [None, '2', '3', None, '5']\n e['c'] = [None, 2.0, None, None, 5.0]\n result = sa.unpack(na_value=3)\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # wrong length\n with self.assertRaises(TypeError):\n sa.unpack(column_name_prefix=['a','b'])\n\n # wrong type\n with self.assertRaises(RuntimeError):\n sa.unpack(column_types = [str, int, float])\n\n # wrong limit types\n with self.assertRaises(TypeError):\n sa.unpack(limit=[\"1\"])\n\n # int array cannot be unpacked\n with self.assertRaises(TypeError):\n SArray([1,2,3,4]).unpack()\n\n # 
column name must be a string\n with self.assertRaises(TypeError):\n sa.unpack(1)\n\n # invalid column type\n with self.assertRaises(TypeError):\n sa.unpack(column_types = int)\n\n # invalid column type\n with self.assertRaises(TypeError):\n sa.unpack(column_types = [np.array])\n\n # cannot infer type if no values\n with self.assertRaises(RuntimeError):\n SArray([], list).unpack()\n\n def test_unpack_array(self):\n import array\n sa = SArray([\n array.array('d', [1, 1, 0]),\n array.array('d', [2, -1, 1]),\n array.array('d', [3, 3, 2]),\n array.array('d', [-1, 2, 3]),\n array.array('d', [5, 5, 4])\n ])\n\n expected = SFrame()\n expected ['a'] = [1.0, 2.0, 3.0, -1.0, 5.0]\n expected ['b'] = [1.0, -1.0, 3.0, 2.0, 5.0]\n expected ['c'] = [0.0, 1.0, 2.0, 3.0, 4.0]\n\n result = sa.unpack()\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # right amount column names\n result = sa.unpack(column_name_prefix = 'unpacked')\n result.rename(dict(zip(result.column_names(), ['t.0', 't.1', 't.2'])), inplace=True)\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # column types\n result = sa.unpack(column_types=[int, str, float])\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n expected['a'] = expected['a'].astype(int)\n expected['b'] = expected['b'].astype(str)\n expected['c'] = expected['c'].astype(float)\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n # more column types\n result = sa.unpack(column_types=[int, str, float, int])\n result.rename(dict(zip(result.column_names(), ['a','b','c','d'])), inplace=True)\n e = expected.select_columns(['a','b','c'])\n e.add_column(SArray([None for i in range(5)], int),'d', inplace=True)\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # less column types\n result = sa.unpack(column_types=[int, str])\n result.rename(dict(zip(result.column_names(), ['a','b'])), inplace=True)\n e = expected.select_columns(['a','b'])\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n # fill na_value\n e = SFrame()\n e['a'] = SArray([1, 2, 3, None, 5], float)\n e['b'] = SArray([1, None, 3, 2, 5], float)\n e['c'] = SArray([0, 1, 2, 3, 4], float)\n result = sa.unpack(na_value=-1)\n result.rename(dict(zip(result.column_names(), ['a','b','c'])), inplace=True)\n assert_frame_equal(result.to_dataframe(), e.to_dataframe())\n\n def test_unpack_dict(self):\n\n sf = SFrame([{'a':1,'b':2,'c':3},{'a':4,'b':5,'c':6}])\n expected_sf = SFrame()\n expected_sf[\"a\"] = [1,4]\n expected_sf[\"b\"] = [2,5]\n expected_sf[\"c\"] = [3,6]\n unpacked_sf = sf.unpack()\n assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())\n\n expected_sf = SFrame()\n expected_sf[\"xx.a\"] = [1,4]\n expected_sf[\"xx.b\"] = [2,5]\n expected_sf[\"xx.c\"] = [3,6]\n unpacked_sf = sf.unpack(column_name_prefix='xx')\n assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe())\n\n packed_sf = SFrame({\"X1\":{'a':1,'b':2,'c':3},\"X2\":{'a':4,'b':5,'c':6}})\n\n with self.assertRaises(RuntimeError):\n packed_sf.unpack()\n\n sf = SFrame()\n\n sf[\"user_id\"] = [1,2,3,4,5,6,7]\n sf[\"is_restaurant\"] = [1, 1,0,0, 1, None, None]\n sf[\"is_retail\"] = [None,1,1,None,1, None, None]\n sf[\"is_electronics\"] = [\"yes\", \"no\",\"yes\",None,\"no\", None, None]\n\n\n packed_sf = SFrame()\n packed_sf['user_id'] = 
sf['user_id']\n packed_sf[\"category\"] = [\n {\"is_restaurant\": 1, \"is_electronics\": \"yes\"},\n {\"is_restaurant\": 1, \"is_retail\": 1, \"is_electronics\": \"no\"},\n {\"is_restaurant\": 0, \"is_retail\": 1, \"is_electronics\": \"yes\"},\n {\"is_restaurant\": 0 },\n {\"is_restaurant\": 1, \"is_retail\": 1, \"is_electronics\": \"no\"},\n { },\n None]\n\n with self.assertRaises(TypeError):\n packed_sf['user_id'].unpack()\n\n with self.assertRaises(TypeError):\n packed_sf['category'].unpack(1)\n\n with self.assertRaises(TypeError):\n packed_sf['category'].unpack(value_types = [int])\n\n # unpack only one column\n expected_sf = SFrame()\n expected_sf[\"is_retail\"] = sf[\"is_retail\"]\n unpacked_sf = packed_sf['category'].unpack(limit=[\"is_retail\"], column_types=[int], column_name_prefix=None)\n assert_frame_equal(unpacked_sf.to_dataframe(), expected_sf.to_dataframe()) \n\n\n # unpack all\n unpacked_sf = packed_sf['category'].unpack(column_name_prefix=None, column_types=[int, int, str], limit=[\"is_restaurant\", \"is_retail\", \"is_electronics\"])\n assert_frame_equal(unpacked_sf.to_dataframe(), sf[[\"is_restaurant\", \"is_retail\", \"is_electronics\"]].to_dataframe())\n\n # auto infer types, the column order may be different, so use order here before comparison\n unpacked_sf = packed_sf[\"category\"].unpack()\n unpacked_sf.rename({\n \"X.is_restaurant\": \"is_restaurant\",\n \"X.is_retail\": \"is_retail\",\n \"X.is_electronics\": \"is_electronics\"\n }, inplace=True)\n assert_frame_equal(unpacked_sf.to_dataframe().sort_index(axis=1), sf[[\"is_restaurant\", \"is_retail\", \"is_electronics\"]].to_dataframe().sort_index(axis=1))\n\n unpacked_sf = packed_sf[\"category\"].unpack(na_value = 0, column_name_prefix=\"new\")\n expected = SFrame()\n expected[\"new.is_restaurant\"] = [1, 1,None,None, 1, None, None]\n expected[\"new.is_retail\"] = [None,1,1,None,1, None, None]\n expected[\"new.is_electronics\"] = [\"yes\", \"no\",\"yes\",None,\"no\", None, None]\n assert_frame_equal(unpacked_sf.to_dataframe().sort_index(axis=1), expected.to_dataframe().sort_index(axis=1))\n\n # unpack a dictionary key integer as key\n sa = SArray([\n {1: 'a'},\n {2: 'b'}\n ])\n result = sa.unpack()\n expected = SFrame({'X.1':['a', None], 'X.2':[None, 'b']})\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n result = sa.unpack(limit=[2])\n expected = SFrame({'X.2':[None, 'b']})\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n result = sa.unpack(limit=[2], column_name_prefix=\"expanded\")\n expected = SFrame({'expanded.2':[None, 'b']})\n assert_frame_equal(result.to_dataframe(), expected.to_dataframe())\n\n sa = SArray([{i:i} for i in range(500)])\n unpacked_sa = sa.unpack()\n self.assertEqual(len(unpacked_sa), len(sa))\n i = 0\n for v in unpacked_sa:\n for j in range(500):\n val = v['X.' 
+ str(j)]\n if (j == i):\n self.assertEqual(val, i)\n else:\n self.assertEqual(val, None)\n i = i + 1\n\n # if types don't agree, convert to string automatically\n sa = SArray([{'a':1},{'a': 'a_3'}])\n sf = sa.unpack()\n self.assertEqual(sf.column_types(), [str])\n\n sa = SArray([{'a':None}, {'a': 1}])\n sf = sa.unpack()\n self.assertEqual(sf.column_types(), [int])\n\n sa = SArray([{'a':1}, {'a': None}])\n sf = sa.unpack()\n self.assertEqual(sf.column_types(), [int])\n\n # type inference is already at server side even if limit is given\n sa = SArray([{'c'+str(i): i if i % 2 == 0 else 'v' + str(i)} for i in range(1000)])\n unpacked = sa.unpack(limit=['c'+str(i) for i in range(10)], column_name_prefix=\"\")\n for i in range(10):\n v = unpacked[i]\n for j in range(10):\n if (j != i):\n self.assertEqual(v['c'+str(j)], None)\n elif j % 2 == 0:\n self.assertEqual(v['c'+str(j)], j)\n else:\n self.assertEqual(v['c'+str(j)], 'v' + str(j))\n\n\n\n def test_unpack_sframe(self):\n sf = SFrame()\n sf['user_id'] = range(7)\n sf[\"category\"] = [\n {\"is_restaurant\": 1, \"is_electronics\": \"yes\"},\n {\"is_restaurant\": 1, \"is_retail\": 1, \"is_electronics\": \"no\"},\n {\"is_restaurant\": 0, \"is_retail\": 1, \"is_electronics\": \"yes\"},\n {\"is_restaurant\": 0 },\n {\"is_restaurant\": 1, \"is_retail\": 1, \"is_electronics\": \"no\"},\n { },\n None]\n sf['list'] = [\n None,\n range(1),\n range(2),\n range(3),\n range(1),\n range(2),\n range(3),\n ]\n\n with self.assertRaises(TypeError):\n sf.unpack('user_id')\n\n expected = SFrame()\n expected['user_id'] = sf['user_id']\n expected['list'] = sf['list']\n expected[\"is_restaurant\"] = [1, 1,0,0, 1, None, None]\n expected[\"is_retail\"] = [None,1,1,None,1, None, None]\n expected[\"is_electronics\"] = [\"yes\", \"no\",\"yes\",None,\"no\", None, None]\n\n result = sf.unpack('category')\n result.rename({\n 'category.is_restaurant': 'is_restaurant',\n 'category.is_retail': 'is_retail',\n 'category.is_electronics': 'is_electronics'\n }, inplace=True)\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='category', column_name_prefix=\"\")\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='category', column_name_prefix=\"abc\")\n result.rename({\n 'abc.is_restaurant': 'is_restaurant',\n 'abc.is_retail': 'is_retail',\n 'abc.is_electronics': 'is_electronics'\n }, inplace=True)\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='category', column_name_prefix=\"\", column_types=[str], limit=['is_restaurant'])\n new_expected = expected[['user_id', 'list', 'is_restaurant']]\n new_expected['is_restaurant'] = new_expected['is_restaurant'].astype(str)\n assert_frame_equal(new_expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='category', column_name_prefix=\"\", na_value = None)\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='list')\n expected = SFrame()\n expected['user_id'] = sf['user_id']\n expected['list.0'] = [None,0,0,0, 0,0,0]\n expected['list.1'] = [None,None,1,1, None,1,1]\n expected['list.2'] = [None,None,None,2, None, None,2]\n expected['category'] = sf['category']\n 
assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n result = sf.unpack(column_name='list', na_value= 2)\n expected = SFrame()\n expected['user_id'] = sf['user_id']\n expected['list.0'] = [None,0,0,0, 0,0,0]\n expected['list.1'] = [None,None,1,1, None,1,1]\n expected['list.2'] = [None,None,None,None, None, None,None]\n expected['category'] = sf['category']\n assert_frame_equal(expected.to_dataframe().sort_index(axis=1), result.to_dataframe().sort_index(axis=1))\n\n # auto resolving conflicting names\n sf = SFrame()\n sf['a'] = range(100)\n sf['b'] = [range(5) for i in range(100)]\n sf['b.0'] = range(100)\n sf['b.0.1'] = range(100)\n result = sf.unpack('b')\n self.assertEqual(result.column_names(), ['a', 'b.0', 'b.0.1', 'b.0.1.1', 'b.1.1.1', 'b.2.1.1', 'b.3.1.1', 'b.4.1.1'])\n\n sf = SFrame()\n sf['a'] = range(100)\n sf['b'] = [{'str1': i, 'str2':i + 1} for i in range(100)]\n sf['b.str1'] = range(100)\n result = sf.unpack('b')\n self.assertEqual(len(result.column_names()), 4)\n\n def test_stack_dict(self):\n sf = SFrame()\n sf[\"user_id\"] = [1,2,3,4,5]\n sf[\"user_name\"] = ['user' + str(i) for i in list(sf['user_id'])]\n sf[\"category\"] = [\n {\"is_restaurant\": 1, },\n {\"is_restaurant\": 0, \"is_retail\": 1 },\n { \"is_retail\": 0 },\n {},\n None]\n\n expected_sf = SFrame()\n expected_sf[\"user_id\"] = [1,2, 2, 3,4,5]\n expected_sf[\"user_name\"] = ['user' + str(i) for i in list(expected_sf['user_id'])]\n expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]\n expected_sf['value'] = [1,0,1,0, None, None]\n df_expected = expected_sf.to_dataframe().sort_values(['user_id', 'category']).reset_index(drop=True)\n\n with self.assertRaises(TypeError):\n sf.stack()\n\n with self.assertRaises(ValueError):\n sf.stack('sss')\n\n with self.assertRaises(ValueError):\n sf.stack('category', ['user_id', 'value'])\n\n # normal case\n stacked_sf = sf.stack('category', ['category', 'value'])\n assert_frame_equal(stacked_sf.to_dataframe().sort_values([\"user_id\", \"category\"]).reset_index(drop=True), df_expected)\n\n # set column types\n stacked_sf = sf.stack('category')\n self.assertTrue(stacked_sf.column_types()[2] == str)\n self.assertTrue(stacked_sf.column_types()[3] == int)\n\n # auto generate column names\n stacked_sf = sf.stack('category')\n new_column_names = stacked_sf.column_names()\n self.assertTrue(len(new_column_names) == 4)\n expected_sf.rename({'category':new_column_names[2], 'value':new_column_names[3]}, inplace=True)\n df_expected = expected_sf.to_dataframe().sort_values(['user_id', new_column_names[2]]).reset_index(drop=True)\n assert_frame_equal(stacked_sf.to_dataframe().sort_values([\"user_id\", new_column_names[2]]).reset_index(drop=True), df_expected)\n\n #dropna\n expected_sf = SFrame()\n expected_sf[\"user_id\"] = [1,2, 2, 3, 4, 5]\n expected_sf[\"user_name\"] = ['user' + str(i) for i in list(expected_sf['user_id'])]\n expected_sf['category'] = ['is_restaurant', 'is_restaurant', 'is_retail', 'is_retail', None, None]\n expected_sf['value'] = [1,0,1,0, None, None]\n df_expected = expected_sf.to_dataframe().sort_values(['user_id', 'category']).reset_index(drop=True)\n\n stacked_sf = sf.stack('category', ['category','value'], drop_na = False)\n assert_frame_equal(stacked_sf.to_dataframe().sort_values([\"user_id\", \"category\"]).reset_index(drop=True), df_expected)\n\n sf = SFrame()\n sf['a'] = SArray(([{}] * 100) + [{'a':1}])\n\n # its a dict need 2 types\n with 
self.assertRaises(ValueError):\n sf.stack('a',['key', 'value'], new_column_type=[str])\n with self.assertRaises(ValueError):\n sf.stack('a',['key', 'value'], new_column_type=str)\n\n sf.stack('a',['key', 'value'], new_column_type=[str, int])\n expected_sf = SFrame()\n expected_sf['key'] = SArray([None] * 100 + [\"a\"])\n expected_sf['value'] = SArray([None] * 100 + [1])\n\n def test_stack_list(self):\n sf = SFrame()\n sf[\"a\"] = [1,2,3,4,5]\n sf[\"b\"] = [['a', 'b'], ['c'], ['d'],['e', None], None]\n expected_result = SFrame()\n expected_result['a'] = [1,1,2,3,4,4,5]\n expected_result['X1'] = ['a','b','c','d','e',None, None]\n\n with self.assertRaises(TypeError):\n sf.stack()\n\n with self.assertRaises(ValueError):\n sf.stack('sss')\n\n with self.assertRaises(TypeError):\n sf.stack('a')\n\n with self.assertRaises(TypeError):\n sf.stack('b', [\"something\"])\n\n result = sf.stack(\"b\", drop_na = False)\n stacked_column_name = result.column_names()[1]\n expected_result.rename({'X1':stacked_column_name}, inplace=True)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n # default drop_na=False\n result = sf.stack(\"b\")\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n result = sf.stack(\"b\", new_column_name = \"b\", drop_na = False)\n expected_result.rename({stacked_column_name: 'b'}, inplace=True)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n result = sf.stack(\"b\", new_column_name = \"b\", drop_na = False)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n # drop_na=True\n result = sf.stack(\"b\", drop_na = True)\n expected_result = SFrame()\n expected_result['a'] = [1,1,2,3,4,4]\n expected_result[result.column_names()[1]] = ['a','b','c','d','e',None]\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n\n sf = SFrame()\n n = 1000000\n sf['a'] = range(1,n)\n sf['b'] = [[str(i), str(i+1)] for i in range(1,n)]\n result = sf.stack('b')\n self.assertTrue(len(result), n * 2)\n\n\n sf = SFrame()\n sf['a'] = SArray(([[]] * 100) + [['a','b']])\n\n # its a dict need 2 types\n with self.assertRaises(ValueError):\n sf.stack('a', 'a', new_column_type=[str, int])\n\n sf.stack('a', 'a', new_column_type=str)\n expected_sf = SFrame()\n expected_sf['a'] = SArray([None] * 100 + [\"a\", \"b\"])\n\n def test_stack_vector(self):\n sf = SFrame()\n sf[\"a\"] = [1,2,3,4,5]\n sf[\"b\"] = [[1],[1,2],[1,2,3],[1,2,3,4],None]\n expected_result = SFrame()\n expected_result['a'] = [1,2,2,3,3,3,4,4,4,4,5]\n expected_result['X1'] = [1,1,2,1,2,3,1,2,3,4,None]\n\n with self.assertRaises(TypeError):\n sf.stack()\n\n with self.assertRaises(ValueError):\n sf.stack('sss')\n\n with self.assertRaises(TypeError):\n sf.stack('a')\n\n with self.assertRaises(TypeError):\n sf.stack('b', [\"something\"])\n\n result = sf.stack(\"b\", drop_na = False)\n stacked_column_name = result.column_names()[1]\n expected_result.rename({'X1':stacked_column_name}, inplace=True)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n # default drop_na=False\n result = sf.stack(\"b\")\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n\n result = sf.stack(\"b\", new_column_name = \"b\", drop_na = False)\n expected_result.rename({stacked_column_name: 'b'}, inplace=True)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n result = sf.stack(\"b\", new_column_name = \"b\", drop_na = False)\n 
assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n # drop_na=True\n result = sf.stack(\"b\", drop_na = True)\n expected_result = SFrame()\n expected_result['a'] = [1,2,2,3,3,3,4,4,4,4]\n expected_result[result.column_names()[1]] = SArray([1,1,2,1,2,3,1,2,3,4], float)\n assert_frame_equal(result.to_dataframe(), expected_result.to_dataframe())\n\n import array\n sf = SFrame()\n sf['a'] = SArray(([array.array('d')] * 100) + [array.array('d',[1.0,2.0])])\n\n # its a dict need 2 types\n with self.assertRaises(ValueError):\n sf.stack('a', 'a', new_column_type=[str, int])\n\n sf.stack('a', 'a', new_column_type=int)\n expected_sf = SFrame()\n expected_sf['a'] = SArray([None] * 100 + [1, 2])\n\n def test_unstack_dict(self):\n sf = SFrame()\n sf[\"user_id\"] = [1,2,3,4]\n sf[\"user_name\"] = ['user' + str(i) for i in list(sf['user_id'])]\n sf[\"categories\"] = [\n {\"is_restaurant\": 1, },\n {\"is_restaurant\": 0, \"is_retail\": 1 },\n { \"is_retail\": 0 },\n None]\n\n stacked_sf = sf.stack('categories', ['category', 'value'], drop_na=False)\n\n # normal unstack\n unstacked_sf = stacked_sf.unstack(column_names=['category', 'value'], new_column_name = 'categories')\n # these frames are *almost* equal except user4 will be {} instead of None\n assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort_values(\"user_id\").reset_index(drop=True))\n\n # missing new column name\n unstacked_sf = stacked_sf.unstack(['category', 'value'])\n self.assertEqual(len(unstacked_sf.column_names()), 3)\n unstacked_sf.rename({unstacked_sf.column_names()[2] : 'categories'}, inplace=True)\n assert_frame_equal(sf.fillna('categories',{}).to_dataframe(), unstacked_sf.to_dataframe().sort_values(\"user_id\").reset_index(drop=True))\n\n # missing column names\n with self.assertRaises(KeyError):\n stacked_sf.unstack(['category','value1'])\n\n # wrong input\n with self.assertRaises(TypeError):\n stacked_sf.unstack(['category'])\n\n # duplicate new column name\n with self.assertRaises(RuntimeError):\n unstacked_sf = stacked_sf.unstack(['category', 'value'], 'user_name')\n\n def test_unstack_list(self):\n sf = SFrame()\n sf['a'] = [1,2,3,4]\n sf['b'] = [range(10), range(20), range(30), range(50)]\n stacked_sf = sf.stack('b', new_column_name = 'new_b')\n unstacked_sf = stacked_sf.unstack('new_b', new_column_name = 'b')\n self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])\n\n unstacked_sf = stacked_sf.unstack('new_b')\n unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'}, inplace=True)\n self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])\n\n unstacked_sf = stacked_sf.unstack('new_b', new_column_name='b')\n unstacked_sf.rename({unstacked_sf.column_names()[1]: 'b'}, inplace=True)\n self.__assert_concat_result_equal(sf.sort('a'), unstacked_sf.sort('a'), ['b'])\n\n with self.assertRaises(RuntimeError):\n stacked_sf.unstack('new_b', new_column_name='a')\n\n with self.assertRaises(TypeError):\n stacked_sf.unstack(['new_b'])\n\n with self.assertRaises(KeyError):\n stacked_sf.unstack('non exist')\n\n def test_content_identifier(self):\n sf = SFrame({\"a\":[1,2,3,4],\"b\":[\"1\",\"2\",\"3\",\"4\"]})\n a1 = sf['a'].__get_content_identifier__()\n a2 = sf['a'].__get_content_identifier__()\n self.assertEqual(a1, a2)\n\n def test_random_access(self):\n t1 = list(range(0,100000))\n t2 = [str(i) for i in t1]\n t = [{'t1':t1[i], 't2':t2[i]} for i in range(len(t1))]\n s = SFrame({'t1':t1,'t2':t2})\n # simple slices\n 
self.__test_equal(s[1:10000], pd.DataFrame(t[1:10000]))\n self.__test_equal(s[0:10000:3], pd.DataFrame(t[0:10000:3]))\n self.__test_equal(s[1:10000:3], pd.DataFrame(t[1:10000:3]))\n self.__test_equal(s[2:10000:3], pd.DataFrame(t[2:10000:3]))\n self.__test_equal(s[3:10000:101], pd.DataFrame(t[3:10000:101]))\n # negative slices\n self.__test_equal(s[-5:], pd.DataFrame(t[-5:]))\n self.__test_equal(s[-1:], pd.DataFrame(t[-1:]))\n self.__test_equal(s[-100:-10], pd.DataFrame(t[-100:-10]))\n self.__test_equal(s[-100:-10:2], pd.DataFrame(t[-100:-10:2]))\n # single element reads\n self.assertEqual(s[511], t[511])\n self.assertEqual(s[1912],t[1912])\n self.assertEqual(s[-1], t[-1])\n self.assertEqual(s[-10],t[-10])\n\n # edge case oddities\n self.__test_equal(s[10:100:100], pd.DataFrame(t[10:100:100]))\n self.__test_equal(s[-100:len(s):10], pd.DataFrame(t[-100:len(t):10]))\n self.assertEqual(len(s[-1:-2]), 0)\n self.assertEqual(len(s[-1:-1000:2]), 0)\n with self.assertRaises(IndexError):\n s[len(s)]\n\n def sort_n_rows(self, nrows=100):\n nrows += 1\n sf = SFrame()\n sf['a'] = range(1, nrows)\n sf['b'] = [float(i) for i in range(1,nrows)]\n sf['c'] = [str(i) for i in range(1,nrows)]\n sf['d'] = [[i, i+1] for i in range(1,nrows)]\n\n reversed_sf = SFrame()\n reversed_sf['a'] = range(nrows-1, 0, -1)\n reversed_sf['b'] = [float(i) for i in range(nrows-1, 0, -1)]\n reversed_sf['c'] = [str(i) for i in range(nrows-1, 0, -1)]\n reversed_sf['d'] = [[i, i+1] for i in range(nrows-1, 0, -1)]\n\n with self.assertRaises(TypeError):\n sf.sort()\n\n with self.assertRaises(TypeError):\n sf.sort(1)\n\n with self.assertRaises(TypeError):\n sf.sort(\"d\")\n\n with self.assertRaises(ValueError):\n sf.sort(\"nonexist\")\n\n with self.assertRaises(TypeError):\n sf.sort({'a':True})\n\n result = sf.sort('a')\n assert_frame_equal(sf.to_dataframe(), result.to_dataframe())\n\n # try a lazy input\n result = sf[sf['a'] > 10].sort('a')\n assert_frame_equal(sf[sf['a'] > 10].to_dataframe(), result.to_dataframe())\n\n result = sf.sort('a', ascending = False)\n assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe())\n\n # lazy reversed\n result = sf[sf['a'] > 10].sort('a', ascending = False)\n assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe())\n\n # lazy reversed\n result = sf[sf['a'] > 10].sort('a', ascending = False)\n assert_frame_equal(reversed_sf[reversed_sf['a'] > 10].to_dataframe(), result.to_dataframe())\n\n # sort two columns\n result = sf.sort(['a', 'b'])\n assert_frame_equal(sf.to_dataframe(), result.to_dataframe())\n\n result = sf.sort(['a', 'c'], ascending = False)\n assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe())\n\n result = sf.sort([('a', True), ('b', False)])\n assert_frame_equal(sf.to_dataframe(), result.to_dataframe())\n\n result = sf.sort([('a', False), ('b', True)])\n assert_frame_equal(reversed_sf.to_dataframe(), result.to_dataframe())\n\n # empty sort should not throw\n sf = SFrame({'x':[]})\n sf.sort('x')\n\n def test_sort(self):\n #self.sort_n_rows(100)\n for i in range(1, 10):\n self.sort_n_rows(i)\n\n def test_dropna(self):\n # empty case\n sf = SFrame()\n self.assertEqual(len(sf.dropna()), 0)\n\n # normal case\n self.__test_equal(self.employees_sf.dropna(), self.employees_sf[0:5].to_dataframe())\n test_split = self.employees_sf.dropna_split()\n self.__test_equal(test_split[0], self.employees_sf[0:5].to_dataframe())\n self.__test_equal(test_split[1], self.employees_sf[5:6].to_dataframe())\n\n\n # create some other test sframe\n 
test_sf = SFrame({'ints':SArray([None,None,3,4,None], int),\n 'floats':SArray([np.nan,2.,3.,4.,np.nan],float),\n 'strs':SArray(['1',np.nan,'','4',None], str),\n 'lists':SArray([[1],None,[],[1,1,1,1],None], list),\n 'dicts':SArray([{1:2},{2:3},{},{4:5},None], dict)})\n\n # another normal, but more interesting case\n self.__test_equal(test_sf.dropna(),\n pd.DataFrame({'ints':[3,4],'floats':[3.,4.],'strs':['','4'],'lists':[[],[1,1,1,1]],'dicts':[{},{4:5}]}))\n test_split = test_sf.dropna_split()\n self.__test_equal(test_split[0], test_sf[2:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[0:2].append(test_sf[4:5]).to_dataframe())\n\n # the 'all' case\n self.__test_equal(test_sf.dropna(how='all'), test_sf[0:4].to_dataframe())\n test_split = test_sf.dropna_split(how='all')\n self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())\n\n # select some columns\n self.__test_equal(test_sf.dropna(['ints','floats'], how='all'), test_sf[1:4].to_dataframe())\n test_split = test_sf.dropna_split(['ints','floats'], how='all')\n self.__test_equal(test_split[0], test_sf[1:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[0:1].append(test_sf[4:5]).to_dataframe())\n\n self.__test_equal(test_sf.dropna('strs'), test_sf[0:4].to_dataframe())\n test_split = test_sf.dropna_split('strs')\n self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())\n\n self.__test_equal(test_sf.dropna(['strs','dicts']), test_sf[0:4].to_dataframe())\n test_split = test_sf.dropna_split(['strs','dicts'])\n self.__test_equal(test_split[0], test_sf[0:4].to_dataframe())\n self.__test_equal(test_split[1], test_sf[4:5].to_dataframe())\n\n # bad stuff\n with self.assertRaises(TypeError):\n test_sf.dropna(1)\n test_sf.dropna([1,2])\n test_sf.dropna('strs', how=1)\n test_sf.dropna_split(1)\n test_sf.dropna_split([1,2])\n test_sf.dropna_split('strs', how=1)\n\n with self.assertRaises(ValueError):\n test_sf.dropna('ints', how='blah')\n test_sf.dropna_split('ints', how='blah')\n\n with self.assertRaises(RuntimeError):\n test_sf.dropna('dontexist')\n test_sf.dropna_split('dontexist')\n\n def test_add_row_number(self):\n sf = SFrame(self.__create_test_df(400000))\n\n sf = sf.add_row_number('id')\n self.assertEqual(list(sf['id']), list(range(0,400000)))\n\n del sf['id']\n\n sf = sf.add_row_number('id', -20000)\n self.assertEqual(list(sf['id']), list(range(-20000,380000)))\n del sf['id']\n\n sf = sf.add_row_number('id', 40000)\n self.assertEqual(list(sf['id']), list(range(40000,440000)))\n\n with self.assertRaises(RuntimeError):\n sf.add_row_number('id')\n\n with self.assertRaises(TypeError):\n sf = sf.add_row_number(46)\n sf = sf.add_row_number('id2',start='hi')\n\n def test_inplace_not_inplace(self):\n # add row number\n sf = SFrame(self.__create_test_df(1000))\n sf2 = sf.add_row_number('id', inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('id' in sf2.column_names())\n self.assertTrue('id' not in sf.column_names())\n\n sf2 = sf.add_row_number('id', inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('id' in sf2.column_names())\n\n # add column\n sf = SFrame(self.__create_test_df(1000))\n newcol = SArray(range(1000))\n sf2 = sf.add_column(newcol, 'newcol', inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('newcol' in sf2.column_names())\n self.assertTrue('newcol' not in sf.column_names())\n sf2 = sf.add_column(newcol, 'newcol', inplace=True)\n 
self.assertTrue(sf2 is sf)\n self.assertTrue('newcol' in sf2.column_names())\n\n # add columns\n sf = SFrame(self.__create_test_df(1000))\n newcols = SFrame({'newcol':range(1000), 'newcol2':range(1000)})\n sf2 = sf.add_columns(newcols, inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('newcol' in sf2.column_names())\n self.assertTrue('newcol2' in sf2.column_names())\n self.assertTrue('newcol' not in sf.column_names())\n self.assertTrue('newcol2' not in sf.column_names())\n sf2 = sf.add_columns(newcols, inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('newcol' in sf2.column_names())\n self.assertTrue('newcol2' in sf2.column_names())\n\n # remove column\n sf = SFrame(self.__create_test_df(1000))\n sf2 = sf.remove_column('int_data', inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('int_data' in sf.column_names())\n self.assertTrue('int_data' not in sf2.column_names())\n sf2 = sf.remove_column('int_data', inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('int_data' not in sf2.column_names())\n\n # remove columns\n sf = SFrame(self.__create_test_df(1000))\n sf2 = sf.remove_columns(['int_data', 'float_data'], inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('int_data' in sf.column_names())\n self.assertTrue('float_data' in sf.column_names())\n self.assertTrue('int_data' not in sf2.column_names())\n self.assertTrue('float_data' not in sf2.column_names())\n sf2 = sf.remove_columns(['int_data', 'float_data'], inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('int_data' not in sf2.column_names())\n self.assertTrue('float_data' not in sf2.column_names())\n\n # rename \n sf = SFrame(self.__create_test_df(1000))\n sf2 = sf.rename({'int_data':'int','float_data':'float'}, inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertTrue('int_data' in sf.column_names())\n self.assertTrue('float_data' in sf.column_names())\n self.assertTrue('int' not in sf.column_names())\n self.assertTrue('float' not in sf.column_names())\n self.assertTrue('int_data' not in sf2.column_names())\n self.assertTrue('float_data' not in sf2.column_names())\n self.assertTrue('int' in sf2.column_names())\n self.assertTrue('float' in sf2.column_names())\n sf2 = sf.rename({'int_data':'int','float_data':'float'}, inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertTrue('int_data' not in sf2.column_names())\n self.assertTrue('float_data' not in sf2.column_names())\n self.assertTrue('int' in sf2.column_names())\n self.assertTrue('float' in sf2.column_names())\n\n # swap \n sf = SFrame(self.__create_test_df(1000))\n old_cnames = sf.column_names()\n\n # swap int_data and float_data\n new_cnames = sf.column_names()\n int_data_idx = new_cnames.index('int_data')\n float_data_idx = new_cnames.index('float_data')\n new_cnames[int_data_idx],new_cnames[float_data_idx] = new_cnames[float_data_idx],new_cnames[int_data_idx] \n\n\n\n sf2 = sf.swap_columns('int_data', 'float_data', inplace=False)\n self.assertTrue(sf2 is not sf)\n self.assertEqual(sf.column_names(), old_cnames)\n self.assertEqual(sf2.column_names(), new_cnames)\n\n sf2 = sf.swap_columns('int_data', 'float_data', inplace=True)\n self.assertTrue(sf2 is sf)\n self.assertEqual(sf2.column_names(), new_cnames)\n\n\n def test_check_lazy_sframe_size(self):\n # empty sframe, materialized, has_size\n sf = SFrame()\n self.assertTrue(sf.__is_materialized__())\n self.assertTrue(sf.__has_size__())\n\n # add one column, not materialized, has_size\n sf['a'] = range(1000)\n 
self.assertTrue(sf.__is_materialized__())\n self.assertTrue(sf.__has_size__())\n\n # materialize it, materialized, has_size\n sf['a'] = range(1000)\n sf.__materialize__()\n self.assertTrue(sf.__is_materialized__())\n self.assertTrue(sf.__has_size__())\n\n # logical filter, not materialized, not has_size\n sf = sf[sf['a'] > 5000]\n self.assertFalse(sf.__is_materialized__())\n self.assertFalse(sf.__has_size__())\n\n def test_lazy_logical_filter_sarray(self):\n g=SArray(range(10000))\n g2=SArray(range(10000))\n a=g[g>10]\n a2=g2[g>10]\n z=a[a2>20]\n self.assertEqual(len(z), 9979)\n\n def test_lazy_logical_filter_sframe(self):\n g=SFrame({'a':range(10000)})\n g2=SFrame({'a':range(10000)})\n a=g[g['a']>10]\n a2=g2[g['a']>10]\n z=a[a2['a']>20]\n self.assertEqual(len(z), 9979)\n\n\n def test_column_manipulation_of_lazy_sframe(self):\n g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})\n g = g[g['id'] > 2]\n del g['id']\n # if lazy column deletion is quirky, this will cause an exception\n self.assertEqual(list(g[0:2]['a']), [3,4])\n g=SFrame({'a':[1,2,3,4,5],'id':[1,2,3,4,5]})\n g = g[g['id'] > 2]\n g.swap_columns('a','id', inplace=True)\n # if lazy column swap is quirky, this will cause an exception\n self.assertEqual(list(g[0:2]['a']), [3,4])\n\n def test_empty_sarray(self):\n with util.TempDirectory() as f:\n sf = SArray()\n sf.save(f)\n sf2 = SArray(f)\n self.assertEqual(len(sf2), 0)\n\n def test_empty_sframe(self):\n with util.TempDirectory() as f:\n sf = SFrame()\n sf.save(f)\n sf2 = SFrame(f)\n self.assertEqual(len(sf2), 0)\n self.assertEqual(sf2.num_columns(), 0)\n\n def test_none_column(self):\n sf = SFrame({'a':[1,2,3,4,5]})\n sf['b'] = None\n self.assertEqual(sf['b'].dtype, float)\n df = pd.DataFrame({'a': [1,2,3,4,5], 'b': [None,None,None,None,None]})\n self.__test_equal(sf, df)\n\n sa = SArray.from_const(None, 100)\n self.assertEqual(list(sa), [None] * 100)\n self.assertEqual(sa.dtype, float)\n\n def test_apply_with_partial(self):\n sf = SFrame({'a': [1, 2, 3, 4, 5]})\n\n def concat_fn(character, row):\n return '%s%d' % (character, row['a'])\n\n my_partial_fn = functools.partial(concat_fn, 'x')\n sa = sf.apply(my_partial_fn)\n self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])\n\n def test_apply_with_functor(self):\n sf = SFrame({'a': [1, 2, 3, 4, 5]})\n\n class Concatenator(object):\n def __init__(self, character):\n self.character = character\n\n def __call__(self, row):\n return '%s%d' % (self.character, row['a'])\n\n concatenator = Concatenator('x')\n sa = sf.apply(concatenator)\n self.assertEqual(list(sa), ['x1', 'x2', 'x3', 'x4', 'x5'])\n\n def test_save_sframe(self):\n '''save lazily evaluated SFrame should not materialize to target folder\n '''\n data = SFrame()\n data['x'] = range(100)\n data['x'] = data['x'] > 50\n #lazy and good\n tmp_dir = tempfile.mkdtemp()\n data.save(tmp_dir)\n shutil.rmtree(tmp_dir)\n print(data)\n\n def test_empty_argmax_does_not_fail(self):\n # an empty argmax should not result in a crash\n sf = SFrame({'id': [0, 0, 0, 1, 1, 2, 2],\n 'value': [3.0, 2.0, 2.3, None, None, 4.3, 1.3],\n 'category': ['A', 'B', 'A', 'E', 'A', 'A', 'B']})\n sf.groupby('id', aggregate.ARGMAX('value', 'category'))\n\n def test_cache_invalidation(self):\n # Changes to the SFrame should invalidate the indexing cache.\n\n X = SFrame({'a' : range(4000),\n 'b' : range(4000)})\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : i, 'b' : i})\n\n X['a'] = range(1000, 5000)\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : 1000 + i, 'b' : i})\n\n del 
X['b']\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : 1000 + i})\n\n X['b'] = X['a']\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : 1000 + i, 'b' : 1000 + i})\n\n X.rename({'b' : 'c'}, inplace=True)\n\n for i in range(0, 4000, 20):\n self.assertEqual(X[i], {'a' : 1000 + i, 'c' : 1000 + i})\n\n def test_to_numpy(self):\n X = SFrame({'a' : range(100),\n 'b' : range(100)})\n import numpy as np\n import numpy.testing as nptest\n Y = np.transpose(np.array([range(100), range(100)]))\n nptest.assert_array_equal(X.to_numpy(), Y)\n\n X['b'] = X['b'].astype(str)\n s = [str(i) for i in range(100)]\n Y = np.transpose(np.array([s, s]))\n nptest.assert_array_equal(X.to_numpy(), Y)\n\n @mock.patch(__name__+'.sqlite3.Cursor', spec=True)\n @mock.patch(__name__+'.sqlite3.Connection', spec=True)\n def test_from_sql(self, mock_conn, mock_cursor):\n # Set up mock connection and cursor\n conn = mock_conn('example.db')\n curs = mock_cursor()\n conn.cursor.return_value = curs\n sf_type_codes = [44,44,41,22,114,199,43]\n\n sf_data = list(zip(*self.all_type_cols))\n sf_iter = sf_data.__iter__()\n\n def mock_fetchone():\n try:\n return next(sf_iter)\n except StopIteration:\n return None\n\n def mock_fetchmany(size=1):\n count = 0\n ret_list = []\n for i in sf_iter:\n if count == curs.arraysize:\n break\n ret_list.append(i)\n count += 1\n\n return ret_list\n\n curs.fetchone.side_effect = mock_fetchone\n curs.fetchmany.side_effect = mock_fetchmany\n\n curs.description = [['X'+str(i+1),sf_type_codes[i]]+[None for j in range(5)] for i in range(len(sf_data[0]))]\n\n # bigger than cache, no Nones\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=5, dbapi_module=dbapi2_mock())\n _assert_sframe_equal(sf, self.sf_all_types)\n\n # smaller than cache, no Nones\n sf_iter = sf_data.__iter__()\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=100, dbapi_module=dbapi2_mock())\n _assert_sframe_equal(sf, self.sf_all_types)\n\n none_col = [None for i in range(5)]\n nones_in_cache = list(zip(*[none_col for i in range(len(sf_data[0]))]))\n none_sf = SFrame({'X'+str(i):none_col for i in range(1,len(sf_data[0])+1)})\n test_data = (nones_in_cache+sf_data)\n sf_iter = test_data.__iter__()\n\n # more None rows than cache & types in description\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=5, dbapi_module=dbapi2_mock())\n sf_inferred_types = SFrame()\n expected_types = [float,float,str,str,str,str,dt.datetime]\n for i in zip(self.sf_all_types.column_names(),expected_types):\n new_col = SArray(none_col).astype(i[1])\n new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: i[1](x) if i[1] is not dt.datetime else x))\n sf_inferred_types.add_column(new_col, inplace=True)\n\n # Don't test the string representation of dict and list; there are\n # funky consistency issues with the string representations of these\n sf.remove_columns(['X5', 'X6'], inplace=True)\n sf_inferred_types.remove_columns(['X5', 'X6'], inplace=True)\n _assert_sframe_equal(sf, sf_inferred_types)\n\n # more None rows than cache & no type information\n for i in range(len(curs.description)):\n curs.description[i][1] = None\n sf_iter = test_data.__iter__()\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=5, dbapi_module=dbapi2_mock())\n\n sf_inferred_types = SFrame()\n expected_types = [str for i in range(len(sf_data[0]))]\n for i in zip(self.sf_all_types.column_names(),expected_types):\n new_col = 
SArray(none_col).astype(i[1])\n new_col = new_col.append(self.sf_all_types[i[0]].apply(lambda x: str(x)))\n sf_inferred_types.add_column(new_col, inplace=True)\n\n # Don't test the string representation of dict, could be out of order\n sf.remove_columns(['X5', 'X6'], inplace=True)\n sf_inferred_types.remove_columns(['X5', 'X6'], inplace=True)\n _assert_sframe_equal(sf, sf_inferred_types)\n\n ### column_type_hints tests\n sf_iter = test_data.__iter__()\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", type_inference_rows=5,\n dbapi_module=dbapi2_mock(), column_type_hints=str)\n sf.remove_columns(['X5', 'X6'], inplace=True)\n _assert_sframe_equal(sf, sf_inferred_types)\n\n # Provide unhintable types\n sf_iter = test_data.__iter__()\n expected_types = [int,float,str,array.array,list,dict,dt.datetime]\n with self.assertRaises(TypeError):\n sf = SFrame.from_sql(conn,\n \"SELECT * FROM test_table\", type_inference_rows=5,\n dbapi_module=dbapi2_mock(), column_type_hints=expected_types)\n\n sf_iter = test_data.__iter__()\n expected_types = {'X'+str(i+1):expected_types[i] for i in range(3)}\n sf = SFrame.from_sql(conn,\n \"SELECT * FROM test_table\", type_inference_rows=10,\n dbapi_module=dbapi2_mock(), column_type_hints=expected_types)\n _assert_sframe_equal(sf[5:],self.sf_all_types)\n\n # Test a float forced to a str\n sf_iter = test_data.__iter__()\n expected_types['X2'] = str\n self.sf_all_types['X2'] = self.sf_all_types['X2'].apply(lambda x: str(x))\n sf = SFrame.from_sql(conn,\n \"SELECT * FROM test_table\", type_inference_rows=10,\n dbapi_module=dbapi2_mock(), column_type_hints=expected_types)\n _assert_sframe_equal(sf[5:],self.sf_all_types)\n\n # Type unsupported by sframe\n curs.description = [['X1',44],['X2',44]]\n sf_iter = [[complex(4.5,3),1], [complex(3.4,5),2]].__iter__()\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\")\n expected_sf = SFrame({'X1':[\"(4.5+3j)\",\"(3.4+5j)\"],'X2':[1,2]})\n _assert_sframe_equal(sf, expected_sf)\n\n # bad DBAPI version!\n bad_version = dbapi2_mock()\n bad_version.apilevel = \"1.0 \"\n with self.assertRaises(NotImplementedError):\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", dbapi_module=bad_version)\n\n # Bad module\n with self.assertRaises(AttributeError):\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", dbapi_module=os)\n\n # Bad connection\n with self.assertRaises(AttributeError):\n sf = SFrame.from_sql(4, \"SELECT * FROM test_table\")\n\n # Empty query result\n curs.description = []\n sf = SFrame.from_sql(conn, \"SELECT * FROM test_table\", dbapi_module=dbapi2_mock())\n _assert_sframe_equal(sf, SFrame())\n\n @mock.patch(__name__+'.sqlite3.Cursor', spec=True)\n @mock.patch(__name__+'.sqlite3.Connection', spec=True)\n def test_to_sql(self, mock_conn, mock_cursor):\n conn = mock_conn('example.db')\n curs = mock_cursor()\n insert_stmt = \"INSERT INTO ins_test (X1,X2,X3,X4,X5,X6,X7) VALUES ({0},{1},{2},{3},{4},{5},{6})\"\n num_cols = len(self.sf_all_types.column_names())\n test_cases = [\n ('qmark',insert_stmt.format(*['?' 
for i in range(num_cols)])),\n ('numeric',insert_stmt.format(*[':'+str(i) for i in range(1,num_cols+1)])),\n ('named',insert_stmt.format(*[':X'+str(i) for i in range(1,num_cols+1)])),\n ('format',insert_stmt.format(*['%s' for i in range(num_cols)])),\n ('pyformat',insert_stmt.format(*['%(X'+str(i)+')s' for i in range(1,num_cols+1)])),\n ]\n for i in test_cases:\n conn.cursor.return_value = curs\n\n mock_mod = dbapi2_mock()\n mock_mod.paramstyle = i[0]\n self.sf_all_types.to_sql(conn, \"ins_test\", dbapi_module=mock_mod)\n conn.cursor.assert_called_once_with()\n calls = []\n col_names = self.sf_all_types.column_names()\n for j in self.sf_all_types:\n if i[0] == 'named' or i[0] == 'pyformat':\n calls.append(mock.call(i[1],j))\n else:\n calls.append(mock.call(i[1],[j[k] for k in col_names]))\n curs.execute.assert_has_calls(calls, any_order=False)\n self.assertEqual(curs.execute.call_count, len(self.sf_all_types))\n conn.commit.assert_called_once_with()\n curs.close.assert_called_once_with()\n\n conn.reset_mock()\n curs.reset_mock()\n\n # bad DBAPI version!\n bad_version = dbapi2_mock()\n bad_version.apilevel = \"1.0 \"\n with self.assertRaises(NotImplementedError):\n self.sf_all_types.to_sql(conn, \"ins_test\", dbapi_module=bad_version)\n\n # bad paramstyle\n bad_paramstyle = dbapi2_mock()\n bad_paramstyle.paramstyle = 'foo'\n with self.assertRaises(TypeError):\n self.sf_all_types.to_sql(conn, \"ins_test\", dbapi_module=bad_paramstyle)\n\n\n def test_materialize(self):\n sf = SFrame({'a':range(100)})\n sf = sf[sf['a'] > 10]\n self.assertFalse(sf.is_materialized())\n sf.materialize()\n self.assertTrue(sf.is_materialized())\n\n def test_materialization_slicing(self):\n # Has been known to fail.\n g=SFrame({'a':range(100)})[:10]\n g['b'] = g['a'] + 1\n g['b'].materialize()\n g.materialize()\n\n def test_copy(self):\n from copy import copy\n sf = generate_random_sframe(100, \"Cns\")\n sf_copy = copy(sf)\n\n assert sf is not sf_copy\n\n _assert_sframe_equal(sf, sf_copy)\n\n def test_deepcopy(self):\n from copy import deepcopy\n sf = generate_random_sframe(100, \"Cns\")\n sf_copy = deepcopy(sf)\n\n assert sf is not sf_copy\n\n _assert_sframe_equal(sf, sf_copy)\n\n def test_builtins(self):\n import builtins\n import six\n\n sf = SFrame({'dict': [builtins.dict({'foo': 'bar'})],\n 'float': [builtins.float(3.14)],\n 'int': [builtins.int(12)],\n 'bool': [builtins.bool(False)],\n 'list': [builtins.list([1,2,3])],\n 'str': [builtins.str('foo')],\n 'tuple': [builtins.tuple((1,2))],\n })\n sf2 = SFrame({'dict': [{'foo': 'bar'}],\n 'float': [3.14],\n 'int': [12],\n 'bool': [False],\n 'list': [[1,2,3]],\n 'str': ['foo'],\n 'tuple': [(1,2)],\n })\n\n if six.PY2:\n sf = sf.add_columns(SFrame(\n {'long': [builtins.long(12)], 'unicode': [builtins.unicode('foo')]}))\n sf2 = sf2.add_columns(SFrame(\n {'long': [12], 'unicode': [unicode('foo')]}))\n\n _assert_sframe_equal(sf, sf2)\n \n def test_add_column_nonSArray(self):\n sf = SFrame()\n sf = sf.add_column([1,2,3,4],'x')\n \n sf_test = SFrame()\n sf_test['x'] = SArray([1,2,3,4])\n \n _assert_sframe_equal(sf, sf_test)\n \n \n def test_add_column_noniterable1(self):\n sf = SFrame()\n sf = sf.add_column([1,2,3,4],'x')\n sf = sf.add_column(5,'y')\n \n sf_test = SFrame()\n sf_test['x'] = SArray([1,2,3,4])\n sf_test['y'] = 5\n \n _assert_sframe_equal(sf, sf_test)\n \n \n\n def test_add_column_noniterable2(self):\n # If SFrame is empty then the passed data should be treated as an SArray of size 1\n sf = SFrame()\n sf = sf.add_column(5,'y')\n\n sf_test = SFrame()\n 
sf_test['y'] = SArray([5])\n \n _assert_sframe_equal(sf, sf_test)\n\n\nif __name__ == \"__main__\":\n\n import sys\n\n # Check if we are supposed to connect to another server\n for i, v in enumerate(sys.argv):\n if v.startswith(\"ipc://\"):\n _launch(v)\n\n # The rest of the arguments need to get passed through to\n # the unittest module\n del sys.argv[i]\n break\n\n unittest.main()\n\n" ]
[ [ "numpy.sum", "numpy.var", "pandas.DataFrame", "numpy.array", "numpy.std", "pandas.util.testing.assert_frame_equal", "numpy.unique", "numpy.mean" ] ]
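An illustrative usage sketch (not part of the record above): it assumes the open-source turicreate package, which provides the SFrame/SArray API these unit tests exercise, and mirrors the dropna / dropna_split / add_row_number calls being tested.

import turicreate as tc  # assumption: turicreate supplies SFrame/SArray

# A small frame with missing values in both an int and a float column
sf = tc.SFrame({'ints': [None, None, 3, 4, None],
                'floats': [float('nan'), 2.0, 3.0, 4.0, float('nan')]})

clean = sf.dropna()                         # keep only rows with no missing values
kept, dropped = sf.dropna_split(how='all')  # split rather than discard
numbered = sf.add_row_number('id')          # prepend a 0-based row-id column
print(len(clean), len(kept), len(dropped), numbered.column_names())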
Mofokeng-C/rgz_rcnn_py3
[ "00bb9a9179b74db5e3fe469f4249dc00c5c0edfc" ]
[ "lib/fast_rcnn/test.py" ]
[ "# Modified by Chen Wu ([email protected])\n\nfrom fast_rcnn.config import cfg, get_output_dir\nimport argparse\nfrom utils.timer import Timer\nimport numpy as np\nimport cv2\nfrom utils.cython_nms import nms, nms_new\nfrom utils.boxes_grid import get_boxes_grid\nfrom utils.project_bbox import project_bbox_inv\nimport pickle\nimport heapq\nfrom utils.blob import im_list_to_blob\nimport os\nimport math\nfrom rpn_msr.generate import imdb_proposals_det\nimport tensorflow as tf\nfrom fast_rcnn.bbox_transform import clip_boxes, bbox_transform_inv, bbox_contains\ntry:\n import matplotlib.pyplot as plt\nexcept:\n print('Cannot run vis during test due to the unavailability of matplotlib')\nfrom tensorflow.python.client import timeline\nimport time\nfrom collections import defaultdict\n\ndef _get_image_blob(im):\n \"\"\"Converts an image into a network input.\n Arguments:\n im (ndarray): a color image in BGR order\n Returns:\n blob (ndarray): a data blob holding an image pyramid\n im_scale_factors (list): list of image scales (relative to im) used\n in the image pyramid\n \"\"\"\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n for target_size in cfg.TEST.SCALES:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale_factors.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n\n return blob, np.array(im_scale_factors)\n\ndef _get_rois_blob(im_rois, im_scale_factors):\n \"\"\"Converts RoIs into network inputs.\n Arguments:\n im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates\n im_scale_factors (list): scale factors as returned by _get_image_blob\n Returns:\n blob (ndarray): R x 5 matrix of RoIs in the image pyramid\n \"\"\"\n rois, levels = _project_im_rois(im_rois, im_scale_factors)\n rois_blob = np.hstack((levels, rois))\n return rois_blob.astype(np.float32, copy=False)\n\ndef _project_im_rois(im_rois, scales):\n \"\"\"Project image RoIs into the image pyramid built by _get_image_blob.\n Arguments:\n im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates\n scales (list): scale factors as returned by _get_image_blob\n Returns:\n rois (ndarray): R x 4 matrix of projected RoI coordinates\n levels (list): image pyramid levels used by each projected RoI\n \"\"\"\n im_rois = im_rois.astype(np.float, copy=False)\n scales = np.array(scales)\n\n if len(scales) > 1:\n widths = im_rois[:, 2] - im_rois[:, 0] + 1\n heights = im_rois[:, 3] - im_rois[:, 1] + 1\n\n areas = widths * heights\n scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)\n diff_areas = np.abs(scaled_areas - 224 * 224)\n levels = diff_areas.argmin(axis=1)[:, np.newaxis]\n else:\n levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)\n\n rois = im_rois * scales[levels]\n\n return rois, levels\n\ndef _get_blobs(im, rois):\n \"\"\"Convert an image and RoIs within that image into network inputs.\"\"\"\n if cfg.TEST.HAS_RPN:\n blobs = {'data' : None, 'rois' : None}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n else:\n blobs = {'data' : None, 'rois' : 
None}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n if cfg.IS_MULTISCALE:\n if cfg.IS_EXTRAPOLATING:\n blobs['rois'] = _get_rois_blob(rois, cfg.TEST.SCALES)\n else:\n blobs['rois'] = _get_rois_blob(rois, cfg.TEST.SCALES_BASE)\n else:\n blobs['rois'] = _get_rois_blob(rois, cfg.TEST.SCALES_BASE)\n\n return blobs, im_scale_factors\n\ndef _clip_boxes(boxes, im_shape):\n \"\"\"Clip boxes to image boundaries.\"\"\"\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)\n return boxes\n\n\ndef _rescale_boxes(boxes, inds, scales):\n \"\"\"Rescale boxes according to image rescaling.\"\"\"\n\n for i in range(boxes.shape[0]):\n boxes[i,:] = boxes[i,:] / scales[int(inds[i])]\n\n return boxes\n\n\ndef im_detect(sess, net, im, boxes=None, save_vis_dir=None,\n img_name='', include_rpn_score=False):\n \"\"\"Detect object classes in an image given object proposals.\n Arguments:\n net (caffe.Net): Fast R-CNN network to use\n im (ndarray): color image to test (in BGR order)\n boxes (ndarray): R x 4 array of object proposals\n Returns:\n scores (ndarray): R x K array of object class scores (K includes\n background as object category 0)\n boxes (ndarray): R x (4*K) array of predicted bounding boxes\n \"\"\"\n\n blobs, im_scales = _get_blobs(im, boxes)\n\n # When mapping from image ROIs to feature map ROIs, there's some aliasing\n # (some distinct image ROIs get mapped to the same feature ROI).\n # Here, we identify duplicate feature ROIs, so we only compute features\n # on the unique subset.\n if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:\n v = np.array([1, 1e3, 1e6, 1e9, 1e12])\n hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)\n _, index, inv_index = np.unique(hashes, return_index=True,\n return_inverse=True)\n blobs['rois'] = blobs['rois'][index, :]\n boxes = boxes[index, :]\n\n if cfg.TEST.HAS_RPN:\n im_blob = blobs['data']\n blobs['im_info'] = np.array(\n [[im_blob.shape[1], im_blob.shape[2], im_scales[0]]],\n dtype=np.float32)\n # forward pass\n if cfg.TEST.HAS_RPN:\n feed_dict={net.data: blobs['data'], net.im_info: blobs['im_info'], net.keep_prob: 1.0}\n else:\n feed_dict={net.data: blobs['data'], net.rois: blobs['rois'], net.keep_prob: 1.0}\n\n run_options = None\n run_metadata = None\n if cfg.TEST.DEBUG_TIMELINE:\n run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = tf.RunMetadata()\n\n #theta_tensor = tf.get_default_graph().get_tensor_by_name('spt_trans_theta')\n cls_score, cls_prob, bbox_pred, rois = sess.run([net.get_output('cls_score'),\n net.get_output('cls_prob'), net.get_output('bbox_pred'), net.get_output('rois')],\n feed_dict=feed_dict,\n options=run_options,\n run_metadata=run_metadata)\n\n if (save_vis_dir is not None and os.path.exists(save_vis_dir)):\n # first get the weights out\n with tf.variable_scope('conv5_3', reuse=True) as scope:\n conv5_3_weights = tf.get_variable(\"weights\")\n\n conv5_3_weights_np, conv5_3_features, st_pool_features =\\\n sess.run([conv5_3_weights, net.get_output('conv5_3'), net.get_output('pool_5')],\n feed_dict=feed_dict,\n options=run_options,\n run_metadata=run_metadata)\n np.save(os.path.join(save_vis_dir, '%s_conv5_3_w.npy' % img_name), conv5_3_weights_np)\n np.save(os.path.join(save_vis_dir, '%s_conv5_3_f.npy' % img_name), conv5_3_features)\n 
np.save(os.path.join(save_vis_dir, '%s_st_pool_f.npy' % img_name), st_pool_features)\n\n\n if cfg.TEST.HAS_RPN:\n assert len(im_scales) == 1, \"Only single-image batch implemented\"\n boxes = rois[:, 1:5] / im_scales[0]\n\n\n if cfg.TEST.SVM:\n # use the raw scores before softmax under the assumption they\n # were trained as linear SVMs\n scores = cls_score\n else:\n # use softmax estimated probabilities\n scores = cls_prob\n\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred\n pred_boxes = bbox_transform_inv(boxes, box_deltas)\n #project_bbox_inv(pred_boxes, theta) # project spatially transformed box back\n pred_boxes = _clip_boxes(pred_boxes, im.shape)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:\n # Map scores and predictions back to the original set of boxes\n scores = scores[inv_index, :]\n pred_boxes = pred_boxes[inv_index, :]\n\n if cfg.TEST.DEBUG_TIMELINE:\n trace = timeline.Timeline(step_stats=run_metadata.step_stats)\n trace_file = open(str(int(time.time() * 1000)) + '-test-timeline.ctf.json', 'w')\n trace_file.write(trace.generate_chrome_trace_format(show_memory=False))\n trace_file.close()\n\n if (include_rpn_score):\n # score is a joint prob instead of conditional prob\n scores *= np.reshape(rois[:, 0], [-1, 1])\n return scores, pred_boxes\n\n\ndef vis_detections(im, class_name, dets, thresh=0.8):\n \"\"\"Visual debugging of detections.\"\"\"\n import matplotlib.pyplot as plt\n #im = im[:, :, (2, 1, 0)]\n for i in range(np.minimum(10, dets.shape[0])):\n bbox = dets[i, :4]\n score = dets[i, -1]\n if score > thresh:\n #plt.cla()\n #plt.imshow(im)\n plt.gca().add_patch(\n plt.Rectangle((bbox[0], bbox[1]),\n bbox[2] - bbox[0],\n bbox[3] - bbox[1], fill=False,\n edgecolor='g', linewidth=3)\n )\n plt.gca().text(bbox[0], bbox[1] - 2,\n '{:s} {:.3f}'.format(class_name, score),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=14, color='white')\n\n plt.title('{} {:.3f}'.format(class_name, score))\n #plt.show()\n\ndef apply_nms(all_boxes, thresh):\n \"\"\"Apply non-maximum suppression to all predicted boxes output by the\n test_net method.\n \"\"\"\n num_classes = len(all_boxes)\n num_images = len(all_boxes[0])\n nms_boxes = [[[] for _ in range(num_images)]\n for _ in range(num_classes)]\n for cls_ind in range(num_classes):\n for im_ind in range(num_images):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n\n x1 = dets[:, 0]\n y1 = dets[:, 1]\n x2 = dets[:, 2]\n y2 = dets[:, 3]\n scores = dets[:, 4]\n inds = np.where((x2 > x1) & (y2 > y1) & (scores > cfg.TEST.DET_THRESHOLD))[0]\n dets = dets[inds,:]\n if dets == []:\n continue\n\n keep = nms(dets, thresh)\n if len(keep) == 0:\n continue\n nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()\n return nms_boxes\n\ndef remove_embedded(boxes, scores, remove_option=1):\n \"\"\"\n Return indices of those that should be KEPT\n \"\"\"\n removed_indices = set()\n num_props = boxes.shape[0]\n for i in range(num_props):\n if (i in removed_indices):\n continue\n bxA = boxes[i, :]\n for j in range(num_props):\n if ((j == i) or (j in removed_indices)):\n continue\n bxB = boxes[j, :]\n if (bbox_contains(bxA, bxB, delta=0)):\n if ((1 == remove_option) and (scores[i] != scores[j])):\n if (scores[i] > scores[j]):\n removed_indices.add(j)\n else:\n removed_indices.add(i)\n else: # remove_option == 2 or scores[i] == scores[j]\n removed_indices.add(j)\n return 
sorted(set(range(num_props)) - removed_indices)\n # nr = len(removed_indices)\n # if (nr > 0):\n # new_boxes = sorted(set(range(num_props)) - removed_indices)\n # boxes = boxes[new_boxes, :]\n # scores = scores[new_boxes]\n #\n # return boxes, scores\n\ndef test_net(sess, net, imdb, weights_filename , max_per_image=300,\n thresh=0.05, vis=False, force=False):\n \"\"\"Test a Fast R-CNN network on an image database.\"\"\"\n num_images = len(imdb.image_index)\n # all detections are collected into:\n # all_boxes[cls][image] = N x 5 array of detections in\n # (x1, y1, x2, y2, score)\n all_boxes = [[[] for _ in range(num_images)]\n for _ in range(imdb.num_classes)]\n\n output_dir = get_output_dir(imdb, weights_filename)\n det_file = os.path.join(output_dir, 'detections.pkl')\n if (force and os.path.exists(det_file)):\n os.remove(det_file)\n if (not os.path.exists(det_file)):\n # timers\n _t = {'im_detect' : Timer(), 'misc' : Timer()}\n\n if not cfg.TEST.HAS_RPN:\n roidb = imdb.roidb\n\n for i in range(num_images):\n # filter out any ground truth boxes\n if cfg.TEST.HAS_RPN:\n box_proposals = None\n else:\n # The roidb may contain ground-truth rois (for example, if the roidb\n # comes from the training or val split). We only want to evaluate\n # detection on the *non*-ground-truth rois. We select those the rois\n # that have the gt_classes field set to 0, which means there's no\n # ground truth.\n box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]\n\n im = cv2.imread(imdb.image_path_at(i))\n _t['im_detect'].tic()\n scores, boxes = im_detect(sess, net, im, box_proposals)\n _t['im_detect'].toc()\n\n _t['misc'].tic()\n if vis:\n image = im[:, :, (2, 1, 0)]\n plt.cla()\n plt.imshow(image)\n\n # skip j = 0, because it's the background class\n ttt = 0\n bbox_img = []\n bscore_img = []\n bbc = 0 #bbox count\n index_map = dict()\n for j in range(1, imdb.num_classes):\n inds = np.where(scores[:, j] > thresh)[0]\n ttt += len(inds)\n cls_scores = scores[inds, j]\n cls_boxes = boxes[inds, j*4:(j+1)*4]\n cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\n .astype(np.float32, copy=False)\n keep = nms(cls_dets, cfg.TEST.NMS)\n cls_dets = cls_dets[keep, :]\n if vis:\n vis_detections(image, imdb.classes[j], cls_dets)\n all_boxes[j][i] = cls_dets\n #cls_dets.shape == [nb_detections_for_cls_j, 5]\n # we need to get all bboxes in a image regardless of classes\n # if (cls_dets.shape[0] > 0):\n # bbox_img.append(cls_dets[:, 0:-1])\n # bscore_img.append(np.reshape(cls_dets[:, -1], [-1, 1]))\n # # remember the mapping\n # for bc in range(cls_dets.shape[0]):\n # index_map[bbc] = (j, bc)\n # bbc += 1\n removed = 0\n # if (len(bbox_img) > 0):\n # boxes = np.vstack(bbox_img)\n # scores = np.vstack(bscore_img)\n # keep_indices = remove_embedded(boxes, scores, remove_option=1)\n # removed = bbc - len(keep_indices)\n # # need to find out which j, and which k correspond to which index\n # cls_keep = defaultdict(list)\n # for ki in keep_indices:\n # j, bc = index_map[ki]\n # cls_keep[j].append(bc)\n #\n # for j in xrange(1, imdb.num_classes):\n # if (j in cls_keep):\n # all_boxes[j][i] = all_boxes[j][i][cls_keep[j], :]\n\n if vis:\n plt.show()\n # Limit to max_per_image detections *over all classes*\n if max_per_image > 0:\n image_scores = np.hstack([all_boxes[j][i][:, -1]\n for j in range(1, imdb.num_classes)])\n if len(image_scores) > max_per_image:\n image_thresh = np.sort(image_scores)[-max_per_image]\n for j in range(1, imdb.num_classes):\n keep = np.where(all_boxes[j][i][:, -1] >= 
image_thresh)[0]\n all_boxes[j][i] = all_boxes[j][i][keep, :]\n _t['misc'].toc()\n\n print('im_detect: {:d}/{:d} {:d} detection {:d} removed {:.3f}s' \\\n .format(i + 1, num_images, ttt, removed, _t['im_detect'].average_time))\n\n with open(det_file, 'wb') as f:\n pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n else:\n with open(det_file, 'r') as fin:\n all_boxes = pickle.load(fin)\n\n print('Evaluating detections')\n imdb.evaluate_detections(all_boxes, output_dir)\n" ]
[ [ "matplotlib.pyplot.cla", "tensorflow.python.client.timeline.Timeline", "tensorflow.variable_scope", "matplotlib.pyplot.imshow", "matplotlib.pyplot.gca", "numpy.reshape", "numpy.abs", "numpy.where", "matplotlib.pyplot.Rectangle", "numpy.round", "numpy.unique", "numpy.minimum", "numpy.tile", "numpy.zeros", "numpy.hstack", "numpy.max", "tensorflow.RunOptions", "numpy.min", "numpy.maximum", "numpy.sort", "tensorflow.RunMetadata", "matplotlib.pyplot.show", "numpy.array", "tensorflow.get_variable" ] ]
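A standalone, numpy-only restatement (illustrative, not imported from the repo above) of the box-clipping helper _clip_boxes defined in that file; boxes are rows of [x1, y1, x2, y2], possibly repeated once per class.

import numpy as np

def clip_boxes(boxes, im_shape):
    # Clip box coordinates to the image boundary; im_shape = (height, width)
    boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)                # x1 >= 0
    boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)                # y1 >= 0
    boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)  # x2 < width
    boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)  # y2 < height
    return boxes

print(clip_boxes(np.array([[-5.0, 10.0, 400.0, 600.0]]), im_shape=(480, 360)))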
AlexWang000/AlacGAN
[ "3b9df7c25c3e95b7727b00fa789cab0cf7d46266" ]
[ "train.py" ]
[ "import argparse\nimport os\nimport random\nimport yaml\nimport time\nimport logging\nimport pprint\n\nimport scipy.stats as stats\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torchvision.utils as vutils\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\nfrom torch.autograd import grad\nfrom easydict import EasyDict\n\nfrom data.train import CreateDataLoader as train_loader\nfrom data.eval import CreateDataLoader as val_loader\nfrom utils import create_logger, save_checkpoint, load_state, get_scheduler, AverageMeter, calculate_fid\nfrom models.standard import *\n\nparser = argparse.ArgumentParser(description='PyTorch Colorization Training')\n\nparser.add_argument('--config', default='experiments/origin/config.yaml')\nparser.add_argument('--resume', default='', type=str, help='path to checkpoint, should be experiments/origin/')\n\n\ndef calc_gradient_penalty(netD, real_data, fake_data, sketch_feat):\n alpha = torch.rand(config.batch_size, 1, 1, 1, device=config.device)\n\n interpolates = alpha * real_data + ((1 - alpha) * fake_data)\n\n interpolates.requires_grad = True\n\n disc_interpolates = netD(interpolates, sketch_feat)\n\n gradients = grad(outputs=disc_interpolates, inputs=interpolates,\n grad_outputs=torch.ones(disc_interpolates.size(), device=config.device), create_graph=True,\n retain_graph=True, only_inputs=True)[0]\n\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * config.gpW\n return gradient_penalty\n\n\ndef mask_gen():\n maskS = config.image_size // 4\n\n mask1 = torch.cat(\n [torch.rand(1, 1, maskS, maskS).ge(X.rvs(1)[0]).float() for _ in range(config.batch_size // 2)], 0)\n mask2 = torch.cat([torch.zeros(1, 1, maskS, maskS).float() for _ in range(config.batch_size // 2)], 0)\n mask = torch.cat([mask1, mask2], 0)\n\n return mask.to(config.device)\n\n\ndef main():\n global args, config, X\n\n args = parser.parse_args()\n print(args)\n\n with open(args.config) as f:\n config = EasyDict(yaml.load(f))\n\n config.save_path = os.path.dirname(args.config)\n\n ####### regular set up\n assert torch.cuda.is_available()\n device = torch.device(\"cuda\")\n config.device = device\n\n # random seed setup\n print(\"Random Seed: \", config.seed)\n random.seed(config.seed)\n torch.manual_seed(config.seed)\n torch.cuda.manual_seed(config.seed)\n cudnn.benchmark = True\n\n ####### regular set up end\n\n\n netG = torch.nn.DataParallel(NetG(ngf=config.ngf))\n netD = torch.nn.DataParallel(NetD(ndf=config.ndf))\n\n netF = torch.nn.DataParallel(NetF())\n netI = torch.nn.DataParallel(NetI()).eval()\n for param in netF.parameters():\n param.requires_grad = False\n\n criterion_MSE = nn.MSELoss()\n\n fixed_sketch = torch.tensor(0, device=device).float()\n fixed_hint = torch.tensor(0, device=device).float()\n fixed_sketch_feat = torch.tensor(0, device=device).float()\n\n ####################\n netD = netD.to(device)\n netG = netG.to(device)\n netF = netF.to(device)\n netI = netI.to(device)\n criterion_MSE = criterion_MSE.to(device)\n\n # setup optimizer\n\n optimizerG = optim.Adam(netG.parameters(), lr=config.lr_scheduler.base_lr, betas=(0.5, 0.9))\n optimizerD = optim.Adam(netD.parameters(), lr=config.lr_scheduler.base_lr, betas=(0.5, 0.9))\n\n last_iter = -1\n best_fid = 1e6\n\n if args.resume:\n best_fid, last_iter = load_state(args.resume, netG, netD, optimizerG, optimizerD)\n\n config.lr_scheduler['last_iter'] = last_iter\n\n config.lr_scheduler['optimizer'] = optimizerG\n lr_schedulerG = get_scheduler(config.lr_scheduler)\n 
config.lr_scheduler['optimizer'] = optimizerD\n lr_schedulerD = get_scheduler(config.lr_scheduler)\n\n tb_logger = SummaryWriter(config.save_path + '/events')\n logger = create_logger('global_logger', config.save_path + '/log.txt')\n logger.info(f'args: {pprint.pformat(args)}')\n logger.info(f'config: {pprint.pformat(config)}')\n\n batch_time = AverageMeter(config.print_freq)\n data_time = AverageMeter(config.print_freq)\n flag = 1\n mu, sigma = 1, 0.005\n X = stats.truncnorm((0 - mu) / sigma, (1 - mu) / sigma, loc=mu, scale=sigma)\n i = 0\n curr_iter = last_iter + 1\n\n dataloader = train_loader(config)\n data_iter = iter(dataloader)\n\n end = time.time()\n while i < len(dataloader):\n lr_schedulerG.step(curr_iter)\n lr_schedulerD.step(curr_iter)\n current_lr = lr_schedulerG.get_lr()[0]\n ############################\n # (1) Update D network\n ###########################\n for p in netD.parameters(): # reset requires_grad\n p.requires_grad = True # they are set to False below in netG update\n for p in netG.parameters():\n p.requires_grad = False # to avoid computation ft_params\n\n # train the discriminator Diters times\n j = 0\n while j < config.diters:\n netD.zero_grad()\n\n i += 1\n j += 1\n\n data_end = time.time()\n real_cim, real_vim, real_sim = data_iter.next()\n data_time.update(time.time() - data_end)\n\n real_cim, real_vim, real_sim = real_cim.to(device), real_vim.to(device), real_sim.to(device)\n mask = mask_gen()\n hint = torch.cat((real_vim * mask, mask), 1)\n\n # train with fake\n with torch.no_grad():\n feat_sim = netI(real_sim).detach()\n fake_cim = netG(real_sim, hint, feat_sim).detach()\n\n errD_fake = netD(fake_cim, feat_sim)\n errD_fake = errD_fake.mean(0).view(1)\n\n errD_fake.backward(retain_graph=True) # backward on score on real\n\n errD_real = netD(real_cim, feat_sim)\n errD_real = errD_real.mean(0).view(1)\n errD = errD_real - errD_fake\n\n errD_realer = -1 * errD_real + errD_real.pow(2) * config.drift\n\n errD_realer.backward(retain_graph=True) # backward on score on real\n\n gradient_penalty = calc_gradient_penalty(netD, real_cim, fake_cim, feat_sim)\n gradient_penalty.backward()\n\n optimizerD.step()\n\n ############################\n # (2) Update G network\n ############################\n\n for p in netD.parameters():\n p.requires_grad = False # to avoid computation\n for p in netG.parameters():\n p.requires_grad = True\n netG.zero_grad()\n\n data = data_iter.next()\n real_cim, real_vim, real_sim = data\n i += 1\n\n real_cim, real_vim, real_sim = real_cim.to(device), real_vim.to(device), real_sim.to(device)\n\n if flag: # fix samples\n mask = mask_gen()\n hint = torch.cat((real_vim * mask, mask), 1)\n with torch.no_grad():\n feat_sim = netI(real_sim).detach()\n\n tb_logger.add_image('target imgs', vutils.make_grid(real_cim.mul(0.5).add(0.5), nrow=4))\n tb_logger.add_image('sketch imgs', vutils.make_grid(real_sim.mul(0.5).add(0.5), nrow=4))\n tb_logger.add_image('hint', vutils.make_grid((real_vim * mask).mul(0.5).add(0.5), nrow=4))\n\n fixed_sketch.resize_as_(real_sim).copy_(real_sim)\n fixed_hint.resize_as_(hint).copy_(hint)\n fixed_sketch_feat.resize_as_(feat_sim).copy_(feat_sim)\n\n flag -= 1\n\n mask = mask_gen()\n hint = torch.cat((real_vim * mask, mask), 1)\n\n with torch.no_grad():\n feat_sim = netI(real_sim).detach()\n\n fake = netG(real_sim, hint, feat_sim)\n\n errd = netD(fake, feat_sim)\n errG = errd.mean() * config.advW * -1\n errG.backward(retain_graph=True)\n feat1 = netF(fake)\n with torch.no_grad():\n feat2 = netF(real_cim)\n\n contentLoss = 
criterion_MSE(feat1, feat2)\n contentLoss.backward()\n\n optimizerG.step()\n batch_time.update(time.time() - end)\n\n ############################\n # (3) Report & 100 Batch checkpoint\n ############################\n curr_iter += 1\n\n if curr_iter % config.print_freq == 0:\n tb_logger.add_scalar('VGG MSE Loss', contentLoss.item(), curr_iter)\n tb_logger.add_scalar('wasserstein distance', errD.item(), curr_iter)\n tb_logger.add_scalar('errD_real', errD_real.item(), curr_iter)\n tb_logger.add_scalar('errD_fake', errD_fake.item(), curr_iter)\n tb_logger.add_scalar('Gnet loss toward real', errG.item(), curr_iter)\n tb_logger.add_scalar('gradient_penalty', gradient_penalty.item(), curr_iter)\n tb_logger.add_scalar('lr', current_lr, curr_iter)\n logger.info(f'Iter: [{curr_iter}/{len(dataloader)//(config.diters+1)}]\\t'\n f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n f'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n f'errG {errG.item():.4f}\\t'\n f'errD {errD.item():.4f}\\t'\n f'err_D_real {errD_real.item():.4f}\\t'\n f'err_D_fake {errD_fake.item():.4f}\\t'\n f'content loss {contentLoss.item():.4f}\\t'\n f'LR {current_lr:.4f}'\n )\n\n if curr_iter % config.print_img_freq == 0:\n with torch.no_grad():\n fake = netG(fixed_sketch, fixed_hint, fixed_sketch_feat)\n tb_logger.add_image('colored imgs',\n vutils.make_grid(fake.detach().mul(0.5).add(0.5), nrow=4),\n curr_iter)\n\n if curr_iter % config.val_freq == 0:\n fid, var = validate(netG, netI)\n tb_logger.add_scalar('fid_val', fid, curr_iter)\n tb_logger.add_scalar('fid_variance', var, curr_iter)\n logger.info(f'fid: {fid:.3f} ({var})\\t')\n\n # remember best fid and save checkpoint\n is_best = fid < best_fid\n best_fid = min(fid, best_fid)\n save_checkpoint({\n 'step': curr_iter - 1,\n 'state_dictG': netG.state_dict(),\n 'state_dictD': netD.state_dict(),\n 'best_fid': best_fid,\n 'optimizerG': optimizerG.state_dict(),\n 'optimizerD': optimizerD.state_dict(),\n }, is_best, config.save_path + '/ckpt')\n\n end = time.time()\n\n\ndef validate(netG, netI):\n fids = []\n fid_value = 0\n for _ in range(1):\n fid = calculate_fid(netG, netI, val_loader(config), config, 2048)\n print('FID: ', fid)\n fid_value += fid\n fids.append(fid)\n return fid_value, np.var(fids)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.utils.tensorboard.SummaryWriter", "numpy.var", "scipy.stats.truncnorm" ] ]
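An illustrative, self-contained sketch of the hint-mask trick used by mask_gen() in the record above: a keep threshold drawn from a truncated normal close to 1, with the second half of the batch given empty hints. The batch size and mask size here are small stand-ins, not the training configuration.

import torch
import scipy.stats as stats

mu, sigma, batch, maskS = 1.0, 0.005, 4, 16
X = stats.truncnorm((0 - mu) / sigma, (1 - mu) / sigma, loc=mu, scale=sigma)

threshold = float(X.rvs(1)[0])              # a value just below 1
half = [torch.rand(1, 1, maskS, maskS).ge(threshold).float()
        for _ in range(batch // 2)]         # sparse random hint masks
empty = [torch.zeros(1, 1, maskS, maskS) for _ in range(batch // 2)]
mask = torch.cat(half + empty, 0)           # shape: (batch, 1, maskS, maskS)
print(mask.shape, mask.mean().item())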
IanHawke/toy-evolve
[ "a1490327dd19492e2c0bb0d9c6909abe8b167135" ]
[ "toy-evolve/burgers_stiff_source_imex222.py" ]
[ "# Burgers test evolution: just one\n\nimport numpy\nfrom models import burgers\nfrom bcs import outflow\nfrom simulation import simulation\nfrom methods import weno3_upwind\nfrom rk import rk3, imex222\nfrom grid import grid\nfrom matplotlib import pyplot\n\nNgz = 3\nNpoints = 200\ntau = 0.05\nbeta = 0.8\nL = 1\ninterval = grid([-L, L], Npoints, Ngz)\nmodel = burgers.burgers(initial_data = burgers.initial_riemann(0, 1))\nmodel = burgers.burgers(initial_data = burgers.initial_travelling_wave(tau))\nsource = burgers.stiff_source(tau, beta)\nsim = simulation(model, interval, weno3_upwind, imex222(source), outflow)\nsim.evolve(0.56*L)\n\nsim.plot_scalar_vs_initial()\npyplot.show()\n\nq_exact = lambda x, t : 1/(1+numpy.exp(-(x-beta*t)/tau))\nx_exact = numpy.linspace(-L,L,1000)\npyplot.figure()\npyplot.plot(x_exact, q_exact(x_exact, sim.t))\npyplot.plot(sim.coordinates, sim.q[0,:], 'kx')\npyplot.xlim(-L, L)\npyplot.ylim(-0.1, 1.1)\npyplot.xlabel(r\"$x$\")\npyplot.ylabel(r\"$q$\")\npyplot.title(r\"Travelling wave, $\\tau={}$\".format(tau))\npyplot.show()\n\n\ntau2 = 0.01\nmodel2 = burgers.burgers(initial_data = burgers.initial_travelling_wave(tau2))\nsource2 = burgers.stiff_source(tau2, beta)\nsim2 = simulation(model2, interval, weno3_upwind, imex222(source2), outflow)\nsim2.evolve(0.56*L)\nq_exact2 = lambda x, t : 1/(1+numpy.exp(-(x-beta*t)/tau2))\npyplot.figure()\npyplot.plot(x_exact, q_exact2(x_exact, sim2.t))\npyplot.plot(sim2.coordinates, sim2.q[0,:], 'kx')\npyplot.xlim(-L, L)\npyplot.ylim(-0.1, 1.1)\npyplot.xlabel(r\"$x$\")\npyplot.ylabel(r\"$q$\")\npyplot.title(r\"Travelling wave, $\\tau={}$\".format(tau2))\npyplot.show()\n\n\nNpoints_all = [50, 100, 200]\ntau3 = 0.01\nmodel3 = burgers.burgers(initial_data = burgers.initial_travelling_wave(tau3))\nsource3 = burgers.stiff_source(tau3, beta)\nq_exact3 = lambda x, t : 1/(1+numpy.exp(-(x-beta*t)/tau3))\npyplot.figure()\nt_end = 0.56*L\npyplot.plot(x_exact, q_exact3(x_exact, t_end))\nfor Npoints in Npoints_all:\n interval3 = grid([-L, L], Npoints, Ngz)\n sim3 = simulation(model3, interval, weno3_upwind, imex222(source3), outflow)\n sim3.evolve(t_end)\n pyplot.plot(sim3.coordinates, sim3.q[0,:], 'x--', mew=2, lw=2, label=\"{} points\".format(Npoints))\npyplot.xlim(-L, L)\npyplot.ylim(-0.1, 1.1)\npyplot.xlabel(r\"$x$\")\npyplot.ylabel(r\"$q$\")\npyplot.legend(loc=\"upper left\")\npyplot.title(r\"Travelling wave, $\\tau={}$\".format(tau3))\npyplot.show()\n" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "numpy.exp", "matplotlib.pyplot.xlim", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.xlabel" ] ]
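For reference, an illustrative evaluation of the exact travelling-wave profile the script above compares against, q(x, t) = 1 / (1 + exp(-(x - beta*t)/tau)):

import numpy as np

def q_exact(x, t, beta=0.8, tau=0.05):
    # Exact smoothed-front solution used for the comparison plots
    return 1.0 / (1.0 + np.exp(-(x - beta * t) / tau))

x = np.linspace(-1.0, 1.0, 5)
print(q_exact(x, t=0.56))  # the front sits near x = beta * t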
Dongpaca/test
[ "b965a1e752436b250b2108d5df4651516b15698a" ]
[ "enhancer.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data as data\nimport torchvision\nimport numpy as np\nimport cv2\nimport random\nimport net\nimport numpy\nfrom torchvision import transforms\nfrom utils import *\nimport matplotlib.image as img\n\n\n\ndef init_weights(m):\n \n if type(m) == nn.modules.conv.Conv2d:\n print(\"Weights initialized for:\", m)\n torch.nn.init.xavier_uniform(m.weight)\n m.bias.data.fill_(0.01)\n\n\ndef enhance(img_path, scale):\n\n SRNet = net.SRNet().cuda()\n SRNet.apply(init_weights)\n\n criterion = nn.L1Loss().cuda()\n\n optimizer = torch.optim.Adam(SRNet.parameters(), lr=0.001)\n\n SRNet.train()\n\n image = img.imread(img_path)\n hr_fathers_sources = [image]\n\n scale_factors = np.array([[1.0, 1.5], [1.5, 1.0], [1.5, 1.5], [1.5, 2.0], [2.0, 1.5], [2.0, 2.0]])\n back_projection_iters = np.array([6, 6, 8, 10, 10, 12])\n learning_rate_change_iter_nums = [0]\n\n rec_mse = []\n steps_mse = []\n\n \n for sf_ind, scale in enumerate(scale_factors):\n\n for i in range(10000):\n\n hr_father = random_augment(ims=hr_fathers_sources,\n base_scales = [1.0] + list(scale_factors),\n leave_as_is_probability=0.05,\n no_interpolate_probability=0.45,\n min_scale=0.5,\n max_scale=([1.0]+list(scale_factors))[len(hr_fathers_sources)-1],\n allow_rotation=True,\n scale_diff_sigma=0.25,\n shear_sigma=0.1,\n crop_size=128\n )\n\n lr_son = father_to_son(hr_father, scale)\n lr_son_interpolated = imresize(lr_son, scale, hr_father.shape, \"cubic\")\n\n hr_father = torch.from_numpy(hr_father).unsqueeze(0).cuda().permute(0,3,1,2).float()\n lr_son_interpolated = torch.from_numpy(lr_son_interpolated).unsqueeze(0).cuda().permute(0,3,1,2).float()\n\n sr_son = SRNet(lr_son_interpolated)\n\n loss = criterion(sr_son, hr_father)\n\n if(not i % 50):\n son_out = father_to_son(image, scale)\n son_out_inter = imresize(son_out, scale, image.shape, \"cubic\")\n son_out_inter = torch.from_numpy(son_out_inter).unsqueeze(0).cuda().permute(0,3,1,2).float() \n sr_son_out = SRNet(son_out_inter).permute(0,2,3,1).squeeze().data.cpu().numpy()\n sr_son_out = np.clip(np.squeeze(sr_son_out), 0, 1)\n rec_mse.append(np.mean(np.ndarray.flatten(np.square(image - sr_son_out))))\n steps_mse.append(i)\n\n lr_policy(i, optimizer, learning_rate_change_iter_nums, steps_mse, rec_mse)\n\n #curr_lr = 100\n for param_group in optimizer.param_groups:\n #if param_group['lr'] < 9e-6:\n curr_lr = param_group['lr']\n break\n\n\n\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step() \n\n if i%10 == 0:\n print(\"Iteration:\", i, \"Loss:\",loss.item())\n\n if curr_lr < 9e-6:\n break\n \n\n ### Evaluation the result\n\n lr_img = img.imread(img_path)\n \n interpolated_lr_img = imresize(lr_img, scale, None, \"cubic\")\n interpolated_lr_img = torch.from_numpy(interpolated_lr_img).unsqueeze(0).cuda().permute(0,3,1,2).float()\n \n sr_img = infer(lr_img, scale, sf_ind, SRNet, back_projection_iters) #SRNet(interpolated_lr_img)\n\n save_img = torch.from_numpy(sr_img).unsqueeze(0).permute(0,3,1,2)\n torchvision.utils.save_image((save_img),img_path.split(\".\")[0]+'SR.'+ img_path.split(\".\")[1], normalize=False)\n torchvision.utils.save_image((interpolated_lr_img),img_path.split(\".\")[0]+'LR.'+img_path.split(\".\")[1] , normalize=False)\n\n hr_fathers_sources.append(sr_img)\n print(\"Optimization done for scale\", scale)\n\n\n\ndef infer(input_img, scale, sf_ind, SRNet, back_projection_iters):\n \n outputs = []\n\n for k in range(0, 1+7, 1+int(scale[0] != scale[1])):\n 
test_img = np.rot90(input_img, k) if k < 4 else np.fliplr(np.rot90(input_img,k))\n interpolated_test_img = imresize(test_img, scale, None, \"cubic\")\n interpolated_test_img = torch.from_numpy(interpolated_test_img).unsqueeze(0).cuda().permute(0,3,1,2).float()\n tmp_output = SRNet(interpolated_test_img)\n tmp_output = tmp_output.permute(0,2,3,1).squeeze().data.cpu().numpy()\n tmp_output = np.clip(np.squeeze(tmp_output), 0, 1)\n\n tmp_output = np.rot90(tmp_output, -k) if k < 4 else np.rot90(np.fliplr(tmp_output), k)\n\n for bp_iter in range(back_projection_iters[sf_ind]):\n tmp_output = back_projection(tmp_output, input_img, \"cubic\", \"cubic\", scale)\n\n outputs.append(tmp_output)\n\n\n outputs_pre = np.median(outputs, 0)\n\n for bp_iter in range(back_projection_iters[sf_ind]):\n outputs_pre = back_projection(outputs_pre, input_img, \"cubic\", \"cubic\", scale)\n\n return outputs_pre\n\n\ndef lr_policy(iters, optimizer, learning_rate_change_iter_nums, mse_steps, mse_rec):\n\n if ((not (1 + iters) % 60) and (iters - learning_rate_change_iter_nums[-1] > 256)):\n [slope, _], [[var,_],_] = np.polyfit(mse_steps[(-256//50):], mse_rec[(-256//50):], 1, cov=True)\n\n std = np.sqrt(var)\n\n print('Slope:', slope, \"STD:\", std)\n\n if -1.5*slope < std:\n for param_group in optimizer.param_groups:\n param_group['lr'] = param_group['lr'] * 0.8\n print(\"Learning Rate Updated:\", param_group['lr'])\n learning_rate_change_iter_nums.append(iters)\n \n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n ## First argument is the image that you want to upsample with ZSSR. \n ## Second argument is the scale with which you want to resize. Currently only scale = 2 supported. For other scales, change the variable 'scale_factors' accordingly.\n\n enhance('images/%d.png' %i , 2)\n" ]
[ [ "numpy.sqrt", "torch.nn.init.xavier_uniform", "numpy.squeeze", "torch.nn.L1Loss", "numpy.fliplr", "numpy.median", "numpy.rot90", "torch.from_numpy", "numpy.array", "numpy.polyfit", "numpy.square", "matplotlib.image.imread" ] ]
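An illustrative, self-contained version of the plateau test inside lr_policy() above: fit a line to recent reconstruction-MSE samples with np.polyfit and compare the slope against its standard error. The loss values below are synthetic stand-ins.

import numpy as np

steps = np.arange(0, 300, 50)
mse = 0.05 * np.exp(-steps / 400.0)       # synthetic, slowly decreasing losses

[slope, _], [[var, _], _] = np.polyfit(steps, mse, 1, cov=True)
std = np.sqrt(var)
# When -1.5 * slope < std the loss has effectively plateaued,
# and lr_policy() reduces the learning rate by a factor of 0.8.
print(slope, std, -1.5 * slope < std)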
Jakoviz/Infected-sweetpotato-classification
[ "003befcc5c430f41f8426d9ac94894e20fdfc247" ]
[ "models/F1_score.py" ]
[ "\"\"\"\nFrom https://stackoverflow.com/questions/62265351/measuring-f1-score-for-multiclass-classification-natively-in-pytorch\nwith this modification https://stackoverflow.com/questions/62265351/measuring-f1-score-for-multiclass-classification-natively-in-pytorch#comment122867942_63358412\n\"\"\"\n\nfrom typing import Tuple\nimport torch\n\nclass F1Score:\n \"\"\"\n Class for f1 calculation in Pytorch.\n \"\"\"\n\n def __init__(self, average: str = 'weighted'):\n \"\"\"\n Init.\n\n Args:\n average: averaging method\n \"\"\"\n self.average = average\n if average not in [None, 'micro', 'macro', 'weighted']:\n raise ValueError('Wrong value of average parameter')\n\n @staticmethod\n def calc_f1_micro(predictions: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculate f1 micro.\n\n Args:\n predictions: tensor with predictions\n labels: tensor with original labels\n\n Returns:\n f1 score\n \"\"\"\n true_positive = torch.eq(labels, predictions).sum().float()\n f1_score = torch.div(true_positive, len(labels))\n return f1_score\n\n @staticmethod\n def calc_f1_count_for_label(predictions: torch.Tensor,\n labels: torch.Tensor, label_id: int) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Calculate f1 and true count for the label\n\n Args:\n predictions: tensor with predictions\n labels: tensor with original labels\n label_id: id of current label\n\n Returns:\n f1 score and true count for label\n \"\"\"\n # label count\n true_count = torch.eq(labels, label_id).sum()\n\n # true positives: labels equal to prediction and to label_id\n true_positive = torch.logical_and(torch.eq(labels, predictions),\n torch.eq(labels, label_id)).sum().float()\n # precision for label\n precision = torch.div(true_positive, torch.eq(predictions, label_id).sum().float())\n # replace nan values with 0\n precision = torch.where(torch.isnan(precision),\n torch.zeros_like(precision).type_as(true_positive),\n precision)\n\n # recall for label\n recall = torch.div(true_positive, true_count)\n # f1\n f1 = 2 * precision * recall / (precision + recall)\n # replace nan values with 0\n f1 = torch.where(torch.isnan(f1), torch.zeros_like(f1).type_as(true_positive), f1)\n return f1, true_count\n\n def __call__(self, predictions: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculate f1 score based on averaging method defined in init.\n\n Args:\n predictions: tensor with predictions\n labels: tensor with original labels\n\n Returns:\n f1 score\n \"\"\"\n\n # simpler calculation for micro\n if self.average == 'micro':\n return self.calc_f1_micro(predictions, labels)\n\n f1_score = 0\n for label_id in range(0, len(labels.unique())):\n f1, true_count = self.calc_f1_count_for_label(predictions, labels, label_id)\n\n if self.average == 'weighted':\n f1_score += f1 * true_count\n elif self.average == 'macro':\n f1_score += f1\n\n if self.average == 'weighted':\n f1_score = torch.div(f1_score, len(labels))\n elif self.average == 'macro':\n f1_score = torch.div(f1_score, len(labels.unique()))\n\n return f1_score\n" ]
[ [ "torch.isnan", "torch.zeros_like", "torch.eq", "torch.div" ] ]
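A minimal usage sketch for the metric above (illustrative; it assumes the F1Score class from the record is defined in the current module):

import torch

f1_macro = F1Score(average='macro')           # class defined in the record above
preds = torch.tensor([0, 1, 1, 2, 2, 2])
labels = torch.tensor([0, 1, 2, 2, 2, 1])
print(f1_macro(preds, labels))                # scalar tensor, macro-averaged F1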
check-spelling/poppy
[ "2640d89d3a326fc6ecf03dcd24c878279b7807e5" ]
[ "poppy/instrument.py" ]
[ "import getpass\nimport os\nimport platform\nimport re\nimport time\nimport astropy.io.fits as fits\nimport astropy.units as units\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport scipy.interpolate\nimport scipy.ndimage\n\ntry:\n import synphot\n _HAS_SYNPHOT = True\nexcept ImportError:\n synphot = None\n _HAS_SYNPHOT = False\n\nfrom . import poppy_core\nfrom . import optics\nfrom . import utils\nfrom . import conf\n\nimport logging\n\n_log = logging.getLogger('poppy')\n\n__all__ = ['Instrument']\n\n\nclass Instrument(object):\n \"\"\" A generic astronomical instrument, composed of\n (1) an optical system implemented using POPPY, optionally with several configurations such as\n selectable image plane or pupil plane stops, and\n (2) some defined spectral bandpass(es) such as selectable filters, implemented using synphot.\n\n This provides the capability to model both the optical and spectral responses of a given system.\n PSFs may be calculated for given source\n spectral energy distributions and output as FITS files, with substantial flexibility.\n\n It also provides capabilities for modeling some PSF effects not due to wavefront aberrations, for instance\n blurring caused by pointing jitter.\n\n\n This is a base class for Instrument functionality - you cannot easily use this directly, but\n rather should subclass it for your particular instrument of interest. Some of the complexity of this class\n is due to splitting up functionality into many separate routines to allow users to subclass just the relevant\n portions for a given task. There's a fair amount of functionality here but the learning curve is steeper than\n elsewhere in POPPY.\n\n You will at a minimum want to override the following class methods:\n\n * get_optical_system\n * _get_filter_list\n * _get_default_nlambda\n * _get_default_fov\n * _get_fits_header\n\n For more complicated systems you may also want to override:\n\n * _validate_config\n * _get_synphot_bandpass\n * _apply_jitter\n \"\"\"\n\n name = \"Instrument\"\n pupil = None\n \"Aperture for this optical system. May be a FITS filename, FITS HDUList object, or poppy.OpticalElement\"\n pupilopd = None\n \"\"\"Pupil OPD for this optical system. May be a FITS filename, or FITS HDUList.\n If the file contains a datacube, you may set this to a tuple (filename, slice) to select a given slice, or else\n the first slice will be used.\"\"\"\n options = {}\n \"\"\"\n A dictionary capable of storing other arbitrary options, for extensibility. The following are all optional, and\n may or may not be meaningful depending on which instrument is selected.\n\n Parameters\n ----------\n source_offset_r : float\n Radial offset of the target from the center, in arcseconds\n source_offset_theta : float\n Position angle for that offset\n pupil_shift_x, pupil_shift_y : float\n Relative shift of a coronagraphic pupil in X and Y, expressed as a decimal between 0.0-1.0\n Note that shifting an array too much will wrap around to the other side unphysically, but\n for reasonable values of shift this is a non-issue.\n jitter : string \"gaussian\" or None\n Type of jitter model to apply. Currently only convolution with a Gaussian kernel of specified\n width `jitter_sigma` is implemented. 
(default: None)\n jitter_sigma : float\n Width of the jitter kernel in arcseconds per axis (default: 0.007 arcsec)\n parity : string \"even\" or \"odd\"\n You may wish to ensure that the output PSF grid has either an odd or even number of pixels.\n Setting this option will force that to be the case by increasing npix by one if necessary.\n\n \"\"\"\n filter_list = None\n \"\"\"List of available filter names for this instrument\"\"\"\n pixelscale = 0.025\n \"\"\"Detector pixel scale, in arcseconds/pixel (default: 0.025)\"\"\"\n\n def __init__(self, name=\"\", *args, **kwargs):\n self.name = name\n self.pupil = optics.CircularAperture(*args, **kwargs)\n self.pupilopd = None\n self.options = {}\n self.filter_list, self._synphot_bandpasses = self._get_filter_list() # List of available filter names\n\n # create private instance variables. These will be\n # wrapped just below to create properties with validation.\n self._filter = None\n self._rotation = None\n # for caching synphot results.\n self._spectra_cache = {}\n self.filter = self.filter_list[0]\n\n self.optsys = None # instance attribute for Optical System\n\n def __str__(self):\n return \"Instrument name=\" + self.name\n\n # create properties with error checking\n @property\n def filter(self):\n \"\"\"Currently selected filter name (e.g. F200W)\"\"\"\n return self._filter\n\n @filter.setter\n def filter(self, value):\n value = value.upper() # force to uppercase\n if value not in self.filter_list:\n raise ValueError(\"Instrument %s doesn't have a filter called %s.\" % (self.name, value))\n self._filter = value\n\n # ----- actual optical calculations follow here -----\n def calc_psf(self, outfile=None, source=None, nlambda=None, monochromatic=None,\n fov_arcsec=None, fov_pixels=None, oversample=None, detector_oversample=None, fft_oversample=None,\n overwrite=True, display=False, save_intermediates=False, return_intermediates=False,\n normalize='first'):\n \"\"\" Compute a PSF.\n The result can either be written to disk (set outfile=\"filename\") or else will be returned as\n a FITS HDUlist object.\n\n\n Output sampling may be specified in one of two ways:\n\n 1) Set `oversample=<number>`. This will use that oversampling factor beyond detector pixels\n for output images, and beyond Nyquist sampling for any FFTs to prior optical planes.\n 2) set `detector_oversample=<number>` and `fft_oversample=<other_number>`. This syntax lets\n you specify distinct oversampling factors for intermediate and final planes.\n\n By default, both oversampling factors are set equal to 2.\n\n Notes\n -----\n More advanced PSF computation options (pupil shifts, source positions, jitter, ...)\n may be set by configuring the `.options` dictionary attribute of this class.\n\n Parameters\n ----------\n source : synphot.spectrum.SourceSpectrum or dict\n specification of source input spectrum. Default is a 5700 K sunlike star.\n nlambda : int\n How many wavelengths to model for broadband?\n The default depends on how wide the filter is: (5,3,1) for types (W,M,N) respectively\n monochromatic : float, optional\n Setting this to a wavelength value (in meters) will compute a monochromatic PSF at that\n wavelength, overriding filter and nlambda settings.\n fov_arcsec : float\n field of view in arcsec. Default=5\n fov_pixels : int\n field of view in pixels. This is an alternative to fov_arcsec.\n outfile : string\n Filename to write. If None, then result is returned as an HDUList\n oversample, detector_oversample, fft_oversample : int\n How much to oversample. Default=4. 
By default the same factor is used for final output\n pixels and intermediate optical planes, but you may optionally use different factors\n if so desired.\n overwrite : bool\n overwrite output FITS file if it already exists?\n display : bool\n Whether to display the PSF when done or not.\n save_intermediates, return_intermediates : bool\n Options for saving to disk or returning to the calling function the intermediate optical planes during\n the propagation. This is useful if you want to e.g. examine the intensity in the Lyot plane for a\n coronagraphic propagation.\n normalize : string\n Desired normalization for output PSFs. See doc string for OpticalSystem.calc_psf. Default is\n to normalize the entrance pupil to have integrated total intensity = 1.\n\n Returns\n -------\n outfits : fits.HDUList\n The output PSF is returned as a fits.HDUlist object.\n If `outfile` is set to a valid filename, the output is also written to that file.\n\n\n \"\"\"\n local_options = self.options.copy() # all local state should be stored in a dict, for\n # ease of handing off to the various subroutines of\n # calc_psf. Don't just modify the global self.options\n # structure since that would pollute it with temporary\n # state as well as persistent state.\n local_options['monochromatic'] = monochromatic\n\n # ----- choose # of wavelengths intelligently. Do this first before generating the source spectrum weighting.\n if nlambda is None or nlambda == 0:\n nlambda = self._get_default_nlambda(self.filter)\n local_options['nlambda'] = nlambda\n\n # ----- calculate field of view depending on supplied parameters\n if fov_arcsec is None and fov_pixels is None: # pick decent defaults.\n fov_arcsec = self._get_default_fov()\n if fov_pixels is not None:\n if np.isscalar(fov_pixels):\n fov_spec = 'pixels = %d' % fov_pixels\n else:\n fov_spec = 'pixels = (%d, %d)' % (fov_pixels[0], fov_pixels[1])\n local_options['fov_pixels'] = fov_pixels\n elif fov_arcsec is not None:\n if np.isscalar(fov_arcsec):\n fov_spec = 'arcsec = %f' % fov_arcsec\n else:\n fov_spec = 'arcsec = (%.3f, %.3f)' % (fov_arcsec[0], fov_arcsec[1])\n local_options['fov_arcsec'] = fov_arcsec\n local_options['fov_spec'] = fov_spec\n\n # ---- Implement the semi-convoluted logic for the oversampling options. See docstring above\n if oversample is not None and detector_oversample is not None and fft_oversample is not None:\n # all options set, contradictorily -> complain!\n raise ValueError(\n \"You cannot specify simultaneously the oversample= option with the detector_oversample \" +\n \"and fft_oversample options. 
Pick one or the other!\")\n elif oversample is None and detector_oversample is None and fft_oversample is None:\n # nothing set -> set oversample = 4\n oversample = 4\n if detector_oversample is None:\n detector_oversample = oversample\n if fft_oversample is None:\n fft_oversample = oversample\n local_options['detector_oversample'] = detector_oversample\n local_options['fft_oversample'] = fft_oversample\n\n # ----- compute weights for each wavelength based on source spectrum\n wavelens, weights = self._get_weights(source=source, nlambda=local_options['nlambda'],\n monochromatic=local_options['monochromatic'])\n\n # Validate that the calculation we're about to do makes sense with this instrument config\n self._validate_config(wavelengths=wavelens)\n poppy_core._log.info(\n \"PSF calc using fov_%s, oversample = %d, number of wavelengths = %d\" % (\n local_options['fov_spec'], local_options['detector_oversample'], len(wavelens)\n )\n )\n\n # ---- now at last, actually do the PSF calc:\n # instantiate an optical system using the current parameters\n self.optsys = self._get_optical_system(fov_arcsec=fov_arcsec, fov_pixels=fov_pixels,\n fft_oversample=fft_oversample, detector_oversample=detector_oversample,\n options=local_options)\n self._check_for_aliasing(wavelens)\n # and use it to compute the PSF (the real work happens here, in code in poppy.py)\n result = self.optsys.calc_psf(wavelens, weights, display_intermediates=display, display=display,\n save_intermediates=save_intermediates, return_intermediates=return_intermediates,\n normalize=normalize)\n\n if return_intermediates: # this implies we got handed back a tuple, so split it apart\n result, intermediates = result\n\n self._apply_jitter(result,\n local_options) # will immediately return if there is no jitter parameter in local_options\n\n self._get_fits_header(result, local_options)\n\n self._calc_psf_format_output(result, local_options)\n\n if display:\n f = plt.gcf()\n plt.suptitle(\"%s, filter= %s\" % (self.name, self.filter), size='xx-large')\n\n if monochromatic is not None:\n labeltext = \"Monochromatic calculation at {:.3f} um\".format(monochromatic * 1e6)\n else:\n labeltext = \"Calculation with %d wavelengths (%g - %g um)\" % (\n nlambda, wavelens[0] * 1e6, wavelens[-1] * 1e6)\n plt.text(0.99, 0.04, labeltext,\n transform=f.transFigure, horizontalalignment='right')\n\n if outfile is not None:\n result[0].header[\"FILENAME\"] = (os.path.basename(outfile), \"Name of this file\")\n result.writeto(outfile, overwrite=overwrite)\n poppy_core._log.info(\"Saved result to \" + outfile)\n\n if return_intermediates:\n return result, intermediates\n else:\n return result\n\n def calc_datacube(self, wavelengths, *args, **kwargs):\n \"\"\"Calculate a spectral datacube of PSFs\n\n Parameters\n -----------\n wavelengths : iterable of floats\n List or ndarray or tuple of floating point wavelengths in meters, such as\n you would supply in a call to calc_psf via the \"monochromatic\" option\n \"\"\"\n\n # Allow up to 10,000 wavelength slices. The number matters because FITS\n # header keys can only have up to 8 characters. Backward-compatible.\n nwavelengths = len(wavelengths)\n if nwavelengths < 100:\n label_wl = lambda i: 'WAVELN{:02d}'.format(i)\n elif nwavelengths < 10000:\n label_wl = lambda i: 'WVLN{:04d}'.format(i)\n else:\n raise ValueError(\"Maximum number of wavelengths exceeded. 
\"\n \"Cannot be more than 10,000.\")\n\n # Set up cube and initialize structure based on PSF at first wavelength\n poppy_core._log.info(\"Starting multiwavelength data cube calculation.\")\n psf = self.calc_psf(*args, monochromatic=wavelengths[0], **kwargs)\n from copy import deepcopy\n cube = deepcopy(psf)\n for ext in range(len(psf)):\n cube[ext].data = np.zeros((nwavelengths, psf[ext].data.shape[0], psf[ext].data.shape[1]))\n cube[ext].data[0] = psf[ext].data\n cube[ext].header[label_wl(0)] = wavelengths[0]\n\n # iterate rest of wavelengths\n for i in range(1, nwavelengths):\n wl = wavelengths[i]\n psf = self.calc_psf(*args, monochromatic=wl, **kwargs)\n for ext in range(len(psf)):\n cube[ext].data[i] = psf[ext].data\n cube[ext].header[label_wl(i)] = wl\n cube[ext].header.add_history(\"--- Cube Plane {} ---\".format(i))\n for h in psf[ext].header['HISTORY']:\n cube[ext].header.add_history(h)\n\n cube[0].header['NWAVES'] = nwavelengths\n return cube\n\n def _calc_psf_format_output(self, result, options):\n \"\"\" Apply desired formatting to output file:\n - rebin to detector pixel scale if desired\n - set up FITS extensions if desired\n - output either the oversampled, rebinned, or both\n Which image(s) get output depends on the value of the options['output_mode']\n parameter. It may be set to 'Oversampled image' to output just the oversampled image,\n 'Detector sampled image' to output just the image binned down onto detector pixels, or\n 'Both as FITS extensions' to output the oversampled image as primary HDU and the\n rebinned image as the first image extension. For convenience, the option can be set\n to just 'oversampled', 'detector', or 'both'.\n\n Modifies the 'result' HDUList object.\n\n \"\"\"\n\n output_mode = options.get('output_mode', 'Both as FITS extensions')\n detector_oversample = options.get('detector_oversample', 1)\n\n if (output_mode == 'Oversampled image') or ('oversampled' in output_mode.lower()):\n # we just want to output the oversampled image as\n # the primary HDU. Nothing special needs to be done.\n poppy_core._log.info(\" Returning only the oversampled data. 
Oversampled by {}\".format(detector_oversample))\n return\n\n elif (output_mode == 'Detector sampled image') or ('detector' in output_mode.lower()):\n # output only the detector sampled image as primary HDU.\n # need to downsample it and replace the existing primary HDU\n if options['detector_oversample'] > 1:\n poppy_core._log.info(\" Downsampling to detector pixel scale, by {}\".format(detector_oversample))\n for ext in range(len(result)):\n result[ext].data = utils.rebin_array(result[ext].data,\n rc=(detector_oversample, detector_oversample))\n else:\n poppy_core._log.info(\" Result already at detector pixel scale; no downsampling needed.\")\n\n for ext in np.arange(len(result)):\n result[ext].header['OVERSAMP'] = (1, 'These data are rebinned to detector pixels')\n result[ext].header['CALCSAMP'] = (detector_oversample, 'This much oversampling used in calculation')\n result[ext].header['PIXELSCL'] *= detector_oversample\n result[ext].header['EXTNAME'] = result[ext].header['EXTNAME'].replace(\"OVER\", \"DET_\")\n return\n\n elif (output_mode == 'Both as FITS extensions') or ('both' in output_mode.lower()):\n # return the downsampled image in the first image extension\n # keep the oversampled image in the primary HDU.\n # create the image extension even if we're already at 1x sampling, for consistency\n poppy_core._log.info(\" Adding extension with image downsampled to detector pixel scale.\")\n\n hdu = fits.HDUList() # append to new hdulist object to preserve the order\n for ext in np.arange(len(result)):\n rebinned_result = result[ext].copy()\n if options['detector_oversample'] > 1:\n poppy_core._log.info(\" Downsampling to detector pixel scale, by {}\".format(detector_oversample))\n rebinned_result.data = utils.rebin_array(rebinned_result.data,\n rc=(detector_oversample, detector_oversample))\n\n rebinned_result.header['OVERSAMP'] = (1, 'These data are rebinned to detector pixels')\n rebinned_result.header['CALCSAMP'] = (detector_oversample, 'This much oversampling used in calculation')\n rebinned_result.header['PIXELSCL'] *= detector_oversample\n rebinned_result.header['EXTNAME'] = rebinned_result.header['EXTNAME'].replace(\"OVER\", \"DET_\")\n\n hdu.append(result[ext])\n hdu.append(rebinned_result)\n\n # Create enough new extensions to append all psfs to them\n [result.append(fits.ImageHDU()) for i in np.arange(len(hdu) - len(result))]\n for ext in np.arange(len(hdu)): result[ext] = hdu[ext]\n\n return\n\n def _get_fits_header(self, result, options):\n \"\"\" Set instrument-specific FITS header keywords\n\n Parameters:\n result : fits.HDUList object\n The HDUList containing the image to be output.\n options : dict\n A dictionary containing options\n\n This function will modify the primary header of the result HDUlist.\n \"\"\"\n\n try:\n from .version import version as __version__\n except ImportError:\n __version__ = ''\n\n # --- update FITS header, display, and output.\n if isinstance(self.pupil, str):\n pupilstr = os.path.basename(self.pupil)\n elif isinstance(self.pupil, fits.HDUList):\n pupilstr = 'pupil from supplied FITS HDUList object'\n elif isinstance(self.pupil, poppy_core.OpticalElement):\n pupilstr = 'pupil from supplied OpticalElement: ' + str(self.pupil)\n result[0].header['PUPILINT'] = (pupilstr, 'Pupil aperture intensity source')\n\n if self.pupilopd is None:\n opdstring = \"NONE - perfect telescope! 
\"\n opdfile = 'None'\n opdslice = 0\n elif isinstance(self.pupilopd, str):\n opdstring = os.path.basename(self.pupilopd)\n opdfile = os.path.basename(self.pupilopd)\n opdslice = 0 # default slice\n elif isinstance(self.pupilopd, fits.HDUList):\n opdstring = 'OPD from supplied FITS HDUlist object'\n if isinstance(self.pupilopd.filename(), str):\n opdfile = os.path.basename(self.pupilopd.filename())\n else:\n opdfile = 'None'\n opdslice = 0\n elif isinstance(self.pupilopd, poppy_core.OpticalElement):\n opdstring = 'OPD from supplied OpticalElement: ' + str(self.pupilopd)\n opdfile = str(self.pupilopd)\n opdslice = 0\n else: # tuple?\n opdstring = \"%s slice %d\" % (os.path.basename(self.pupilopd[0]), self.pupilopd[1])\n opdfile = os.path.basename(self.pupilopd[0])\n opdslice = self.pupilopd[1]\n result[0].header['PUPILOPD'] = (opdstring, 'Pupil OPD source')\n result[0].header['OPD_FILE'] = (opdfile, 'Pupil OPD file name')\n result[0].header['OPDSLICE'] = (opdslice, 'Pupil OPD slice number, if file is a datacube')\n\n result[0].header['INSTRUME'] = (self.name, 'Instrument')\n result[0].header['FILTER'] = (self.filter, 'Filter name')\n result[0].header['EXTNAME'] = ('OVERSAMP', 'This extension is oversampled.')\n result[0].header.add_history('Created by POPPY version ' + __version__)\n\n if 'fft_oversample' in options:\n result[0].header['OVERSAMP'] = (options['fft_oversample'], 'Oversampling factor for FFTs in computation')\n if 'detector_oversample' in options:\n result[0].header['DET_SAMP'] = (\n options['detector_oversample'], 'Oversampling factor for MFT to detector plane')\n\n (year, month, day, hour, minute, second, weekday, doy, dst) = time.gmtime()\n result[0].header[\"DATE\"] = (\n \"%4d-%02d-%02dT%02d:%02d:%02d\" % (year, month, day, hour, minute, second), \"Date of calculation\")\n # get username and hostname in a cross-platform way\n username = getpass.getuser()\n hostname = platform.node()\n result[0].header[\"AUTHOR\"] = (\"%s@%s\" % (username, hostname), \"username@host for calculation\")\n\n def _validate_config(self, wavelengths=None):\n \"\"\"Determine if a provided instrument configuration is valid.\n\n Wavelengths to be propagated in the calculation are passed in as the `wavelengths`\n keyword argument.\n\n Subclasses should raise an exception if the configuration is invalid/unachievable.\n \"\"\"\n pass\n\n def get_optical_system(self, fft_oversample=2, detector_oversample=None, fov_arcsec=2, fov_pixels=None,\n options=None):\n \"\"\" Return an OpticalSystem instance corresponding to the instrument as currently configured.\n\n When creating such an OpticalSystem, you must specify the parameters needed to define the\n desired sampling, specifically the oversampling and field of view.\n\n\n Parameters\n ----------\n\n fft_oversample : int\n Oversampling factor for intermediate plane calculations. Default is 2\n detector_oversample: int, optional\n By default the detector oversampling is equal to the intermediate calculation oversampling.\n If you wish to use a different value for the detector, set this parameter.\n Note that if you just want images at detector pixel resolution you will achieve higher fidelity\n by still using some oversampling (i.e. *not* setting `oversample_detector=1`) and instead rebinning\n down the oversampled data.\n fov_pixels : float\n Field of view in pixels. Overrides fov_arcsec if both set.\n fov_arcsec : float\n Field of view, in arcseconds. 
Default is 2\n options : dict\n Other arbitrary options for optical system creation\n\n\n Returns\n -------\n osys : poppy.OpticalSystem\n an optical system instance representing the desired configuration.\n\n \"\"\"\n\n poppy_core._log.info(\"Creating optical system model:\")\n\n if detector_oversample is None:\n detector_oversample = fft_oversample\n if options is None:\n options = dict()\n\n poppy_core._log.debug(\"Oversample: %d %d \" % (fft_oversample, detector_oversample))\n optsys = poppy_core.OpticalSystem(name=self.name, oversample=fft_oversample)\n\n if 'source_offset_x' in options or 'source_offset_y' in options:\n if 'source_offset_r' in options:\n raise ValueError(\"Cannot set source offset using source_offset_x and source_offset_y\" +\n \" at the same time as source_offset_r\")\n offx = options.get('source_offset_x', 0)\n offy = options.get('source_offset_y', 0)\n optsys.source_offset_r = np.sqrt(offx ** 2 + offy ** 2)\n optsys.source_offset_theta = np.rad2deg(np.arctan2(-offx, offy))\n _log.debug(\"Source offset from X,Y = ({}, {}) is (r,theta) = {},{}\".format(\n offx, offy, optsys.source_offset_r, optsys.source_offset_theta))\n else:\n if 'source_offset_r' in options:\n optsys.source_offset_r = options['source_offset_r']\n if 'source_offset_theta' in options:\n optsys.source_offset_theta = options['source_offset_theta']\n _log.debug(\"Source offset is (r,theta) = {},{}\".format(\n optsys.source_offset_r, optsys.source_offset_theta))\n\n # ---- set pupil intensity\n pupil_optic = None # no optic yet defined\n if isinstance(self.pupil, poppy_core.OpticalElement): # do we already have an object?\n pupil_optic = self.pupil\n full_pupil_path = None\n elif isinstance(self.pupil, str): # simple filename\n if os.path.exists(self.pupil):\n full_pupil_path = self.pupil\n else:\n raise IOError(\"File not found: \" + self.pupil)\n elif isinstance(self.pupil, fits.HDUList): # pupil supplied as FITS HDUList object\n full_pupil_path = self.pupil\n else:\n raise TypeError(\"Not sure what to do with a pupil of that type:\" + str(type(self.pupil)))\n\n # ---- set pupil OPD\n if isinstance(self.pupilopd, str): # simple filename\n full_opd_path = self.pupilopd if os.path.exists(self.pupilopd) else os.path.join(self._datapath, \"OPD\",\n self.pupilopd)\n elif hasattr(self.pupilopd, '__getitem__') and isinstance(self.pupilopd[0],\n str): # tuple with filename and slice\n full_opd_path = (\n self.pupilopd[0] if os.path.exists(self.pupilopd[0]) else os.path.join(self._datapath, \"OPD\",\n self.pupilopd[0]),\n self.pupilopd[1])\n elif isinstance(self.pupilopd, fits.HDUList): # OPD supplied as FITS HDUList object\n full_opd_path = self.pupilopd # not a path per se but this works correctly to pass it to poppy\n elif self.pupilopd is None:\n full_opd_path = None\n else:\n raise TypeError(\"Not sure what to do with a pupilopd of that type:\" + str(type(self.pupilopd)))\n\n # ---- apply pupil intensity and OPD to the optical model\n optsys.add_pupil(name='Entrance Pupil', optic=pupil_optic, transmission=full_pupil_path, opd=full_opd_path,\n rotation=self._rotation)\n\n # Allow instrument subclass to add field-dependent aberrations\n aberration_optic = self._get_aberrations()\n if aberration_optic is not None:\n optsys.add_pupil(aberration_optic)\n\n # --- add the detector element.\n if fov_pixels is None:\n fov_pixels = np.round(fov_arcsec / self.pixelscale)\n if 'parity' in self.options:\n if self.options['parity'].lower() == 'odd' and np.remainder(fov_pixels, 2) == 0:\n fov_pixels += 1\n if 
self.options['parity'].lower() == 'even' and np.remainder(fov_pixels, 2) == 1:\n fov_pixels += 1\n\n optsys.add_detector(self.pixelscale, fov_pixels=fov_pixels, oversample=detector_oversample,\n name=self.name + \" detector\")\n\n return optsys\n\n def _get_optical_system(self, *args, **kwargs):\n \"\"\" Return an OpticalSystem instance corresponding to the instrument as currently configured.\n\n \"\"\"\n # Note, this has historically been an internal private API function (starting with an underscore)\n # As of version 0.9 it is promoted to a public part of the API for the Instrument class and subclasses.\n # Here we ensure the prior version works, back compatibly.\n import warnings\n warnings.warn(\"_get_optical_system is deprecated; use get_optical_system (without leading underscore) instead.\",\n DeprecationWarning)\n return self.get_optical_system(*args, **kwargs)\n\n def _check_for_aliasing(self, wavelengths):\n \"\"\" Check for spatial frequency aliasing and warn if the\n user is requesting a FOV which is larger than supported based on\n the available pupil resolution in the optical system entrance pupil.\n If the requested FOV of the output PSF exceeds that which is Nyquist\n sampled in the entrance pupil, raise a warning to the user.\n\n The check implemented here is fairly simple, designed to catch the most\n common cases, and makes assumptions about the optical system which are\n not necessarily true in all cases, specifically that it starts with a\n pupil plane with fixed spatial resolution and ends with a detector\n plane. If either of those assumptions is violated, this check is skipped.\n\n See https://github.com/mperrin/poppy/issues/135 and\n https://github.com/mperrin/poppy/issues/180 for more background on the\n relevant Fourier optics.\n \"\"\"\n # Note this must be called after self.optsys is defined in calc_psf()\n\n # compute spatial sampling in the entrance pupil\n if not hasattr(self.optsys.planes[0], 'pixelscale') or self.optsys.planes[0].pixelscale is None:\n return # analytic entrance pupil, no sampling limitations.\n if not isinstance(self.optsys.planes[-1], poppy_core.Detector):\n return # optical system doesn't end on some fixed sampling detector, not sure how to check sampling limit\n\n # determine the spatial frequency which is Nyquist sampled by the input pupil.\n # convert this to units of cycles per meter and make it not a Quantity\n sf = (1. / (self.optsys.planes[0].pixelscale * 2 * units.pixel)).to(1. / units.meter).value\n\n det_fov_arcsec = self.optsys.planes[-1].fov_arcsec.to(units.arcsec).value\n if np.isscalar(det_fov_arcsec): # FOV can be scalar (square) or rectangular\n det_fov_arcsec = (det_fov_arcsec, det_fov_arcsec)\n\n # determine the angular scale that corresponds to for the given wavelength\n for wl in wavelengths:\n critical_angle_arcsec = wl * sf * poppy_core._RADIANStoARCSEC\n if (critical_angle_arcsec < det_fov_arcsec[0] / 2) or (critical_angle_arcsec < det_fov_arcsec[1] / 2):\n import warnings\n warnings.warn((\n \"For wavelength {:.3f} microns, a FOV of {:.3f} * {:.3f} arcsec exceeds the maximum \" +\n \" spatial frequency well sampled by the input pupil. Your computed PSF will suffer from \" +\n \"aliasing for angles beyond {:.3f} arcsec radius.\").format(\n wl * 1e6, det_fov_arcsec[0], det_fov_arcsec[1], critical_angle_arcsec))\n\n def _get_aberrations(self):\n \"\"\"Incorporate a pupil-plane optic that represents optical aberrations\n (e.g. field-dependence as an OPD map). 
Subclasses should override this method.\n (If no aberration optic should be applied, None should be returned.)\n\n Returns\n -------\n aberration_optic : poppy.OpticalElement subclass or None\n Optional. Will be added to the optical system immediately after the\n entrance pupil (and any pupil OPD map).\n \"\"\"\n return None\n\n def _apply_jitter(self, result, local_options=None):\n \"\"\" Modify a PSF to account for the blurring effects of image jitter.\n Parameter arguments are taken from the options dictionary.\n\n Parameters\n -----------\n result : fits.HDUList\n HDU list containing a point spread function\n local_options : dict, optional\n Options dictionary. If not present, options will be taken from self.options.\n\n The key configuration argument is options['jitter'] which defines the type of jitter.\n If this is the string 'gaussian', then a Gaussian blurring kernel will be applied, the\n amount of the blur is taken from the options['jitter_sigma'] value (arcsec per axis).\n\n Other types of jitter are not yet implemented.\n\n The image in the 'result' HDUlist will be modified by this function.\n \"\"\"\n if local_options is None:\n local_options = self.options\n if 'jitter' not in local_options:\n result[0].header['JITRTYPE'] = ('None', 'Type of jitter applied')\n return\n\n if conf.enable_speed_tests: t0 = time.time() # pragma: no cover\n\n poppy_core._log.info(\"Calculating jitter using \" + str(local_options['jitter']))\n\n if local_options['jitter'] is None:\n return\n elif local_options['jitter'].lower() == 'gaussian':\n import scipy.ndimage\n\n sigma = local_options.get('jitter_sigma')\n if sigma is None:\n poppy_core._log.warning(\n \"Gaussian jitter model requested, but no width for jitter distribution specified. \" +\n \"Assuming jitter_sigma = 0.007 arcsec per axis by default\")\n sigma = 0.007\n\n # that will be in arcseconds, we need to convert to pixels:\n\n poppy_core._log.info(\"Jitter: Convolving with Gaussian with sigma={0:.3f} arcsec\".format(sigma))\n out = scipy.ndimage.gaussian_filter(result[0].data, sigma / result[0].header['PIXELSCL'])\n peak = result[0].data.max()\n newpeak = out.max()\n strehl = newpeak / peak # not really the whole Strehl ratio, just the part due to jitter\n\n poppy_core._log.info(\" resulting image peak drops to {0:.3f} of its previous value\".format(strehl))\n result[0].header['JITRTYPE'] = ('Gaussian convolution', 'Type of jitter applied')\n result[0].header['JITRSIGM'] = (sigma, 'Gaussian sigma for jitter, per axis [arcsec]')\n result[0].header['JITRSTRL'] = (strehl, 'Strehl reduction from jitter ')\n\n result[0].data = out\n else:\n raise ValueError('Unknown jitter option value: ' + local_options['jitter'])\n\n if conf.enable_speed_tests: # pragma: no cover\n t1 = time.time()\n _log.debug(\"\\tTIME %f s\\t for jitter model\" % (t1 - t0))\n\n\n #####################################################\n # Display routines\n\n def display(self):\n \"\"\"Display the currently configured optical system on screen\"\"\"\n # if coronagraphy is set, then we have to temporarily disable\n # semi-analytic coronagraphic mode to get a regular displayable optical system\n try:\n old_no_sam = self.options['no_sam']\n self.options['no_sam'] = True\n except KeyError:\n old_no_sam = None\n # Trigger config validation to update any optical planes\n # (specifically auto-selected pupils based on filter selection)\n wavelengths, _ = self._get_weights(nlambda=1)\n self._validate_config(wavelengths=wavelengths)\n optsys = self._get_optical_system()\n 
optsys.display(what='both')\n if old_no_sam is not None:\n self.options['no_sam'] = old_no_sam\n\n #####################################################\n #\n # Synthetic Photometry related methods\n #\n def _get_spec_cache_key(self, source, nlambda):\n \"\"\" return key for the cache of precomputed spectral weightings.\n This is a separate function so the TFI subclass can override it.\n \"\"\"\n name = source.meta.get('name')\n if not name:\n name = source.meta['expr']\n return self.filter, name, nlambda\n\n def _get_synphot_bandpass(self, filtername):\n \"\"\" Return a synphot.spectrum.SpectralElement object for the given desired band.\n\n By subclassing this, you can define whatever custom bandpasses are appropriate for your instrument\n\n Parameters\n ----------\n filtername : str\n String name of the filter that you are interested in\n\n Returns\n --------\n a synphot.spectrum.ObservationSpectralElement object for that filter.\n\n \"\"\"\n if not _HAS_SYNPHOT:\n raise RuntimeError(\"synphot not found\")\n\n bpname = self._synphot_bandpasses[filtername]\n\n try:\n band = synphot.spectrum.SpectralElement.from_filter(bpname)\n except Exception:\n raise LookupError(\"Don't know how to compute bandpass for a filter named \" + bpname)\n\n return band\n\n def _get_default_nlambda(self, filtername):\n \"\"\" Return the default # of wavelengths to be used for calculation by a given filter \"\"\"\n return 10\n\n def _get_default_fov(self):\n \"\"\" Return default FOV in arcseconds \"\"\"\n return 5\n\n def _get_filter_list(self):\n \"\"\" Returns a list of allowable filters, and the corresponding synphot obsmode\n for each.\n\n If you need to define bandpasses that are not already available in synphot, consider subclassing\n _getSynphotBandpass instead to create a synphot spectrum based on data read from disk, etc.\n\n Returns\n --------\n filterlist : list\n List of string filter names\n bandpasslist : dict\n dictionary of string names for use by synphot\n\n This could probably be folded into one using an OrderdDict. 
FIXME do that later\n\n \"\"\"\n\n filterlist = ['U', 'B', 'V', 'R', 'I']\n bandpasslist = {\n 'U': 'johnson_u',\n 'B': 'johnson_b',\n 'V': 'johnson_v',\n 'R': 'johnson_r',\n 'I': 'johnson_i',\n }\n\n return filterlist, bandpasslist\n\n # def _getJitterKernel(self, type='Gaussian', sigma=10):\n\n def _get_weights(self, source=None, nlambda=None, monochromatic=None, verbose=False):\n \"\"\" Return the set of discrete wavelengths, and weights for each wavelength,\n that should be used for a PSF calculation.\n\n Uses synphot (if installed), otherwise assumes simple-minded flat spectrum\n\n \"\"\"\n if nlambda is None or nlambda == 0:\n nlambda = self._get_default_nlambda(self.filter)\n\n if monochromatic is not None:\n poppy_core._log.info(\"Monochromatic calculation requested.\")\n monochromatic_wavelen_meters = monochromatic.to_value(units.meter) if isinstance(monochromatic, units.Quantity) else monochromatic\n return (np.asarray([monochromatic_wavelen_meters]), np.asarray([1]))\n\n elif _HAS_SYNPHOT and (isinstance(source, synphot.SourceSpectrum) or source is None):\n \"\"\" Given a synphot.SourceSpectrum object, perform synthetic photometry for\n nlambda bins spanning the wavelength range of interest.\n\n Because this calculation is kind of slow, cache results for reuse in the frequent\n case where one is computing many PSFs for the same spectral source.\n \"\"\"\n from synphot import SpectralElement, Observation\n from synphot.models import Box1D, BlackBodyNorm1D, Empirical1D\n\n poppy_core._log.debug(\n \"Calculating spectral weights using synphot, nlambda=%d, source=%s\" % (nlambda, str(source)))\n if source is None:\n source = synphot.SourceSpectrum(BlackBodyNorm1D, temperature=5700 * units.K)\n poppy_core._log.info(\"No source spectrum supplied, therefore defaulting to 5700 K blackbody\")\n poppy_core._log.debug(\"Computing spectral weights for source = \" + str(source))\n\n try:\n key = self._get_spec_cache_key(source, nlambda)\n if key in self._spectra_cache:\n poppy_core._log.debug(\"Previously computed spectral weights found in cache, just reusing those\")\n return self._spectra_cache[key]\n except KeyError:\n pass # in case sourcespectrum lacks a name element so the above lookup fails - just do the below calc.\n\n poppy_core._log.info(\"Computing wavelength weights using synthetic photometry for %s...\" % self.filter)\n band = self._get_synphot_bandpass(self.filter)\n band_wave = band.waveset\n band_thru = band(band_wave)\n\n # Update source to ensure that it covers the entire filter\n if band_wave.value.min() < source.waveset.value.min() or \\\n band_wave.value.max() > source.waveset.value.max():\n source_meta = source.meta\n wave, wave_str = synphot.utils.generate_wavelengths(band_wave.value.min(), band_wave.value.max(),\n wave_unit=units.angstrom, log=False)\n source = synphot.SourceSpectrum(Empirical1D, points=wave, lookup_table=source(wave))\n source.meta.update(source_meta)\n\n # choose reasonable min and max wavelengths\n w_above10 = (band_thru > 0.10 * band_thru.max())\n\n minwave = band_wave[w_above10].min()\n maxwave = band_wave[w_above10].max()\n poppy_core._log.debug(\"Min, max wavelengths = %f, %f\" % (\n minwave.to_value(units.micron), maxwave.to_value(units.micron)))\n\n wave_bin_edges = np.linspace(minwave, maxwave, nlambda + 1)\n wavesteps = (wave_bin_edges[:-1] + wave_bin_edges[1:]) / 2\n deltawave = wave_bin_edges[1] - wave_bin_edges[0]\n area = 1 * (units.m * units.m)\n effstims = []\n\n for wave in wavesteps:\n poppy_core._log.debug(\n f\"Integrating across 
band centered at {wave.to(units.micron):.2f} \"\n f\"with width {deltawave.to(units.micron):.2f}\")\n box = SpectralElement(Box1D, amplitude=1, x_0=wave, width=deltawave) * band\n if box.tpeak() == 0:\n # watch out for pathological cases with no overlap (happens with MIRI FND at high nlambda)\n result = 0.0\n else:\n binset = np.linspace(wave - deltawave, wave + deltawave,\n 30) # what wavelens to use when integrating across the sub-band?\n binset = binset[binset >= 0] # remove any negative values\n result = Observation(source, box, binset=binset).effstim('count', area=area)\n effstims.append(result)\n\n effstims = units.Quantity(effstims)\n effstims /= effstims.sum() # Normalized count rate is unitless\n wave_m = wavesteps.to_value(units.m) # convert to meters\n\n newsource = (wave_m, effstims.to_value())\n if verbose:\n _log.info(\" Wavelengths and weights computed from synphot: \" + str(newsource))\n self._spectra_cache[self._get_spec_cache_key(source, nlambda)] = newsource\n return newsource\n elif isinstance(source, dict) and ('wavelengths' in source) and ('weights' in source):\n # Allow providing directly a set of specific weights and wavelengths, as in poppy.calc_psf source option #2\n return source['wavelengths'], source['weights']\n elif isinstance(source, tuple) and len(source) == 2:\n # Allow user to provide directly a tuple, as in poppy.calc_psf source option #3\n return source\n\n else: # Fallback simple code for if we don't have synphot.\n poppy_core._log.warning(\n \"synphot unavailable (or invalid source supplied)! Assuming flat # of counts versus wavelength.\")\n # compute a source spectrum weighted by the desired filter curves.\n # The existing FITS files all have wavelength in ANGSTROMS since that is the synphot convention...\n filterfile = self._filters[self.filter].filename\n filterheader = fits.getheader(filterfile, 1)\n filterdata = fits.getdata(filterfile, 1)\n try:\n wavelengths = filterdata.WAVELENGTH.astype('=f8')\n throughputs = filterdata.THROUGHPUT.astype('=f8')\n except AttributeError:\n raise ValueError(\n \"The supplied file, {0}, does not appear to be a FITS table with WAVELENGTH and \" +\n \"THROUGHPUT columns.\".format(filterfile))\n if 'WAVEUNIT' in filterheader:\n waveunit = filterheader['WAVEUNIT'].lower()\n if re.match(r'[Aa]ngstroms?', waveunit) is None:\n raise ValueError(\n \"The supplied file, {0}, has WAVEUNIT='{1}'. Only WAVEUNIT = Angstrom supported \" +\n \"when synphot is not installed.\".format(filterfile, waveunit))\n else:\n waveunit = 'Angstrom'\n poppy_core._log.warning(\n \"CAUTION: no WAVEUNIT keyword found in filter file {0}. Assuming = {1} by default\".format(\n filterfile, waveunit))\n\n poppy_core._log.warning(\n \"CAUTION: Just interpolating rather than integrating filter profile, over {0} steps\".format(nlambda))\n wavelengths = wavelengths * units.Unit(waveunit)\n lrange = wavelengths[throughputs > 0.4].to_value(units.m) # convert from Angstroms to Meters\n # get evenly spaced points within the range of allowed lambdas, centered on each bin\n lambd = np.linspace(np.min(lrange), np.max(lrange), nlambda, endpoint=False) + (\n np.max(lrange) - np.min(lrange)) / (2 * nlambda)\n filter_fn = scipy.interpolate.interp1d(wavelengths.to_value(units.m), throughputs, kind='cubic',\n bounds_error=False)\n weights = filter_fn(lambd)\n return lambd, weights\n" ]
[ [ "numpy.arctan2", "numpy.zeros", "matplotlib.pyplot.gcf", "numpy.asarray", "numpy.remainder", "numpy.max", "matplotlib.pyplot.text", "matplotlib.pyplot.suptitle", "numpy.min", "numpy.sqrt", "numpy.linspace", "numpy.isscalar", "numpy.round" ] ]
arpith-kp/superset
[ "42ff4fc19a34144b31cef82b341871dff34f37d2" ]
[ "superset/examples/multiformat_time_series.py" ]
[ "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom typing import Dict, Optional, Tuple\n\nimport pandas as pd\nfrom sqlalchemy import BigInteger, Date, DateTime, String\n\nfrom superset import db\nfrom superset.models.slice import Slice\nfrom superset.utils.core import get_example_database\n\nfrom .helpers import (\n config,\n get_example_data,\n get_slice_json,\n merge_slice,\n misc_dash_slices,\n TBL,\n)\n\n\ndef load_multiformat_time_series(\n only_metadata: bool = False, force: bool = False\n) -> None:\n \"\"\"Loading time series data from a zip file in the repo\"\"\"\n tbl_name = \"multiformat_time_series\"\n database = get_example_database()\n table_exists = database.has_table_by_name(tbl_name)\n\n if not only_metadata and (not table_exists or force):\n data = get_example_data(\"multiformat_time_series.json.gz\")\n pdf = pd.read_json(data)\n # TODO(bkyryliuk): move load examples data into the pytest fixture\n if database.backend == \"presto\":\n pdf.ds = pd.to_datetime(pdf.ds, unit=\"s\")\n pdf.ds = pdf.ds.dt.strftime(\"%Y-%m-%d\")\n pdf.ds2 = pd.to_datetime(pdf.ds2, unit=\"s\")\n pdf.ds2 = pdf.ds2.dt.strftime(\"%Y-%m-%d %H:%M%:%S\")\n else:\n pdf.ds = pd.to_datetime(pdf.ds, unit=\"s\")\n pdf.ds2 = pd.to_datetime(pdf.ds2, unit=\"s\")\n\n pdf.to_sql(\n tbl_name,\n database.get_sqla_engine(),\n if_exists=\"replace\",\n chunksize=500,\n dtype={\n \"ds\": String(255) if database.backend == \"presto\" else Date,\n \"ds2\": String(255) if database.backend == \"presto\" else DateTime,\n \"epoch_s\": BigInteger,\n \"epoch_ms\": BigInteger,\n \"string0\": String(100),\n \"string1\": String(100),\n \"string2\": String(100),\n \"string3\": String(100),\n },\n index=False,\n )\n print(\"Done loading table!\")\n print(\"-\" * 80)\n\n print(f\"Creating table [{tbl_name}] reference\")\n obj = db.session.query(TBL).filter_by(table_name=tbl_name).first()\n if not obj:\n obj = TBL(table_name=tbl_name)\n obj.main_dttm_col = \"ds\"\n obj.database = database\n dttm_and_expr_dict: Dict[str, Tuple[Optional[str], None]] = {\n \"ds\": (None, None),\n \"ds2\": (None, None),\n \"epoch_s\": (\"epoch_s\", None),\n \"epoch_ms\": (\"epoch_ms\", None),\n \"string2\": (\"%Y%m%d-%H%M%S\", None),\n \"string1\": (\"%Y-%m-%d^%H:%M:%S\", None),\n \"string0\": (\"%Y-%m-%d %H:%M:%S.%f\", None),\n \"string3\": (\"%Y/%m/%d%H:%M:%S.%f\", None),\n }\n for col in obj.columns:\n dttm_and_expr = dttm_and_expr_dict[col.column_name]\n col.python_date_format = dttm_and_expr[0]\n col.dbatabase_expr = dttm_and_expr[1]\n col.is_dttm = True\n db.session.merge(obj)\n db.session.commit()\n obj.fetch_metadata()\n tbl = obj\n\n print(\"Creating Heatmap charts\")\n for i, col in enumerate(tbl.columns):\n slice_data = {\n \"metrics\": [\"count\"],\n \"granularity_sqla\": col.column_name,\n 
\"row_limit\": config[\"ROW_LIMIT\"],\n \"since\": \"2015\",\n \"until\": \"2016\",\n \"viz_type\": \"cal_heatmap\",\n \"domain_granularity\": \"month\",\n \"subdomain_granularity\": \"day\",\n }\n\n slc = Slice(\n slice_name=f\"Calendar Heatmap multiformat {i}\",\n viz_type=\"cal_heatmap\",\n datasource_type=\"table\",\n datasource_id=tbl.id,\n params=get_slice_json(slice_data),\n )\n merge_slice(slc)\n misc_dash_slices.add(\"Calendar Heatmap multiformat 0\")\n" ]
[ [ "pandas.to_datetime", "pandas.read_json" ] ]
Chengwei94/clinica
[ "0e9d837baf9064a626198422b2a70fe120f227f0" ]
[ "clinica/iotools/converters/adni_to_bids/adni_modalities/adni_pib_pet.py" ]
[ "# coding: utf-8\n\n\"\"\"Module for converting PIB PET of ADNI.\"\"\"\n\n\ndef convert_adni_pib_pet(\n source_dir, csv_dir, dest_dir, conversion_dir, subjs_list=None, mod_to_update=False\n):\n \"\"\"Convert PIB PET images of ADNI into BIDS format.\n\n Args:\n source_dir: path to the ADNI directory\n csv_dir: path to the clinical data directory\n dest_dir: path to the destination BIDS directory\n conversion_dir: path to the TSV files including the paths to original images\n subjs_list: subjects list\n mod_to_update: If True, pre-existing images in the BIDS directory will be erased and extracted again.\n \"\"\"\n from os import path\n\n import pandas as pd\n from colorama import Fore\n\n from clinica.iotools.converters.adni_to_bids.adni_utils import paths_to_bids\n from clinica.utils.stream import cprint\n\n if subjs_list is None:\n adni_merge_path = path.join(csv_dir, \"ADNIMERGE.csv\")\n adni_merge = pd.read_csv(adni_merge_path, sep=\",\", low_memory=False)\n subjs_list = list(adni_merge.PTID.unique())\n\n cprint(\n f\"Calculating paths of PIB PET images. Output will be stored in {conversion_dir}.\"\n )\n images = compute_pib_pet_paths(\n source_dir, csv_dir, dest_dir, subjs_list, conversion_dir\n )\n cprint(\"Paths of PIB PET images found. Exporting images into BIDS ...\")\n paths_to_bids(images, dest_dir, \"pib\", mod_to_update=mod_to_update)\n cprint(f\"{Fore.GREEN}PIB PET conversion done.{Fore.RESET}\")\n\n\ndef compute_pib_pet_paths(source_dir, csv_dir, dest_dir, subjs_list, conversion_dir):\n \"\"\"Compute the paths to the PIB PET images and store them in a TSV file.\n\n Args:\n source_dir: path to the ADNI directory\n csv_dir: path to the clinical data directory\n dest_dir: path to the destination BIDS directory\n subjs_list: subjects list\n conversion_dir: path to the TSV files including the paths to original images\n\n Returns:\n images: a dataframe with all the paths to the PET images that will be converted into BIDS\n \"\"\"\n from os import path\n\n import pandas as pd\n\n from clinica.iotools.converters.adni_to_bids.adni_utils import (\n find_image_path,\n get_images_pet,\n )\n\n pet_pib_col = [\n \"Phase\",\n \"Subject_ID\",\n \"VISCODE\",\n \"Visit\",\n \"Sequence\",\n \"Scan_Date\",\n \"Study_ID\",\n \"Series_ID\",\n \"Image_ID\",\n \"Original\",\n ]\n pet_pib_df = pd.DataFrame(columns=pet_pib_col)\n pet_pib_dfs_list = []\n\n # Loading needed .csv files\n pibqc = pd.read_csv(path.join(csv_dir, \"PIBQC.csv\"), sep=\",\", low_memory=False)\n pet_meta_list = pd.read_csv(\n path.join(csv_dir, \"PET_META_LIST.csv\"), sep=\",\", low_memory=False\n )\n\n for subj in subjs_list:\n\n # PET images metadata for subject\n subject_pet_meta = pet_meta_list[pet_meta_list[\"Subject\"] == subj]\n\n if subject_pet_meta.empty:\n continue\n\n # QC for PIB PET images\n pet_qc_subj = pibqc[(pibqc.PASS == 1) & (pibqc.RID == int(subj[-4:]))]\n\n sequences_preprocessing_step = [\"PIB Co-registered, Averaged\"]\n subj_dfs_list = get_images_pet(\n subj,\n pet_qc_subj,\n subject_pet_meta,\n pet_pib_col,\n \"PIB-PET\",\n sequences_preprocessing_step,\n viscode_field=\"VISCODE\",\n )\n if subj_dfs_list:\n pet_pib_dfs_list += subj_dfs_list\n\n if pet_pib_dfs_list:\n pet_pib_df = pd.concat(pet_pib_dfs_list, ignore_index=True)\n\n # Exceptions\n # ==========\n conversion_errors = []\n\n # Removing known exceptions from images to convert\n if not pet_pib_df.empty:\n error_ind = pet_pib_df.index[\n pet_pib_df.apply(\n lambda x: ((x.Subject_ID, x.VISCODE) in conversion_errors), axis=1\n )\n ]\n 
pet_pib_df.drop(error_ind, inplace=True)\n\n images = find_image_path(pet_pib_df, source_dir, \"PIB\", \"I\", \"Image_ID\")\n images.to_csv(path.join(conversion_dir, \"pib_pet_paths.tsv\"), sep=\"\\t\", index=False)\n\n return images\n" ]
[ [ "pandas.read_csv", "pandas.DataFrame", "pandas.concat" ] ]
dandavison/tensorflow-models
[ "64eea4f573094068bd99900016c603080a59e788" ]
[ "research/audioset/vggish/vggish_train_demo.py" ]
[ "# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nr\"\"\"A simple demonstration of running VGGish in training mode.\n\nThis is intended as a toy example that demonstrates how to use the VGGish model\ndefinition within a larger model that adds more layers on top, and then train\nthe larger model. If you let VGGish train as well, then this allows you to\nfine-tune the VGGish model parameters for your application. If you don't let\nVGGish train, then you use VGGish as a feature extractor for the layers above\nit.\n\nFor this toy task, we are training a classifier to distinguish between three\nclasses: sine waves, constant signals, and white noise. We generate synthetic\nwaveforms from each of these classes, convert into shuffled batches of log mel\nspectrogram examples with associated labels, and feed the batches into a model\nthat includes VGGish at the bottom and a couple of additional layers on top. We\nalso plumb in labels that are associated with the examples, which feed a label\nloss used for training.\n\nUsage:\n # Run training for 100 steps using a model checkpoint in the default\n # location (vggish_model.ckpt in the current directory). Allow VGGish\n # to get fine-tuned.\n $ python vggish_train_demo.py --num_batches 100\n\n # Same as before but run for fewer steps and don't change VGGish parameters\n # and use a checkpoint in a different location\n $ python vggish_train_demo.py --num_batches 50 \\\n --train_vggish=False \\\n --checkpoint /path/to/model/checkpoint\n\"\"\"\n\nfrom __future__ import print_function\n\nfrom random import shuffle\n\nimport numpy as np\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nimport tf_slim as slim\n\nfrom . import vggish_input\nfrom . import vggish_params\nfrom . import vggish_slim\n\nflags = tf.app.flags\n\nflags.DEFINE_integer(\n 'num_batches', 30,\n 'Number of batches of examples to feed into the model. Each batch is of '\n 'variable size and contains shuffled examples of each class of audio.')\n\nflags.DEFINE_boolean(\n 'train_vggish', True,\n 'If True, allow VGGish parameters to change during training, thus '\n 'fine-tuning VGGish. 
If False, VGGish parameters are fixed, thus using '\n 'VGGish as a fixed feature extractor.')\n\nflags.DEFINE_string(\n 'checkpoint', 'vggish_model.ckpt',\n 'Path to the VGGish checkpoint file.')\n\nFLAGS = flags.FLAGS\n\n_NUM_CLASSES = 3\n\n\ndef _get_examples_batch():\n \"\"\"Returns a shuffled batch of examples of all audio classes.\n\n Note that this is just a toy function because this is a simple demo intended\n to illustrate how the training code might work.\n\n Returns:\n a tuple (features, labels) where features is a NumPy array of shape\n [batch_size, num_frames, num_bands] where the batch_size is variable and\n each row is a log mel spectrogram patch of shape [num_frames, num_bands]\n suitable for feeding VGGish, while labels is a NumPy array of shape\n [batch_size, num_classes] where each row is a multi-hot label vector that\n provides the labels for corresponding rows in features.\n \"\"\"\n # Make a waveform for each class.\n num_seconds = 5\n sr = 44100 # Sampling rate.\n t = np.linspace(0, num_seconds, int(num_seconds * sr)) # Time axis.\n # Random sine wave.\n freq = np.random.uniform(100, 1000)\n sine = np.sin(2 * np.pi * freq * t)\n # Random constant signal.\n magnitude = np.random.uniform(-1, 1)\n const = magnitude * t\n # White noise.\n noise = np.random.normal(-1, 1, size=t.shape)\n\n # Make examples of each signal and corresponding labels.\n # Sine is class index 0, Const class index 1, Noise class index 2.\n sine_examples = vggish_input.waveform_to_examples(sine, sr)\n sine_labels = np.array([[1, 0, 0]] * sine_examples.shape[0])\n const_examples = vggish_input.waveform_to_examples(const, sr)\n const_labels = np.array([[0, 1, 0]] * const_examples.shape[0])\n noise_examples = vggish_input.waveform_to_examples(noise, sr)\n noise_labels = np.array([[0, 0, 1]] * noise_examples.shape[0])\n\n # Shuffle (example, label) pairs across all classes.\n all_examples = np.concatenate((sine_examples, const_examples, noise_examples))\n all_labels = np.concatenate((sine_labels, const_labels, noise_labels))\n labeled_examples = list(zip(all_examples, all_labels))\n shuffle(labeled_examples)\n\n # Separate and return the features and labels.\n features = [example for (example, _) in labeled_examples]\n labels = [label for (_, label) in labeled_examples]\n return (features, labels)\n\n\ndef main(_):\n with tf.Graph().as_default(), tf.Session() as sess:\n # Define VGGish.\n embeddings = vggish_slim.define_vggish_slim(FLAGS.train_vggish)\n\n # Define a shallow classification model and associated training ops on top\n # of VGGish.\n with tf.variable_scope('mymodel'):\n # Add a fully connected layer with 100 units.\n num_units = 100\n fc = slim.fully_connected(embeddings, num_units)\n\n # Add a classifier layer at the end, consisting of parallel logistic\n # classifiers, one per class. 
This allows for multi-class tasks.\n logits = slim.fully_connected(\n fc, _NUM_CLASSES, activation_fn=None, scope='logits')\n tf.sigmoid(logits, name='prediction')\n\n # Add training ops.\n with tf.variable_scope('train'):\n global_step = tf.Variable(\n 0, name='global_step', trainable=False,\n collections=[tf.GraphKeys.GLOBAL_VARIABLES,\n tf.GraphKeys.GLOBAL_STEP])\n\n # Labels are assumed to be fed as a batch multi-hot vectors, with\n # a 1 in the position of each positive class label, and 0 elsewhere.\n labels = tf.placeholder(\n tf.float32, shape=(None, _NUM_CLASSES), name='labels')\n\n # Cross-entropy label loss.\n xent = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=logits, labels=labels, name='xent')\n loss = tf.reduce_mean(xent, name='loss_op')\n tf.summary.scalar('loss', loss)\n\n # We use the same optimizer and hyperparameters as used to train VGGish.\n optimizer = tf.train.AdamOptimizer(\n learning_rate=vggish_params.LEARNING_RATE,\n epsilon=vggish_params.ADAM_EPSILON)\n optimizer.minimize(loss, global_step=global_step, name='train_op')\n\n # Initialize all variables in the model, and then load the pre-trained\n # VGGish checkpoint.\n sess.run(tf.global_variables_initializer())\n vggish_slim.load_vggish_slim_checkpoint(sess, FLAGS.checkpoint)\n\n # Locate all the tensors and ops we need for the training loop.\n features_tensor = sess.graph.get_tensor_by_name(\n vggish_params.INPUT_TENSOR_NAME)\n labels_tensor = sess.graph.get_tensor_by_name('mymodel/train/labels:0')\n global_step_tensor = sess.graph.get_tensor_by_name(\n 'mymodel/train/global_step:0')\n loss_tensor = sess.graph.get_tensor_by_name('mymodel/train/loss_op:0')\n train_op = sess.graph.get_operation_by_name('mymodel/train/train_op')\n\n # The training loop.\n for _ in range(FLAGS.num_batches):\n (features, labels) = _get_examples_batch()\n [num_steps, loss, _] = sess.run(\n [global_step_tensor, loss_tensor, train_op],\n feed_dict={features_tensor: features, labels_tensor: labels})\n print('Step %d: loss %g' % (num_steps, loss))\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "numpy.random.uniform", "tensorflow.compat.v1.placeholder", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.summary.scalar", "tensorflow.compat.v1.Variable", "numpy.concatenate", "tensorflow.compat.v1.Graph", "tensorflow.compat.v1.reduce_mean", "tensorflow.compat.v1.train.AdamOptimizer", "tensorflow.compat.v1.sigmoid", "tensorflow.compat.v1.variable_scope", "tensorflow.compat.v1.nn.sigmoid_cross_entropy_with_logits", "tensorflow.compat.v1.global_variables_initializer", "numpy.random.normal", "numpy.sin", "tensorflow.compat.v1.disable_v2_behavior", "numpy.array", "tensorflow.compat.v1.app.run" ] ]
airmler/ctf
[ "88c101348b00a87def0393f962d3f87b1007110a" ]
[ "test/python/test_partition.py" ]
[ "#!/usr/bin/env python\n\nimport unittest\nimport numpy\nimport ctf\nimport os\nimport sys\n\n\ndef allclose(a, b):\n return abs(ctf.to_nparray(a) - ctf.to_nparray(b)).sum() < 1e-14\n\nclass KnowValues(unittest.TestCase):\n def test_partition(self):\n AA = ctf.tensor((4,4),sym=[ctf.SYM.SY,ctf.SYM.NS])\n AA.fill_random()\n idx, prl, blk = AA.get_distribution()\n BB = ctf.tensor((4, 4), idx=idx, prl=prl.get_idx_partition(idx[:1]), blk=blk)\n BB += AA\n CC = ctf.tensor((4, 4), idx=idx, prl=prl.get_idx_partition(idx[1:2]), blk=blk)\n CC += AA\n self.assertTrue(allclose(AA,BB)) \n self.assertTrue(allclose(BB,CC)) \n\ndef run_tests():\n numpy.random.seed(5330);\n wrld = ctf.comm()\n if ctf.comm().rank() != 0:\n result = unittest.TextTestRunner(stream = open(os.devnull, 'w')).run(unittest.TestSuite(unittest.TestLoader().loadTestsFromTestCase(KnowValues)))\n else:\n print(\"Tests for partition\")\n result = unittest.TextTestRunner().run(unittest.TestSuite(unittest.TestLoader().loadTestsFromTestCase(KnowValues)))\n return result\n\nif __name__ == \"__main__\":\n result = run_tests()\n ctf.MPI_Stop()\n sys.exit(not result)\n" ]
[ [ "numpy.random.seed" ] ]
HninPwint/nba-career-prediction
[ "ffce32507cad2c4dd020c62cee7f33cf97c886f7" ]
[ "src/features/build_features.py" ]
[ "import pandas as pd\nimport numpy as np\n\ndef replace_outliers(data, columns):\n '''\n Quantile-based Flooring and Capping\n '''\n df = data.copy()\n for column in columns:\n \n \n ten_percentile = (df[column].quantile(0.10))\n ninety_percentile = (df[column].quantile(0.90))\n\n df[column] = np.where(df[column] <ten_percentile, ten_percentile,df[column])\n df[column] = np.where(df[column] >ninety_percentile, ninety_percentile,df[column])\n \n return df\n\ndef add_knn_feature(model, data, columns_to_drop):\n df = data.copy()\n df = df.drop(columns_to_drop, axis = 1)\n pred = model.predict(df)\n df['Knn'] = pred\n return df" ]
[ [ "numpy.where" ] ]
Fyy10/ML-DL_Practice
[ "7b7c8a63d52fbf2c9192d913255852bbbe3de276" ]
[ "Basics/Regression.py" ]
[ "import torch\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nimport torch.optim as optim\n\n\nx = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)\ny = x.pow(2) + 0.2 * torch.rand(x.size())\n\n# plt.scatter(x.numpy(), y.numpy())\n# plt.show()\n\n\nclass Net(nn.Module):\n def __init__(self, n_features, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden = nn.Linear(n_features, n_hidden)\n self.predict = nn.Linear(n_hidden, n_output)\n\n def forward(self, x):\n x = F.relu(self.hidden(x))\n x = self.predict(x)\n return x\n\n\nnet = Net(1, 20, 1)\nprint(net)\n\nplt.ion()\nplt.show()\n\noptimizer = optim.SGD(net.parameters(), lr=0.02, momentum=0.9)\nloss_function = nn.MSELoss()\n\nfor t in range(200):\n prediction = net(x)\n\n loss = loss_function(prediction, y)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if (t % 5) == 0:\n print(\"Loss : %f\" % loss.data)\n # plot and show learning process\n plt.cla()\n plt.scatter(x.numpy(), y.numpy())\n plt.plot(x.numpy(), prediction.data.numpy(), 'r-', lw=5)\n plt.text(0.5, 0, 'Loss=%.4f' % loss.data, fontdict={'size': 20, 'color': 'red'})\n plt.pause(0.1)\n\nplt.ioff()\nplt.show()\n" ]
[ [ "matplotlib.pyplot.pause", "torch.nn.Linear", "torch.nn.MSELoss", "matplotlib.pyplot.cla", "matplotlib.pyplot.ioff", "torch.linspace", "matplotlib.pyplot.show", "matplotlib.pyplot.text", "matplotlib.pyplot.ion" ] ]
vrautela/hail
[ "7db6189b5b1feafa88452b8470e497d9505d9a46" ]
[ "hail/python/test/hail/expr/test_ndarrays.py" ]
[ "import numpy as np\nfrom ..helpers import *\nimport pytest\n\nfrom hail.utils.java import FatalError, HailUserError\n\nsetUpModule = startTestHailContext\ntearDownModule = stopTestHailContext\n\ndef assert_ndarrays(asserter, exprs_and_expecteds):\n exprs, expecteds = zip(*exprs_and_expecteds)\n\n expr_tuple = hl.tuple(exprs)\n evaled_exprs = hl.eval(expr_tuple)\n\n evaled_and_expected = zip(evaled_exprs, expecteds)\n for (idx, (evaled, expected)) in enumerate(evaled_and_expected):\n assert asserter(evaled, expected), f\"NDArray comparison {idx} failed, got: {evaled}, expected: {expected}\"\n\n\ndef assert_ndarrays_eq(*expr_and_expected):\n assert_ndarrays(np.array_equal, expr_and_expected)\n\n\ndef assert_ndarrays_almost_eq(*expr_and_expected):\n assert_ndarrays(np.allclose, expr_and_expected)\n\n\n@fails_service_backend()\ndef test_ndarray_ref():\n\n scalar = 5.0\n np_scalar = np.array(scalar)\n h_scalar = hl.nd.array(scalar)\n h_np_scalar = hl.nd.array(np_scalar)\n\n assert_evals_to(h_scalar[()], 5.0)\n assert_evals_to(h_np_scalar[()], 5.0)\n\n cube = [[[0, 1],\n [2, 3]],\n [[4, 5],\n [6, 7]]]\n h_cube = hl.nd.array(cube)\n h_np_cube = hl.nd.array(np.array(cube))\n missing = hl.nd.array(hl.missing(hl.tarray(hl.tint32)))\n\n assert_all_eval_to(\n (h_cube[0, 0, 1], 1),\n (h_cube[1, 1, 0], 6),\n (h_np_cube[0, 0, 1], 1),\n (h_np_cube[1, 1, 0], 6),\n (hl.nd.array([[[[1]]]])[0, 0, 0, 0], 1),\n (hl.nd.array([[[1, 2]], [[3, 4]]])[1, 0, 0], 3),\n (missing[1], None),\n (hl.nd.array([1, 2, 3])[hl.missing(hl.tint32)], None),\n (h_cube[0, 0, hl.missing(hl.tint32)], None)\n )\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.nd.array([1, 2, 3])[4])\n assert \"Index 4 is out of bounds for axis 0 with size 3\" in str(exc.value)\n\n\n@fails_service_backend()\ndef test_ndarray_slice():\n np_rect_prism = np.arange(24).reshape((2, 3, 4))\n rect_prism = hl.nd.array(np_rect_prism)\n np_mat = np.arange(8).reshape((2, 4))\n mat = hl.nd.array(np_mat)\n np_flat = np.arange(20)\n flat = hl.nd.array(np_flat)\n a = [0, 1]\n an = np.array(a)\n ah = hl.nd.array(a)\n ae_np = np.arange(4*4*5*6*5*4).reshape((4, 4, 5, 6, 5, 4))\n ae = hl.nd.array(ae_np)\n assert_ndarrays_eq(\n (rect_prism[:, :, :], np_rect_prism[:, :, :]),\n (rect_prism[:, :, 1], np_rect_prism[:, :, 1]),\n (rect_prism[0:1, 1:3, 0:2], np_rect_prism[0:1, 1:3, 0:2]),\n (rect_prism[:, :, 1:4:2], np_rect_prism[:, :, 1:4:2]),\n (rect_prism[:, 2, 1:4:2], np_rect_prism[:, 2, 1:4:2]),\n (rect_prism[0, 2, 1:4:2], np_rect_prism[0, 2, 1:4:2]),\n (rect_prism[0, :, 1:4:2] + rect_prism[:, :1, 1:4:2],\n np_rect_prism[0, :, 1:4:2] + np_rect_prism[:, :1, 1:4:2]),\n (rect_prism[0:, :, 1:4:2] + rect_prism[:, :1, 1:4:2],\n np_rect_prism[0:, :, 1:4:2] + np_rect_prism[:, :1, 1:4:2]),\n (rect_prism[0, 0, -3:-1], np_rect_prism[0, 0, -3:-1]),\n (rect_prism[-1, 0:1, 3:0:-1], np_rect_prism[-1, 0:1, 3:0:-1]),\n # partial indexing\n (rect_prism[1], np_rect_prism[1]),\n (rect_prism[1:2], np_rect_prism[1:2]),\n (rect_prism[1:2:2], np_rect_prism[1:2:2]),\n (rect_prism[1, 2], np_rect_prism[1, 2]),\n (rect_prism[-1, 1:2:2], np_rect_prism[-1, 1:2:2]),\n # ellipses inclusion\n (rect_prism[...], np_rect_prism[...]),\n (rect_prism[1, ...], np_rect_prism[1, ...]),\n (rect_prism[..., 1], np_rect_prism[..., 1]),\n # np.newaxis inclusion\n (rect_prism[hl.nd.newaxis, :, :], np_rect_prism[np.newaxis, :, :]),\n (rect_prism[hl.nd.newaxis], np_rect_prism[np.newaxis]),\n (rect_prism[hl.nd.newaxis, np.newaxis, np.newaxis], np_rect_prism[np.newaxis, np.newaxis, np.newaxis]),\n 
(rect_prism[hl.nd.newaxis, np.newaxis, 1:4:2], np_rect_prism[np.newaxis, np.newaxis, 1:4:2]),\n (rect_prism[1, :, hl.nd.newaxis], np_rect_prism[1, :, np.newaxis]),\n (rect_prism[1, hl.nd.newaxis, 1], np_rect_prism[1, np.newaxis, 1]),\n (rect_prism[..., hl.nd.newaxis, 1], np_rect_prism[..., np.newaxis, 1]),\n )\n assert_ndarrays_eq(\n (flat[15:5:-1], np_flat[15:5:-1]),\n (flat[::-1], np_flat[::-1]),\n (flat[::22], np_flat[::22]),\n (flat[::-22], np_flat[::-22]),\n (flat[15:5], np_flat[15:5]),\n (flat[3:12:-1], np_flat[3:12:-1]),\n (flat[12:3:1], np_flat[12:3:1]),\n (flat[4:1:-2], np_flat[4:1:-2]),\n (flat[0:0:1], np_flat[0:0:1]),\n (flat[-4:-1:2], np_flat[-4:-1:2]),\n # ellipses inclusion\n (flat[...], np_flat[...]),\n\n\n (mat[::-1, :], np_mat[::-1, :]),\n (mat[0, 1:4:2] + mat[:, 1:4:2], np_mat[0, 1:4:2] + np_mat[:, 1:4:2]),\n (mat[-1:4:1, 0], np_mat[-1:4:1, 0]),\n (mat[-1:4:-1, 0], np_mat[-1:4:-1, 0]),\n # out of bounds on start\n (mat[9:2:-1, 1:4], np_mat[9:2:-1, 1:4]),\n (mat[9:-1:-1, 1:4], np_mat[9:-1:-1, 1:4]),\n (mat[-5::, 0], np_mat[-5::, 0]),\n (mat[-5::-1, 0], np_mat[-5::-1, 0]),\n (mat[-5:-1:-1, 0], np_mat[-5:-1:-1, 0]),\n (mat[-5:-5:-1, 0], np_mat[-5:-5:-1, 0]),\n (mat[4::, 0], np_mat[4::, 0]),\n (mat[4:-1:, 0], np_mat[4:-1:, 0]),\n (mat[4:-1:-1, 0], np_mat[4:-1:-1, 0]),\n (mat[5::, 0], np_mat[5::, 0]),\n (mat[5::-1, 0], np_mat[5::-1, 0]),\n (mat[-5::-1, 0], np_mat[-5::-1, 0]),\n (mat[-5::1, 0], np_mat[-5::1, 0]),\n (mat[5:-1:-1, 0], np_mat[5:-1:-1, 0]),\n (mat[5:-5:-1, 0], np_mat[5:-5:-1, 0]),\n # out of bounds on stop\n (mat[0:20, 0:17], np_mat[0:20, 0:17]),\n (mat[0:20, 2:17], np_mat[0:20, 2:17]),\n (mat[:4, 0], np_mat[:4, 0]),\n (mat[:4:-1, 0], np_mat[:4:-1, 0]),\n (mat[:-5, 0], np_mat[:-5, 0]),\n (mat[:-5:-1, 0], np_mat[:-5:-1, 0]),\n (mat[0:-5, 0], np_mat[0:-5, 0]),\n (mat[0:-5:-1, 0], np_mat[0:-5:-1, 0]),\n # partial indexing\n (mat[1], np_mat[1]),\n (mat[0:1], np_mat[0:1]),\n # ellipses inclusion\n (mat[...], np_mat[...]),\n\n (ah[:-3:1], an[:-3:1]),\n (ah[:-3:-1], an[:-3:-1]),\n (ah[-3::-1], an[-3::-1]),\n (ah[-3::1], an[-3::1]),\n\n # ellipses inclusion\n (ae[..., 3], ae_np[..., 3]),\n (ae[3, ...], ae_np[3, ...]),\n (ae[2, 3, 1:2:2, ...], ae_np[2, 3, 1:2:2, ...]),\n (ae[3, 2, 3, ..., 2], ae_np[3, 2, 3, ..., 2]),\n (ae[3, 2, 2, ..., 2, 1:2:2], ae_np[3, 2, 2, ..., 2, 1:2:2]),\n (ae[3, :, hl.nd.newaxis, ..., :, hl.nd.newaxis, 2], ae_np[3, :, np.newaxis, ..., :, np.newaxis, 2])\n )\n\n assert hl.eval(flat[hl.missing(hl.tint32):4:1]) is None\n assert hl.eval(flat[4:hl.missing(hl.tint32)]) is None\n assert hl.eval(flat[4:10:hl.missing(hl.tint32)]) is None\n assert hl.eval(rect_prism[:, :, 0:hl.missing(hl.tint32):1]) is None\n assert hl.eval(rect_prism[hl.missing(hl.tint32), :, :]) is None\n\n with pytest.raises(HailUserError, match=\"Slice step cannot be zero\"):\n hl.eval(flat[::0])\n\n with pytest.raises(HailUserError, match=\"Index 3 is out of bounds for axis 0 with size 2\"):\n hl.eval(mat[3, 1:3])\n\n with pytest.raises(HailUserError, match=\"Index -4 is out of bounds for axis 0 with size 2\"):\n hl.eval(mat[-4, 0:3])\n\n with pytest.raises(IndexError, match=\"an index can only have a single ellipsis\"):\n hl.eval(rect_prism[..., ...])\n\n with pytest.raises(IndexError, match=\"too many indices for array: array is 3-dimensional, but 4 were indexed\"):\n hl.eval(rect_prism[1, 1, 1, 1])\n\n\ndef test_ndarray_transposed_slice():\n a = hl.nd.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n np_a = np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]])\n aT = a.T\n np_aT = np_a.T\n 
assert_ndarrays_eq(\n (a, np_a),\n (aT[0:aT.shape[0], 0:5], np_aT[0:np_aT.shape[0], 0:5])\n )\n\n\n@fails_service_backend()\ndef test_ndarray_eval():\n data_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]\n mishapen_data_list1 = [[4], [1, 2, 3]]\n mishapen_data_list2 = [[[1], [2, 3]]]\n mishapen_data_list3 = [[4], [1, 2, 3], 5]\n\n nd_expr = hl.nd.array(data_list)\n evaled = hl.eval(nd_expr)\n np_equiv = np.array(data_list, dtype=np.int32)\n np_equiv_fortran_style = np.asfortranarray(np_equiv)\n np_equiv_extra_dimension = np_equiv.reshape((3, 1, 3))\n assert(np.array_equal(evaled, np_equiv))\n\n assert np.array_equal(hl.eval(hl.nd.array([])), np.array([]))\n\n zero_array = np.zeros((10, 10), dtype=np.int64)\n evaled_zero_array = hl.eval(hl.literal(zero_array))\n\n assert np.array_equal(evaled_zero_array, zero_array)\n assert zero_array.dtype == evaled_zero_array.dtype\n\n # Testing correct interpretation of numpy strides\n assert np.array_equal(hl.eval(hl.literal(np_equiv_fortran_style)), np_equiv_fortran_style)\n assert np.array_equal(hl.eval(hl.literal(np_equiv_extra_dimension)), np_equiv_extra_dimension)\n\n # Testing from hail arrays\n assert np.array_equal(hl.eval(hl.nd.array(hl.range(6))), np.arange(6))\n assert np.array_equal(hl.eval(hl.nd.array(hl.int64(4))), np.array(4))\n\n # Testing from nested hail arrays\n assert np.array_equal(\n hl.eval(hl.nd.array(hl.array([hl.array(x) for x in data_list]))), np.arange(9).reshape((3, 3)) + 1)\n\n # Testing missing data\n assert hl.eval(hl.nd.array(hl.missing(hl.tarray(hl.tint32)))) is None\n\n with pytest.raises(ValueError) as exc:\n hl.nd.array(mishapen_data_list1)\n assert \"inner dimensions do not match\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.nd.array(hl.array(mishapen_data_list1)))\n assert \"inner dimensions do not match\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.nd.array(hl.array(mishapen_data_list2)))\n assert \"inner dimensions do not match\" in str(exc.value)\n\n with pytest.raises(ValueError) as exc:\n hl.nd.array(mishapen_data_list3)\n assert \"inner dimensions do not match\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.nd.array([1, hl.missing(hl.tint32), 3]))\n\n\ndef test_ndarray_shape():\n np_e = np.array(3)\n np_row = np.array([1, 2, 3])\n np_col = np.array([[1], [2], [3]])\n np_m = np.array([[1, 2], [3, 4]])\n np_nd = np.arange(30).reshape((2, 5, 3))\n\n e = hl.nd.array(np_e)\n row = hl.nd.array(np_row)\n col = hl.nd.array(np_col)\n m = hl.nd.array(np_m)\n nd = hl.nd.array(np_nd)\n missing = hl.nd.array(hl.missing(hl.tarray(hl.tint32)))\n\n assert_all_eval_to(\n (e.shape, np_e.shape),\n (row.shape, np_row.shape),\n (col.shape, np_col.shape),\n (m.shape, np_m.shape),\n (nd.shape, np_nd.shape),\n ((row + nd).shape, (np_row + np_nd).shape),\n ((row + col).shape, (np_row + np_col).shape),\n (m.transpose().shape, np_m.transpose().shape),\n (missing.shape, None)\n )\n\n\ndef test_ndarray_reshape():\n np_single = np.array([8])\n single = hl.nd.array([8])\n\n np_zero_dim = np.array(4)\n zero_dim = hl.nd.array(4)\n\n np_a = np.array([1, 2, 3, 4, 5, 6])\n a = hl.nd.array(np_a)\n\n np_cube = np.array([0, 1, 2, 3, 4, 5, 6, 7]).reshape((2, 2, 2))\n cube = hl.nd.array([0, 1, 2, 3, 4, 5, 6, 7]).reshape((2, 2, 2))\n cube_to_rect = cube.reshape((2, 4))\n np_cube_to_rect = np_cube.reshape((2, 4))\n cube_t_to_rect = cube.transpose((1, 0, 2)).reshape((2, 4))\n np_cube_t_to_rect = np_cube.transpose((1, 0, 2)).reshape((2, 4))\n\n np_hypercube = 
np.arange(3 * 5 * 7 * 9).reshape((3, 5, 7, 9))\n hypercube = hl.nd.array(np_hypercube)\n\n np_shape_zero = np.array([])\n shape_zero = hl.nd.array(np_shape_zero)\n\n assert_ndarrays_eq(\n (single.reshape(()), np_single.reshape(())),\n (zero_dim.reshape(()), np_zero_dim.reshape(())),\n (zero_dim.reshape((1,)), np_zero_dim.reshape((1,))),\n (a.reshape((6,)), np_a.reshape((6,))),\n (a.reshape((2, 3)), np_a.reshape((2, 3))),\n (a.reshape(2, 3), np_a.reshape(2, 3)),\n (a.reshape((3, 2)), np_a.reshape((3, 2))),\n (a.reshape((3, -1)), np_a.reshape((3, -1))),\n (a.reshape((-1, 2)), np_a.reshape((-1, 2))),\n (cube_to_rect, np_cube_to_rect),\n (cube_t_to_rect, np_cube_t_to_rect),\n (hypercube.reshape((5, 7, 9, 3)).reshape((7, 9, 3, 5)), np_hypercube.reshape((7, 9, 3, 5))),\n (hypercube.reshape(hl.tuple([5, 7, 9, 3])), np_hypercube.reshape((5, 7, 9, 3))),\n (shape_zero.reshape((0, 5)), np_shape_zero.reshape((0, 5))),\n (shape_zero.reshape((-1, 5)), np_shape_zero.reshape((-1, 5)))\n )\n\n assert hl.eval(hl.missing(hl.tndarray(hl.tfloat, 2)).reshape((4, 5))) is None\n assert hl.eval(hl.nd.array(hl.range(20)).reshape(\n hl.missing(hl.ttuple(hl.tint64, hl.tint64)))) is None\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.literal(np_cube).reshape((-1, -1)))\n assert \"more than one -1\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.literal(np_cube).reshape((20,)))\n assert \"requested shape is incompatible with number of elements\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(a.reshape((3,)))\n assert \"requested shape is incompatible with number of elements\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(a.reshape(()))\n assert \"requested shape is incompatible with number of elements\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.literal(np_cube).reshape((0, 2, 2)))\n assert \"requested shape is incompatible with number of elements\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.literal(np_cube).reshape((2, 2, -2)))\n assert \"must contain only nonnegative numbers or -1\" in str(exc.value)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(shape_zero.reshape((0, -1)))\n assert \"Can't reshape\" in str(exc.value)\n\n with pytest.raises(TypeError):\n a.reshape(hl.tuple(['4', '5']))\n\n\ndef test_ndarray_map1():\n a = hl.nd.array([[2, 3, 4], [5, 6, 7]])\n b = hl.map(lambda x: -x, a)\n b2 = b.map(lambda x: x * x)\n c = hl.map(lambda x: True, a)\n\n assert_ndarrays_eq(\n (b, [[-2, -3, -4], [-5, -6, -7]]),\n (b2, [[4, 9, 16], [25, 36, 49]]),\n (c, [[True, True, True],\n [True, True, True]]))\n\n assert hl.eval(hl.missing(hl.tndarray(hl.tfloat, 1)).map(lambda x: x * 2)) is None\n\n # NDArrays don't correctly support elements that contain pointers at the moment.\n # s = hl.nd.array([\"hail\", \"is\", \"great\"])\n # s_lens = s.map(lambda e: hl.len(e))\n # assert np.array_equal(hl.eval(s_lens), np.array([4, 2, 5]))\n\n structs = hl.nd.array([hl.struct(x=5, y=True), hl.struct(x=9, y=False)])\n assert np.array_equal(hl.eval(structs.map(lambda e: e.y)), np.array([True, False]))\n\n\ndef test_ndarray_map2():\n\n a = 2.0\n b = 3.0\n x = np.array([a, b])\n y = np.array([b, a])\n row_vec = np.array([[1, 2]])\n cube1 = np.array([[[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]]])\n cube2 = np.array([[[9, 10],\n [11, 12]],\n [[13, 14],\n [15, 16]]])\n\n na = hl.nd.array(a)\n nx = hl.nd.array(x)\n ny = hl.nd.array(y)\n nrow_vec = hl.nd.array(row_vec)\n ncube1 = 
hl.nd.array(cube1)\n ncube2 = hl.nd.array(cube2)\n\n assert_ndarrays_eq(\n # with lists/numerics\n (na + b, np.array(a + b)),\n (b + na, np.array(a + b)),\n (nx + y, x + y),\n (ncube1 + cube2, cube1 + cube2),\n (na + na, np.array(a + a)),\n (nx + ny, x + y),\n (ncube1 + ncube2, cube1 + cube2),\n (nx.map2(y, lambda c, d: c+d), x + y),\n (ncube1.map2(cube2, lambda c, d: c+d), cube1 + cube2),\n # Broadcasting\n (ncube1 + na, cube1 + a),\n (na + ncube1, a + cube1),\n (ncube1 + ny, cube1 + y),\n (ny + ncube1, y + cube1),\n (nrow_vec + ncube1, row_vec + cube1),\n (ncube1 + nrow_vec, cube1 + row_vec),\n (ncube1.map2(na, lambda c, d: c+d), cube1 + a),\n (nrow_vec.map2(ncube1, lambda c, d: c+d), row_vec + cube1),\n\n\n # Subtraction\n (na - na, np.array(a - a)),\n (nx - nx, x - x),\n (ncube1 - ncube2, cube1 - cube2),\n # Broadcasting\n (ncube1 - na, cube1 - a),\n (na - ncube1, a - cube1),\n (ncube1 - ny, cube1 - y),\n (ny - ncube1, y - cube1),\n (ncube1 - nrow_vec, cube1 - row_vec),\n (nrow_vec - ncube1, row_vec - cube1),\n\n # Multiplication\n (na * na, np.array(a * a)),\n (nx * nx, x * x),\n (nx * na, x * a),\n (na * nx, a * x),\n (ncube1 * ncube2, cube1 * cube2),\n # Broadcasting\n (ncube1 * na, cube1 * a),\n (na * ncube1, a * cube1),\n (ncube1 * ny, cube1 * y),\n (ny * ncube1, y * cube1),\n (ncube1 * nrow_vec, cube1 * row_vec),\n (nrow_vec * ncube1, row_vec * cube1),\n\n\n # Floor div\n (na // na, np.array(a // a)),\n (nx // nx, x // x),\n (nx // na, x // a),\n (na // nx, a // x),\n (ncube1 // ncube2, cube1 // cube2),\n # Broadcasting\n (ncube1 // na, cube1 // a),\n (na // ncube1, a // cube1),\n (ncube1 // ny, cube1 // y),\n (ny // ncube1, y // cube1),\n (ncube1 // nrow_vec, cube1 // row_vec),\n (nrow_vec // ncube1, row_vec // cube1)\n )\n\n # Division\n assert_ndarrays_almost_eq(\n (na / na, np.array(a / a)),\n (nx / nx, x / x),\n (nx / na, x / a),\n (na / nx, a / x),\n (ncube1 / ncube2, cube1 / cube2),\n # Broadcasting\n (ncube1 / na, cube1 / a),\n (na / ncube1, a / cube1),\n (ncube1 / ny, cube1 / y),\n (ny / ncube1, y / cube1),\n (ncube1 / nrow_vec, cube1 / row_vec),\n (nrow_vec / ncube1, row_vec / cube1))\n\n # Missingness tests\n missing = hl.missing(hl.tndarray(hl.tfloat64, 2))\n present = hl.nd.array(np.arange(10).reshape(5, 2))\n\n assert hl.eval(missing + missing) is None\n assert hl.eval(missing + present) is None\n assert hl.eval(present + missing) is None\n\ndef test_ndarray_sum():\n np_m = np.array([[1, 2], [3, 4]])\n m = hl.nd.array(np_m)\n\n assert_ndarrays_eq(\n (m.sum(axis=0), np_m.sum(axis=0)),\n (m.sum(axis=1), np_m.sum(axis=1)),\n (m.sum(tuple([])), np_m.sum(tuple([]))))\n\n assert hl.eval(m.sum()) == 10\n assert hl.eval(m.sum((0, 1))) == 10\n\n bool_nd = hl.nd.array([[True, False, True], [False, True, True]])\n assert hl.eval(bool_nd.sum()) == 4\n\n with pytest.raises(ValueError) as exc:\n m.sum(3)\n assert \"out of bounds for ndarray of dimension 2\" in str(exc.value)\n\n with pytest.raises(ValueError) as exc:\n m.sum((1, 1))\n assert \"duplicate\" in str(exc.value)\n\n\ndef test_ndarray_transpose():\n np_v = np.array([1, 2, 3])\n np_m = np.array([[1, 2, 3], [4, 5, 6]])\n np_cube = np.array([[[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]]])\n v = hl.nd.array(np_v)\n m = hl.nd.array(np_m)\n cube = hl.nd.array(np_cube)\n\n assert_ndarrays_eq(\n (v.T, np_v.T),\n (v.T, np_v),\n (m.T, np_m.T),\n (cube.transpose((0, 2, 1)), np_cube.transpose((0, 2, 1))),\n (cube.T, np_cube.T))\n\n assert hl.eval(hl.missing(hl.tndarray(hl.tfloat, 1)).T) is None\n\n with pytest.raises(ValueError) as 
exc:\n v.transpose((1,))\n assert \"Invalid axis: 1\" in str(exc.value)\n\n with pytest.raises(ValueError) as exc:\n cube.transpose((1, 1))\n assert \"Expected 3 axes, got 2\" in str(exc.value)\n\n with pytest.raises(ValueError) as exc:\n cube.transpose((1, 1, 1))\n assert \"Axes cannot contain duplicates\" in str(exc.value)\n\n\ndef test_ndarray_matmul():\n np_v = np.array([1, 2])\n np_y = np.array([1, 1, 1])\n np_m = np.array([[1, 2], [3, 4]])\n np_m_f32 = np_m.astype(np.float32)\n np_m_f64 = np_m.astype(np.float64)\n np_r = np.array([[1, 2, 3], [4, 5, 6]])\n np_r_f32 = np_r.astype(np.float32)\n np_r_f64 = np_r.astype(np.float64)\n np_cube = np.arange(8).reshape((2, 2, 2))\n np_rect_prism = np.arange(12).reshape((3, 2, 2))\n np_broadcasted_mat = np.arange(4).reshape((1, 2, 2))\n np_six_dim_tensor = np.arange(3 * 7 * 1 * 9 * 4 * 5).reshape((3, 7, 1, 9, 4, 5))\n np_five_dim_tensor = np.arange(7 * 5 * 1 * 5 * 3).reshape((7, 5, 1, 5, 3))\n np_ones_int32 = np.ones((4, 4), dtype=np.int32)\n np_ones_float64 = np.ones((4, 4), dtype=np.float64)\n np_zero_by_four = np.array([], dtype=np.float64).reshape((0, 4))\n\n v = hl.nd.array(np_v)\n y = hl.nd.array(np_y)\n m = hl.nd.array(np_m)\n m_f32 = hl.nd.array(np_m_f32)\n m_f64 = hl.nd.array(np_m_f64)\n r = hl.nd.array(np_r)\n r_f32 = hl.nd.array(np_r_f32)\n r_f64 = hl.nd.array(np_r_f64)\n cube = hl.nd.array(np_cube)\n rect_prism = hl.nd.array(np_rect_prism)\n broadcasted_mat = hl.nd.array(np_broadcasted_mat)\n six_dim_tensor = hl.nd.array(np_six_dim_tensor)\n five_dim_tensor = hl.nd.array(np_five_dim_tensor)\n ones_int32 = hl.nd.array(np_ones_int32)\n ones_float64 = hl.nd.array(np_ones_float64)\n zero_by_four = hl.nd.array(np_zero_by_four)\n\n assert_ndarrays_eq(\n (v @ v, np_v @ np_v),\n (m @ m, np_m @ np_m),\n (m_f32 @ m_f32, np_m_f32 @ np_m_f32),\n (m_f64 @ m_f64, np_m_f64 @ np_m_f64),\n (m @ m.T, np_m @ np_m.T),\n (m_f64 @ m_f64.T, np_m_f64 @ np_m_f64.T),\n (r @ r.T, np_r @ np_r.T),\n (r_f32 @ r_f32.T, np_r_f32 @ np_r_f32.T),\n (r_f64 @ r_f64.T, np_r_f64 @ np_r_f64.T),\n (v @ m, np_v @ np_m),\n (m @ v, np_m @ np_v),\n (v @ r, np_v @ np_r),\n (r @ y, np_r @ np_y),\n (cube @ cube, np_cube @ np_cube),\n (cube @ v, np_cube @ np_v),\n (v @ cube, np_v @ np_cube),\n (cube @ m, np_cube @ np_m),\n (m @ cube, np_m @ np_cube),\n (rect_prism @ m, np_rect_prism @ np_m),\n (m @ rect_prism, np_m @ np_rect_prism),\n (m @ rect_prism.T, np_m @ np_rect_prism.T),\n (broadcasted_mat @ rect_prism, np_broadcasted_mat @ np_rect_prism),\n (six_dim_tensor @ five_dim_tensor, np_six_dim_tensor @ np_five_dim_tensor),\n (zero_by_four @ ones_float64, np_zero_by_four, np_ones_float64),\n (zero_by_four.transpose() @ zero_by_four, np_zero_by_four.transpose() @ np_zero_by_four)\n )\n\n assert hl.eval(hl.missing(hl.tndarray(hl.tfloat64, 2)) @\n hl.missing(hl.tndarray(hl.tfloat64, 2))) is None\n assert hl.eval(hl.missing(hl.tndarray(hl.tint64, 2)) @\n hl.nd.array(np.arange(10).reshape(5, 2))) is None\n assert hl.eval(hl.nd.array(np.arange(10).reshape(5, 2)) @\n hl.missing(hl.tndarray(hl.tint64, 2))) is None\n\n assert np.array_equal(hl.eval(ones_int32 @ ones_float64), np_ones_int32 @ np_ones_float64)\n\n with pytest.raises(ValueError):\n m @ 5\n\n with pytest.raises(ValueError):\n m @ hl.nd.array(5)\n\n with pytest.raises(ValueError):\n cube @ hl.nd.array(5)\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(r @ r)\n assert \"Matrix dimensions incompatible: (2, 3) can't be multiplied by matrix with dimensions (2, 3)\" in str(exc.value), str(exc.value)\n\n with 
pytest.raises(HailUserError) as exc:\n hl.eval(hl.nd.array([1, 2]) @ hl.nd.array([1, 2, 3]))\n assert \"Matrix dimensions incompatible\" in str(exc.value)\n\ndef test_ndarray_matmul_dgemv():\n np_mat_3_4 = np.arange(12, dtype=np.float64).reshape((3, 4))\n np_mat_4_3 = np.arange(12, dtype=np.float64).reshape((4, 3))\n np_vec_3 = np.array([4, 2, 7], dtype=np.float64)\n np_vec_4 = np.array([9, 17, 3, 1], dtype=np.float64)\n\n mat_3_4 = hl.nd.array(np_mat_3_4)\n mat_4_3 = hl.nd.array(np_mat_4_3)\n vec_3 = hl.nd.array(np_vec_3)\n vec_4 = hl.nd.array(np_vec_4)\n\n assert_ndarrays_eq(\n (mat_3_4 @ vec_4, np_mat_3_4 @ np_vec_4),\n (mat_4_3 @ vec_3, np_mat_4_3 @ np_vec_3),\n (mat_3_4.T @ vec_3, np_mat_3_4.T @ np_vec_3)\n )\n\ndef test_ndarray_big():\n assert hl.eval(hl.nd.array(hl.range(100_000))).size == 100_000\n\n\ndef test_ndarray_full():\n assert_ndarrays_eq(\n (hl.nd.zeros(4), np.zeros(4)),\n (hl.nd.zeros((3, 4, 5)), np.zeros((3, 4, 5))),\n (hl.nd.ones(6), np.ones(6)),\n (hl.nd.ones((6, 6, 6)), np.ones((6, 6, 6))),\n (hl.nd.full(7, 9), np.full(7, 9)),\n (hl.nd.full((3, 4, 5), 9), np.full((3, 4, 5), 9))\n )\n\n assert hl.eval(hl.nd.zeros((5, 5), dtype=hl.tfloat32)).dtype, np.float32\n assert hl.eval(hl.nd.ones(3, dtype=hl.tint64)).dtype, np.int64\n assert hl.eval(hl.nd.full((5, 6, 7), hl.int32(3), dtype=hl.tfloat64)).dtype, np.float64\n\n\ndef test_ndarray_arange():\n assert_ndarrays_eq(\n (hl.nd.arange(40), np.arange(40)),\n (hl.nd.arange(5, 50), np.arange(5, 50)),\n (hl.nd.arange(2, 47, 13), np.arange(2, 47, 13))\n )\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.nd.arange(5, 20, 0))\n assert \"Array range cannot have step size 0\" in str(exc.value)\n\n\ndef test_ndarray_mixed():\n assert hl.eval(hl.missing(hl.tndarray(hl.tint64, 2)).map(\n lambda x: x * x).reshape((4, 5)).T) is None\n assert hl.eval(\n (hl.nd.zeros((5, 10)).map(lambda x: x - 2) +\n hl.nd.ones((5, 10)).map(lambda x: x + 5)).reshape(hl.missing(hl.ttuple(hl.tint64, hl.tint64))).T.reshape((10, 5))) is None\n assert hl.eval(hl.or_missing(False, hl.nd.array(np.arange(10)).reshape(\n (5, 2)).map(lambda x: x * 2)).map(lambda y: y * 2)) is None\n\n\ndef test_ndarray_show():\n hl.nd.array(3).show()\n hl.nd.arange(6).show()\n hl.nd.arange(6).reshape((2, 3)).show()\n hl.nd.arange(8).reshape((2, 2, 2)).show()\n\n\ndef test_ndarray_diagonal():\n assert np.array_equal(hl.eval(hl.nd.diagonal(hl.nd.array([[1, 2], [3, 4]]))), np.array([1, 4]))\n assert np.array_equal(hl.eval(hl.nd.diagonal(\n hl.nd.array([[1, 2, 3], [4, 5, 6]]))), np.array([1, 5]))\n assert np.array_equal(hl.eval(hl.nd.diagonal(\n hl.nd.array([[1, 2], [3, 4], [5, 6]]))), np.array([1, 4]))\n\n with pytest.raises(AssertionError) as exc:\n hl.nd.diagonal(hl.nd.array([1, 2]))\n assert \"2 dimensional\" in str(exc.value)\n\n\ndef test_ndarray_solve_triangular():\n a = hl.nd.array([[1, 1], [0, 1]])\n b = hl.nd.array([2, 1])\n b2 = hl.nd.array([[11, 5], [6, 3]])\n\n a_low = hl.nd.array([[4, 0], [2, 1]])\n b_low = hl.nd.array([4, 5])\n\n a_sing = hl.nd.array([[0, 1], [0, 1]])\n b_sing = hl.nd.array([2, 2])\n\n assert np.allclose(hl.eval(hl.nd.solve_triangular(a, b)), np.array([1., 1.]))\n assert np.allclose(hl.eval(hl.nd.solve_triangular(a, b2)), np.array([[5., 2.], [6., 3.]]))\n assert np.allclose(hl.eval(hl.nd.solve_triangular(a_low, b_low, True)), np.array([[1., 3.]]))\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.nd.solve_triangular(a_sing, b_sing))\n assert \"singular\" in str(exc.value), str(exc.value)\n\n\ndef test_ndarray_solve():\n a = 
hl.nd.array([[1, 2], [3, 5]])\n b = hl.nd.array([1, 2])\n b2 = hl.nd.array([[1, 8], [2, 12]])\n\n assert np.allclose(hl.eval(hl.nd.solve(a, b)), np.array([-1., 1.]))\n assert np.allclose(hl.eval(hl.nd.solve(a, b2)), np.array([[-1., -16.], [1, 12]]))\n assert np.allclose(hl.eval(hl.nd.solve(a.T, b2.T)), np.array([[19., 26.], [-6, -8]]))\n\n with pytest.raises(HailUserError) as exc:\n hl.eval(hl.nd.solve(hl.nd.array([[1, 2], [1, 2]]), hl.nd.array([8, 10])))\n assert \"singular\" in str(exc.value), str(exc.value)\n\n\ndef test_ndarray_qr():\n def assert_raw_equivalence(hl_ndarray, np_ndarray):\n ndarray_h, ndarray_tau = hl.eval(hl.nd.qr(hl_ndarray, mode=\"raw\"))\n np_ndarray_h, np_ndarray_tau = np.linalg.qr(np_ndarray, mode=\"raw\")\n\n # Can't ask for the rank of something that has a 0 in its shape.\n if 0 in np_ndarray.shape:\n assert ndarray_h.shape == np_ndarray_h.shape\n assert ndarray_tau.shape == np_ndarray_tau.shape\n else:\n rank = np.linalg.matrix_rank(np_ndarray)\n\n assert np.allclose(ndarray_h[:, :rank], np_ndarray_h[:, :rank])\n assert np.allclose(ndarray_tau[:rank], np_ndarray_tau[:rank])\n\n def assert_r_equivalence(hl_ndarray, np_ndarray):\n assert np.allclose(hl.eval(hl.nd.qr(hl_ndarray, mode=\"r\")),\n np.linalg.qr(np_ndarray, mode=\"r\"))\n\n def assert_reduced_equivalence(hl_ndarray, np_ndarray):\n q, r = hl.eval(hl.nd.qr(hl_ndarray, mode=\"reduced\"))\n nq, nr = np.linalg.qr(np_ndarray, mode=\"reduced\")\n\n # Can't ask for the rank of something that has a 0 in its shape.\n if 0 in np_ndarray.shape:\n assert q.shape == nq.shape\n assert r.shape == nr.shape\n else:\n rank = np.linalg.matrix_rank(np_ndarray)\n\n assert np.allclose(q[:, :rank], nq[:, :rank])\n assert np.allclose(r, nr)\n assert np.allclose(q @ r, np_ndarray)\n\n def assert_complete_equivalence(hl_ndarray, np_ndarray):\n q, r = hl.eval(hl.nd.qr(hl_ndarray, mode=\"complete\"))\n nq, nr = np.linalg.qr(np_ndarray, mode=\"complete\")\n\n # Can't ask for the rank of something that has a 0 in its shape.\n if 0 in np_ndarray.shape:\n assert q.shape == nq.shape\n assert r.shape == nr.shape\n else:\n rank = np.linalg.matrix_rank(np_ndarray)\n\n assert np.allclose(q[:, :rank], nq[:, :rank])\n assert np.allclose(r, nr)\n assert np.allclose(q @ r, np_ndarray)\n\n def assert_same_qr(hl_ndarray, np_ndarray):\n assert_raw_equivalence(hl_ndarray, np_ndarray)\n assert_r_equivalence(hl_ndarray, np_ndarray)\n assert_reduced_equivalence(hl_ndarray, np_ndarray)\n assert_complete_equivalence(hl_ndarray, np_ndarray)\n\n np_identity4 = np.identity(4)\n identity4 = hl.nd.array(np_identity4)\n\n assert_same_qr(identity4, np_identity4)\n\n np_size_zero_n = np.zeros((10, 0))\n size_zero_n = hl.nd.zeros((10, 0))\n\n assert_same_qr(size_zero_n, np_size_zero_n)\n\n np_size_zero_m = np.zeros((0, 10))\n size_zero_m = hl.nd.zeros((0, 10))\n\n assert_same_qr(size_zero_m, np_size_zero_m)\n\n np_all3 = np.full((3, 3), 3)\n all3 = hl.nd.full((3, 3), 3)\n\n assert_same_qr(all3, np_all3)\n\n np_nine_square = np.arange(9).reshape((3, 3))\n nine_square = hl.nd.arange(9).reshape((3, 3))\n\n assert_same_qr(nine_square, np_nine_square)\n\n np_wiki_example = np.array([[12, -51, 4],\n [6, 167, -68],\n [-4, 24, -41]])\n wiki_example = hl.nd.array(np_wiki_example)\n\n assert_same_qr(wiki_example, np_wiki_example)\n\n np_wide_rect = np.arange(12).reshape((3, 4))\n wide_rect = hl.nd.arange(12).reshape((3, 4))\n\n assert_same_qr(wide_rect, np_wide_rect)\n\n np_tall_rect = np.arange(12).reshape((4, 3))\n tall_rect = hl.nd.arange(12).reshape((4, 3))\n\n 
assert_same_qr(tall_rect, np_tall_rect)\n\n np_single_element = np.array([1]).reshape((1, 1))\n single_element = hl.nd.array([1]).reshape((1, 1))\n\n assert_same_qr(single_element, np_single_element)\n\n np_no_elements = np.array([]).reshape((0, 10))\n no_elements = hl.nd.array(np_no_elements)\n\n assert_same_qr(no_elements, np_no_elements)\n\n with pytest.raises(ValueError) as exc:\n hl.nd.qr(wiki_example, mode=\"invalid\")\n assert \"Unrecognized mode\" in str(exc.value)\n\n with pytest.raises(AssertionError) as exc:\n hl.nd.qr(hl.nd.arange(6))\n assert \"requires 2 dimensional\" in str(exc.value)\n\n\ndef test_svd():\n def assert_evals_to_same_svd(nd_expr, np_array, full_matrices=True, compute_uv=True):\n evaled = hl.eval(hl.nd.svd(nd_expr, full_matrices, compute_uv))\n np_svd = np.linalg.svd(np_array, full_matrices, compute_uv)\n\n # check shapes\n for h, n in zip(evaled, np_svd):\n assert h.shape == n.shape\n\n k = min(np_array.shape)\n rank = np.linalg.matrix_rank(np_array)\n\n if compute_uv:\n hu, hs, hv = evaled\n nu, ns, nv = np_svd\n\n # Singular values match\n np.testing.assert_array_almost_equal(hs, ns)\n\n # U is orthonormal\n uut = hu.T @ hu\n np.testing.assert_array_almost_equal(uut, np.identity(uut.shape[0]))\n\n # V is orthonormal\n vvt = hv @ hv.T\n np.testing.assert_array_almost_equal(vvt, np.identity(vvt.shape[0]))\n\n # Multiplying together gets back to original\n smat = np.zeros(np_array.shape) if full_matrices else np.zeros((k, k))\n smat[:k, :k] = np.diag(hs)\n np.testing.assert_array_almost_equal(hu @ smat @ hv, np_array)\n\n else:\n np.testing.assert_array_almost_equal(evaled, np_svd)\n\n np_small_square = np.arange(4).reshape((2, 2))\n small_square = hl.nd.array(np_small_square)\n np_rank_2_wide_rectangle = np.arange(12).reshape((4, 3))\n rank_2_wide_rectangle = hl.nd.array(np_rank_2_wide_rectangle)\n np_rank_2_tall_rectangle = np_rank_2_wide_rectangle.T\n rank_2_tall_rectangle = hl.nd.array(np_rank_2_tall_rectangle)\n\n assert_evals_to_same_svd(small_square, np_small_square)\n assert_evals_to_same_svd(small_square, np_small_square, compute_uv=False)\n\n assert_evals_to_same_svd(rank_2_wide_rectangle, np_rank_2_wide_rectangle)\n assert_evals_to_same_svd(rank_2_wide_rectangle, np_rank_2_wide_rectangle, full_matrices=False)\n\n assert_evals_to_same_svd(rank_2_tall_rectangle, np_rank_2_tall_rectangle)\n assert_evals_to_same_svd(rank_2_tall_rectangle, np_rank_2_tall_rectangle, full_matrices=False)\n\n\ndef test_numpy_interop():\n v = [2, 3]\n w = [3, 5]\n a = [[2, 3]]\n b = [[3], [5]]\n\n assert np.array_equal(hl.eval(np.array(v) * hl.literal(3)), np.array([6, 9]))\n assert np.array_equal(hl.eval(hl.literal(3) * np.array(v)), np.array([6, 9]))\n\n assert np.array_equal(hl.eval(np.array(v) * hl.nd.array(w)), np.array([6, 15]))\n assert np.array_equal(hl.eval(hl.nd.array(w) * np.array(v)), np.array([6, 15]))\n\n assert np.array_equal(hl.eval(np.array(v) + hl.literal(3)), np.array([5, 6]))\n assert np.array_equal(hl.eval(hl.literal(3) + np.array(v)), np.array([5, 6]))\n\n assert np.array_equal(hl.eval(np.array(v) + hl.nd.array(w)), np.array([5, 8]))\n assert np.array_equal(hl.eval(hl.nd.array(w) + np.array(v)), np.array([5, 8]))\n\n assert np.array_equal(hl.eval(np.array(v) @ hl.nd.array(w)), 21)\n assert np.array_equal(hl.eval(hl.nd.array(v) @ np.array(w)), 21)\n\n assert np.array_equal(hl.eval(np.array(a) @ hl.nd.array(b)), np.array([[21]]))\n assert np.array_equal(hl.eval(hl.nd.array(a) @ np.array(b)), np.array([[21]]))\n\n assert 
np.array_equal(hl.eval(hl.nd.array(b) @ np.array(a)),\n np.array([[6, 9], [10, 15]]))\n assert np.array_equal(hl.eval(np.array(b) @ hl.nd.array(a)),\n np.array([[6, 9], [10, 15]]))\n\n\ndef test_ndarray_emitter_extract():\n np_mat = np.array([0, 1, 2, 1, 0])\n mat = hl.nd.array(np_mat)\n mapped_mat = mat.map(lambda x: hl.array([3, 4, 5])[hl.int(x)])\n assert hl.eval(hl.range(5).map(lambda i: mapped_mat[i])) == [3, 4, 5, 4, 3]\n\n\ndef test_ndarrays_transmute_ops():\n u = hl.utils.range_table(10, n_partitions=10)\n u = u.annotate(x=hl.nd.array([u.idx]), y=hl.nd.array([u.idx]))\n u = u.transmute(xxx=u.x @ u.y)\n assert u.xxx.collect() == [x * x for x in range(10)]\n\n\ndef test_ndarray():\n a1 = hl.eval(hl.nd.array((1, 2, 3)))\n a2 = hl.eval(hl.nd.array([1, 2, 3]))\n an1 = np.array((1, 2, 3))\n an2 = np.array([1, 2, 3])\n\n assert(np.array_equal(a1, a2) and np.array_equal(a2, an2))\n\n a1 = hl.eval(hl.nd.array(((1), (2), (3))))\n a2 = hl.eval(hl.nd.array(([1], [2], [3])))\n a3 = hl.eval(hl.nd.array([[1], [2], [3]]))\n\n an1 = np.array(((1), (2), (3)))\n an2 = np.array(([1], [2], [3]))\n an3 = np.array([[1], [2], [3]])\n\n assert(np.array_equal(a1, an1) and np.array_equal(a2, an2) and np.array_equal(a3, an3))\n\n a1 = hl.eval(hl.nd.array(((1, 2), (2, 5), (3, 8))))\n a2 = hl.eval(hl.nd.array([[1, 2], [2, 5], [3, 8]]))\n\n an1 = np.array(((1, 2), (2, 5), (3, 8)))\n an2 = np.array([[1, 2], [2, 5], [3, 8]])\n\n assert(np.array_equal(a1, an1) and np.array_equal(a2, an2))\n\n\ndef test_cast():\n def testequal(a, hdtype, ndtype):\n ah = hl.eval(hl.nd.array(a, dtype=hdtype))\n an = np.array(a, dtype=ndtype)\n\n assert(ah.dtype == an.dtype)\n\n def test(a):\n testequal(a, hl.tfloat64, np.float64)\n testequal(a, hl.tfloat32, np.float32)\n testequal(a, hl.tint32, np.int32)\n testequal(a, hl.tint64, np.int64)\n\n test([1, 2, 3])\n test([1, 2, 3.])\n test([1., 2., 3.])\n test([[1, 2], [3, 4]])\n\n\ndef test_inv():\n c = np.random.randn(5, 5)\n d = np.linalg.inv(c)\n dhail = hl.eval(hl.nd.inv(c))\n assert np.allclose(dhail, d)\n\n\ndef test_concatenate():\n x = np.array([[1., 2.], [3., 4.]])\n y = np.array([[5.], [6.]])\n np_res = np.concatenate([x, y], axis=1)\n\n res = hl.eval(hl.nd.concatenate([x, y], axis=1))\n assert np.array_equal(np_res, res)\n\n res = hl.eval(hl.nd.concatenate(hl.array([x, y]), axis=1))\n assert np.array_equal(np_res, res)\n\n x = np.array([[1], [3]])\n y = np.array([[5], [6]])\n\n seq = [x, y]\n seq2 = hl.array(seq)\n np_res = np.concatenate(seq)\n res = hl.eval(hl.nd.concatenate(seq))\n assert np.array_equal(np_res, res)\n\n res = hl.eval(hl.nd.concatenate(seq2))\n assert np.array_equal(np_res, res)\n\n seq = (x, y)\n seq2 = hl.array([x, y])\n np_res = np.concatenate(seq)\n res = hl.eval(hl.nd.concatenate(seq))\n assert np.array_equal(np_res, res)\n\n res = hl.eval(hl.nd.concatenate(seq2))\n assert np.array_equal(np_res, res)\n\n\ndef test_vstack():\n ht = hl.utils.range_table(10)\n\n def assert_table(a, b):\n ht2 = ht.annotate(x=hl.nd.array(a), y=hl.nd.array(b))\n ht2 = ht2.annotate(stacked=hl.nd.vstack([ht2.x, ht2.y]))\n assert np.array_equal(ht2.collect()[0].stacked, np.vstack([a, b]))\n\n a = np.array([1, 2, 3])\n b = np.array([2, 3, 4])\n\n seq = (a, b)\n seq2 = hl.array([a, b])\n assert(np.array_equal(hl.eval(hl.nd.vstack(seq)), np.vstack(seq)))\n assert(np.array_equal(hl.eval(hl.nd.vstack(seq2)), np.vstack(seq)))\n assert_table(a, b)\n\n a = np.array([[1], [2], [3]])\n b = np.array([[2], [3], [4]])\n seq = (a, b)\n seq2 = hl.array([a, b])\n 
assert(np.array_equal(hl.eval(hl.nd.vstack(seq)), np.vstack(seq)))\n assert(np.array_equal(hl.eval(hl.nd.vstack(seq2)), np.vstack(seq)))\n assert_table(a, b)\n\n\ndef test_hstack():\n ht = hl.utils.range_table(10)\n\n def assert_table(a, b):\n ht2 = ht.annotate(x=hl.nd.array(a), y=hl.nd.array(b))\n ht2 = ht2.annotate(stacked=hl.nd.hstack([ht2.x, ht2.y]))\n assert np.array_equal(ht2.collect()[0].stacked, np.hstack([a, b]))\n\n a = np.array([1, 2, 3])\n b = np.array([2, 3, 4])\n assert(np.array_equal(hl.eval(hl.nd.hstack((a, b))), np.hstack((a, b))))\n assert(np.array_equal(hl.eval(hl.nd.hstack(hl.array([a, b]))), np.hstack((a, b))))\n assert_table(a, b)\n\n a = np.array([[1], [2], [3]])\n b = np.array([[2], [3], [4]])\n assert(np.array_equal(hl.eval(hl.nd.hstack((a, b))), np.hstack((a, b))))\n assert(np.array_equal(hl.eval(hl.nd.hstack(hl.array([a, b]))), np.hstack((a, b))))\n assert_table(a, b)\n\n\ndef test_eye():\n for i in range(13):\n assert_ndarrays_eq(*[(hl.nd.eye(i, y), np.eye(i, y)) for y in range(13)])\n\n\ndef test_identity():\n assert_ndarrays_eq(*[(hl.nd.identity(i), np.identity(i)) for i in range(13)])\n\n\ndef test_agg_ndarray_sum():\n no_values = hl.utils.range_table(0).annotate(x=hl.nd.arange(5))\n assert no_values.aggregate(hl.agg.ndarray_sum(no_values.x)) is None\n\n increasing_0d = hl.utils.range_table(10)\n increasing_0d = increasing_0d.annotate(x=hl.nd.array(increasing_0d.idx))\n assert np.array_equal(increasing_0d.aggregate(hl.agg.ndarray_sum(increasing_0d.x)), np.array(45))\n\n just_ones_1d = hl.utils.range_table(20).annotate(x=hl.nd.ones((7,)))\n assert np.array_equal(just_ones_1d.aggregate(hl.agg.ndarray_sum(just_ones_1d.x)), np.full((7,), 20))\n\n just_ones_2d = hl.utils.range_table(100).annotate(x=hl.nd.ones((2, 3)))\n assert np.array_equal(just_ones_2d.aggregate(hl.agg.ndarray_sum(just_ones_2d.x)), np.full((2, 3), 100))\n\n transposes = hl.utils.range_table(4).annotate(x=hl.nd.arange(16).reshape((4, 4)))\n transposes = transposes.annotate(x = hl.if_else((transposes.idx % 2) == 0, transposes.x, transposes.x.T))\n np_arange_4_by_4 = np.arange(16).reshape((4, 4))\n transposes_result = (np_arange_4_by_4 * 2) + (np_arange_4_by_4.T * 2)\n assert np.array_equal(transposes.aggregate(hl.agg.ndarray_sum(transposes.x)), transposes_result)\n\n with pytest.raises(FatalError) as exc:\n mismatched = hl.utils.range_table(5)\n mismatched = mismatched.annotate(x=hl.nd.ones((mismatched.idx,)))\n mismatched.aggregate(hl.agg.ndarray_sum(mismatched.x))\n assert \"Can't sum\" in str(exc.value)\n\n\ndef test_maximum_minimuim():\n x = np.arange(4)\n y = np.array([7, 0, 2, 4])\n z = [5, 2, 3, 1]\n nan_elem = np.array([1.0, float(\"nan\"), 3.0, 6.0])\n f = np.array([1.0, 3.0, 6.0, 4.0])\n nx = hl.nd.array(x)\n ny = hl.nd.array(y)\n nf = hl.nd.array(f)\n ndnan_elem = hl.nd.array([1.0, hl.float64(float(\"NaN\")), 3.0, 6.0])\n\n assert_ndarrays_eq(\n (hl.nd.maximum(nx, ny), np.maximum(x, y)),\n (hl.nd.maximum(ny, z), np.maximum(y, z)),\n (hl.nd.minimum(nx, ny), np.minimum(x, y)),\n (hl.nd.minimum(ny, z), np.minimum(y, z)),\n )\n\n np_nan_max = np.maximum(nan_elem, f)\n nan_max = hl.eval(hl.nd.maximum(ndnan_elem, nf))\n np_nan_min = np.minimum(nan_elem, f)\n nan_min = hl.eval(hl.nd.minimum(ndnan_elem, nf))\n max_matches = 0\n min_matches = 0\n for a, b in zip(np_nan_max, nan_max):\n if a == b:\n max_matches += 1\n elif np.isnan(a) and np.isnan(b):\n max_matches += 1\n for a, b in zip(np_nan_min, nan_min):\n if a == b:\n min_matches += 1\n elif np.isnan(a) and np.isnan(b):\n min_matches += 
1\n\n assert(nan_max.size == max_matches)\n assert(nan_min.size == min_matches)\n\ndef test_ndarray_broadcasting_with_decorator():\n nd = hl.nd.array([[1, 4, 9], [16, 25, 36]])\n nd_sqrt = hl.eval(hl.nd.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))\n nd = hl.eval(hl.sqrt(nd))\n assert(np.array_equal(nd, nd_sqrt))\n\n nd = hl.nd.array([[10, 100, 1000], [10000, 100000, 1000000]])\n nd_log10 = hl.eval(hl.nd.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))\n nd = hl.eval(hl.log10(nd))\n assert(np.array_equal(nd, nd_log10))\n\n nd = hl.nd.array([[1.2, 2.3, 3.3], [4.3, 5.3, 6.3]])\n nd_floor = hl.eval(hl.nd.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))\n nd = hl.eval(hl.floor(nd))\n assert(np.array_equal(nd, nd_floor))" ]
[ [ "numpy.ones", "numpy.diag", "numpy.vstack", "numpy.allclose", "numpy.asfortranarray", "numpy.linalg.matrix_rank", "numpy.isnan", "numpy.identity", "numpy.minimum", "numpy.eye", "numpy.zeros", "numpy.arange", "numpy.hstack", "numpy.testing.assert_array_almost_equal", "numpy.maximum", "numpy.linalg.qr", "numpy.linalg.inv", "numpy.random.randn", "numpy.linalg.svd", "numpy.array_equal", "numpy.array", "numpy.concatenate", "numpy.full" ] ]
marianna13/heatpy
[ "a1010c87c592c81e61126f69842d010922c603fe" ]
[ "heatpy.py" ]
[ "import numpy as np\ndef plot(b,d,C,t,x1,y1,x2,y2,fn, show_solution=False):\n '''\n This function finds solutions.\n b,d : boundary conditions,\n C : thermal diffusivity\n t : time\n x2-x1 : size of a square in X direction,\n y2-y1 : size of the square in Y direction,\n fn : initial condition\n '''\n import plotly.graph_objects as go\n\n def solve(b,d,C,t,x1,y1,x2=None,y2=None,fn=None):\n \n a=0\n\n c=0\n def f1(x,y):\n\n return x-y-2\n\n\n def f(x,y,m,n,fn):\n if fn!=None:\n return fn(x,y)*np.sin(m*np.pi/b*x)*np.sin(n*np.pi/d*x)\n return f1(x,y)*np.sin(m*np.pi/b*x)*np.sin(n*np.pi/d*x)\n\n\n def s(a,b,c,d,m,n,fn):\n\n s = (16*f((b+a)/2,(c+d)/2,m,n,fn)+4*f((b+a)/2,d,m,n,fn)+4*f((b+a)/2,c,m,n,fn)+4*f((b+a)/2,d,m,n,fn)+4*f(b,(d+c)/2,m,n,fn)+4*f(a,(d+c)/2,m,n,fn)+f(b,d,m,n,fn)+f(a,d,m,n,fn)+f(a,c,m,n,fn)+f(b,c,m,n,fn))*(b-a)*(d-c)/36\n return s\n\n\n def A(m,n, a,b,c,d,fn):\n '''\n This function finds coefficients A using double \n Fourier transform\n '''\n return 4/(b*d)*s(a,b,c,d,m,n,fn)\n\n\n def u(a,b,c,d,C,t,X,Y,fn):\n '''\n This function finds \n a general solution by summing partial solutions\n '''\n u=0\n for m in range(1,50):\n for n in range(1,50):\n mu = m*np.pi/b\n nu = n*np.pi/d\n lmn = C*np.sqrt(mu**2+nu**2)\n u+=(A(m,n,a,b,c,d,fn)*np.sin(mu*X)*np.sin(nu*Y)*np.exp(-lmn**2*t))\n return u\n\n\n if x2==None:\n return int(x1),int(y1),u(a,b,c,d,C,t,x1,y1,fn)\n\n X = np.linspace(x1,x2,2)\n Y = np.linspace(y1,y2,2)\n\n\n X,Y = np.meshgrid(X,Y)\n return X,Y,u(a,b,c,d,C,t,X,Y,fn)\n\n\n\n X,Y,ut = solve(b,d,C,t,x1,y1,x2,y2,fn=fn)\n if show_solution:\n print(ut)\n fig = go.Figure(data=[go.Surface(z=ut, x=X, y=Y)])\n fig.update_layout(title='', autosize=True,\n width=500, height=500,\n margin=dict(l=65, r=50, b=65, t=90))\n fig.show()\n" ]
[ [ "numpy.exp", "numpy.sqrt", "numpy.sin", "numpy.meshgrid", "numpy.linspace" ] ]
1kc2/Long-Short-Stress-Test
[ "1dd8cdb5f4949fd140c7c494f8315e0e24d001b9" ]
[ "src/attribution.py" ]
[ "from db_utils import read_select, get_temptable,\\\n insert_temp_ret_table, drop_temp_table\nfrom weights import get_portfolio_weights, get_single_ticker_weight\nfrom pandas import DataFrame\nimport pandas as pd\n\ndef load_constituent_prices(ticker, db):\n q = \"\"\"\n SELECT *\n FROM eq_prices\n WHERE ticker = {_ticker}\n \"\"\"\n p = {\"_ticker\": ticker}\n prices = read_select(db, q, p, in_df=True)\n return prices\n\n\ndef calc_daily_return(ticker, db):\n existing_returns = get_ticker_returns(ticker, db)\n if existing_returns.shape[0] > 0:\n print(\"returns already existing for {}\".format(ticker))\n return\n # _ get prices\n prices = load_constituent_prices(ticker, db)\n if prices.shape[0] == 0:\n raise RuntimeError(\"no prices found for {}\".format(ticker))\n\n # _ calculate returns\n prices.index = prices[\"price_date\"]\n returns = prices[\"price\"] / prices[\"price\"].shift(1) - 1\n returns.dropna(inplace=True)\n\n # _ prepare returns df\n insert_df = DataFrame(returns)\n insert_df = insert_df.reset_index()\n insert_df.columns = [\"return_date\", \"price_ret\"]\n insert_df[\"price_ret\"] = insert_df[\"price_ret\"].astype(float)\n insert_df.loc[:, \"ticker\"] = ticker\n\n # _ insert returns and clean up\n temptbl = get_temptable()\n try:\n insert_df.to_sql(temptbl, db)\n insert_temp_ret_table(db, temptbl, \"daily_constituent_returns\")\n except:\n print(\"Error loading returns for {}\".format(ticker))\n drop_temp_table(db, temptbl)\n\n\ndef calc_daily_constituent_returns(tickers, db):\n for ticker in tickers:\n calc_daily_return(ticker, db)\n\n\ndef calc_daily_portfolio_returns(portfolio_name, db):\n\n existing_returns = get_portfolio_returns(portfolio_name, db)\n if existing_returns.shape[0] > 0:\n print(\"returns already exists for {}\".format(portfolio_name))\n return\n\n # _ get constituent weights\n weights = get_portfolio_weights(portfolio_name, db)\n\n # _ get constituent returns\n # _ build a giant frame and merge it\n constituents = weights.ticker.tolist()\n adj_returns = {}\n for ticker in constituents:\n # _ calculate return contribution for each constituent\n _ticker_return = get_ticker_returns(ticker, db)\n _ticker_weight = get_single_ticker_weight(portfolio_name, ticker, db)\n if (_ticker_return is not None and _ticker_weight is not None):\n _adj_ret = _ticker_return * _ticker_weight\n adj_returns[ticker] = _adj_ret\n\n # _ clean-up frame\n portfolio_returns = DataFrame(adj_returns)\n portfolio_returns.fillna(0, inplace=True)\n\n # _ aggregate on the portfolio\n portfolio_returns_agg = portfolio_returns.sum(axis=1)\n portfolio_returns_agg = portfolio_returns_agg.reset_index()\n portfolio_returns_agg.columns = [\"return_date\", \"price_ret\"]\n portfolio_returns_agg.loc[:, \"portfolio_name\"] = portfolio_name\n\n # _ store in db\n temptbl = get_temptable()\n try:\n portfolio_returns_agg.to_sql(temptbl, db)\n insert_temp_ret_table(db, temptbl, returns_tbl=\"portfolio_returns\", is_pf=True)\n except:\n print(\"Error loading portfolio returns for {}\".format(portfolio_name))\n drop_temp_table(db, temptbl, debug=True)\n\n\ndef get_ticker_returns(ticker, db):\n q = \"\"\"\n SELECT price_ret, return_date\n FROM <TBL:daily_constituent_returns> \n WHERE ticker = {_ticker}\n \"\"\"\n p = {\"_ticker\": ticker}\n df = read_select(db, q, p)\n if df.shape[0] > 0:\n index = pd.DatetimeIndex(df[\"return_date\"])\n df.index = index\n del df[\"return_date\"]\n return df[\"price_ret\"].astype(float)\n else:\n return df\n\n\ndef get_portfolio_returns(portfolio_name, db):\n q = 
\"\"\"\n SELECT *\n FROM <TBL:portfolio_returns>\n where portfolio_name = {_portfolio_name}\n \"\"\"\n p = {\"_portfolio_name\": portfolio_name}\n df = read_select(db, q, p)\n if df.shape[0] > 0:\n index = pd.DatetimeIndex(df[\"return_date\"])\n df.index = index\n del df[\"return_date\"]\n df[\"price_ret\"] = df[\"price_ret\"].astype(float)\n return df\n return df\n" ]
[ [ "pandas.DataFrame", "pandas.DatetimeIndex" ] ]
qeedquan/misc_utilities
[ "94c6363388662ac8ebbf075b9c853ce6defbb5b3" ]
[ "snippets/python/matplotlib/logistic.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef logistic(r=2.0, N=100):\n xs = 0.5*np.ones(N)\n for i in np.arange(N-1):\n xs[i+1] = r*xs[i]*(1.0-xs[i])\n return xs\n\nfig, axes = plt.subplots(2, 2)\naxes[0, 0].plot(logistic(2.7), 'bo')\naxes[1, 0].plot(logistic(3.1), 'ro')\naxes[0, 1].plot(logistic(3.5), 'bs')\naxes[1, 1].plot(logistic(3.9), 'rs')\nplt.show()\n" ]
[ [ "numpy.arange", "numpy.ones", "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
plin1112/ANI-Tools
[ "76280c918fc79fee8c266b8bc9ab57f86104ec99" ]
[ "activelearning/datareduction/diff_visualizer.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\ndfile = '/home/jujuman/Research/SingleNetworkTest/train_05/diffs.dat'\n\nf = open(dfile, 'r')\nfor l in f:\n diffs = np.array(l.split(','),dtype=np.float)\n plt.scatter(np.arange(diffs.size), diffs, color='black', label='DIFF', linewidth=1)\n plt.show()" ]
[ [ "numpy.arange", "matplotlib.pyplot.show" ] ]
gvvynplaine/numpy
[ "0bd548e287b9e2fd0126f64d8be01a812e3c6a48" ]
[ "numpy/core/tests/test_regression.py" ]
[ "import copy\nimport sys\nimport gc\nimport tempfile\nimport pytest\nfrom os import path\nfrom io import BytesIO\nfrom itertools import chain\n\nimport numpy as np\nfrom numpy.testing import (\n assert_, assert_equal, IS_PYPY, assert_almost_equal,\n assert_array_equal, assert_array_almost_equal, assert_raises,\n assert_raises_regex, assert_warns, suppress_warnings,\n _assert_valid_refcount, HAS_REFCOUNT,\n )\nfrom numpy.testing._private.utils import _no_tracing\nfrom numpy.compat import asbytes, asunicode, pickle\n\ntry:\n RecursionError\nexcept NameError:\n RecursionError = RuntimeError # python < 3.5\n\nclass TestRegression:\n def test_invalid_round(self):\n # Ticket #3\n v = 4.7599999999999998\n assert_array_equal(np.array([v]), np.array(v))\n\n def test_mem_empty(self):\n # Ticket #7\n np.empty((1,), dtype=[('x', np.int64)])\n\n def test_pickle_transposed(self):\n # Ticket #16\n a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n with BytesIO() as f:\n pickle.dump(a, f, protocol=proto)\n f.seek(0)\n b = pickle.load(f)\n assert_array_equal(a, b)\n\n def test_dtype_names(self):\n # Ticket #35\n # Should succeed\n np.dtype([(('name', 'label'), np.int32, 3)])\n\n def test_reduce(self):\n # Ticket #40\n assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)\n\n def test_zeros_order(self):\n # Ticket #43\n np.zeros([3], int, 'C')\n np.zeros([3], order='C')\n np.zeros([3], int, order='C')\n\n def test_asarray_with_order(self):\n # Check that nothing is done when order='F' and array C/F-contiguous\n a = np.ones(2)\n assert_(a is np.asarray(a, order='F'))\n\n def test_ravel_with_order(self):\n # Check that ravel works when order='F' and array C/F-contiguous\n a = np.ones(2)\n assert_(not a.ravel('F').flags.owndata)\n\n def test_sort_bigendian(self):\n # Ticket #47\n a = np.linspace(0, 10, 11)\n c = a.astype(np.dtype('<f8'))\n c.sort()\n assert_array_almost_equal(c, a)\n\n def test_negative_nd_indexing(self):\n # Ticket #49\n c = np.arange(125).reshape((5, 5, 5))\n origidx = np.array([-1, 0, 1])\n idx = np.array(origidx)\n c[idx]\n assert_array_equal(idx, origidx)\n\n def test_char_dump(self):\n # Ticket #50\n ca = np.char.array(np.arange(1000, 1010), itemsize=4)\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n with BytesIO() as f:\n pickle.dump(ca, f, protocol=proto)\n f.seek(0)\n ca = np.load(f, allow_pickle=True)\n\n def test_noncontiguous_fill(self):\n # Ticket #58.\n a = np.zeros((5, 3))\n b = a[:, :2,]\n\n def rs():\n b.shape = (10,)\n\n assert_raises(AttributeError, rs)\n\n def test_bool(self):\n # Ticket #60\n np.bool_(1) # Should succeed\n\n def test_indexing1(self):\n # Ticket #64\n descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]\n buffer = ((([6j, 4j],),),)\n h = np.array(buffer, dtype=descr)\n h['x']['y']['z']\n\n def test_indexing2(self):\n # Ticket #65\n descr = [('x', 'i4', (2,))]\n buffer = ([3, 2],)\n h = np.array(buffer, dtype=descr)\n h['x']\n\n def test_round(self):\n # Ticket #67\n x = np.array([1+2j])\n assert_almost_equal(x**(-1), [1/(1+2j)])\n\n def test_scalar_compare(self):\n # Trac Ticket #72\n # https://github.com/numpy/numpy/issues/565\n a = np.array(['test', 'auto'])\n assert_array_equal(a == 'auto', np.array([False, True]))\n assert_(a[1] == 'auto')\n assert_(a[0] != 'auto')\n b = np.linspace(0, 10, 11)\n # This should return true for now, but will eventually raise an error:\n with suppress_warnings() as sup:\n sup.filter(FutureWarning)\n assert_(b != 'auto')\n assert_(b[0] != 'auto')\n\n 
def test_unicode_swapping(self):\n # Ticket #79\n ulen = 1\n ucs_value = u'\\U0010FFFF'\n ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)\n ua.newbyteorder() # Should succeed.\n\n def test_object_array_fill(self):\n # Ticket #86\n x = np.zeros(1, 'O')\n x.fill([])\n\n def test_mem_dtype_align(self):\n # Ticket #93\n assert_raises(TypeError, np.dtype,\n {'names':['a'], 'formats':['foo']}, align=1)\n\n def test_endian_bool_indexing(self):\n # Ticket #105\n a = np.arange(10., dtype='>f8')\n b = np.arange(10., dtype='<f8')\n xa = np.where((a > 2) & (a < 6))\n xb = np.where((b > 2) & (b < 6))\n ya = ((a > 2) & (a < 6))\n yb = ((b > 2) & (b < 6))\n assert_array_almost_equal(xa, ya.nonzero())\n assert_array_almost_equal(xb, yb.nonzero())\n assert_(np.all(a[ya] > 0.5))\n assert_(np.all(b[yb] > 0.5))\n\n def test_endian_where(self):\n # GitHub issue #369\n net = np.zeros(3, dtype='>f4')\n net[1] = 0.00458849\n net[2] = 0.605202\n max_net = net.max()\n test = np.where(net <= 0., max_net, net)\n correct = np.array([ 0.60520202, 0.00458849, 0.60520202])\n assert_array_almost_equal(test, correct)\n\n def test_endian_recarray(self):\n # Ticket #2185\n dt = np.dtype([\n ('head', '>u4'),\n ('data', '>u4', 2),\n ])\n buf = np.recarray(1, dtype=dt)\n buf[0]['head'] = 1\n buf[0]['data'][:] = [1, 1]\n\n h = buf[0]['head']\n d = buf[0]['data'][0]\n buf[0]['head'] = h\n buf[0]['data'][0] = d\n assert_(buf[0]['head'] == 1)\n\n def test_mem_dot(self):\n # Ticket #106\n x = np.random.randn(0, 1)\n y = np.random.randn(10, 1)\n # Dummy array to detect bad memory access:\n _z = np.ones(10)\n _dummy = np.empty((0, 10))\n z = np.lib.stride_tricks.as_strided(_z, _dummy.shape, _dummy.strides)\n np.dot(x, np.transpose(y), out=z)\n assert_equal(_z, np.ones(10))\n # Do the same for the built-in dot:\n np.core.multiarray.dot(x, np.transpose(y), out=z)\n assert_equal(_z, np.ones(10))\n\n def test_arange_endian(self):\n # Ticket #111\n ref = np.arange(10)\n x = np.arange(10, dtype='<f8')\n assert_array_equal(ref, x)\n x = np.arange(10, dtype='>f8')\n assert_array_equal(ref, x)\n\n def test_arange_inf_step(self):\n ref = np.arange(0, 1, 10)\n x = np.arange(0, 1, np.inf)\n assert_array_equal(ref, x)\n\n ref = np.arange(0, 1, -10)\n x = np.arange(0, 1, -np.inf)\n assert_array_equal(ref, x)\n\n ref = np.arange(0, -1, -10)\n x = np.arange(0, -1, -np.inf)\n assert_array_equal(ref, x)\n\n ref = np.arange(0, -1, 10)\n x = np.arange(0, -1, np.inf)\n assert_array_equal(ref, x)\n\n def test_arange_underflow_stop_and_step(self):\n finfo = np.finfo(np.float64)\n\n ref = np.arange(0, finfo.eps, 2 * finfo.eps)\n x = np.arange(0, finfo.eps, finfo.max)\n assert_array_equal(ref, x)\n\n ref = np.arange(0, finfo.eps, -2 * finfo.eps)\n x = np.arange(0, finfo.eps, -finfo.max)\n assert_array_equal(ref, x)\n\n ref = np.arange(0, -finfo.eps, -2 * finfo.eps)\n x = np.arange(0, -finfo.eps, -finfo.max)\n assert_array_equal(ref, x)\n\n ref = np.arange(0, -finfo.eps, 2 * finfo.eps)\n x = np.arange(0, -finfo.eps, finfo.max)\n assert_array_equal(ref, x)\n\n def test_argmax(self):\n # Ticket #119\n a = np.random.normal(0, 1, (4, 5, 6, 7, 8))\n for i in range(a.ndim):\n a.argmax(i) # Should succeed\n\n def test_mem_divmod(self):\n # Ticket #126\n for i in range(10):\n divmod(np.array([i])[0], 10)\n\n def test_hstack_invalid_dims(self):\n # Ticket #128\n x = np.arange(9).reshape((3, 3))\n y = np.array([0, 0, 0])\n assert_raises(ValueError, np.hstack, (x, y))\n\n def test_squeeze_type(self):\n # Ticket #133\n a = np.array([3])\n b = np.array(3)\n 
assert_(type(a.squeeze()) is np.ndarray)\n assert_(type(b.squeeze()) is np.ndarray)\n\n def test_add_identity(self):\n # Ticket #143\n assert_equal(0, np.add.identity)\n\n def test_numpy_float_python_long_addition(self):\n # Check that numpy float and python longs can be added correctly.\n a = np.float_(23.) + 2**135\n assert_equal(a, 23. + 2**135)\n\n def test_binary_repr_0(self):\n # Ticket #151\n assert_equal('0', np.binary_repr(0))\n\n def test_rec_iterate(self):\n # Ticket #160\n descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])\n x = np.rec.array([(1, 1.1, '1.0'),\n (2, 2.2, '2.0')], dtype=descr)\n x[0].tolist()\n [i for i in x[0]]\n\n def test_unicode_string_comparison(self):\n # Ticket #190\n a = np.array('hello', np.unicode_)\n b = np.array('world')\n a == b\n\n def test_tobytes_FORTRANORDER_discontiguous(self):\n # Fix in r2836\n # Create non-contiguous Fortran ordered array\n x = np.array(np.random.rand(3, 3), order='F')[:, :2]\n assert_array_almost_equal(x.ravel(), np.frombuffer(x.tobytes()))\n\n def test_flat_assignment(self):\n # Correct behaviour of ticket #194\n x = np.empty((3, 1))\n x.flat = np.arange(3)\n assert_array_almost_equal(x, [[0], [1], [2]])\n x.flat = np.arange(3, dtype=float)\n assert_array_almost_equal(x, [[0], [1], [2]])\n\n def test_broadcast_flat_assignment(self):\n # Ticket #194\n x = np.empty((3, 1))\n\n def bfa():\n x[:] = np.arange(3)\n\n def bfb():\n x[:] = np.arange(3, dtype=float)\n\n assert_raises(ValueError, bfa)\n assert_raises(ValueError, bfb)\n\n def test_nonarray_assignment(self):\n # See also Issue gh-2870, test for non-array assignment\n # and equivalent unsafe casted array assignment\n a = np.arange(10)\n b = np.ones(10, dtype=bool)\n r = np.arange(10)\n\n def assign(a, b, c):\n a[b] = c\n\n assert_raises(ValueError, assign, a, b, np.nan)\n a[b] = np.array(np.nan) # but not this.\n assert_raises(ValueError, assign, a, r, np.nan)\n a[r] = np.array(np.nan)\n\n def test_unpickle_dtype_with_object(self):\n # Implemented in r2840\n dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n with BytesIO() as f:\n pickle.dump(dt, f, protocol=proto)\n f.seek(0)\n dt_ = pickle.load(f)\n assert_equal(dt, dt_)\n\n def test_mem_array_creation_invalid_specification(self):\n # Ticket #196\n dt = np.dtype([('x', int), ('y', np.object_)])\n # Wrong way\n assert_raises(ValueError, np.array, [1, 'object'], dt)\n # Correct way\n np.array([(1, 'object')], dt)\n\n def test_recarray_single_element(self):\n # Ticket #202\n a = np.array([1, 2, 3], dtype=np.int32)\n b = a.copy()\n r = np.rec.array(a, shape=1, formats=['3i4'], names=['d'])\n assert_array_equal(a, b)\n assert_equal(a, r[0][0])\n\n def test_zero_sized_array_indexing(self):\n # Ticket #205\n tmp = np.array([])\n\n def index_tmp():\n tmp[np.array(10)]\n\n assert_raises(IndexError, index_tmp)\n\n def test_chararray_rstrip(self):\n # Ticket #222\n x = np.chararray((1,), 5)\n x[0] = b'a '\n x = x.rstrip()\n assert_equal(x[0], b'a')\n\n def test_object_array_shape(self):\n # Ticket #239\n assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))\n assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))\n assert_equal(np.array([(1, 2), (3, 4)], dtype=object).shape, (2, 2))\n assert_equal(np.array([], dtype=object).shape, (0,))\n assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))\n assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))\n\n def test_mem_around(self):\n # Ticket #243\n x = 
np.zeros((1,))\n y = [0]\n decimal = 6\n np.around(abs(x-y), decimal) <= 10.0**(-decimal)\n\n def test_character_array_strip(self):\n # Ticket #246\n x = np.char.array((\"x\", \"x \", \"x \"))\n for c in x:\n assert_equal(c, \"x\")\n\n def test_lexsort(self):\n # Lexsort memory error\n v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n assert_equal(np.lexsort(v), 0)\n\n def test_lexsort_invalid_sequence(self):\n # Issue gh-4123\n class BuggySequence:\n def __len__(self):\n return 4\n\n def __getitem__(self, key):\n raise KeyError\n\n assert_raises(KeyError, np.lexsort, BuggySequence())\n\n def test_lexsort_zerolen_custom_strides(self):\n # Ticket #14228\n xs = np.array([], dtype='i8')\n assert xs.strides == (8,)\n assert np.lexsort((xs,)).shape[0] == 0 # Works\n\n xs.strides = (16,)\n assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError\n\n def test_lexsort_zerolen_custom_strides_2d(self):\n xs = np.array([], dtype='i8')\n\n xs.shape = (0, 2)\n xs.strides = (16, 16)\n assert np.lexsort((xs,), axis=0).shape[0] == 0\n\n xs.shape = (2, 0)\n xs.strides = (16, 16)\n assert np.lexsort((xs,), axis=0).shape[0] == 2\n\n def test_lexsort_invalid_axis(self):\n assert_raises(np.AxisError, np.lexsort, (np.arange(1),), axis=2)\n assert_raises(np.AxisError, np.lexsort, (np.array([]),), axis=1)\n assert_raises(np.AxisError, np.lexsort, (np.array(1),), axis=10)\n\n def test_lexsort_zerolen_element(self):\n dt = np.dtype([]) # a void dtype with no fields\n xs = np.empty(4, dt)\n\n assert np.lexsort((xs,)).shape[0] == xs.shape[0]\n\n def test_pickle_py2_bytes_encoding(self):\n # Check that arrays and scalars pickled on Py2 are\n # unpickleable on Py3 using encoding='bytes'\n\n test_data = [\n # (original, py2_pickle)\n (np.unicode_('\\u6f2c'),\n b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\n b\"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\n\"\n b\"I0\\ntp6\\nbS',o\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\"),\n\n (np.array([9e123], dtype=np.float64),\n b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\n\"\n b\"p1\\n(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\n\"\n b\"p7\\n(S'f8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'<'\\np11\\nNNNI-1\\nI-1\\n\"\n b\"I0\\ntp12\\nbI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np13\\ntp14\\nb.\"),\n\n (np.array([(9e123,)], dtype=[('name', float)]),\n b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n\"\n b\"(I0\\ntp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n\"\n b\"(S'V8'\\np8\\nI0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nN(S'name'\\np12\\ntp13\\n\"\n b\"(dp14\\ng12\\n(g7\\n(S'f8'\\np15\\nI0\\nI1\\ntp16\\nRp17\\n(I3\\nS'<'\\np18\\nNNNI-1\\n\"\n b\"I-1\\nI0\\ntp19\\nbI0\\ntp20\\nsI8\\nI1\\nI0\\ntp21\\n\"\n b\"bI00\\nS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np22\\ntp23\\nb.\"),\n ]\n\n for original, data in test_data:\n result = pickle.loads(data, encoding='bytes')\n assert_equal(result, original)\n\n if isinstance(result, np.ndarray) and result.dtype.names is not None:\n for name in result.dtype.names:\n assert_(isinstance(name, str))\n\n def test_pickle_dtype(self):\n # Ticket #251\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n pickle.dumps(float, protocol=proto)\n\n def test_swap_real(self):\n # Ticket #265\n assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)\n assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)\n assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)\n assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)\n\n def 
test_object_array_from_list(self):\n # Ticket #270\n assert_(np.array([1, 'A', None]).shape == (3,))\n\n def test_multiple_assign(self):\n # Ticket #273\n a = np.zeros((3, 1), int)\n a[[1, 2]] = 1\n\n def test_empty_array_type(self):\n assert_equal(np.array([]).dtype, np.zeros(0).dtype)\n\n def test_void_copyswap(self):\n dt = np.dtype([('one', '<i4'), ('two', '<i4')])\n x = np.array((1, 2), dtype=dt)\n x = x.byteswap()\n assert_(x['one'] > 1 and x['two'] > 2)\n\n def test_method_args(self):\n # Make sure methods and functions have same default axis\n # keyword and arguments\n funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),\n ('sometrue', 'any'),\n ('alltrue', 'all'), 'cumsum', ('cumproduct', 'cumprod'),\n 'ptp', 'cumprod', 'prod', 'std', 'var', 'mean',\n 'round', 'min', 'max', 'argsort', 'sort']\n funcs2 = ['compress', 'take', 'repeat']\n\n for func in funcs1:\n arr = np.random.rand(8, 7)\n arr2 = arr.copy()\n if isinstance(func, tuple):\n func_meth = func[1]\n func = func[0]\n else:\n func_meth = func\n res1 = getattr(arr, func_meth)()\n res2 = getattr(np, func)(arr2)\n if res1 is None:\n res1 = arr\n\n if res1.dtype.kind in 'uib':\n assert_((res1 == res2).all(), func)\n else:\n assert_(abs(res1-res2).max() < 1e-8, func)\n\n for func in funcs2:\n arr1 = np.random.rand(8, 7)\n arr2 = np.random.rand(8, 7)\n res1 = None\n if func == 'compress':\n arr1 = arr1.ravel()\n res1 = getattr(arr2, func)(arr1)\n else:\n arr2 = (15*arr2).astype(int).ravel()\n if res1 is None:\n res1 = getattr(arr1, func)(arr2)\n res2 = getattr(np, func)(arr1, arr2)\n assert_(abs(res1-res2).max() < 1e-8, func)\n\n def test_mem_lexsort_strings(self):\n # Ticket #298\n lst = ['abc', 'cde', 'fgh']\n np.lexsort((lst,))\n\n def test_fancy_index(self):\n # Ticket #302\n x = np.array([1, 2])[np.array([0])]\n assert_equal(x.shape, (1,))\n\n def test_recarray_copy(self):\n # Ticket #312\n dt = [('x', np.int16), ('y', np.float64)]\n ra = np.array([(1, 2.3)], dtype=dt)\n rb = np.rec.array(ra, dtype=dt)\n rb['x'] = 2.\n assert_(ra['x'] != rb['x'])\n\n def test_rec_fromarray(self):\n # Ticket #322\n x1 = np.array([[1, 2], [3, 4], [5, 6]])\n x2 = np.array(['a', 'dd', 'xyz'])\n x3 = np.array([1.1, 2, 3])\n np.rec.fromarrays([x1, x2, x3], formats=\"(2,)i4,a3,f8\")\n\n def test_object_array_assign(self):\n x = np.empty((2, 2), object)\n x.flat[2] = (1, 2, 3)\n assert_equal(x.flat[2], (1, 2, 3))\n\n def test_ndmin_float64(self):\n # Ticket #324\n x = np.array([1, 2, 3], dtype=np.float64)\n assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)\n assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)\n\n def test_ndmin_order(self):\n # Issue #465 and related checks\n assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)\n assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)\n assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)\n assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)\n\n def test_mem_axis_minimization(self):\n # Ticket #327\n data = np.arange(5)\n data = np.add.outer(data, data)\n\n def test_mem_float_imag(self):\n # Ticket #330\n np.float64(1.0).imag\n\n def test_dtype_tuple(self):\n # Ticket #334\n assert_(np.dtype('i4') == np.dtype(('i4', ())))\n\n def test_dtype_posttuple(self):\n # Ticket #335\n np.dtype([('col1', '()i4')])\n\n def test_numeric_carray_compare(self):\n # Ticket #341\n assert_equal(np.array(['X'], 'c'), b'X')\n\n def test_string_array_size(self):\n # Ticket #342\n assert_raises(ValueError,\n np.array, [['X'], ['X', 
'X', 'X']], '|S1')\n\n def test_dtype_repr(self):\n # Ticket #344\n dt1 = np.dtype(('uint32', 2))\n dt2 = np.dtype(('uint32', (2,)))\n assert_equal(dt1.__repr__(), dt2.__repr__())\n\n def test_reshape_order(self):\n # Make sure reshape order works.\n a = np.arange(6).reshape(2, 3, order='F')\n assert_equal(a, [[0, 2, 4], [1, 3, 5]])\n a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])\n b = a[:, 1]\n assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])\n\n def test_reshape_zero_strides(self):\n # Issue #380, test reshaping of zero strided arrays\n a = np.ones(1)\n a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))\n assert_(a.reshape(5, 1).strides[0] == 0)\n\n def test_reshape_zero_size(self):\n # GitHub Issue #2700, setting shape failed for 0-sized arrays\n a = np.ones((0, 2))\n a.shape = (-1, 2)\n\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous.\n @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,\n reason=\"Using relaxed stride checking\")\n def test_reshape_trailing_ones_strides(self):\n # GitHub issue gh-2949, bad strides for trailing ones of new shape\n a = np.zeros(12, dtype=np.int32)[::2] # not contiguous\n strides_c = (16, 8, 8, 8)\n strides_f = (8, 24, 48, 48)\n assert_equal(a.reshape(3, 2, 1, 1).strides, strides_c)\n assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)\n assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))\n\n def test_repeat_discont(self):\n # Ticket #352\n a = np.arange(12).reshape(4, 3)[:, 2]\n assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])\n\n def test_array_index(self):\n # Make sure optimization is not called in this case.\n a = np.array([1, 2, 3])\n a2 = np.array([[1, 2, 3]])\n assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])\n\n def test_object_argmax(self):\n a = np.array([1, 2, 3], dtype=object)\n assert_(a.argmax() == 2)\n\n def test_recarray_fields(self):\n # Ticket #372\n dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])\n dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])\n for a in [np.array([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.array([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.array([(1, 2), (3, 4)]),\n np.rec.fromarrays([(1, 2), (3, 4)], \"i4,i4\"),\n np.rec.fromarrays([(1, 2), (3, 4)])]:\n assert_(a.dtype in [dt0, dt1])\n\n def test_random_shuffle(self):\n # Ticket #374\n a = np.arange(5).reshape((5, 1))\n b = a.copy()\n np.random.shuffle(b)\n assert_equal(np.sort(b, axis=0), a)\n\n def test_refcount_vdot(self):\n # Changeset #3443\n _assert_valid_refcount(np.vdot)\n\n def test_startswith(self):\n ca = np.char.array(['Hi', 'There'])\n assert_equal(ca.startswith('H'), [True, False])\n\n def test_noncommutative_reduce_accumulate(self):\n # Ticket #413\n tosubtract = np.arange(5)\n todivide = np.array([2.0, 0.5, 0.25])\n assert_equal(np.subtract.reduce(tosubtract), -10)\n assert_equal(np.divide.reduce(todivide), 16.0)\n assert_array_equal(np.subtract.accumulate(tosubtract),\n np.array([0, -1, -3, -6, -10]))\n assert_array_equal(np.divide.accumulate(todivide),\n np.array([2., 4., 16.]))\n\n def test_convolve_empty(self):\n # Convolve should raise an error for empty input array.\n assert_raises(ValueError, np.convolve, [], [1])\n assert_raises(ValueError, np.convolve, [1], [])\n\n def test_multidim_byteswap(self):\n # Ticket #449\n r = np.array([(1, (0, 1, 2))], dtype=\"i2,3i2\")\n assert_array_equal(r.byteswap(),\n np.array([(256, (0, 256, 512))], r.dtype))\n\n def 
test_string_NULL(self):\n # Changeset 3557\n assert_equal(np.array(\"a\\x00\\x0b\\x0c\\x00\").item(),\n 'a\\x00\\x0b\\x0c')\n\n def test_junk_in_string_fields_of_recarray(self):\n # Ticket #483\n r = np.array([[b'abc']], dtype=[('var1', '|S20')])\n assert_(asbytes(r['var1'][0][0]) == b'abc')\n\n def test_take_output(self):\n # Ensure that 'take' honours output parameter.\n x = np.arange(12).reshape((3, 4))\n a = np.take(x, [0, 2], axis=1)\n b = np.zeros_like(a)\n np.take(x, [0, 2], axis=1, out=b)\n assert_array_equal(a, b)\n\n def test_take_object_fail(self):\n # Issue gh-3001\n d = 123.\n a = np.array([d, 1], dtype=object)\n if HAS_REFCOUNT:\n ref_d = sys.getrefcount(d)\n try:\n a.take([0, 100])\n except IndexError:\n pass\n if HAS_REFCOUNT:\n assert_(ref_d == sys.getrefcount(d))\n\n def test_array_str_64bit(self):\n # Ticket #501\n s = np.array([1, np.nan], dtype=np.float64)\n with np.errstate(all='raise'):\n np.array_str(s) # Should succeed\n\n def test_frompyfunc_endian(self):\n # Ticket #503\n from math import radians\n uradians = np.frompyfunc(radians, 1, 1)\n big_endian = np.array([83.4, 83.5], dtype='>f8')\n little_endian = np.array([83.4, 83.5], dtype='<f8')\n assert_almost_equal(uradians(big_endian).astype(float),\n uradians(little_endian).astype(float))\n\n def test_mem_string_arr(self):\n # Ticket #514\n s = \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\"\n t = []\n np.hstack((t, s))\n\n def test_arr_transpose(self):\n # Ticket #516\n x = np.random.rand(*(2,)*16)\n x.transpose(list(range(16))) # Should succeed\n\n def test_string_mergesort(self):\n # Ticket #540\n x = np.array(['a']*32)\n assert_array_equal(x.argsort(kind='m'), np.arange(32))\n\n def test_argmax_byteorder(self):\n # Ticket #546\n a = np.arange(3, dtype='>f')\n assert_(a[a.argmax()] == a.max())\n\n def test_rand_seed(self):\n # Ticket #555\n for l in np.arange(4):\n np.random.seed(l)\n\n def test_mem_deallocation_leak(self):\n # Ticket #562\n a = np.zeros(5, dtype=float)\n b = np.array(a, dtype=float)\n del a, b\n\n def test_mem_on_invalid_dtype(self):\n \"Ticket #583\"\n assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)\n\n def test_dot_negative_stride(self):\n # Ticket #588\n x = np.array([[1, 5, 25, 125., 625]])\n y = np.array([[20.], [160.], [640.], [1280.], [1024.]])\n z = y[::-1].copy()\n y2 = y[::-1]\n assert_equal(np.dot(x, z), np.dot(x, y2))\n\n def test_object_casting(self):\n # This used to trigger the object-type version of\n # the bitwise_or operation, because float64 -> object\n # casting succeeds\n def rs():\n x = np.ones([484, 286])\n y = np.zeros([484, 286])\n x |= y\n\n assert_raises(TypeError, rs)\n\n def test_unicode_scalar(self):\n # Ticket #600\n x = np.array([\"DROND\", \"DROND1\"], dtype=\"U6\")\n el = x[1]\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n new = pickle.loads(pickle.dumps(el, protocol=proto))\n assert_equal(new, el)\n\n def test_arange_non_native_dtype(self):\n # Ticket #616\n for T in ('>f4', '<f4'):\n dt = np.dtype(T)\n assert_equal(np.arange(0, dtype=dt).dtype, dt)\n assert_equal(np.arange(0.5, dtype=dt).dtype, dt)\n assert_equal(np.arange(5, dtype=dt).dtype, dt)\n\n def test_bool_flat_indexing_invalid_nr_elements(self):\n s = np.ones(10, dtype=float)\n x = np.array((15,), dtype=float)\n\n def ia(x, s, v):\n x[(s > 0)] = v\n\n assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))\n assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))\n\n # Old special case (different code path):\n assert_raises(ValueError, ia, x.flat, s, np.zeros(9, 
dtype=float))\n assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))\n\n def test_mem_scalar_indexing(self):\n # Ticket #603\n x = np.array([0], dtype=float)\n index = np.array(0, dtype=np.int32)\n x[index]\n\n def test_binary_repr_0_width(self):\n assert_equal(np.binary_repr(0, width=3), '000')\n\n def test_fromstring(self):\n assert_equal(np.fromstring(\"12:09:09\", dtype=int, sep=\":\"),\n [12, 9, 9])\n\n def test_searchsorted_variable_length(self):\n x = np.array(['a', 'aa', 'b'])\n y = np.array(['d', 'e'])\n assert_equal(x.searchsorted(y), [3, 3])\n\n def test_string_argsort_with_zeros(self):\n # Check argsort for strings containing zeros.\n x = np.frombuffer(b\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\n assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))\n assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))\n\n def test_string_sort_with_zeros(self):\n # Check sort for strings containing zeros.\n x = np.frombuffer(b\"\\x00\\x02\\x00\\x01\", dtype=\"|S2\")\n y = np.frombuffer(b\"\\x00\\x01\\x00\\x02\", dtype=\"|S2\")\n assert_array_equal(np.sort(x, kind=\"q\"), y)\n\n def test_copy_detection_zero_dim(self):\n # Ticket #658\n np.indices((0, 3, 4)).T.reshape(-1, 3)\n\n def test_flat_byteorder(self):\n # Ticket #657\n x = np.arange(10)\n assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])\n assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))\n\n def test_sign_bit(self):\n x = np.array([0, -0.0, 0])\n assert_equal(str(np.abs(x)), '[0. 0. 0.]')\n\n def test_flat_index_byteswap(self):\n for dt in (np.dtype('<i4'), np.dtype('>i4')):\n x = np.array([-1, 0, 1], dtype=dt)\n assert_equal(x.flat[0].dtype, x[0].dtype)\n\n def test_copy_detection_corner_case(self):\n # Ticket #658\n np.indices((0, 3, 4)).T.reshape(-1, 3)\n\n # Cannot test if NPY_RELAXED_STRIDES_CHECKING changes the strides.\n # With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,\n # 0-sized reshape itself is tested elsewhere.\n @pytest.mark.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max,\n reason=\"Using relaxed stride checking\")\n def test_copy_detection_corner_case2(self):\n # Ticket #771: strides are not set correctly when reshaping 0-sized\n # arrays\n b = np.indices((0, 3, 4)).T.reshape(-1, 3)\n assert_equal(b.strides, (3 * b.itemsize, b.itemsize))\n\n def test_object_array_refcounting(self):\n # Ticket #633\n if not hasattr(sys, 'getrefcount'):\n return\n\n # NB. 
this is probably CPython-specific\n\n cnt = sys.getrefcount\n\n a = object()\n b = object()\n c = object()\n\n cnt0_a = cnt(a)\n cnt0_b = cnt(b)\n cnt0_c = cnt(c)\n\n # -- 0d -> 1-d broadcast slice assignment\n\n arr = np.zeros(5, dtype=np.object_)\n\n arr[:] = a\n assert_equal(cnt(a), cnt0_a + 5)\n\n arr[:] = b\n assert_equal(cnt(a), cnt0_a)\n assert_equal(cnt(b), cnt0_b + 5)\n\n arr[:2] = c\n assert_equal(cnt(b), cnt0_b + 3)\n assert_equal(cnt(c), cnt0_c + 2)\n\n del arr\n\n # -- 1-d -> 2-d broadcast slice assignment\n\n arr = np.zeros((5, 2), dtype=np.object_)\n arr0 = np.zeros(2, dtype=np.object_)\n\n arr0[0] = a\n assert_(cnt(a) == cnt0_a + 1)\n arr0[1] = b\n assert_(cnt(b) == cnt0_b + 1)\n\n arr[:, :] = arr0\n assert_(cnt(a) == cnt0_a + 6)\n assert_(cnt(b) == cnt0_b + 6)\n\n arr[:, 0] = None\n assert_(cnt(a) == cnt0_a + 1)\n\n del arr, arr0\n\n # -- 2-d copying + flattening\n\n arr = np.zeros((5, 2), dtype=np.object_)\n\n arr[:, 0] = a\n arr[:, 1] = b\n assert_(cnt(a) == cnt0_a + 5)\n assert_(cnt(b) == cnt0_b + 5)\n\n arr2 = arr.copy()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 10)\n\n arr2 = arr[:, 0].copy()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 5)\n\n arr2 = arr.flatten()\n assert_(cnt(a) == cnt0_a + 10)\n assert_(cnt(b) == cnt0_b + 10)\n\n del arr, arr2\n\n # -- concatenate, repeat, take, choose\n\n arr1 = np.zeros((5, 1), dtype=np.object_)\n arr2 = np.zeros((5, 1), dtype=np.object_)\n\n arr1[...] = a\n arr2[...] = b\n assert_(cnt(a) == cnt0_a + 5)\n assert_(cnt(b) == cnt0_b + 5)\n\n tmp = np.concatenate((arr1, arr2))\n assert_(cnt(a) == cnt0_a + 5 + 5)\n assert_(cnt(b) == cnt0_b + 5 + 5)\n\n tmp = arr1.repeat(3, axis=0)\n assert_(cnt(a) == cnt0_a + 5 + 3*5)\n\n tmp = arr1.take([1, 2, 3], axis=0)\n assert_(cnt(a) == cnt0_a + 5 + 3)\n\n x = np.array([[0], [1], [0], [1], [1]], int)\n tmp = x.choose(arr1, arr2)\n assert_(cnt(a) == cnt0_a + 5 + 2)\n assert_(cnt(b) == cnt0_b + 5 + 3)\n\n del tmp # Avoid pyflakes unused variable warning\n\n def test_mem_custom_float_to_array(self):\n # Ticket 702\n class MyFloat:\n def __float__(self):\n return 1.0\n\n tmp = np.atleast_1d([MyFloat()])\n tmp.astype(float) # Should succeed\n\n def test_object_array_refcount_self_assign(self):\n # Ticket #711\n class VictimObject:\n deleted = False\n\n def __del__(self):\n self.deleted = True\n\n d = VictimObject()\n arr = np.zeros(5, dtype=np.object_)\n arr[:] = d\n del d\n arr[:] = arr # refcount of 'd' might hit zero here\n assert_(not arr[0].deleted)\n arr[:] = arr # trying to induce a segfault by doing it again...\n assert_(not arr[0].deleted)\n\n def test_mem_fromiter_invalid_dtype_string(self):\n x = [1, 2, 3]\n assert_raises(ValueError,\n np.fromiter, [xi for xi in x], dtype='S')\n\n def test_reduce_big_object_array(self):\n # Ticket #713\n oldsize = np.setbufsize(10*16)\n a = np.array([None]*161, object)\n assert_(not np.any(a))\n np.setbufsize(oldsize)\n\n def test_mem_0d_array_index(self):\n # Ticket #714\n np.zeros(10)[np.array(0)]\n\n def test_nonnative_endian_fill(self):\n # Non-native endian arrays were incorrectly filled with scalars\n # before r5034.\n if sys.byteorder == 'little':\n dtype = np.dtype('>i4')\n else:\n dtype = np.dtype('<i4')\n x = np.empty([1], dtype=dtype)\n x.fill(1)\n assert_equal(x, np.array([1], dtype=dtype))\n\n def test_dot_alignment_sse2(self):\n # Test for ticket #551, changeset r5140\n x = np.zeros((30, 40))\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n y = pickle.loads(pickle.dumps(x, protocol=proto))\n # y is 
now typically not aligned on a 8-byte boundary\n z = np.ones((1, y.shape[0]))\n # This shouldn't cause a segmentation fault:\n np.dot(z, y)\n\n def test_astype_copy(self):\n # Ticket #788, changeset r5155\n # The test data file was generated by scipy.io.savemat.\n # The dtype is float64, but the isbuiltin attribute is 0.\n data_dir = path.join(path.dirname(__file__), 'data')\n filename = path.join(data_dir, \"astype_copy.pkl\")\n with open(filename, 'rb') as f:\n xp = pickle.load(f, encoding='latin1')\n xpd = xp.astype(np.float64)\n assert_((xp.__array_interface__['data'][0] !=\n xpd.__array_interface__['data'][0]))\n\n def test_compress_small_type(self):\n # Ticket #789, changeset 5217.\n # compress with out argument segfaulted if cannot cast safely\n import numpy as np\n a = np.array([[1, 2], [3, 4]])\n b = np.zeros((2, 1), dtype=np.single)\n try:\n a.compress([True, False], axis=1, out=b)\n raise AssertionError(\"compress with an out which cannot be \"\n \"safely casted should not return \"\n \"successfully\")\n except TypeError:\n pass\n\n def test_attributes(self):\n # Ticket #791\n class TestArray(np.ndarray):\n def __new__(cls, data, info):\n result = np.array(data)\n result = result.view(cls)\n result.info = info\n return result\n\n def __array_finalize__(self, obj):\n self.info = getattr(obj, 'info', '')\n\n dat = TestArray([[1, 2, 3, 4], [5, 6, 7, 8]], 'jubba')\n assert_(dat.info == 'jubba')\n dat.resize((4, 2))\n assert_(dat.info == 'jubba')\n dat.sort()\n assert_(dat.info == 'jubba')\n dat.fill(2)\n assert_(dat.info == 'jubba')\n dat.put([2, 3, 4], [6, 3, 4])\n assert_(dat.info == 'jubba')\n dat.setfield(4, np.int32, 0)\n assert_(dat.info == 'jubba')\n dat.setflags()\n assert_(dat.info == 'jubba')\n assert_(dat.all(1).info == 'jubba')\n assert_(dat.any(1).info == 'jubba')\n assert_(dat.argmax(1).info == 'jubba')\n assert_(dat.argmin(1).info == 'jubba')\n assert_(dat.argsort(1).info == 'jubba')\n assert_(dat.astype(TestArray).info == 'jubba')\n assert_(dat.byteswap().info == 'jubba')\n assert_(dat.clip(2, 7).info == 'jubba')\n assert_(dat.compress([0, 1, 1]).info == 'jubba')\n assert_(dat.conj().info == 'jubba')\n assert_(dat.conjugate().info == 'jubba')\n assert_(dat.copy().info == 'jubba')\n dat2 = TestArray([2, 3, 1, 0], 'jubba')\n choices = [[0, 1, 2, 3], [10, 11, 12, 13],\n [20, 21, 22, 23], [30, 31, 32, 33]]\n assert_(dat2.choose(choices).info == 'jubba')\n assert_(dat.cumprod(1).info == 'jubba')\n assert_(dat.cumsum(1).info == 'jubba')\n assert_(dat.diagonal().info == 'jubba')\n assert_(dat.flatten().info == 'jubba')\n assert_(dat.getfield(np.int32, 0).info == 'jubba')\n assert_(dat.imag.info == 'jubba')\n assert_(dat.max(1).info == 'jubba')\n assert_(dat.mean(1).info == 'jubba')\n assert_(dat.min(1).info == 'jubba')\n assert_(dat.newbyteorder().info == 'jubba')\n assert_(dat.prod(1).info == 'jubba')\n assert_(dat.ptp(1).info == 'jubba')\n assert_(dat.ravel().info == 'jubba')\n assert_(dat.real.info == 'jubba')\n assert_(dat.repeat(2).info == 'jubba')\n assert_(dat.reshape((2, 4)).info == 'jubba')\n assert_(dat.round().info == 'jubba')\n assert_(dat.squeeze().info == 'jubba')\n assert_(dat.std(1).info == 'jubba')\n assert_(dat.sum(1).info == 'jubba')\n assert_(dat.swapaxes(0, 1).info == 'jubba')\n assert_(dat.take([2, 3, 5]).info == 'jubba')\n assert_(dat.transpose().info == 'jubba')\n assert_(dat.T.info == 'jubba')\n assert_(dat.var(1).info == 'jubba')\n assert_(dat.view(TestArray).info == 'jubba')\n # These methods do not preserve subclasses\n 
assert_(type(dat.nonzero()[0]) is np.ndarray)\n assert_(type(dat.nonzero()[1]) is np.ndarray)\n\n def test_recarray_tolist(self):\n # Ticket #793, changeset r5215\n # Comparisons fail for NaN, so we can't use random memory\n # for the test.\n buf = np.zeros(40, dtype=np.int8)\n a = np.recarray(2, formats=\"i4,f8,f8\", names=\"id,x,y\", buf=buf)\n b = a.tolist()\n assert_( a[0].tolist() == b[0])\n assert_( a[1].tolist() == b[1])\n\n def test_nonscalar_item_method(self):\n # Make sure that .item() fails graciously when it should\n a = np.arange(5)\n assert_raises(ValueError, a.item)\n\n def test_char_array_creation(self):\n a = np.array('123', dtype='c')\n b = np.array([b'1', b'2', b'3'])\n assert_equal(a, b)\n\n def test_unaligned_unicode_access(self):\n # Ticket #825\n for i in range(1, 9):\n msg = 'unicode offset: %d chars' % i\n t = np.dtype([('a', 'S%d' % i), ('b', 'U2')])\n x = np.array([(b'a', u'b')], dtype=t)\n assert_equal(str(x), \"[(b'a', 'b')]\", err_msg=msg)\n\n def test_sign_for_complex_nan(self):\n # Ticket 794.\n with np.errstate(invalid='ignore'):\n C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])\n have = np.sign(C)\n want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])\n assert_equal(have, want)\n\n def test_for_equal_names(self):\n # Ticket #674\n dt = np.dtype([('foo', float), ('bar', float)])\n a = np.zeros(10, dt)\n b = list(a.dtype.names)\n b[0] = \"notfoo\"\n a.dtype.names = b\n assert_(a.dtype.names[0] == \"notfoo\")\n assert_(a.dtype.names[1] == \"bar\")\n\n def test_for_object_scalar_creation(self):\n # Ticket #816\n a = np.object_()\n b = np.object_(3)\n b2 = np.object_(3.0)\n c = np.object_([4, 5])\n d = np.object_([None, {}, []])\n assert_(a is None)\n assert_(type(b) is int)\n assert_(type(b2) is float)\n assert_(type(c) is np.ndarray)\n assert_(c.dtype == object)\n assert_(d.dtype == object)\n\n def test_array_resize_method_system_error(self):\n # Ticket #840 - order should be an invalid keyword.\n x = np.array([[0, 1], [2, 3]])\n assert_raises(TypeError, x.resize, (2, 2), order='C')\n\n def test_for_zero_length_in_choose(self):\n \"Ticket #882\"\n a = np.array(1)\n assert_raises(ValueError, lambda x: x.choose([]), a)\n\n def test_array_ndmin_overflow(self):\n \"Ticket #947.\"\n assert_raises(ValueError, lambda: np.array([1], ndmin=33))\n\n def test_void_scalar_with_titles(self):\n # No ticket\n data = [('john', 4), ('mary', 5)]\n dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]\n arr = np.array(data, dtype=dtype1)\n assert_(arr[0][0] == 'john')\n assert_(arr[0][1] == 4)\n\n def test_void_scalar_constructor(self):\n #Issue #1550\n\n #Create test string data, construct void scalar from data and assert\n #that void scalar contains original data.\n test_string = np.array(\"test\")\n test_string_void_scalar = np.core.multiarray.scalar(\n np.dtype((\"V\", test_string.dtype.itemsize)), test_string.tobytes())\n\n assert_(test_string_void_scalar.view(test_string.dtype) == test_string)\n\n #Create record scalar, construct from data and assert that\n #reconstructed scalar is correct.\n test_record = np.ones((), \"i,i\")\n test_record_void_scalar = np.core.multiarray.scalar(\n test_record.dtype, test_record.tobytes())\n\n assert_(test_record_void_scalar == test_record)\n\n # Test pickle and unpickle of void and record scalars\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n assert_(pickle.loads(\n pickle.dumps(test_string, protocol=proto)) == test_string)\n assert_(pickle.loads(\n pickle.dumps(test_record, protocol=proto)) == 
test_record)\n\n @_no_tracing\n def test_blasdot_uninitialized_memory(self):\n # Ticket #950\n for m in [0, 1, 2]:\n for n in [0, 1, 2]:\n for k in range(3):\n # Try to ensure that x->data contains non-zero floats\n x = np.array([123456789e199], dtype=np.float64)\n if IS_PYPY:\n x.resize((m, 0), refcheck=False)\n else:\n x.resize((m, 0))\n y = np.array([123456789e199], dtype=np.float64)\n if IS_PYPY:\n y.resize((0, n), refcheck=False)\n else:\n y.resize((0, n))\n\n # `dot` should just return zero (m, n) matrix\n z = np.dot(x, y)\n assert_(np.all(z == 0))\n assert_(z.shape == (m, n))\n\n def test_zeros(self):\n # Regression test for #1061.\n # Set a size which cannot fit into a 64 bits signed integer\n sz = 2 ** 64\n with assert_raises_regex(ValueError,\n 'Maximum allowed dimension exceeded'):\n np.empty(sz)\n\n def test_huge_arange(self):\n # Regression test for #1062.\n # Set a size which cannot fit into a 64 bits signed integer\n sz = 2 ** 64\n with assert_raises_regex(ValueError,\n 'Maximum allowed size exceeded'):\n np.arange(sz)\n assert_(np.size == sz)\n\n def test_fromiter_bytes(self):\n # Ticket #1058\n a = np.fromiter(list(range(10)), dtype='b')\n b = np.fromiter(list(range(10)), dtype='B')\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n\n def test_array_from_sequence_scalar_array(self):\n # Ticket #1078: segfaults when creating an array with a sequence of\n # 0d arrays.\n a = np.array((np.ones(2), np.array(2)), dtype=object)\n assert_equal(a.shape, (2,))\n assert_equal(a.dtype, np.dtype(object))\n assert_equal(a[0], np.ones(2))\n assert_equal(a[1], np.array(2))\n\n a = np.array(((1,), np.array(1)), dtype=object)\n assert_equal(a.shape, (2,))\n assert_equal(a.dtype, np.dtype(object))\n assert_equal(a[0], (1,))\n assert_equal(a[1], np.array(1))\n\n def test_array_from_sequence_scalar_array2(self):\n # Ticket #1081: weird array with strange input...\n t = np.array([np.array([]), np.array(0, object)], dtype=object)\n assert_equal(t.shape, (2,))\n assert_equal(t.dtype, np.dtype(object))\n\n def test_array_too_big(self):\n # Ticket #1080.\n assert_raises(ValueError, np.zeros, [975]*7, np.int8)\n assert_raises(ValueError, np.zeros, [26244]*5, np.int8)\n\n def test_dtype_keyerrors_(self):\n # Ticket #1106.\n dt = np.dtype([('f1', np.uint)])\n assert_raises(KeyError, dt.__getitem__, \"f2\")\n assert_raises(IndexError, dt.__getitem__, 1)\n assert_raises(TypeError, dt.__getitem__, 0.0)\n\n def test_lexsort_buffer_length(self):\n # Ticket #1217, don't segfault.\n a = np.ones(100, dtype=np.int8)\n b = np.ones(100, dtype=np.int32)\n i = np.lexsort((a[::-1], b))\n assert_equal(i, np.arange(100, dtype=int))\n\n def test_object_array_to_fixed_string(self):\n # Ticket #1235.\n a = np.array(['abcdefgh', 'ijklmnop'], dtype=np.object_)\n b = np.array(a, dtype=(np.str_, 8))\n assert_equal(a, b)\n c = np.array(a, dtype=(np.str_, 5))\n assert_equal(c, np.array(['abcde', 'ijklm']))\n d = np.array(a, dtype=(np.str_, 12))\n assert_equal(a, d)\n e = np.empty((2, ), dtype=(np.str_, 8))\n e[:] = a[:]\n assert_equal(a, e)\n\n def test_unicode_to_string_cast(self):\n # Ticket #1240.\n a = np.array([[u'abc', u'\\u03a3'],\n [u'asdf', u'erw']],\n dtype='U')\n assert_raises(UnicodeEncodeError, np.array, a, 'S4')\n\n def test_unicode_to_string_cast_error(self):\n # gh-15790\n a = np.array([u'\\x80'] * 129, dtype='U3')\n assert_raises(UnicodeEncodeError, np.array, a, 'S')\n b = a.reshape(3, 43)[:-1, :-1]\n 
assert_raises(UnicodeEncodeError, np.array, b, 'S')\n\n def test_mixed_string_unicode_array_creation(self):\n a = np.array(['1234', u'123'])\n assert_(a.itemsize == 16)\n a = np.array([u'123', '1234'])\n assert_(a.itemsize == 16)\n a = np.array(['1234', u'123', '12345'])\n assert_(a.itemsize == 20)\n a = np.array([u'123', '1234', u'12345'])\n assert_(a.itemsize == 20)\n a = np.array([u'123', '1234', u'1234'])\n assert_(a.itemsize == 16)\n\n def test_misaligned_objects_segfault(self):\n # Ticket #1198 and #1267\n a1 = np.zeros((10,), dtype='O,c')\n a2 = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'], 'S10')\n a1['f0'] = a2\n repr(a1)\n np.argmax(a1['f0'])\n a1['f0'][1] = \"FOO\"\n a1['f0'] = \"FOO\"\n np.array(a1['f0'], dtype='S')\n np.nonzero(a1['f0'])\n a1.sort()\n copy.deepcopy(a1)\n\n def test_misaligned_scalars_segfault(self):\n # Ticket #1267\n s1 = np.array(('a', 'Foo'), dtype='c,O')\n s2 = np.array(('b', 'Bar'), dtype='c,O')\n s1['f1'] = s2['f1']\n s1['f1'] = 'Baz'\n\n def test_misaligned_dot_product_objects(self):\n # Ticket #1267\n # This didn't require a fix, but it's worth testing anyway, because\n # it may fail if .dot stops enforcing the arrays to be BEHAVED\n a = np.array([[(1, 'a'), (0, 'a')], [(0, 'a'), (1, 'a')]], dtype='O,c')\n b = np.array([[(4, 'a'), (1, 'a')], [(2, 'a'), (2, 'a')]], dtype='O,c')\n np.dot(a['f0'], b['f0'])\n\n def test_byteswap_complex_scalar(self):\n # Ticket #1259 and gh-441\n for dtype in [np.dtype('<'+t) for t in np.typecodes['Complex']]:\n z = np.array([2.2-1.1j], dtype)\n x = z[0] # always native-endian\n y = x.byteswap()\n if x.dtype.byteorder == z.dtype.byteorder:\n # little-endian machine\n assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype.newbyteorder()))\n else:\n # big-endian machine\n assert_equal(x, np.frombuffer(y.tobytes(), dtype=dtype))\n # double check real and imaginary parts:\n assert_equal(x.real, y.real.byteswap())\n assert_equal(x.imag, y.imag.byteswap())\n\n def test_structured_arrays_with_objects1(self):\n # Ticket #1299\n stra = 'aaaa'\n strb = 'bbbb'\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\n x[x.nonzero()] = x.ravel()[:1]\n assert_(x[0, 1] == x[0, 0])\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_structured_arrays_with_objects2(self):\n # Ticket #1299 second test\n stra = 'aaaa'\n strb = 'bbbb'\n numb = sys.getrefcount(strb)\n numa = sys.getrefcount(stra)\n x = np.array([[(0, stra), (1, strb)]], 'i8,O')\n x[x.nonzero()] = x.ravel()[:1]\n assert_(sys.getrefcount(strb) == numb)\n assert_(sys.getrefcount(stra) == numa + 2)\n\n def test_duplicate_title_and_name(self):\n # Ticket #1254\n dtspec = [(('a', 'a'), 'i'), ('b', 'i')]\n assert_raises(ValueError, np.dtype, dtspec)\n\n def test_signed_integer_division_overflow(self):\n # Ticket #1317.\n def test_type(t):\n min = np.array([np.iinfo(t).min])\n min //= -1\n\n with np.errstate(divide=\"ignore\"):\n for t in (np.int8, np.int16, np.int32, np.int64, int):\n test_type(t)\n\n def test_buffer_hashlib(self):\n from hashlib import md5\n\n x = np.array([1, 2, 3], dtype=np.dtype('<i4'))\n assert_equal(md5(x).hexdigest(), '2a1dd1e1e59d0a384c26951e316cd7e6')\n\n def test_0d_string_scalar(self):\n # Bug #1436; the following should succeed\n np.asarray('x', '>c')\n\n def test_log1p_compiler_shenanigans(self):\n # Check if log1p is behaving on 32 bit intel systems.\n assert_(np.isfinite(np.log1p(np.exp2(-53))))\n\n def test_fromiter_comparison(self):\n a = np.fromiter(list(range(10)), dtype='b')\n b = 
np.fromiter(list(range(10)), dtype='B')\n assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n assert_(np.alltrue(b == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))\n\n def test_fromstring_crash(self):\n # Ticket #1345: the following should not cause a crash\n with assert_warns(DeprecationWarning):\n np.fromstring(b'aa, aa, 1.0', sep=',')\n\n def test_ticket_1539(self):\n dtypes = [x for x in np.typeDict.values()\n if (issubclass(x, np.number)\n and not issubclass(x, np.timedelta64))]\n a = np.array([], np.bool_) # not x[0] because it is unordered\n failures = []\n\n for x in dtypes:\n b = a.astype(x)\n for y in dtypes:\n c = a.astype(y)\n try:\n np.dot(b, c)\n except TypeError:\n failures.append((x, y))\n if failures:\n raise AssertionError(\"Failures: %r\" % failures)\n\n def test_ticket_1538(self):\n x = np.finfo(np.float32)\n for name in 'eps epsneg max min resolution tiny'.split():\n assert_equal(type(getattr(x, name)), np.float32,\n err_msg=name)\n\n def test_ticket_1434(self):\n # Check that the out= argument in var and std has an effect\n data = np.array(((1, 2, 3), (4, 5, 6), (7, 8, 9)))\n out = np.zeros((3,))\n\n ret = data.var(axis=1, out=out)\n assert_(ret is out)\n assert_array_equal(ret, data.var(axis=1))\n\n ret = data.std(axis=1, out=out)\n assert_(ret is out)\n assert_array_equal(ret, data.std(axis=1))\n\n def test_complex_nan_maximum(self):\n cnan = complex(0, np.nan)\n assert_equal(np.maximum(1, cnan), cnan)\n\n def test_subclass_int_tuple_assignment(self):\n # ticket #1563\n class Subclass(np.ndarray):\n def __new__(cls, i):\n return np.ones((i,)).view(cls)\n\n x = Subclass(5)\n x[(0,)] = 2 # shouldn't raise an exception\n assert_equal(x[0], 2)\n\n def test_ufunc_no_unnecessary_views(self):\n # ticket #1548\n class Subclass(np.ndarray):\n pass\n x = np.array([1, 2, 3]).view(Subclass)\n y = np.add(x, x, x)\n assert_equal(id(x), id(y))\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_take_refcount(self):\n # ticket #939\n a = np.arange(16, dtype=float)\n a.shape = (4, 4)\n lut = np.ones((5 + 3, 4), float)\n rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)\n c1 = sys.getrefcount(rgba)\n try:\n lut.take(a, axis=0, mode='clip', out=rgba)\n except TypeError:\n pass\n c2 = sys.getrefcount(rgba)\n assert_equal(c1, c2)\n\n def test_fromfile_tofile_seeks(self):\n # On Python 3, tofile/fromfile used to get (#1610) the Python\n # file handle out of sync\n f0 = tempfile.NamedTemporaryFile()\n f = f0.file\n f.write(np.arange(255, dtype='u1').tobytes())\n\n f.seek(20)\n ret = np.fromfile(f, count=4, dtype='u1')\n assert_equal(ret, np.array([20, 21, 22, 23], dtype='u1'))\n assert_equal(f.tell(), 24)\n\n f.seek(40)\n np.array([1, 2, 3], dtype='u1').tofile(f)\n assert_equal(f.tell(), 43)\n\n f.seek(40)\n data = f.read(3)\n assert_equal(data, b\"\\x01\\x02\\x03\")\n\n f.seek(80)\n f.read(4)\n data = np.fromfile(f, dtype='u1', count=4)\n assert_equal(data, np.array([84, 85, 86, 87], dtype='u1'))\n\n f.close()\n\n def test_complex_scalar_warning(self):\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = tp(1+2j)\n assert_warns(np.ComplexWarning, float, x)\n with suppress_warnings() as sup:\n sup.filter(np.ComplexWarning)\n assert_equal(float(x), float(x.real))\n\n def test_complex_scalar_complex_cast(self):\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = tp(1+2j)\n assert_equal(complex(x), 1+2j)\n\n def test_complex_boolean_cast(self):\n # Ticket #2218\n for tp in [np.csingle, np.cdouble, np.clongdouble]:\n x = 
np.array([0, 0+0.5j, 0.5+0j], dtype=tp)\n assert_equal(x.astype(bool), np.array([0, 1, 1], dtype=bool))\n assert_(np.any(x))\n assert_(np.all(x[1:]))\n\n def test_uint_int_conversion(self):\n x = 2**64 - 1\n assert_equal(int(np.uint64(x)), x)\n\n def test_duplicate_field_names_assign(self):\n ra = np.fromiter(((i*3, i*2) for i in range(10)), dtype='i8,f8')\n ra.dtype.names = ('f1', 'f2')\n repr(ra) # should not cause a segmentation fault\n assert_raises(ValueError, setattr, ra.dtype, 'names', ('f1', 'f1'))\n\n def test_eq_string_and_object_array(self):\n # From e-mail thread \"__eq__ with str and object\" (Keith Goodman)\n a1 = np.array(['a', 'b'], dtype=object)\n a2 = np.array(['a', 'c'])\n assert_array_equal(a1 == a2, [True, False])\n assert_array_equal(a2 == a1, [True, False])\n\n def test_nonzero_byteswap(self):\n a = np.array([0x80000000, 0x00000080, 0], dtype=np.uint32)\n a.dtype = np.float32\n assert_equal(a.nonzero()[0], [1])\n a = a.byteswap().newbyteorder()\n assert_equal(a.nonzero()[0], [1]) # [0] if nonzero() ignores swap\n\n def test_find_common_type_boolean(self):\n # Ticket #1695\n assert_(np.find_common_type([], ['?', '?']) == '?')\n\n def test_empty_mul(self):\n a = np.array([1.])\n a[1:1] *= 2\n assert_equal(a, [1.])\n\n def test_array_side_effect(self):\n # The second use of itemsize was throwing an exception because in\n # ctors.c, discover_itemsize was calling PyObject_Length without\n # checking the return code. This failed to get the length of the\n # number 2, and the exception hung around until something checked\n # PyErr_Occurred() and returned an error.\n assert_equal(np.dtype('S10').itemsize, 10)\n np.array([['abc', 2], ['long ', '0123456789']], dtype=np.string_)\n assert_equal(np.dtype('S10').itemsize, 10)\n\n def test_any_float(self):\n # all and any for floats\n a = np.array([0.1, 0.9])\n assert_(np.any(a))\n assert_(np.all(a))\n\n def test_large_float_sum(self):\n a = np.arange(10000, dtype='f')\n assert_equal(a.sum(dtype='d'), a.astype('d').sum())\n\n def test_ufunc_casting_out(self):\n a = np.array(1.0, dtype=np.float32)\n b = np.array(1.0, dtype=np.float64)\n c = np.array(1.0, dtype=np.float32)\n np.add(a, b, out=c)\n assert_equal(c, 2.0)\n\n def test_array_scalar_contiguous(self):\n # Array scalars are both C and Fortran contiguous\n assert_(np.array(1.0).flags.c_contiguous)\n assert_(np.array(1.0).flags.f_contiguous)\n assert_(np.array(np.float32(1.0)).flags.c_contiguous)\n assert_(np.array(np.float32(1.0)).flags.f_contiguous)\n\n def test_squeeze_contiguous(self):\n # Similar to GitHub issue #387\n a = np.zeros((1, 2)).squeeze()\n b = np.zeros((2, 2, 2), order='F')[:, :, ::2].squeeze()\n assert_(a.flags.c_contiguous)\n assert_(a.flags.f_contiguous)\n assert_(b.flags.f_contiguous)\n\n def test_squeeze_axis_handling(self):\n # Issue #10779\n # Ensure proper handling of objects\n # that don't support axis specification\n # when squeezing\n\n class OldSqueeze(np.ndarray):\n\n def __new__(cls,\n input_array):\n obj = np.asarray(input_array).view(cls)\n return obj\n\n # it is perfectly reasonable that prior\n # to numpy version 1.7.0 a subclass of ndarray\n # might have been created that did not expect\n # squeeze to have an axis argument\n # NOTE: this example is somewhat artificial;\n # it is designed to simulate an old API\n # expectation to guard against regression\n def squeeze(self):\n return super(OldSqueeze, self).squeeze()\n\n oldsqueeze = OldSqueeze(np.array([[1],[2],[3]]))\n\n # if no axis argument is specified the old API\n # expectation should 
give the correct result\n assert_equal(np.squeeze(oldsqueeze),\n np.array([1,2,3]))\n\n # likewise, axis=None should work perfectly well\n # with the old API expectation\n assert_equal(np.squeeze(oldsqueeze, axis=None),\n np.array([1,2,3]))\n\n # however, specification of any particular axis\n # should raise a TypeError in the context of the\n # old API specification, even when using a valid\n # axis specification like 1 for this array\n with assert_raises(TypeError):\n # this would silently succeed for array\n # subclasses / objects that did not support\n # squeeze axis argument handling before fixing\n # Issue #10779\n np.squeeze(oldsqueeze, axis=1)\n\n # check for the same behavior when using an invalid\n # axis specification -- in this case axis=0 does not\n # have size 1, but the priority should be to raise\n # a TypeError for the axis argument and NOT a\n # ValueError for squeezing a non-empty dimension\n with assert_raises(TypeError):\n np.squeeze(oldsqueeze, axis=0)\n\n # the new API knows how to handle the axis\n # argument and will return a ValueError if\n # attempting to squeeze an axis that is not\n # of length 1\n with assert_raises(ValueError):\n np.squeeze(np.array([[1],[2],[3]]), axis=0)\n\n def test_reduce_contiguous(self):\n # GitHub issue #387\n a = np.add.reduce(np.zeros((2, 1, 2)), (0, 1))\n b = np.add.reduce(np.zeros((2, 1, 2)), 1)\n assert_(a.flags.c_contiguous)\n assert_(a.flags.f_contiguous)\n assert_(b.flags.c_contiguous)\n\n def test_object_array_self_reference(self):\n # Object arrays with references to themselves can cause problems\n a = np.array(0, dtype=object)\n a[()] = a\n assert_raises(RecursionError, int, a)\n assert_raises(RecursionError, float, a)\n a[()] = None\n\n def test_object_array_circular_reference(self):\n # Test the same for a circular reference.\n a = np.array(0, dtype=object)\n b = np.array(0, dtype=object)\n a[()] = b\n b[()] = a\n assert_raises(RecursionError, int, a)\n # NumPy has no tp_traverse currently, so circular references\n # cannot be detected. So resolve it:\n a[()] = None\n\n # This was causing a to become like the above\n a = np.array(0, dtype=object)\n a[...] 
+= 1\n assert_equal(a, 1)\n\n def test_object_array_nested(self):\n # but is fine with a reference to a different array\n a = np.array(0, dtype=object)\n b = np.array(0, dtype=object)\n a[()] = b\n assert_equal(int(a), int(0))\n assert_equal(float(a), float(0))\n\n def test_object_array_self_copy(self):\n # An object array being copied into itself DECREF'ed before INCREF'ing\n # causing segmentation faults (gh-3787)\n a = np.array(object(), dtype=object)\n np.copyto(a, a)\n if HAS_REFCOUNT:\n assert_(sys.getrefcount(a[()]) == 2)\n a[()].__class__ # will segfault if object was deleted\n\n def test_zerosize_accumulate(self):\n \"Ticket #1733\"\n x = np.array([[42, 0]], dtype=np.uint32)\n assert_equal(np.add.accumulate(x[:-1, 0]), [])\n\n def test_objectarray_setfield(self):\n # Setfield should not overwrite Object fields with non-Object data\n x = np.array([1, 2, 3], dtype=object)\n assert_raises(TypeError, x.setfield, 4, np.int32, 0)\n\n def test_setting_rank0_string(self):\n \"Ticket #1736\"\n s1 = b\"hello1\"\n s2 = b\"hello2\"\n a = np.zeros((), dtype=\"S10\")\n a[()] = s1\n assert_equal(a, np.array(s1))\n a[()] = np.array(s2)\n assert_equal(a, np.array(s2))\n\n a = np.zeros((), dtype='f4')\n a[()] = 3\n assert_equal(a, np.array(3))\n a[()] = np.array(4)\n assert_equal(a, np.array(4))\n\n def test_string_astype(self):\n \"Ticket #1748\"\n s1 = b'black'\n s2 = b'white'\n s3 = b'other'\n a = np.array([[s1], [s2], [s3]])\n assert_equal(a.dtype, np.dtype('S5'))\n b = a.astype(np.dtype('S0'))\n assert_equal(b.dtype, np.dtype('S5'))\n\n def test_ticket_1756(self):\n # Ticket #1756\n s = b'0123456789abcdef'\n a = np.array([s]*5)\n for i in range(1, 17):\n a1 = np.array(a, \"|S%d\" % i)\n a2 = np.array([s[:i]]*5)\n assert_equal(a1, a2)\n\n def test_fields_strides(self):\n \"gh-2355\"\n r = np.frombuffer(b'abcdefghijklmnop'*4*3, dtype='i4,(2,3)u2')\n assert_equal(r[0:3:2]['f1'], r['f1'][0:3:2])\n assert_equal(r[0:3:2]['f1'][0], r[0:3:2][0]['f1'])\n assert_equal(r[0:3:2]['f1'][0][()], r[0:3:2][0]['f1'][()])\n assert_equal(r[0:3:2]['f1'][0].strides, r[0:3:2][0]['f1'].strides)\n\n def test_alignment_update(self):\n # Check that alignment flag is updated on stride setting\n a = np.arange(10)\n assert_(a.flags.aligned)\n a.strides = 3\n assert_(not a.flags.aligned)\n\n def test_ticket_1770(self):\n \"Should not segfault on python 3k\"\n import numpy as np\n try:\n a = np.zeros((1,), dtype=[('f1', 'f')])\n a['f1'] = 1\n a['f2'] = 1\n except ValueError:\n pass\n except Exception:\n raise AssertionError\n\n def test_ticket_1608(self):\n \"x.flat shouldn't modify data\"\n x = np.array([[1, 2], [3, 4]]).T\n np.array(x.flat)\n assert_equal(x, [[1, 3], [2, 4]])\n\n def test_pickle_string_overwrite(self):\n import re\n\n data = np.array([1], dtype='b')\n blob = pickle.dumps(data, protocol=1)\n data = pickle.loads(blob)\n\n # Check that loads does not clobber interned strings\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\n assert_equal(s[0], \"\\x01\")\n data[0] = 0xbb\n s = re.sub(\"a(.)\", \"\\x01\\\\1\", \"a_\")\n assert_equal(s[0], \"\\x01\")\n\n def test_pickle_bytes_overwrite(self):\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n data = np.array([1], dtype='b')\n data = pickle.loads(pickle.dumps(data, protocol=proto))\n data[0] = 0xdd\n bytestring = \"\\x01 \".encode('ascii')\n assert_equal(bytestring[0:1], '\\x01'.encode('ascii'))\n\n def test_pickle_py2_array_latin1_hack(self):\n # Check that unpickling hacks in Py3 that support\n # encoding='latin1' work correctly.\n\n # Python2 output for 
pickle.dumps(numpy.array([129], dtype='b'))\n data = (b\"cnumpy.core.multiarray\\n_reconstruct\\np0\\n(cnumpy\\nndarray\\np1\\n(I0\\n\"\n b\"tp2\\nS'b'\\np3\\ntp4\\nRp5\\n(I1\\n(I1\\ntp6\\ncnumpy\\ndtype\\np7\\n(S'i1'\\np8\\n\"\n b\"I0\\nI1\\ntp9\\nRp10\\n(I3\\nS'|'\\np11\\nNNNI-1\\nI-1\\nI0\\ntp12\\nbI00\\nS'\\\\x81'\\n\"\n b\"p13\\ntp14\\nb.\")\n # This should work:\n result = pickle.loads(data, encoding='latin1')\n assert_array_equal(result, np.array([129], dtype='b'))\n # Should not segfault:\n assert_raises(Exception, pickle.loads, data, encoding='koi8-r')\n\n def test_pickle_py2_scalar_latin1_hack(self):\n # Check that scalar unpickling hack in Py3 that supports\n # encoding='latin1' work correctly.\n\n # Python2 output for pickle.dumps(...)\n datas = [\n # (original, python2_pickle, koi8r_validity)\n (np.unicode_('\\u6bd2'),\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n\"\n b\"(S'U1'\\np2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI4\\nI4\\nI0\\n\"\n b\"tp6\\nbS'\\\\xd2k\\\\x00\\\\x00'\\np7\\ntp8\\nRp9\\n.\"),\n 'invalid'),\n\n (np.float64(9e123),\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'f8'\\n\"\n b\"p2\\nI0\\nI1\\ntp3\\nRp4\\n(I3\\nS'<'\\np5\\nNNNI-1\\nI-1\\nI0\\ntp6\\n\"\n b\"bS'O\\\\x81\\\\xb7Z\\\\xaa:\\\\xabY'\\np7\\ntp8\\nRp9\\n.\"),\n 'invalid'),\n\n (np.bytes_(b'\\x9c'), # different 8-bit code point in KOI8-R vs latin1\n (b\"cnumpy.core.multiarray\\nscalar\\np0\\n(cnumpy\\ndtype\\np1\\n(S'S1'\\np2\\n\"\n b\"I0\\nI1\\ntp3\\nRp4\\n(I3\\nS'|'\\np5\\nNNNI1\\nI1\\nI0\\ntp6\\nbS'\\\\x9c'\\np7\\n\"\n b\"tp8\\nRp9\\n.\"),\n 'different'),\n ]\n for original, data, koi8r_validity in datas:\n result = pickle.loads(data, encoding='latin1')\n assert_equal(result, original)\n\n # Decoding under non-latin1 encoding (e.g.) KOI8-R can\n # produce bad results, but should not segfault.\n if koi8r_validity == 'different':\n # Unicode code points happen to lie within latin1,\n # but are different in koi8-r, resulting to silent\n # bogus results\n result = pickle.loads(data, encoding='koi8-r')\n assert_(result != original)\n elif koi8r_validity == 'invalid':\n # Unicode code points outside latin1, so results\n # to an encoding exception\n assert_raises(ValueError, pickle.loads, data, encoding='koi8-r')\n else:\n raise ValueError(koi8r_validity)\n\n def test_structured_type_to_object(self):\n a_rec = np.array([(0, 1), (3, 2)], dtype='i4,i8')\n a_obj = np.empty((2,), dtype=object)\n a_obj[0] = (0, 1)\n a_obj[1] = (3, 2)\n # astype records -> object\n assert_equal(a_rec.astype(object), a_obj)\n # '=' records -> object\n b = np.empty_like(a_obj)\n b[...] = a_rec\n assert_equal(b, a_obj)\n # '=' object -> records\n b = np.empty_like(a_rec)\n b[...] = a_obj\n assert_equal(b, a_rec)\n\n def test_assign_obj_listoflists(self):\n # Ticket # 1870\n # The inner list should get assigned to the object elements\n a = np.zeros(4, dtype=object)\n b = a.copy()\n a[0] = [1]\n a[1] = [2]\n a[2] = [3]\n a[3] = [4]\n b[...] = [[1], [2], [3], [4]]\n assert_equal(a, b)\n # The first dimension should get broadcast\n a = np.zeros((2, 2), dtype=object)\n a[...] 
= [[1, 2]]\n assert_equal(a, [[1, 2], [1, 2]])\n\n @pytest.mark.slow_pypy\n def test_memoryleak(self):\n # Ticket #1917 - ensure that array data doesn't leak\n for i in range(1000):\n # 100MB times 1000 would give 100GB of memory usage if it leaks\n a = np.empty((100000000,), dtype='i1')\n del a\n\n @pytest.mark.skipif(not HAS_REFCOUNT, reason=\"Python lacks refcounts\")\n def test_ufunc_reduce_memoryleak(self):\n a = np.arange(6)\n acnt = sys.getrefcount(a)\n np.add.reduce(a)\n assert_equal(sys.getrefcount(a), acnt)\n\n def test_search_sorted_invalid_arguments(self):\n # Ticket #2021, should not segfault.\n x = np.arange(0, 4, dtype='datetime64[D]')\n assert_raises(TypeError, x.searchsorted, 1)\n\n def test_string_truncation(self):\n # Ticket #1990 - Data can be truncated in creation of an array from a\n # mixed sequence of numeric values and strings\n for val in [True, 1234, 123.4, complex(1, 234)]:\n for tostr in [asunicode, asbytes]:\n b = np.array([val, tostr('xx')])\n assert_equal(tostr(b[0]), tostr(val))\n b = np.array([tostr('xx'), val])\n assert_equal(tostr(b[1]), tostr(val))\n\n # test also with longer strings\n b = np.array([val, tostr('xxxxxxxxxx')])\n assert_equal(tostr(b[0]), tostr(val))\n b = np.array([tostr('xxxxxxxxxx'), val])\n assert_equal(tostr(b[1]), tostr(val))\n\n def test_string_truncation_ucs2(self):\n # Ticket #2081. Python compiled with two byte unicode\n # can lead to truncation if itemsize is not properly\n # adjusted for NumPy's four byte unicode.\n a = np.array(['abcd'])\n assert_equal(a.dtype.itemsize, 16)\n\n def test_unique_stable(self):\n # Ticket #2063 must always choose stable sort for argsort to\n # get consistent results\n v = np.array(([0]*5 + [1]*6 + [2]*6)*4)\n res = np.unique(v, return_index=True)\n tgt = (np.array([0, 1, 2]), np.array([ 0, 5, 11]))\n assert_equal(res, tgt)\n\n def test_unicode_alloc_dealloc_match(self):\n # Ticket #1578, the mismatch only showed up when running\n # python-debug for python versions >= 2.7, and then as\n # a core dump and error message.\n a = np.array(['abc'], dtype=np.unicode_)[0]\n del a\n\n def test_refcount_error_in_clip(self):\n # Ticket #1588\n a = np.zeros((2,), dtype='>i2').clip(min=0)\n x = a + a\n # This used to segfault:\n y = str(x)\n # Check the final string:\n assert_(y == \"[0 0]\")\n\n def test_searchsorted_wrong_dtype(self):\n # Ticket #2189, it used to segfault, so we check that it raises the\n # proper exception.\n a = np.array([('a', 1)], dtype='S1, int')\n assert_raises(TypeError, np.searchsorted, a, 1.2)\n # Ticket #2066, similar problem:\n dtype = np.format_parser(['i4', 'i4'], [], [])\n a = np.recarray((2, ), dtype)\n assert_raises(TypeError, np.searchsorted, a, 1)\n\n def test_complex64_alignment(self):\n # Issue gh-2668 (trac 2076), segfault on sparc due to misalignment\n dtt = np.complex64\n arr = np.arange(10, dtype=dtt)\n # 2D array\n arr2 = np.reshape(arr, (2, 5))\n # Fortran write followed by (C or F) read caused bus error\n data_str = arr2.tobytes('F')\n data_back = np.ndarray(arr2.shape,\n arr2.dtype,\n buffer=data_str,\n order='F')\n assert_array_equal(arr2, data_back)\n\n def test_structured_count_nonzero(self):\n arr = np.array([0, 1]).astype('i4, (2)i4')[:1]\n count = np.count_nonzero(arr)\n assert_equal(count, 0)\n\n def test_copymodule_preserves_f_contiguity(self):\n a = np.empty((2, 2), order='F')\n b = copy.copy(a)\n c = copy.deepcopy(a)\n assert_(b.flags.fortran)\n assert_(b.flags.f_contiguous)\n assert_(c.flags.fortran)\n assert_(c.flags.f_contiguous)\n\n def 
test_fortran_order_buffer(self):\n import numpy as np\n a = np.array([['Hello', 'Foob']], dtype='U5', order='F')\n arr = np.ndarray(shape=[1, 2, 5], dtype='U1', buffer=a)\n arr2 = np.array([[[u'H', u'e', u'l', u'l', u'o'],\n [u'F', u'o', u'o', u'b', u'']]])\n assert_array_equal(arr, arr2)\n\n def test_assign_from_sequence_error(self):\n # Ticket #4024.\n arr = np.array([1, 2, 3])\n assert_raises(ValueError, arr.__setitem__, slice(None), [9, 9])\n arr.__setitem__(slice(None), [9])\n assert_equal(arr, [9, 9, 9])\n\n def test_format_on_flex_array_element(self):\n # Ticket #4369.\n dt = np.dtype([('date', '<M8[D]'), ('val', '<f8')])\n arr = np.array([('2000-01-01', 1)], dt)\n formatted = '{0}'.format(arr[0])\n assert_equal(formatted, str(arr[0]))\n\n def test_deepcopy_on_0d_array(self):\n # Ticket #3311.\n arr = np.array(3)\n arr_cp = copy.deepcopy(arr)\n\n assert_equal(arr, arr_cp)\n assert_equal(arr.shape, arr_cp.shape)\n assert_equal(int(arr), int(arr_cp))\n assert_(arr is not arr_cp)\n assert_(isinstance(arr_cp, type(arr)))\n\n def test_deepcopy_F_order_object_array(self):\n # Ticket #6456.\n a = {'a': 1}\n b = {'b': 2}\n arr = np.array([[a, b], [a, b]], order='F')\n arr_cp = copy.deepcopy(arr)\n\n assert_equal(arr, arr_cp)\n assert_(arr is not arr_cp)\n # Ensure that we have actually copied the item.\n assert_(arr[0, 1] is not arr_cp[1, 1])\n # Ensure we are allowed to have references to the same object.\n assert_(arr[0, 1] is arr[1, 1])\n # Check the references hold for the copied objects.\n assert_(arr_cp[0, 1] is arr_cp[1, 1])\n\n def test_deepcopy_empty_object_array(self):\n # Ticket #8536.\n # Deepcopy should succeed\n a = np.array([], dtype=object)\n b = copy.deepcopy(a)\n assert_(a.shape == b.shape)\n\n def test_bool_subscript_crash(self):\n # gh-4494\n c = np.rec.array([(1, 2, 3), (4, 5, 6)])\n masked = c[np.array([True, False])]\n base = masked.base\n del masked, c\n base.dtype\n\n def test_richcompare_crash(self):\n # gh-4613\n import operator as op\n\n # dummy class where __array__ throws exception\n class Foo:\n __array_priority__ = 1002\n\n def __array__(self, *args, **kwargs):\n raise Exception()\n\n rhs = Foo()\n lhs = np.array(1)\n for f in [op.lt, op.le, op.gt, op.ge]:\n assert_raises(TypeError, f, lhs, rhs)\n assert_(not op.eq(lhs, rhs))\n assert_(op.ne(lhs, rhs))\n\n def test_richcompare_scalar_and_subclass(self):\n # gh-4709\n class Foo(np.ndarray):\n def __eq__(self, other):\n return \"OK\"\n\n x = np.array([1, 2, 3]).view(Foo)\n assert_equal(10 == x, \"OK\")\n assert_equal(np.int32(10) == x, \"OK\")\n assert_equal(np.array([10]) == x, \"OK\")\n\n def test_pickle_empty_string(self):\n # gh-3926\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n test_string = np.string_('')\n assert_equal(pickle.loads(\n pickle.dumps(test_string, protocol=proto)), test_string)\n\n def test_frompyfunc_many_args(self):\n # gh-5672\n\n def passer(*args):\n pass\n\n assert_raises(ValueError, np.frompyfunc, passer, 32, 1)\n\n def test_repeat_broadcasting(self):\n # gh-5743\n a = np.arange(60).reshape(3, 4, 5)\n for axis in chain(range(-a.ndim, a.ndim), [None]):\n assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))\n\n def test_frompyfunc_nout_0(self):\n # gh-2014\n\n def f(x):\n x[0], x[-1] = x[-1], x[0]\n\n uf = np.frompyfunc(f, 1, 0)\n a = np.array([[1, 2, 3], [4, 5], [6, 7, 8, 9]], dtype=object)\n assert_equal(uf(a), ())\n expected = np.array([[3, 2, 1], [5, 4], [9, 7, 8, 6]], dtype=object)\n assert_array_equal(a, expected)\n\n @pytest.mark.skipif(not HAS_REFCOUNT, 
reason=\"Python lacks refcounts\")\n def test_leak_in_structured_dtype_comparison(self):\n # gh-6250\n recordtype = np.dtype([('a', np.float64),\n ('b', np.int32),\n ('d', (str, 5))])\n\n # Simple case\n a = np.zeros(2, dtype=recordtype)\n for i in range(100):\n a == a\n assert_(sys.getrefcount(a) < 10)\n\n # The case in the bug report.\n before = sys.getrefcount(a)\n u, v = a[0], a[1]\n u == v\n del u, v\n gc.collect()\n after = sys.getrefcount(a)\n assert_equal(before, after)\n\n def test_empty_percentile(self):\n # gh-6530 / gh-6553\n assert_array_equal(np.percentile(np.arange(10), []), np.array([]))\n\n def test_void_compare_segfault(self):\n # gh-6922. The following should not segfault\n a = np.ones(3, dtype=[('object', 'O'), ('int', '<i2')])\n a.sort()\n\n def test_reshape_size_overflow(self):\n # gh-7455\n a = np.ones(20)[::2]\n if np.dtype(np.intp).itemsize == 8:\n # 64 bit. The following are the prime factors of 2**63 + 5,\n # plus a leading 2, so when multiplied together as int64,\n # the result overflows to a total size of 10.\n new_shape = (2, 13, 419, 691, 823, 2977518503)\n else:\n # 32 bit. The following are the prime factors of 2**31 + 5,\n # plus a leading 2, so when multiplied together as int32,\n # the result overflows to a total size of 10.\n new_shape = (2, 7, 7, 43826197)\n assert_raises(ValueError, a.reshape, new_shape)\n\n def test_invalid_structured_dtypes(self):\n # gh-2865\n # mapping python objects to other dtypes\n assert_raises(ValueError, np.dtype, ('O', [('name', 'i8')]))\n assert_raises(ValueError, np.dtype, ('i8', [('name', 'O')]))\n assert_raises(ValueError, np.dtype,\n ('i8', [('name', [('name', 'O')])]))\n assert_raises(ValueError, np.dtype, ([('a', 'i4'), ('b', 'i4')], 'O'))\n assert_raises(ValueError, np.dtype, ('i8', 'O'))\n # wrong number/type of tuple elements in dict\n assert_raises(ValueError, np.dtype,\n ('i', {'name': ('i', 0, 'title', 'oops')}))\n assert_raises(ValueError, np.dtype,\n ('i', {'name': ('i', 'wrongtype', 'title')}))\n # disallowed as of 1.13\n assert_raises(ValueError, np.dtype,\n ([('a', 'O'), ('b', 'O')], [('c', 'O'), ('d', 'O')]))\n # allowed as a special case due to existing use, see gh-2798\n a = np.ones(1, dtype=('O', [('name', 'O')]))\n assert_equal(a[0], 1)\n # In particular, the above union dtype (and union dtypes in general)\n # should mainly behave like the main (object) dtype:\n assert a[0] is a.item()\n assert type(a[0]) is int\n\n def test_correct_hash_dict(self):\n # gh-8887 - __hash__ would be None despite tp_hash being set\n all_types = set(np.typeDict.values()) - {np.void}\n for t in all_types:\n val = t()\n\n try:\n hash(val)\n except TypeError as e:\n assert_equal(t.__hash__, None)\n else:\n assert_(t.__hash__ != None)\n\n def test_scalar_copy(self):\n scalar_types = set(np.sctypeDict.values())\n values = {\n np.void: b\"a\",\n np.bytes_: b\"a\",\n np.unicode_: \"a\",\n np.datetime64: \"2017-08-25\",\n }\n for sctype in scalar_types:\n item = sctype(values.get(sctype, 1))\n item2 = copy.copy(item)\n assert_equal(item, item2)\n\n def test_void_item_memview(self):\n va = np.zeros(10, 'V4')\n x = va[:1].item()\n va[0] = b'\\xff\\xff\\xff\\xff'\n del va\n assert_equal(x, b'\\x00\\x00\\x00\\x00')\n\n def test_void_getitem(self):\n # Test fix for gh-11668.\n assert_(np.array([b'a'], 'V1').astype('O') == b'a')\n assert_(np.array([b'ab'], 'V2').astype('O') == b'ab')\n assert_(np.array([b'abc'], 'V3').astype('O') == b'abc')\n assert_(np.array([b'abcd'], 'V4').astype('O') == b'abcd')\n\n def 
test_structarray_title(self):\n # The following used to segfault on pypy, due to NPY_TITLE_KEY\n # not working properly and resulting to double-decref of the\n # structured array field items:\n # See: https://bitbucket.org/pypy/pypy/issues/2789\n for j in range(5):\n structure = np.array([1], dtype=[(('x', 'X'), np.object_)])\n structure[0]['x'] = np.array([2])\n gc.collect()\n\n def test_dtype_scalar_squeeze(self):\n # gh-11384\n values = {\n 'S': b\"a\",\n 'M': \"2018-06-20\",\n }\n for ch in np.typecodes['All']:\n if ch in 'O':\n continue\n sctype = np.dtype(ch).type\n scvalue = sctype(values.get(ch, 3))\n for axis in [None, ()]:\n squeezed = scvalue.squeeze(axis=axis)\n assert_equal(squeezed, scvalue)\n assert_equal(type(squeezed), type(scvalue))\n\n def test_field_access_by_title(self):\n # gh-11507\n s = 'Some long field name'\n if HAS_REFCOUNT:\n base = sys.getrefcount(s)\n t = np.dtype([((s, 'f1'), np.float64)])\n data = np.zeros(10, t)\n for i in range(10):\n str(data[['f1']])\n if HAS_REFCOUNT:\n assert_(base <= sys.getrefcount(s))\n\n @pytest.mark.parametrize('val', [\n # arrays and scalars\n np.ones((10, 10), dtype='int32'),\n np.uint64(10),\n ])\n @pytest.mark.parametrize('protocol',\n range(2, pickle.HIGHEST_PROTOCOL + 1)\n )\n def test_pickle_module(self, protocol, val):\n # gh-12837\n s = pickle.dumps(val, protocol)\n assert b'_multiarray_umath' not in s\n if protocol == 5 and len(val.shape) > 0:\n # unpickling ndarray goes through _frombuffer for protocol 5\n assert b'numpy.core.numeric' in s\n else:\n assert b'numpy.core.multiarray' in s\n\n def test_object_casting_errors(self):\n # gh-11993 update to ValueError (see gh-16909), since strings can in\n # principle be converted to complex, but this string cannot.\n arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)\n assert_raises(ValueError, arr.astype, 'c8')\n\n def test_eff1d_casting(self):\n # gh-12711\n x = np.array([1, 2, 4, 7, 0], dtype=np.int16)\n res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))\n assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])\n\n # The use of safe casting means, that 1<<20 is cast unsafely, an\n # error may be better, but currently there is no mechanism for it.\n res = np.ediff1d(x, to_begin=(1<<20), to_end=(1<<20))\n assert_equal(res, [0, 1, 2, 3, -7, 0])\n\n def test_pickle_datetime64_array(self):\n # gh-12745 (would fail with pickle5 installed)\n d = np.datetime64('2015-07-04 12:59:59.50', 'ns')\n arr = np.array([d])\n for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):\n dumped = pickle.dumps(arr, protocol=proto)\n assert_equal(pickle.loads(dumped), arr)\n\n def test_bad_array_interface(self):\n class T:\n __array_interface__ = {}\n\n with assert_raises(ValueError):\n np.array([T()])\n\n def test_2d__array__shape(self):\n class T(object):\n def __array__(self):\n return np.ndarray(shape=(0,0))\n\n # Make sure __array__ is used instead of Sequence methods.\n def __iter__(self):\n return iter([])\n\n def __getitem__(self, idx):\n raise AssertionError(\"__getitem__ was called\")\n\n def __len__(self):\n return 0\n\n\n t = T()\n # gh-13659, would raise in broadcasting [x=t for x in result]\n arr = np.array([t])\n assert arr.shape == (1, 0, 0)\n\n @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')\n @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),\n reason='overflows on windows, fixed in bpo-16865')\n def test_to_ctypes(self):\n #gh-14214\n arr = np.zeros((2 ** 31 + 1,), 'b')\n assert arr.size * arr.itemsize > 2 ** 
31\n c_arr = np.ctypeslib.as_ctypes(arr)\n assert_equal(c_arr._length_, arr.size)\n\n def test_complex_conversion_error(self):\n # gh-17068\n with pytest.raises(TypeError, match=r\"Unable to convert dtype.*\"):\n complex(np.array(\"now\", np.datetime64))\n\n def test__array_interface__descr(self):\n # gh-17068\n dt = np.dtype(dict(names=['a', 'b'],\n offsets=[0, 0],\n formats=[np.int64, np.int64]))\n descr = np.array((1, 1), dtype=dt).__array_interface__['descr']\n assert descr == [('', '|V8')] # instead of [(b'', '|V8')]\n" ]
[ [ "numpy.ones", "numpy.testing.assert_equal", "numpy.any", "numpy.asarray", "numpy.compat.pickle.loads", "numpy.testing.assert_warns", "numpy.datetime64", "numpy.transpose", "numpy.abs", "numpy.bool_", "numpy.bytes_", "numpy.unique", "numpy.typeDict.values", "numpy.string_", "numpy.float32", "numpy.errstate", "numpy.rec.fromarrays", "numpy.unicode_", "numpy.array", "numpy.random.normal", "numpy.dot", "numpy.subtract.reduce", "numpy.find_common_type", "numpy.testing.assert_raises_regex", "numpy.compat.pickle.dump", "numpy.random.seed", "numpy.add", "numpy.add.reduce", "numpy.ediff1d", "numpy.char.array", "numpy.reshape", "numpy.fromstring", "numpy.binary_repr", "numpy.load", "numpy.argmax", "numpy.copyto", "numpy.divide.reduce", "numpy.count_nonzero", "numpy.hstack", "numpy.uint64", "numpy.int32", "numpy.finfo", "numpy.testing.assert_raises", "numpy.zeros_like", "numpy.random.shuffle", "numpy.format_parser", "numpy.concatenate", "numpy.rec.array", "numpy.array_str", "numpy.frombuffer", "numpy.take", "numpy.add.outer", "numpy.frompyfunc", "numpy.float64", "numpy.empty_like", "numpy.sctypeDict.values", "numpy.random.rand", "numpy.nonzero", "numpy.fromfile", "numpy.float_", "numpy.zeros", "numpy.arange", "numpy.lexsort", "numpy.testing.assert_array_almost_equal", "numpy.recarray", "numpy.chararray", "numpy.sort", "numpy.divide.accumulate", "numpy.random.randn", "numpy.compat.asbytes", "numpy.subtract.accumulate", "numpy.setbufsize", "numpy.compat.pickle.load", "numpy.dtype", "numpy.testing._assert_valid_refcount", "numpy.object_", "numpy.testing.assert_almost_equal", "numpy.testing.assert_array_equal", "numpy.ndarray", "numpy.where", "numpy.linspace", "numpy.compat.pickle.dumps", "numpy.exp2", "numpy.lib.stride_tricks.as_strided", "numpy.all", "numpy.indices", "numpy.maximum", "numpy.sign", "numpy.empty", "numpy.squeeze", "numpy.ctypeslib.as_ctypes", "numpy.add.accumulate", "numpy.iinfo", "numpy.testing.suppress_warnings", "numpy.testing.assert_" ] ]
Finfra/KerasStudy
[ "ccd9b0a9f78d70d63a500ba3c0bf9e6686bee11c" ]
[ "08.ReinforcemetLearning/tf_rl/models.py" ]
[ "import math\nimport tensorflow as tf\n\nfrom .utils import base_name\n\n\nclass Layer(object):\n def __init__(self, input_sizes, output_size, scope):\n \"\"\"Cretes a neural network layer.\"\"\"\n if type(input_sizes) != list:\n input_sizes = [input_sizes]\n\n self.input_sizes = input_sizes\n self.output_size = output_size\n self.scope = scope or \"Layer\"\n\n with tf.variable_scope(self.scope):\n self.Ws = []\n for input_idx, input_size in enumerate(input_sizes):\n W_name = \"W_%d\" % (input_idx,)\n W_initializer = tf.random_uniform_initializer(\n -1.0 / math.sqrt(input_size), 1.0 / math.sqrt(input_size))\n W_var = tf.get_variable(W_name, (input_size, output_size), initializer=W_initializer)\n self.Ws.append(W_var)\n self.b = tf.get_variable(\"b\", (output_size,), initializer=tf.constant_initializer(0))\n\n def __call__(self, xs):\n if type(xs) != list:\n xs = [xs]\n assert len(xs) == len(self.Ws), \\\n \"Expected %d input vectors, got %d\" % (len(self.Ws), len(xs))\n with tf.variable_scope(self.scope):\n return sum([tf.matmul(x, W) for x, W in zip(xs, self.Ws)]) + self.b\n\n def variables(self):\n return [self.b] + self.Ws\n\n def copy(self, scope=None):\n scope = scope or self.scope + \"_copy\"\n\n with tf.variable_scope(scope) as sc:\n for v in self.variables():\n tf.get_variable(base_name(v), v.get_shape(),\n initializer=lambda x,dtype=tf.float32: v.initialized_value())\n sc.reuse_variables()\n return Layer(self.input_sizes, self.output_size, scope=sc)\n\nclass MLP(object):\n def __init__(self, input_sizes, hiddens, nonlinearities, scope=None, given_layers=None):\n self.input_sizes = input_sizes\n # observation is 5 features(distance of each object and X,Y speed) of closest 32 object with hero(friend, enemy, wall) + 2 hero's own speed X,Y\n # ==> 5*32 + 2 = 162 features about the game\n self.hiddens = hiddens\n self.input_nonlinearity, self.layer_nonlinearities = nonlinearities[0], nonlinearities[1:]\n self.scope = scope or \"MLP\"\n\n assert len(hiddens) == len(nonlinearities), \\\n \"Number of hiddens must be equal to number of nonlinearities\"\n\n with tf.variable_scope(self.scope):\n if given_layers is not None:\n self.input_layer = given_layers[0]\n self.layers = given_layers[1:]\n else:\n self.input_layer = Layer(input_sizes, hiddens[0], scope=\"input_layer\") # 135 -> 200\n self.layers = []\n\n for l_idx, (h_from, h_to) in enumerate(zip(hiddens[:-1], hiddens[1:])): # hiddens == [200, 200, 4], so this mean, swifting the index by 1\n # (200, 200) , (200,4)\n self.layers.append(Layer(h_from, h_to, scope=\"hidden_layer_%d\" % (l_idx,)))\n # this has 4 layers\n\n def __call__(self, xs):\n if type(xs) != list:\n xs = [xs]\n with tf.variable_scope(self.scope):\n hidden = self.input_nonlinearity(self.input_layer(xs))\n for layer, nonlinearity in zip(self.layers, self.layer_nonlinearities):\n hidden = nonlinearity(layer(hidden))\n return hidden\n\n def variables(self):\n res = self.input_layer.variables()\n for layer in self.layers:\n res.extend(layer.variables())\n return res\n\n def copy(self, scope=None):\n scope = scope or self.scope + \"_copy\"\n nonlinearities = [self.input_nonlinearity] + self.layer_nonlinearities\n given_layers = [self.input_layer.copy()] + [layer.copy() for layer in self.layers]\n return MLP(self.input_sizes, self.hiddens, nonlinearities, scope=scope,\n given_layers=given_layers)" ]
[ [ "tensorflow.get_variable", "tensorflow.variable_scope", "tensorflow.matmul", "tensorflow.constant_initializer" ] ]
farziengineer/numpy
[ "4ff3af387f93ff37f04f42458d4590c33f61fb9e" ]
[ "setup.py" ]
[ "#!/usr/bin/env python\n\"\"\" NumPy is the fundamental package for array computing with Python.\n\nIt provides:\n\n- a powerful N-dimensional array object\n- sophisticated (broadcasting) functions\n- tools for integrating C/C++ and Fortran code\n- useful linear algebra, Fourier transform, and random number capabilities\n- and much more\n\nBesides its obvious scientific uses, NumPy can also be used as an efficient\nmulti-dimensional container of generic data. Arbitrary data-types can be\ndefined. This allows NumPy to seamlessly and speedily integrate with a wide\nvariety of databases.\n\nAll NumPy wheels distributed on PyPI are BSD licensed.\n\n\"\"\"\nfrom __future__ import division, print_function\n\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nimport os\nimport sys\nimport subprocess\nimport textwrap\n\n\nif sys.version_info[:2] < (3, 5):\n raise RuntimeError(\"Python version >= 3.5 required.\")\n\nimport builtins\n\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved\nProgramming Language :: C\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.5\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Python :: Implementation :: CPython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: Microsoft :: Windows\nOperating System :: POSIX\nOperating System :: Unix\nOperating System :: MacOS\n\"\"\"\n\nMAJOR = 1\nMINOR = 18\nMICRO = 0\nISRELEASED = False\nVERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)\n\n\n# Return the git revision as a string\ndef git_version():\n def _minimal_ext_cmd(cmd):\n # construct minimal environment\n env = {}\n for k in ['SYSTEMROOT', 'PATH', 'HOME']:\n v = os.environ.get(k)\n if v is not None:\n env[k] = v\n # LANGUAGE is used on win32\n env['LANGUAGE'] = 'C'\n env['LANG'] = 'C'\n env['LC_ALL'] = 'C'\n out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)\n return out\n\n try:\n out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])\n GIT_REVISION = out.strip().decode('ascii')\n except (subprocess.SubprocessError, OSError):\n GIT_REVISION = \"Unknown\"\n\n return GIT_REVISION\n\n# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be\n# properly updated when the contents of directories change (true for distutils,\n# not sure about setuptools).\nif os.path.exists('MANIFEST'):\n os.remove('MANIFEST')\n\n# This is a bit hackish: we are setting a global variable so that the main\n# numpy __init__ can detect if it is being loaded by the setup routine, to\n# avoid attempting to load components that aren't built yet. While ugly, it's\n# a lot more robust than what was previously being used.\nbuiltins.__NUMPY_SETUP__ = True\n\n\ndef get_version_info():\n # Adding the git rev number needs to be done inside write_version_py(),\n # otherwise the import of numpy.version messes up the build under Python 3.\n FULLVERSION = VERSION\n if os.path.exists('.git'):\n GIT_REVISION = git_version()\n elif os.path.exists('numpy/version.py'):\n # must be a source distribution, use existing version file\n try:\n from numpy.version import git_revision as GIT_REVISION\n except ImportError:\n raise ImportError(\"Unable to import git_revision. 
Try removing \"\n \"numpy/version.py and the build directory \"\n \"before building.\")\n else:\n GIT_REVISION = \"Unknown\"\n\n if not ISRELEASED:\n FULLVERSION += '.dev0+' + GIT_REVISION[:7]\n\n return FULLVERSION, GIT_REVISION\n\n\ndef write_version_py(filename='numpy/version.py'):\n cnt = \"\"\"\n# THIS FILE IS GENERATED FROM NUMPY SETUP.PY\n#\n# To compare versions robustly, use `numpy.lib.NumpyVersion`\nshort_version = '%(version)s'\nversion = '%(version)s'\nfull_version = '%(full_version)s'\ngit_revision = '%(git_revision)s'\nrelease = %(isrelease)s\n\nif not release:\n version = full_version\n\"\"\"\n FULLVERSION, GIT_REVISION = get_version_info()\n\n a = open(filename, 'w')\n try:\n a.write(cnt % {'version': VERSION,\n 'full_version': FULLVERSION,\n 'git_revision': GIT_REVISION,\n 'isrelease': str(ISRELEASED)})\n finally:\n a.close()\n\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n\n config = Configuration(None, parent_package, top_path)\n config.set_options(ignore_setup_xxx_py=True,\n assume_default_configuration=True,\n delegate_options_to_subpackages=True,\n quiet=True)\n\n config.add_subpackage('numpy')\n config.add_data_files(('numpy', 'LICENSE.txt'))\n\n config.get_version('numpy/version.py') # sets config.version\n\n return config\n\n\ndef check_submodules():\n \"\"\" verify that the submodules are checked out and clean\n use `git submodule update --init`; on failure\n \"\"\"\n if not os.path.exists('.git'):\n return\n with open('.gitmodules') as f:\n for l in f:\n if 'path' in l:\n p = l.split('=')[-1].strip()\n if not os.path.exists(p):\n raise ValueError('Submodule %s missing' % p)\n\n\n proc = subprocess.Popen(['git', 'submodule', 'status'],\n stdout=subprocess.PIPE)\n status, _ = proc.communicate()\n status = status.decode(\"ascii\", \"replace\")\n for line in status.splitlines():\n if line.startswith('-') or line.startswith('+'):\n raise ValueError('Submodule not clean: %s' % line)\n\n\nclass concat_license_files():\n \"\"\"Merge LICENSE.txt and LICENSES_bundled.txt for sdist creation\n\n Done this way to keep LICENSE.txt in repo as exact BSD 3-clause (see\n gh-13447). This makes GitHub state correctly how NumPy is licensed.\n \"\"\"\n def __init__(self):\n self.f1 = 'LICENSE.txt'\n self.f2 = 'LICENSES_bundled.txt'\n\n def __enter__(self):\n \"\"\"Concatenate files and remove LICENSES_bundled.txt\"\"\"\n with open(self.f1, 'r') as f1:\n self.bsd_text = f1.read()\n\n with open(self.f1, 'a') as f1:\n with open(self.f2, 'r') as f2:\n self.bundled_text = f2.read()\n f1.write('\\n\\n')\n f1.write(self.bundled_text)\n\n def __exit__(self, exception_type, exception_value, traceback):\n \"\"\"Restore content of both files\"\"\"\n with open(self.f1, 'w') as f:\n f.write(self.bsd_text)\n\n\nfrom distutils.command.sdist import sdist\nfrom numpy.distutils.command.build_src import build_src\nclass sdist_checked(sdist):\n \"\"\" check submodules on sdist to prevent incomplete tarballs \"\"\"\n def run(self):\n check_submodules()\n with concat_license_files():\n sdist.run(self)\n\n\ndef generate_cython():\n cwd = os.path.abspath(os.path.dirname(__file__))\n print(\"Cythonizing sources\")\n for d in ('random',):\n p = subprocess.call([sys.executable,\n os.path.join(cwd, 'tools', 'cythonize.py'),\n 'numpy/{0}'.format(d)],\n cwd=cwd)\n if p != 0:\n raise RuntimeError(\"Running cythonize failed!\")\n\n\ndef parse_setuppy_commands():\n \"\"\"Check the commands and respond appropriately. 
Disable broken commands.\n\n Return a boolean value for whether or not to run the build or not (avoid\n parsing Cython and template files if False).\n \"\"\"\n args = sys.argv[1:]\n\n if not args:\n # User forgot to give an argument probably, let setuptools handle that.\n return True\n\n info_commands = ['--help-commands', '--name', '--version', '-V',\n '--fullname', '--author', '--author-email',\n '--maintainer', '--maintainer-email', '--contact',\n '--contact-email', '--url', '--license', '--description',\n '--long-description', '--platforms', '--classifiers',\n '--keywords', '--provides', '--requires', '--obsoletes']\n\n for command in info_commands:\n if command in args:\n return False\n\n # Note that 'alias', 'saveopts' and 'setopt' commands also seem to work\n # fine as they are, but are usually used together with one of the commands\n # below and not standalone. Hence they're not added to good_commands.\n good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',\n 'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',\n 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src')\n\n for command in good_commands:\n if command in args:\n return True\n\n # The following commands are supported, but we need to show more\n # useful messages to the user\n if 'install' in args:\n print(textwrap.dedent(\"\"\"\n Note: if you need reliable uninstall behavior, then install\n with pip instead of using `setup.py install`:\n\n - `pip install .` (from a git repo or downloaded source\n release)\n - `pip install numpy` (last NumPy release on PyPi)\n\n \"\"\"))\n return True\n\n if '--help' in args or '-h' in sys.argv[1]:\n print(textwrap.dedent(\"\"\"\n NumPy-specific help\n -------------------\n\n To install NumPy from here with reliable uninstall, we recommend\n that you use `pip install .`. To install the latest NumPy release\n from PyPi, use `pip install numpy`.\n\n For help with build/installation issues, please ask on the\n numpy-discussion mailing list. If you are sure that you have run\n into a bug, please report it at https://github.com/numpy/numpy/issues.\n\n Setuptools commands help\n ------------------------\n \"\"\"))\n return False\n\n\n # The following commands aren't supported. They can only be executed when\n # the user explicitly adds a --force command-line argument.\n bad_commands = dict(\n test=\"\"\"\n `setup.py test` is not supported. 
Use one of the following\n instead:\n\n - `python runtests.py` (to build and test)\n - `python runtests.py --no-build` (to test installed numpy)\n - `>>> numpy.test()` (run tests for installed numpy\n from within an interpreter)\n \"\"\",\n upload=\"\"\"\n `setup.py upload` is not supported, because it's insecure.\n Instead, build what you want to upload and upload those files\n with `twine upload -s <filenames>` instead.\n \"\"\",\n upload_docs=\"`setup.py upload_docs` is not supported\",\n easy_install=\"`setup.py easy_install` is not supported\",\n clean=\"\"\"\n `setup.py clean` is not supported, use one of the following instead:\n\n - `git clean -xdf` (cleans all files)\n - `git clean -Xdf` (cleans all versioned files, doesn't touch\n files that aren't checked into the git repo)\n \"\"\",\n check=\"`setup.py check` is not supported\",\n register=\"`setup.py register` is not supported\",\n bdist_dumb=\"`setup.py bdist_dumb` is not supported\",\n bdist=\"`setup.py bdist` is not supported\",\n build_sphinx=\"\"\"\n `setup.py build_sphinx` is not supported, use the\n Makefile under doc/\"\"\",\n flake8=\"`setup.py flake8` is not supported, use flake8 standalone\",\n )\n bad_commands['nosetests'] = bad_commands['test']\n for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',\n 'register', 'check', 'install_data', 'install_headers',\n 'install_lib', 'install_scripts', ):\n bad_commands[command] = \"`setup.py %s` is not supported\" % command\n\n for command in bad_commands.keys():\n if command in args:\n print(textwrap.dedent(bad_commands[command]) +\n \"\\nAdd `--force` to your command to use it anyway if you \"\n \"must (unsupported).\\n\")\n sys.exit(1)\n\n # Commands that do more than print info, but also don't need Cython and\n # template parsing.\n other_commands = ['egg_info', 'install_egg_info', 'rotate']\n for command in other_commands:\n if command in args:\n return False\n\n # If we got here, we didn't detect what setup.py command was given\n import warnings\n warnings.warn(\"Unrecognized setuptools command, proceeding with \"\n \"generating Cython sources and expanding templates\", stacklevel=2)\n return True\n\n\ndef setup_package():\n src_path = os.path.dirname(os.path.abspath(__file__))\n old_path = os.getcwd()\n os.chdir(src_path)\n sys.path.insert(0, src_path)\n\n # Rewrite the version file everytime\n write_version_py()\n\n # The f2py scripts that will be installed\n if sys.platform == 'win32':\n f2py_cmds = [\n 'f2py = numpy.f2py.f2py2e:main',\n ]\n else:\n f2py_cmds = [\n 'f2py = numpy.f2py.f2py2e:main',\n 'f2py%s = numpy.f2py.f2py2e:main' % sys.version_info[:1],\n 'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2],\n ]\n\n metadata = dict(\n name = 'numpy',\n maintainer = \"NumPy Developers\",\n maintainer_email = \"[email protected]\",\n description = DOCLINES[0],\n long_description = \"\\n\".join(DOCLINES[2:]),\n url = \"https://www.numpy.org\",\n author = \"Travis E. 
Oliphant et al.\",\n download_url = \"https://pypi.python.org/pypi/numpy\",\n project_urls={\n \"Bug Tracker\": \"https://github.com/numpy/numpy/issues\",\n \"Documentation\": \"https://docs.scipy.org/doc/numpy/\",\n \"Source Code\": \"https://github.com/numpy/numpy\",\n },\n license = 'BSD',\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n platforms = [\"Windows\", \"Linux\", \"Solaris\", \"Mac OS-X\", \"Unix\"],\n test_suite='nose.collector',\n cmdclass={\"sdist\": sdist_checked,\n \"build_src\": build_src,\n },\n python_requires='>=3.5',\n zip_safe=False,\n entry_points={\n 'console_scripts': f2py_cmds\n },\n )\n\n if \"--force\" in sys.argv:\n run_build = True\n sys.argv.remove('--force')\n else:\n # Raise errors for unsupported commands, improve help output, etc.\n run_build = parse_setuppy_commands()\n\n from setuptools import setup\n if run_build:\n from numpy.distutils.core import setup\n cwd = os.path.abspath(os.path.dirname(__file__))\n if not 'sdist' in sys.argv:\n # Generate Cython sources, unless we're generating an sdist\n generate_cython()\n\n metadata['configuration'] = configuration\n else:\n # Version number is added to metadata inside configuration() if build\n # is run.\n metadata['version'] = get_version_info()[0]\n\n try:\n setup(**metadata)\n finally:\n del sys.path[0]\n os.chdir(old_path)\n return\n\n\nif __name__ == '__main__':\n setup_package()\n # This may avoid problems where numpy is installed via ``*_requires`` by\n # setuptools, the global namespace isn't reset properly, and then numpy is\n # imported later (which will then fail to load numpy extension modules).\n # See gh-7956 for details\n del builtins.__NUMPY_SETUP__\n" ]
[ [ "numpy.distutils.core.setup", "numpy.distutils.misc_util.Configuration" ] ]
baxterai/AEANNtf
[ "416e282c4ee3e0e8622bafd558363bacb27d4c15" ]
[ "AEANNtf/ANNtf2_operations.py" ]
[ "\"\"\"ANNtf2_operations.py\n\n# Author:\nRichard Bruce Baxter - Copyright (c) 2020-2022 Baxter AI (baxterai.com)\n\n# License:\nMIT License\n\n# Installation:\nsee ANNtf2.py\n\n# Usage:\nsee ANNtf2.py\n\n# Description:\nANNtf operations\n\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\nimport ANNtf2_globalDefs\nimport math\n\ndebugSingleLayerNetwork = False\n\n\n#if(useBinaryWeights) or if(generateFirstLayerSDR)\n\t\n\ndef generateParameterName(l, arrayName):\n\tparameterName = \"l\" + str(l) + arrayName\n\treturn parameterName\ndef generateParameterNameSkipLayers(lprior, l, arrayName):\t#support skip layers\n\tparameterName = \"lprior\" + str(lprior) + \"l\" + str(l) + arrayName\n\treturn parameterName\n#support multiple networks:\t\t\t\ndef generateParameterNameNetwork(networkIndex, l, arrayName):\n\tparameterName = \"n\" + str(networkIndex) + \"l\" + str(l) + arrayName\n\treturn parameterName\ndef generateParameterNameNetworkSkipLayers(networkIndex, lprior, l, arrayName):\t#support skip layers\n\tparameterName = \"n\" + str(networkIndex) + \"lprior\" + str(lprior) + \"l\" + str(l) + arrayName\n\treturn parameterName\n\n#support sequential inputs:\t\t\n#used by SANI:\ndef generateParameterNameSeq(l, s, arrayName):\n\tparameterName = \"l\" + str(l) + \"s\" + str(s) + arrayName\n\treturn parameterName\ndef generateParameterNameSeqSkipLayers(lprior, l, s, arrayName):\t#support skip layers\n\tparameterName = \"lprior\" + str(lprior) + \"l\" + str(l) + \"s\" + str(s) + arrayName\n\treturn parameterName\n#used by AEANN:\n#support multiple networks:\t\ndef generateParameterNameNetworkSeq(networkIndex, l, s, arrayName):\n\tparameterName = \"n\" + str(networkIndex) + \"l\" + str(l) + \"s\" + str(s) + arrayName\n\treturn parameterName\t\ndef generateParameterNameNetworkSeqSkipLayers(networkIndex, lprior, l, s, arrayName):\n\tparameterName = \"n\" + str(networkIndex) + \"lprior\" + str(lprior) + \"l\" + str(l) + \"s\" + str(s) + arrayName\n\treturn parameterName\n\n\t\t\ndef printShape(tensor, tensorName):\n\tprint(tensorName + \".shape = \")\n\tprint(tensor.shape)\n\t\ndef printAverage(tensor, tensorName, indentation):\n\ttensorAverage = tf.reduce_mean(tf.dtypes.cast(tensor, tf.float32))\n\tindentationString = \"\"\n\tfor i in range(indentation):\n\t\tindentationString = indentationString + \"\\t\"\n\tprint(indentationString + tensorName + \"Average: %f\" % (tensorAverage))\n\ndef calculateLossCrossEntropy(y_pred, y_true, datasetNumClasses, costCrossEntropyWithLogits=False, oneHotEncoded=False, reduceMean=True):\n\tif(costCrossEntropyWithLogits):\n\t\tcost = tf.nn.sigmoid_cross_entropy_with_logits(logits=tf.squeeze(y_pred), labels=tf.cast(y_true, tf.float32))\n\t\tif(reduceMean):\n\t\t\tcost = tf.reduce_mean(cost)\n\telse:\n\t\tif(not oneHotEncoded):\n\t\t\ty_true = tf.one_hot(y_true, depth=datasetNumClasses)\n\t\ty_pred = tf.clip_by_value(y_pred, 1e-9, 1.)\n\t\tcost = -(y_true * tf.math.log(y_pred))\n\t\tif(reduceMean):\n\t\t\tcost = tf.reduce_sum(cost)\n\t\n\treturn cost\n\ndef calculateLossMeanSquaredError(y_pred, y_true):\n\tloss = tf.reduce_mean(tf.math.squared_difference(y_pred, y_true))\n\treturn loss\n\ndef calculateAccuracy(y_pred, y_true):\n\tcorrect_prediction = calculateCorrectPrediction(y_pred, y_true) \n\treturn tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis=-1)\n\t\ndef calculateCorrectPrediction(y_pred, y_true):\n\tcorrect_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64))\n\treturn correct_prediction\n\ndef 
filterNParraysByClassTarget(train_x, train_y, classTargetFilterIndex=-1):\n\trowFilter = (train_y == classTargetFilterIndex)\n\t#print(\"rowFilter = \", rowFilter)\n\ttrain_xFiltered = train_x[rowFilter]\n\ttrain_yFiltered = train_y[rowFilter]\n\treturn train_xFiltered, train_yFiltered\n\ndef filterNParraysByClassTargetInverse(train_x, train_y, classTargetFilterIndex=-1):\n\trowFilter = (train_y != classTargetFilterIndex)\n\t#print(\"rowFilter = \", rowFilter)\n\ttrain_xFiltered = train_x[rowFilter]\n\ttrain_yFiltered = train_y[rowFilter]\n\treturn train_xFiltered, train_yFiltered\n \ndef generateTFtrainDataFromNParrays(train_x, train_y, shuffleSize, batchSize):\n\t#shuffleSize = shuffleBufferSize\n\ttrainDataUnbatched = generateTFtrainDataUnbatchedFromNParrays(train_x, train_y)\n\ttrainData = generateTFtrainDataFromTrainDataUnbatched(trainDataUnbatched, shuffleSize, batchSize)\n\treturn trainData\n\n#generate a single batch;\ndef generateTFbatch(test_x, test_y, batchSize):\n\txShape = list(test_x.shape)\n\tyShape = list(test_y.shape)\n\txShape[0] = batchSize\n\tyShape[0] = batchSize\n\txShape = tuple(xShape)\n\tyShape = tuple(yShape)\n\t#print(\"test_x.shape = \", test_x.shape)\n\t#print(\"test_y.shape = \", test_y.shape)\n\ttestBatchX = np.resize(test_x, xShape)\n\ttestBatchY = np.resize(test_y, yShape)\n\t#print(\"testBatchX.shape = \", testBatchX.shape)\n\t#print(\"testBatchY.shape = \", testBatchY.shape)\n\t#print(\"testBatchX = \", testBatchX)\n\t#print(\"testBatchY = \", testBatchY)\n\treturn testBatchX, testBatchY\n\t\ndef generateTFtrainDataUnbatchedFromNParrays(train_x, train_y):\n\t#print(\"train_x.shape = \", train_x.shape)\n\t#print(\"train_y.shape = \", train_y.shape)\n\ttrainDataUnbatched = tf.data.Dataset.from_tensor_slices((train_x, train_y))\n\treturn trainDataUnbatched\n\ndef generateTFtrainDataFromTrainDataUnbatched(trainDataUnbatched, shuffleSize, batchSize):\n\ttrainData = trainDataUnbatched.repeat().shuffle(shuffleSize).batch(batchSize).prefetch(1)\t#do not repeat\n\treturn trainData\n\n\ndef defineNetworkParameters(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworks, generateLargeNetwork=False, generateNetworkStatic=False, generateDeepNetwork=False):\n\tif(debugSingleLayerNetwork):\n\t\tn_h, numberOfLayers, numberOfNetworks, datasetNumClasses = defineNetworkParametersANNsingleLayer(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, trainMultipleFiles, numberOfNetworks)\n\telse:\n\t\tif(generateLargeNetwork):\n\t\t\tfirstHiddenLayerNumberNeurons = num_input_neurons*3\n\t\telse:\n\t\t\tfirstHiddenLayerNumberNeurons = num_input_neurons\n\t\tif(generateDeepNetwork):\n\t\t\tnumberOfLayers = 6\n\t\telse:\n\t\t\tnumberOfLayers = 2\n\t\tn_h, numberOfLayers, numberOfNetworks, datasetNumClasses = defineNetworkParametersDynamic(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworks, numberOfLayers, firstHiddenLayerNumberNeurons, generateNetworkStatic)\n\treturn n_h, numberOfLayers, numberOfNetworks, datasetNumClasses\n\ndef defineNetworkParametersANNsingleLayer(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, trainMultipleFiles, numberOfNetworks):\n\n\tn_x = num_input_neurons #datasetNumFeatures\n\tn_y = num_output_neurons #datasetNumClasses\n\tdatasetNumClasses = n_y\n\tn_h_0 = n_x\n\tn_h_3 = n_y\n\tn_h = [n_h_0, n_h_3]\t\n\tnumberOfLayers = len(n_h)-1\n\t\n\tprint(\"defineNetworkParametersANNsingleLayer, n_h = \", n_h)\n\t\n\treturn \tn_h, numberOfLayers, numberOfNetworks, 
datasetNumClasses\n\t\n\ndef defineNetworkParametersDynamic(num_input_neurons, num_output_neurons, datasetNumFeatures, dataset, numberOfNetworks, numberOfLayers, firstHiddenLayerNumberNeurons, generateNetworkStatic):\n\n\t#configuration:\t\n\tif(generateNetworkStatic):\n\t\tnetworkDivergenceType = \"linearStatic\"\n\telse:\n\t\t#networkDivergenceType = \"nonLinearConverging\"\n\t\tnetworkDivergenceType = \"linearConverging\"\n\t\tif(networkDivergenceType == \"nonLinearConverging\"):\n\t\t\tnetworkOptimumConvergenceAngle = 0.7\t#if angle > 0.5, then more obtuse triange, if < 0.5 then more acute triangle\t#fractional angle between 0 and 90 degrees\n\t\t\tnetworkDivergence = 1.0-networkOptimumConvergenceAngle \n\t\t\n\t#Network parameters\n\tn_h = []\n\tdatasetNumClasses = 0\n\t\t\n\tn_x = num_input_neurons #datasetNumFeatures\n\tn_y = num_output_neurons #datasetNumClasses\n\tdatasetNumClasses = n_y\n\tn_h_first = n_x\n\tpreviousNumberLayerNeurons = n_h_first\n\tn_h.append(n_h_first)\n\n\tfor l in range(1, numberOfLayers):\t#for every hidden layer\n\t\tif(networkDivergenceType == \"linearConverging\"):\n\t\t\tif(l == 1):\n\t\t\t\tn_h_x = firstHiddenLayerNumberNeurons\n\t\t\telse:\n\t\t\t\tn_h_x = int((firstHiddenLayerNumberNeurons-num_output_neurons) * ((l-1)/(numberOfLayers-1)) + num_output_neurons)\n\t\t\t#print(\"n_h_x = \", n_h_x)\n\t\t\t#previousNumberLayerNeurons = n_h_x\n\t\t\tn_h.append(n_h_x)\n\t\telif(networkDivergenceType == \"nonLinearConverging\"):\n\t\t\tif(l == 1):\n\t\t\t\tn_h_x = firstHiddenLayerNumberNeurons\n\t\t\telse:\n\t\t\t\tn_h_x = int(previousNumberLayerNeurons*networkDivergence)\n\t\t\tn_h.append(n_h_x)\n\t\t\tpreviousNumberLayerNeurons = n_h_x\n\t\telif(networkDivergenceType == \"linearStatic\"):\n\t\t\tn_h_x = firstHiddenLayerNumberNeurons\n\t\t\tn_h.append(n_h_x)\n\t\telif(networkDivergenceType == \"linearDivergingThenConverging\"):\n\t\t\t#not yet coded\n\t\t\tprint(\"defineNetworkParametersANN error: linearDivergingThenConverging not yet coded\")\n\t\t\texit()\n\t\telse:\n\t\t\tprint(\"defineNetworkParametersANN error: unknown networkDivergenceType\")\n\t\t\texit()\n\n\tn_h_last = n_y\n\tn_h.append(n_h_last)\n\t\n\tprint(\"defineNetworkParameters, n_h = \", n_h)\n\t\n\treturn \tn_h, numberOfLayers, numberOfNetworks, datasetNumClasses\n\t\ndef tileDimension(x, dimensionToTile, numberOfTiles, addDimension):\n\n\t#print(\"x = \", x)\n\t#print(\"dimensionToTile = \", dimensionToTile)\n\t#print(\"numberOfTiles = \", numberOfTiles)\t\n\t\n\tif(addDimension):\n\t\tx = tf.expand_dims(x, dimensionToTile)\n\t\t\n\txNumberOfDimensions = (tf.size(x.shape)).numpy()\n\t#print(\"xNumberOfDimensions = \", xNumberOfDimensions)\n\tmultiplesDimension = [1] * xNumberOfDimensions\n\tmultiplesDimension[dimensionToTile] = numberOfTiles\n\t\n\tmultiples = tf.constant(multiplesDimension, tf.int32)\n\txTiled = tf.tile(x, multiples)\n\n\t#print(\"xTiled = \", xTiled)\n\t\n\treturn xTiled\n\t\ndef convertFloatToBool(xFloat):\n\txInt = tf.dtypes.cast(xFloat, dtype=tf.dtypes.int32)\n\txBool = tf.dtypes.cast(xFloat, dtype=tf.dtypes.bool)\n\treturn xBool\n\t\ndef convertSignOutputToBool(xSignOutput):\n\txSignOutput = tf.maximum(xSignOutput, 0)\n\txBool = tf.dtypes.cast(xSignOutput, dtype=tf.dtypes.bool)\n\treturn xBool\n" ]
[ [ "tensorflow.size", "tensorflow.math.log", "tensorflow.dtypes.cast", "tensorflow.expand_dims", "tensorflow.reduce_mean", "tensorflow.math.squared_difference", "tensorflow.squeeze", "tensorflow.cast", "tensorflow.one_hot", "tensorflow.clip_by_value", "tensorflow.argmax", "tensorflow.tile", "tensorflow.constant", "tensorflow.reduce_sum", "tensorflow.maximum", "tensorflow.data.Dataset.from_tensor_slices", "numpy.resize" ] ]
AdamVPro/zenml
[ "6dfba8483b1549b888341aea726362a96e407a43" ]
[ "examples/airflow_local/run.py" ]
[ "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom sklearn.base import ClassifierMixin\nfrom sklearn.linear_model import LogisticRegression\n\nfrom zenml.pipelines import pipeline\nfrom zenml.steps import BaseStepConfig, Output, step\n\n\nclass ImporterConfig(BaseStepConfig):\n n_days: int = 1\n\n\ndef get_X_y_from_api(n_days: int = 1, is_train: bool = True):\n url = (\n \"https://storage.googleapis.com/zenml-public-bucket/mnist\"\n \"/mnist_handwritten_train.json\"\n if is_train\n else \"https://storage.googleapis.com/zenml-public-bucket/mnist\"\n \"/mnist_handwritten_test.json\"\n )\n df = pd.DataFrame(requests.get(url).json())\n X = df[\"image\"].map(lambda x: np.array(x)).values\n X = np.array([x.reshape(28, 28) for x in X])\n y = df[\"label\"].map(lambda y: np.array(y)).values\n return X, y\n\n\n@step\ndef dynamic_importer(\n config: ImporterConfig,\n) -> Output(\n X_train=np.ndarray, y_train=np.ndarray, X_test=np.ndarray, y_test=np.ndarray\n):\n \"\"\"Downloads the latest data from a mock API.\"\"\"\n X_train, y_train = get_X_y_from_api(n_days=config.n_days, is_train=True)\n X_test, y_test = get_X_y_from_api(n_days=config.n_days, is_train=False)\n return X_train, y_train, X_test, y_test\n\n\n@step\ndef normalize_mnist(\n X_train: np.ndarray, X_test: np.ndarray\n) -> Output(X_train_normed=np.ndarray, X_test_normed=np.ndarray):\n \"\"\"Normalize the values for all the images so they are between 0 and 1\"\"\"\n X_train_normed = X_train / 255.0\n X_test_normed = X_test / 255.0\n return X_train_normed, X_test_normed\n\n\n@step\ndef sklearn_trainer(\n X_train: np.ndarray,\n y_train: np.ndarray,\n) -> ClassifierMixin:\n \"\"\"Train SVC from sklearn.\"\"\"\n clf = LogisticRegression(penalty=\"l1\", solver=\"saga\", tol=0.1)\n clf.fit(X_train.reshape((X_train.shape[0], -1)), y_train)\n return clf\n\n\n@step\ndef sklearn_evaluator(\n X_test: np.ndarray,\n y_test: np.ndarray,\n model: ClassifierMixin,\n) -> float:\n \"\"\"Calculate accuracy score with classifier.\"\"\"\n\n test_acc = model.score(X_test.reshape((X_test.shape[0], -1)), y_test)\n return test_acc\n\n\n@pipeline(enable_cache=False)\ndef mnist_pipeline(\n importer,\n normalizer,\n trainer,\n evaluator,\n):\n # Link all the steps artifacts together\n X_train, y_train, X_test, y_test = importer()\n X_trained_normed, X_test_normed = normalizer(X_train=X_train, X_test=X_test)\n model = trainer(X_train=X_trained_normed, y_train=y_train)\n evaluator(X_test=X_test_normed, y_test=y_test, model=model)\n\n\n# Initialize a new pipeline run\nscikit_p = mnist_pipeline(\n importer=dynamic_importer(),\n normalizer=normalize_mnist(),\n trainer=sklearn_trainer(),\n evaluator=sklearn_evaluator(),\n)\n\n# Run the new pipeline\nDAG = scikit_p.run()\n" ]
[ [ "numpy.array", "sklearn.linear_model.LogisticRegression" ] ]
adrianogil/git-tools
[ "aa943515841a792aa1e7c4bf07e234fc20fa7811" ]
[ "analysis/prod_analysis.py" ]
[ "import sys, os\nimport subprocess\nfrom git_tools import git_tools\nimport matplotlib.pylab as pylab\n\ncurrent_dir = os.getcwd()\n\nauthors_data = {}\n\ndef commit_analysis(commit_hash, initial_date):\n # print(\"commit: \" + commit_hash)\n\n author_name_cmd = \"git log -1 --pretty=format:'%an' \" + commit_hash\n author_name_output = subprocess.check_output(author_name_cmd, shell=True)\n author_name_output = author_name_output.decode(\"utf8\")\n author_name_output = author_name_output.strip()\n\n author_name = author_name_output\n\n commit_data = {}\n\n # total_line_changed_cmd = \"git log \" + commit_hash + \" -1 --pretty=tformat: --numstat | awk '{ loc += $1 + $2 } END { printf \\\"%s\\\", loc }'\"\n # total_line_changed_output = subprocess.check_output(total_line_changed_cmd, shell=True)\n # total_line_changed_output = total_line_changed_output.decode(\"utf8\")\n # total_line_changed_output = total_line_changed_output.strip()\n\n # commit_data['commit_size'] = int(total_line_changed_output)\n date_diff = (git_tools.get_commit_date(current_dir, commit_hash) - initial_date)\n commit_data['mins'] = date_diff.seconds / 60.0 + date_diff.days * 24 * 60\n\n # print(str(git_tools.get_commit_date(current_dir, commit_hash) - initial_date))\n # print(str(git_tools.get_commit_date(current_dir, commit_hash)))\n # print(str(initial_date))\n\n if author_name in authors_data:\n authors_data[author_name].append(commit_data)\n else:\n authors_data[author_name] = [commit_data]\n\n\ndef plot_repo_data(repo_data):\n\n colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan', \"xkcd:crimson\", \"xkcd:lavender\"]\n cindex = 0\n for author in repo_data.keys():\n days = []\n commit_size = []\n\n total_size = 0\n\n repo_data[author] = sorted(repo_data[author], key=lambda x: x['mins'], reverse=False)\n\n for c in repo_data[author]:\n # total_size += c['commit_size']\n total_size += 1\n days.append(c['mins'])\n commit_size.append(total_size)\n\n pylab.plot(days, commit_size, '-o', color=colors[cindex % len(colors)], label=author)\n cindex += 1\n\n pylab.legend(loc='upper left')\n pylab.show()\n\n\nif __name__ == \"__main__\":\n # print(str(sys.argv))\n\n git_flags = \"\"\n if len(sys.argv) > 1:\n for a in range(1, len(sys.argv)):\n git_flags += sys.argv[a] + \" \"\n else:\n git_flags = \"HEAD\"\n\n git_hashes_cmd = \"git rev-list \" + git_flags\n\n print(git_hashes_cmd)\n\n git_hashes_output = subprocess.check_output(git_hashes_cmd, shell=True)\n git_hashes_output = git_hashes_output.decode(\"utf8\")\n git_hashes_output = git_hashes_output.strip()\n\n git_hashes = git_hashes_output.split(\"\\n\")\n\n initial_date = git_tools.get_commit_date(current_dir, git_hashes[-1])\n\n for h in reversed(git_hashes):\n commit_analysis(h, initial_date)\n\n plot_repo_data(authors_data)\n" ]
[ [ "matplotlib.pylab.legend", "matplotlib.pylab.show" ] ]
MeepoAII/faster-rcnn
[ "d8a0e760c1d5f320bc0eea072b575b3098bda0f1" ]
[ "model/utils/creator_tool.py" ]
[ "import numpy as np\r\nimport torch\r\nfrom torchvision.ops import nms\r\nfrom model.utils.bbox_tools import loc2bbox\r\n\r\ndef _get_inside_index(anchor, H, W):\r\n index_inside = np.where(\r\n (anchor[:, 0] >= 0) &\r\n (anchor[:, 1] >= 0) &\r\n (anchor[:, 2] <= H) &\r\n (anchor[:, 3] <= W)\r\n )[0]\r\n return index_inside\r\n\r\nclass AnchorTargetCreator():\r\n def __init__(self,\r\n n_sample=256,\r\n pos_iou_thresh=0.7,\r\n neg_iou_thresh=0.3,\r\n pos_ratio=0.5):\r\n self.n_sample = n_sample\r\n self.pos_iou_thresh = pos_iou_thresh\r\n self.neg_iou_thresh = neg_iou_thresh\r\n self.pos_ratio = pos_ratio\r\n\r\n def __call__(self, bbox, anchor, img_size):\r\n img_H, img_W = img_size\r\n\r\n n_anchor = len(anchor)\r\n inside_index = _get_inside_index(anchor, img_H, img_W)\r\n anchor = anchor[inside_index]\r\n\r\n\r\n\r\n\r\n\r\n\r\n def _create_label(self, inside_index, anchor, bbox):\r\n label = np.empty((len(inside_index),), dtype=np.int32)\r\n label.fill(-1)\r\n\r\n\r\n\r\n\r\n def _calc_ious(self, anchor, bbox, inside_index):\r\n ious = bbox_iou(anchor, bbox)\r\n\r\n\r\n\r\n\r\nclass ProposalCreator():\r\n def __init__(self,\r\n parent_model,\r\n nms_thresh=0.7,\r\n n_train_pre_nms=12000,\r\n n_train_post_nms=2000,\r\n n_test_pre_nms=6000,\r\n n_test_post_nms=300,\r\n min_size=16):\r\n self.parent_model = parent_model\r\n self.nms_thresh = nms_thresh\r\n self.n_train_pre_nms = n_train_pre_nms\r\n self.n_train_post_nms = n_train_post_nms\r\n self.n_test_pre_nms = n_test_pre_nms\r\n self.n_test_post_nms = n_test_post_nms\r\n self.min_size = min_size\r\n\r\n\r\n def __call__(self, loc, score,\r\n anchor, img_size, scale=1.):\r\n # 这里的loc和score是经过region_proposal_network中\r\n # 1x1卷积分类和回归得到的\r\n\r\n if self.parent_model.training:\r\n n_pre_nms = self.n_train_pre_nms\r\n n_post_nms = self.n_train_post_nms\r\n else:\r\n n_pre_nms = self.n_test_pre_nms\r\n n_post_nms = self.n_test_post_nms\r\n\r\n # 将bbox 转化为近似groundtruth的anchor(rois)\r\n roi = loc2bbox(anchor, loc)\r\n\r\n roi[:, slice(0, 4, 2)] = np.clip(\r\n roi[:, slice(0, 4, 2)], 0, img_size[0]\r\n )\r\n roi[:, slice(1, 4, 2)] = np.clip(\r\n roi[:, slice(1, 4, 2)], 0, img_size[1]\r\n )\r\n\r\n min_size = self.min_size * scale\r\n\r\n hs = roi[:, 2] - roi[:, 0]\r\n ws = roi[:, 3] - roi[:, 1]\r\n # 确保roi的长宽大于最小阈值\r\n keep = np.where((hs >= min_size) & (ws >= min_size))[0]\r\n roi = roi[keep, :]\r\n score = score[keep]\r\n\r\n order = score.ravel().argsort()[::-1]\r\n\r\n if n_pre_nms > 0:\r\n order = order[:n_pre_nms]\r\n\r\n roi = roi[order, :]\r\n score = score[order]\r\n\r\n keep = nms(\r\n torch.from_numpy(roi).cuda(),\r\n torch.from_numpy(score).cuda(),\r\n self.nms_thresh\r\n )\r\n if n_post_nms > 0:\r\n keep = keep[:n_post_nms]\r\n\r\n roi = roi[keep.cpu().numpy()]\r\n\r\n return roi\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" ]
[ [ "numpy.where", "torch.from_numpy" ] ]
shuaizzZ/mmsegmentation
[ "a6c6b348dbf8c4a0a39ffbdb832a1e82309c533c" ]
[ "mmseg/models/utils/dupsample_block.py" ]
[ "\nimport torch\nfrom torch import nn as nn\nfrom .custom_blocks import int_size\nfrom ..builder import build_loss\nfrom mmcv.runner import auto_fp16, force_fp32\n\n\nclass DUpsamplingBlock(nn.Module):\n def __init__(self, inplanes, scale, num_class=21, pad=0):\n super(DUpsamplingBlock, self).__init__()\n self.inplanes = inplanes\n self.scale = scale\n self.num_class = num_class\n self.pad = pad\n ## W matrix\n NSS = self.num_class * self.scale * self.scale\n self.conv_w = nn.Conv2d(inplanes, NSS, kernel_size=1, padding=pad, bias=False)\n self.T = torch.nn.Parameter(torch.Tensor([1.00])) # softmax with temperature\n\n def forward_process(self, x):\n # N, C, H, W = x.size()\n N, C, H, W = int_size(x)\n\n ## N, W, H, C\n x_permuted = x.permute(0, 3, 2, 1)\n\n ## N, W, H*scale, C/scale\n HmC, CdS = int(H * self.scale), int(C / self.scale)\n x_permuted = x_permuted.contiguous().view((N, W, HmC, CdS))\n\n ## N, H*scale, W, C/scale\n x_permuted = x_permuted.permute(0, 2, 1, 3)\n\n ## N, H*scale, W*scale, C/(scale**2)\n WmC, CdSS = int(W * self.scale), int(C / (self.scale * self.scale))\n x_permuted = x_permuted.contiguous().view((N, HmC, WmC, CdSS))\n\n ## N, C/(scale**2), H*scale, W*scale\n x = x_permuted.permute(0, 3, 1, 2)\n return x\n\n def forward(self, x):\n x = self.conv_w(x)\n x = self.forward_process(x)\n x = x / self.T\n return x\n\n\nclass MirrorDUpsamplingBlock(nn.Module):\n def __init__(self, du_block, loss_cfg=dict(type='MSELoss')):\n super(MirrorDUpsamplingBlock, self).__init__()\n self.fp16_enabled = False\n self.inplanes = du_block.inplanes\n self.scale = du_block.scale\n self.num_class = du_block.num_class\n self.pad = du_block.pad\n self.conv_w = du_block.conv_w\n ## P matrix\n NSS = self.num_class * self.scale * self.scale\n self.conv_p = nn.Conv2d(NSS, self.inplanes, kernel_size=1, padding=self.pad, bias=False)\n self.loss_du = build_loss(loss_cfg)\n\n def mirror_process(self, mask):\n N, _, H, W = int_size(mask) # N, 1, H, W\n C = self.num_class\n\n # N, C, H, W\n sample = torch.zeros(N, C, H, W).type_as(mask)\n\n # 必须要把255这个标签去掉,否则下面scatter_会出错(但不在这里报错)\n mask[mask > C] = 0\n seggt_onehot = sample.scatter_(1, mask, 1)\n\n # N, H, W, C\n seggt_onehot = seggt_onehot.permute(0, 2, 3, 1)\n\n # N, H, W/sacle, C*scale\n WdC, CmS = int(W / self.scale), int(C * self.scale)\n seggt_onehot = seggt_onehot.contiguous()\n seggt_onehot = seggt_onehot.view((N, H, WdC, CmS))\n\n # N, W/sacle, H, C*scale\n seggt_onehot = seggt_onehot.permute(0, 2, 1, 3)\n\n # N, W/sacle, H/sacle, C*scale\n HdC, CmSS = int(H / self.scale), int(C * self.scale * self.scale)\n seggt_onehot = seggt_onehot.contiguous().view((N, WdC, HdC, CmSS))\n\n # N, C*scale*scale, H/sacle, W/sacle\n seggt_onehot = seggt_onehot.permute(0, 3, 2, 1).float()\n return seggt_onehot\n\n # @auto_fp16()\n def forward(self, seggt):\n seggt_onehot = self.mirror_process(seggt)\n seggt_onehot_reconstructed = self.forward_train(seggt_onehot)\n loss = self.losses(seggt_onehot, seggt_onehot_reconstructed)\n return loss\n\n @auto_fp16()\n def forward_train(self, seggt_onehot):\n seggt_onehot_reconstructed = self.conv_p(seggt_onehot)\n seggt_onehot_reconstructed = self.conv_w(seggt_onehot_reconstructed)\n return seggt_onehot_reconstructed\n\n @force_fp32()\n def losses(self, seggt_onehot, seggt_onehot_reconstructed):\n loss = self.loss_du(seggt_onehot, seggt_onehot_reconstructed)\n return loss\n" ]
[ [ "torch.zeros", "torch.nn.Conv2d", "torch.Tensor" ] ]