text (string, lengths 4 to 1.02M) | meta (dict) |
---|---|
import sys

# Need different unic implementations for different Pythons because:
# 1) Importing unicodedata module on Jython takes a very long time, and doesn't
#    seem to be necessary as Java probably already handles normalization.
#    Furthermore, Jython on Java 1.5 doesn't even have unicodedata.normalize.
# 2) IronPython 2.6 doesn't have unicodedata and probably doesn't need it.
# 3) CPython doesn't automatically normalize Unicode strings.

if sys.platform.startswith('java'):
    from java.lang import Object, Class

    def unic(item, *args):
        # http://bugs.jython.org/issue1564
        if isinstance(item, Object) and not isinstance(item, Class):
            try:
                item = item.toString()  # http://bugs.jython.org/issue1563
            except:
                return _unrepresentable_object(item)
        return _unic(item, *args)

elif sys.platform == 'cli':
    def unic(item, *args):
        return _unic(item, *args)

else:
    from unicodedata import normalize

    def unic(item, *args):
        return normalize('NFC', _unic(item, *args))


def _unic(item, *args):
    # Based on a recipe from http://code.activestate.com/recipes/466341
    try:
        return unicode(item, *args)
    except UnicodeError:
        try:
            ascii_text = str(item).encode('string_escape')
        except:
            return _unrepresentable_object(item)
        else:
            return unicode(ascii_text)
    except:
        return _unrepresentable_object(item)


def safe_repr(item):
    try:
        return unic(repr(item))
    except UnicodeError:
        return repr(unic(item))
    except:
        return _unrepresentable_object(item)


_unrepresentable_msg = u"<Unrepresentable object '%s'. Error: %s>"


def _unrepresentable_object(item):
    from robot.utils.error import get_error_message
    return _unrepresentable_msg % (item.__class__.__name__, get_error_message())
| {
"content_hash": "2599b26e501762cd39e1ec60a794ca33",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 80,
"avg_line_length": 31.75,
"alnum_prop": 0.6561679790026247,
"repo_name": "Senseg/robotframework",
"id": "dbf240a7f89cadc8452b360e7c1c2d1aaa21d2cb",
"size": "2511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/utils/unic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "716"
},
{
"name": "Java",
"bytes": "48873"
},
{
"name": "JavaScript",
"bytes": "149654"
},
{
"name": "Python",
"bytes": "1637427"
},
{
"name": "Shell",
"bytes": "1323"
}
],
"symlink_target": ""
} |
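The comment block at the top of unic.py hinges on one CPython-specific detail: `unicodedata.normalize('NFC', ...)` is what folds decomposed character sequences into their composed equivalents. A minimal sketch of that effect, written for modern Python 3 purely as an illustration of the idea (the module above itself targets Python 2, Jython and IronPython):

# Illustration only: the NFC normalization applied in the CPython branch above.
from unicodedata import normalize

composed = '\u00e9'        # 'é' as a single code point
decomposed = 'e\u0301'     # 'e' followed by a combining acute accent

print(composed == decomposed)                                        # False: different code point sequences
print(normalize('NFC', composed) == normalize('NFC', decomposed))    # True: NFC composes them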
import os
import zipfile
import shutil
from tempfile import gettempdir
from uuid import uuid4

from lagring import Asset, StorageException, AssetProcessingException


class DirectoryAsset(Asset):
    """
    Asset type to store directory assets. Source can be a directory or zip archive which
    is unpacked upon upload to the storage.
    """
    def _get_temp_path(self):
        return os.path.join(gettempdir(), str(uuid4()))

    def _unzip(self, path, storage, meta):
        if zipfile.is_zipfile(path):
            temp_dir = self._get_temp_path()

            def cleanup():
                shutil.rmtree(temp_dir)

            with zipfile.ZipFile(path, 'r') as z:
                z.extractall(temp_dir)

            temp_src = storage.asset_source_adapter(temp_dir)
            return temp_src, meta, cleanup
        else:
            raise AssetProcessingException('Valid zip-archive expected')

    def upload(self, storage, src, meta=None):
        if src.type == 'directory':
            return src, meta, None
        elif src.type == 'file':
            return self._unzip(src.path, storage, meta)
        elif src.type == 'stream':
            temp_path = self._get_temp_path()
            with open(temp_path, 'wb') as f:
                shutil.copyfileobj(src.stream, f)

            new_src, _, cleanup = self._unzip(temp_path, storage, meta)

            def cleanup2():
                cleanup()
                os.remove(temp_path)

            return new_src, meta, cleanup2
        else:
            raise StorageException('Unknown source type')
| {
"content_hash": "36164ff917cef9601393d868f362be50",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 88,
"avg_line_length": 31.6,
"alnum_prop": 0.5854430379746836,
"repo_name": "neoden/lagring",
"id": "5640345344fbf8e8bfcf6eefb4be1bfed09a742b",
"size": "1580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lagring/assets/directory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "32644"
},
{
"name": "Shell",
"bytes": "20"
}
],
"symlink_target": ""
} |
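`DirectoryAsset._unzip` and `upload` above follow a simple pattern: materialise the source into a temporary directory, hand the storage an adapted source, and return a cleanup callback for the caller to invoke afterwards. Below is a stand-alone sketch of that pattern using only the standard library; the lagring `storage`/`src` objects are deliberately left out, and all names in the sketch are hypothetical:

# Hypothetical stand-alone sketch of the unpack-then-cleanup pattern used above.
import os
import shutil
import zipfile
from tempfile import gettempdir
from uuid import uuid4


def unpack_zip(path):
    """Extract a zip archive to a unique temp dir; return (dir, cleanup callback)."""
    if not zipfile.is_zipfile(path):
        raise ValueError('Valid zip-archive expected')
    temp_dir = os.path.join(gettempdir(), str(uuid4()))
    with zipfile.ZipFile(path, 'r') as z:
        z.extractall(temp_dir)
    return temp_dir, lambda: shutil.rmtree(temp_dir)


if __name__ == '__main__':
    # Build a throwaway archive so the example runs end to end.
    src_zip = os.path.join(gettempdir(), 'example-%s.zip' % uuid4())
    with zipfile.ZipFile(src_zip, 'w') as z:
        z.writestr('hello.txt', 'hello')
    unpacked, cleanup = unpack_zip(src_zip)
    print(sorted(os.listdir(unpacked)))   # ['hello.txt']
    cleanup()                             # caller removes the temp dir when done
    os.remove(src_zip)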
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['LinearTrend'] , ['BestCycle'] , ['ARX'] );
| {
"content_hash": "4e616f983720044e1360c137b1a4b883",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 77,
"avg_line_length": 37.5,
"alnum_prop": 0.7,
"repo_name": "antoinecarme/pyaf",
"id": "c584a34d5c51c201367f583d13a7f283ca27e1c0",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_LinearTrend_BestCycle_ARX.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
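Generated test modules like the one above correspond to one cell of a component matrix (transformation, trend, cycle, autoregression), which is also spelled out in the file's path. A hedged sketch of how such a matrix can be enumerated; this is only an illustration of the pattern, not pyaf's actual generator, and any component names beyond the four appearing in this file are placeholders:

# Illustrative enumeration of component combinations behind generated tests like the one above.
import itertools

transformations = ['None', 'Logit']        # 'Logit' appears in this file; the rest are placeholders
trends = ['ConstantTrend', 'LinearTrend']
cycles = ['NoCycle', 'BestCycle']
ar_models = ['NoAR', 'ARX']

for combo in itertools.product(transformations, trends, cycles, ar_models):
    # Each tuple would drive one generated module, e.g.
    # model_control_one_enabled_Logit_LinearTrend_BestCycle_ARX.py
    print('_'.join(combo))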
'''
====================================================================
Copyright (c) 2016 Barry A Scott. All rights reserved.
This software is licensed as described in the file LICENSE.txt,
which you should have received as part of this distribution.
====================================================================
wb_hg_commit_dialog.py
'''
from PyQt5 import QtWidgets
from PyQt5 import QtCore
import wb_main_window
import wb_tracked_qwidget
import wb_ui_components
import wb_scm_table_view
#
# add tool bars and menu for use in the commit window
#
class HgCommitWindowComponents(wb_ui_components.WbMainWindowComponents):
def __init__( self, factory ):
super().__init__( 'hg', factory )
def setupTableContextMenu( self, m, addMenu ):
super().setupTableContextMenu( m, addMenu )
act = self.ui_actions
m.addSection( T_('Diff') )
addMenu( m, T_('Diff HEAD vs. Working'), act.tableActionHgDiffHeadVsWorking, act.enablerHgDiffHeadVsWorking, 'toolbar_images/diff.png' )
m.addSection( T_('Hg Actions') )
addMenu( m, T_('Add'), act.tableActionHgAdd_Bg, act.enablerHgFilesAdd, 'toolbar_images/include.png' )
m.addSeparator()
addMenu( m, T_('Revert'), act.tableActionHgRevert_Bg, act.enablerHgFilesRevert, 'toolbar_images/revert.png' )
addMenu( m, T_('Delete…'), act.tableActionHgDelete_Bg, act.main_window.table_view.enablerTableFilesExists )
def setupToolBarAtLeft( self, addToolBar, addTool ):
t = addToolBar( T_('hg logo'), style='font-size: 20pt; width: 40px; color: #cc0000' )
self.all_toolbars.append( t )
addTool( t, 'Hg', self.main_window.projectActionSettings )
def setupToolBarAtRight( self, addToolBar, addTool ):
act = self.ui_actions
# ----------------------------------------
t = addToolBar( T_('hg info') )
self.all_toolbars.append( t )
addTool( t, T_('Diff'), act.tableActionHgDiffSmart, act.enablerHgDiffSmart, 'toolbar_images/diff.png' )
addTool( t, T_('Commit History'), act.tableActionHgLogHistory_Bg, act.enablerHgLogHistory, 'toolbar_images/history.png' )
# ----------------------------------------
t = addToolBar( T_('hg state') )
self.all_toolbars.append( t )
addTool( t, T_('Add'), act.tableActionHgAdd_Bg, act.enablerHgFilesAdd, 'toolbar_images/include.png' )
addTool( t, T_('Revert'), act.tableActionHgRevert_Bg, act.enablerHgFilesRevert, 'toolbar_images/revert.png' )
class WbHgCommitDialog(wb_main_window.WbMainWindow, wb_tracked_qwidget.WbTrackedModeless):
commitAccepted = QtCore.pyqtSignal()
commitClosed = QtCore.pyqtSignal()
def __init__( self, app, hg_project ):
self.__pyqt_bug_already_closed_why_call_close_event_again = False
self.app = app
self.hg_project = hg_project
self.table_view = None
super().__init__( app, app.debug_options.debugLogMainWindow )
self.ui_component = HgCommitWindowComponents( self.app.getScmFactory( 'hg' ) )
self.setWindowTitle( T_('Commit %(project_name)s - %(app_name)s') %
{'project_name': hg_project.projectName()
,'app_name': ' '.join( app.app_name_parts )} )
self.setWindowIcon( self.app.getAppQIcon() )
# on Qt on macOS table will trigger selectionChanged that needs table_model
self.table_view = wb_scm_table_view.WbScmTableView( self.app, self )
self.all_included_files = set()
self.table_view.setIncludedFilesSet( self.all_included_files )
# unchanged files should not be interesting for a commit
self.table_view.setShowControlledAndNotChangedFiles( False )
self.ui_component.setTopWindow( self.app.top_window )
self.ui_component.setMainWindow( self, self.table_view )
# setup the chrome
self.setupMenuBar( self.menuBar() )
self.setupToolBar()
self.__setupTableContextMenu()
# ----------------------------------------
self.filter_text = QtWidgets.QLineEdit()
self.filter_text.setClearButtonEnabled( True )
self.filter_text.setMaxLength( 256 )
self.filter_text.setPlaceholderText( T_('Filter by name') )
self.filter_text.textChanged.connect( self.table_view.table_sortfilter.setFilterText )
self.h_filter_layout = QtWidgets.QHBoxLayout()
self.h_filter_widget = QtWidgets.QWidget()
self.h_filter_widget.setLayout( self.h_filter_layout )
self.h_filter_layout.addWidget( QtWidgets.QLabel( T_('Filter:') ), 0 )
self.h_filter_layout.addWidget( self.filter_text, 1 )
self.v_table_layout = QtWidgets.QVBoxLayout()
self.v_table_layout.addWidget( self.h_filter_widget )
self.v_table_layout.addWidget( self.table_view )
self.v_table_widget = QtWidgets.QWidget()
self.v_table_widget.setLayout( self.v_table_layout )
self.label_message = QtWidgets.QLabel( T_('Commit Log Message') )
self.message = QtWidgets.QPlainTextEdit( '' )
self.v_message_layout = QtWidgets.QVBoxLayout()
self.v_message_layout.addWidget( self.label_message )
self.v_message_layout.addWidget( self.message )
self.v_message_widget = QtWidgets.QWidget()
self.v_message_widget.setLayout( self.v_message_layout )
self.buttons = QtWidgets.QDialogButtonBox()
self.ok_button = self.buttons.addButton( self.buttons.Ok )
self.buttons.addButton( self.buttons.Cancel )
# ----------------------------------------
self.v_split = QtWidgets.QSplitter()
self.v_split.setOrientation( QtCore.Qt.Vertical )
self.v_split.addWidget( self.v_table_widget )
self.v_split.addWidget( self.v_message_widget )
# ----------------------------------------
self.layout = QtWidgets.QVBoxLayout()
self.layout.addWidget( self.v_split )
self.layout.addWidget( self.buttons )
# ----------------------------------------
self.widget = QtWidgets.QWidget()
self.widget.setLayout( self.layout )
self.setCentralWidget( self.widget )
em = self.app.fontMetrics().width( 'm' )
ex = self.app.fontMetrics().lineSpacing()
self.resize( 100*em, 50*ex )
self.ok_button.setEnabled( False )
# connections
self.buttons.accepted.connect( self.handleAccepted )
self.buttons.rejected.connect( self.close )
self.message.textChanged.connect( self.enableOkButton )
def completeInit( self ):
self.debugLog( 'completeInit()' )
# set focus
self.message.setFocus()
self.updateTableView()
# Qt has a habit of resetting the column widths
# completeInit will set to the designed width
self.table_view.completeInit()
# set splitter position
table_size_ratio = 0.7
height = sum( self.v_split.sizes() )
table_height = int( height * table_size_ratio )
message_height = height - table_height
self.v_split.setSizes( [table_height, message_height] )
self.updateActionEnabledStates()
def setupMenuBar( self, mb ):
m = mb.addMenu( T_('&View') )
tv = self.table_view
self._addMenu( m, T_('Show Controlled and Changed files'), tv.setShowControlledAndChangedFiles, checker=tv.checkerShowControlledAndChangedFiles )
self._addMenu( m, T_('Show Controlled and Not Changed files'), tv.setShowControlledAndNotChangedFiles, checker=tv.checkerShowControlledAndNotChangedFiles )
self._addMenu( m, T_('Show Uncontrolled files'), tv.setShowUncontrolledFiles, checker=tv.checkerShowUncontrolledFiles )
self._addMenu( m, T_('Show Ignored files'), tv.setShowIgnoredFiles, checker=tv.checkerShowIgnoredFiles )
m = mb.addMenu( T_('File &Actions') )
self._addMenu( m, T_('Edit'), self.table_view.tableActionEdit, self.table_view.enablerTableFilesExists, 'toolbar_images/edit.png' )
self._addMenu( m, T_('Open'), self.table_view.tableActionOpen, self.table_view.enablerTableFilesExists, 'toolbar_images/open.png' )
self.ui_component.setupMenuBar( mb, self._addMenu )
def __setupTableContextMenu( self ):
self.debugLog( '__setupTableContextMenu' )
# --- setup scm_type specific menu
m = QtWidgets.QMenu( self )
m.addSection( T_('File Actions') )
self._addMenu( m, T_('Edit'), self.table_view.tableActionEdit, self.table_view.enablerTableFilesExists, 'toolbar_images/edit.png' )
self._addMenu( m, T_('Open'), self.table_view.tableActionOpen, self.table_view.enablerTableFilesExists, 'toolbar_images/open.png' )
self.ui_component.setupTableContextMenu( m, self._addMenu )
def tableContextMenu( self, global_pos ):
self.debugLog( 'tableContextMenu( %r )' % (global_pos,) )
self.ui_component.getTableContextMenu().exec_( global_pos )
def setupToolBar( self ):
# --- setup common toolbars
t = self.tool_bar_table = self._addToolBar( T_('table') )
self._addTool( t, T_('Edit'), self.table_view.tableActionEdit, self.table_view.enablerTableFilesExists, 'toolbar_images/edit.png' )
self._addTool( t, T_('Open'), self.table_view.tableActionOpen, self.table_view.enablerTableFilesExists, 'toolbar_images/open.png' )
# --- setup scm_type specific tool bars
self.ui_component.setupToolBarAtRight( self._addToolBar, self._addTool )
def closeEvent( self, event ):
super().closeEvent( event )
if self.__pyqt_bug_already_closed_why_call_close_event_again:
return
self.__pyqt_bug_already_closed_why_call_close_event_again = True
self.commitClosed.emit()
def handleAccepted( self ):
self.commitAccepted.emit()
def enableOkButton( self ):
text = self.message.toPlainText()
self.ok_button.setEnabled( text.strip() != '' and self.hg_project.numModifiedFiles() != 0 )
def getMessage( self ):
return self.message.toPlainText().strip()
def updateSingleton( self ):
self.updateTableView()
def updateTableView( self ):
# caller will have updated the hg project state already
self.table_view.setScmProjectTreeNode( self.hg_project.flat_tree )
def isScmTypeActive( self, scm_type ):
return scm_type == 'hg'
def updateActionEnabledStates( self ):
# can be called during __init__ on macOS version
if self.table_view is None or self.table_view.table_model is None:
return
self.updateEnableStates()
self.enableOkButton()
| {
"content_hash": "79cd9da07b21c1c8d605368a843fba6e",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 163,
"avg_line_length": 40.402255639097746,
"alnum_prop": 0.6331069135572718,
"repo_name": "barry-scott/git-workbench",
"id": "b6830989d07af080e909102c7b1ee3d75712822c",
"size": "10749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Source/Hg/wb_hg_commit_dialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1280"
},
{
"name": "Makefile",
"bytes": "2088"
},
{
"name": "Python",
"bytes": "198101"
},
{
"name": "Shell",
"bytes": "4708"
}
],
"symlink_target": ""
} |
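One small but reusable detail in the dialog above is how the OK button stays disabled until the commit message is non-blank: `enableOkButton` is wired to `QPlainTextEdit.textChanged`. A minimal self-contained PyQt5 sketch of just that pattern, assuming PyQt5 is installed; this is an illustration, not the workbench code itself:

# Minimal sketch of the "enable OK only when the message is non-blank" pattern.
import sys

from PyQt5 import QtWidgets


class MessageDialog(QtWidgets.QDialog):
    def __init__(self):
        super().__init__()
        self.message = QtWidgets.QPlainTextEdit('')
        self.buttons = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        self.ok_button = self.buttons.button(QtWidgets.QDialogButtonBox.Ok)
        self.ok_button.setEnabled(False)

        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.message)
        layout.addWidget(self.buttons)
        self.setLayout(layout)

        # Re-check the button state on every edit, as the commit dialog above does.
        self.message.textChanged.connect(self.enable_ok_button)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)

    def enable_ok_button(self):
        self.ok_button.setEnabled(self.message.toPlainText().strip() != '')


if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)
    dialog = MessageDialog()
    dialog.show()
    sys.exit(app.exec_())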
import os
import unittest
import numpy as np
from sourcefinder import accessors
from sourcefinder.accessors.fitsimage import FitsImage
from sourcefinder.testutil.data import DATAPATH
from sourcefinder.testutil.data import fits_file
from sourcefinder.testutil.decorators import requires_data
from sourcefinder.testutil.mock import SyntheticImage
import sourcefinder
from sourcefinder import image as sfimage
from sourcefinder.image import ImageData
from sourcefinder.utility.uncertain import Uncertain
BOX_IN_BEAMPIX = 10 # HARDCODING - FIXME! (see also monitoringlist recipe)
GRB120422A = os.path.join(DATAPATH, "GRB120422A-120429.fits")
class TestNumpySubroutines(unittest.TestCase):
def testBoxSlicing(self):
"""
Tests a routine to return a window on an image.
The previous implementation returned a correctly sized box,
but the central pixel was often offset unnecessarily.
This method always returns a centred chunk.
"""
a = np.arange(1, 101)
a = a.reshape(10, 10)
x, y = 3, 3
central_value = a[y, x] # 34
round_down_to_single_pixel = a[
sfimage.ImageData.box_slice_about_pixel(x, y, 0.9)]
self.assertEquals(round_down_to_single_pixel, [[central_value]])
chunk_3_by_3 = a[sfimage.ImageData.box_slice_about_pixel(x, y, 1)]
self.assertEquals(chunk_3_by_3.shape, (3, 3))
self.assertEqual(central_value, chunk_3_by_3[1, 1])
chunk_3_by_3_round_down = a[
sfimage.ImageData.box_slice_about_pixel(x, y, 1.9)]
self.assertListEqual(list(chunk_3_by_3.reshape(9)),
list(chunk_3_by_3_round_down.reshape(9))
)
class TestMapsType(unittest.TestCase):
"""
Check that rms, bg maps are of correct type.
"""
@requires_data(GRB120422A)
def testmaps_array_type(self):
self.image = accessors.sourcefinder_image_from_accessor(
FitsImage(GRB120422A), margin=10)
self.assertIsInstance(self.image.rmsmap, np.ma.MaskedArray)
self.assertIsInstance(self.image.backmap, np.ma.MaskedArray)
class TestFitFixedPositions(unittest.TestCase):
"""Test various fitting cases where the pixel position is predetermined"""
@requires_data(
os.path.join(DATAPATH, 'NCP_sample_image_1.fits'))
def setUp(self):
"""
NB the required image has been committed to the tkp/data subversion repository.
(See tkp/data/unittests/tkp_lib for a full copy of all the unittest data).
Source positions / background positions were simply picked out by eye in DS9
"""
self.image = accessors.sourcefinder_image_from_accessor(
accessors.open(
os.path.join(DATAPATH, 'NCP_sample_image_1.fits'))
)
self.assertListEqual(list(self.image.data.shape), [1024, 1024])
self.boxsize = BOX_IN_BEAMPIX * max(self.image.beam[0],
self.image.beam[1])
self.bright_src_posn = (35.76726, 86.305771) # RA, DEC
self.background_posn = (6.33731, 82.70002) # RA, DEC
# #NB Peak of forced gaussian fit is simply plucked from a previous run;
# so merely ensures *consistent*, rather than *correct*, results.
self.known_fit_results = (self.bright_src_posn[0], # RA,
self.bright_src_posn[1], # Dec
13.457697411730384) # Peak
def testSourceAtGivenPosition(self):
posn = self.bright_src_posn
img = self.image
results = self.image.fit_fixed_positions(positions=[posn],
boxsize=self.boxsize,
threshold=0.0)[0]
self.assertAlmostEqual(results.ra.value, self.known_fit_results[0],
delta=0.01)
self.assertAlmostEqual(results.dec.value, self.known_fit_results[1],
delta=0.01)
self.assertAlmostEqual(results.peak.value, self.known_fit_results[2],
delta=0.01)
def testLowFitThreshold(self):
"""
Low fit threshold is equivalent to zero threshold
If we supply an extremely low threshold
do we get a similar result to a zero threshold, for a bright source?
"""
posn = self.bright_src_posn
img = self.image
low_thresh_results = self.image.fit_fixed_positions(positions=[posn],
boxsize=BOX_IN_BEAMPIX * max(
img.beam[0],
img.beam[1]),
threshold=-1e20)[0]
self.assertAlmostEqual(low_thresh_results.ra.value,
self.known_fit_results[0],
delta=0.01)
self.assertAlmostEqual(low_thresh_results.dec.value,
self.known_fit_results[1],
delta=0.01)
self.assertAlmostEqual(low_thresh_results.peak.value,
self.known_fit_results[2],
delta=0.01)
def testHighFitThreshold(self):
"""
High fit threshold throws error
If we supply an extremely high threshold, we expect to get back
a fitting error since all pixels should be masked out.
"""
posn = self.bright_src_posn
img = self.image
with self.assertRaises(ValueError):
results = self.image.fit_fixed_positions(positions=[posn],
boxsize=BOX_IN_BEAMPIX * max(
img.beam[0],
img.beam[1]),
threshold=1e20)
def testBackgroundAtGivenPosition(self):
"""
No source at given position (but still in the image frame)
Note, if we request zero threshold, then the region will be unfittable,
since it is largely below that thresh.
Rather than pick an arbitrarily low threshold, we set it to None.
"""
img = self.image
results = self.image.fit_fixed_positions(
positions=[self.background_posn],
boxsize=BOX_IN_BEAMPIX * max(img.beam[0], img.beam[1]),
threshold=None
)[0]
self.assertAlmostEqual(results.peak.value, 0,
delta=results.peak.error * 1.0)
def testGivenPositionOutsideImage(self):
"""If given position is outside image then result should be NoneType"""
img = self.image
# Generate a position halfway up the y-axis, but at negative x-position.
pixel_posn_negative_x = (-50, img.data.shape[1] / 2.0)
# and halfway up the y-axis, but at x-position outside array limit:
pixel_posn_high_x = (img.data.shape[0] + 50, img.data.shape[1] / 2.0)
sky_posns_out_of_img = [
img.wcs.p2s(pixel_posn_negative_x),
img.wcs.p2s(pixel_posn_high_x),
]
# print "Out of image?", sky_posn_out_of_img
# print "Out of image (pixel backconvert)?", img.wcs.s2p(sky_posn_out_of_img)
results = self.image.fit_fixed_positions(positions=sky_posns_out_of_img,
boxsize=BOX_IN_BEAMPIX * max(
img.beam[0], img.beam[1]))
self.assertListEqual([], results)
def testTooCloseToEdgePosition(self):
"""Same if right on the edge -- too few pixels to fit"""
img = self.image
boxsize = BOX_IN_BEAMPIX * max(img.beam[0], img.beam[1])
edge_posn = img.wcs.p2s((0 + boxsize / 2 - 2, img.data.shape[1] / 2.0))
results = self.image.fit_fixed_positions(
positions=[edge_posn],
boxsize=boxsize,
threshold=-1e10
)
self.assertListEqual([], results)
def testErrorBoxOverlapsEdge(self):
"""
Error box overflows image
Sometimes when fitting at a fixed position, we get extremely large
uncertainty values. These create an error box on position which
extends outside the image, causing errors when we try to calculate the
RA / Dec uncertainties. This test ensures we handle this case
gracefully.
"""
img = self.image
fake_params = sourcefinder.extract.ParamSet()
fake_params.values.update({
'peak': Uncertain(0.0, 0.5),
'flux': Uncertain(0.0, 0.5),
'xbar': Uncertain(5.5, 10000.5), # Danger Will Robinson
'ybar': Uncertain(5.5, 3),
'semimajor': Uncertain(4, 200),
'semiminor': Uncertain(4, 2),
'theta': Uncertain(30, 10),
})
fake_params.sig = 0
det = sourcefinder.extract.Detection(fake_params, img)
# Raises runtime error prior to bugfix for issue #3294
det._physical_coordinates()
self.assertEqual(det.ra.error, float('inf'))
self.assertEqual(det.dec.error, float('inf'))
def testForcedFitAtNans(self):
"""
Should not return a fit if the position was largely masked due to NaNs
"""
forcedfit_sky_posn = self.bright_src_posn
forcedfit_pixel_posn = self.image.wcs.s2p(forcedfit_sky_posn)
fitting_boxsize = BOX_IN_BEAMPIX * max(self.image.beam[0],
self.image.beam[1])
nandata = self.image.rawdata.copy()
x0, y0 = forcedfit_pixel_posn
# If we totally cover the fitting box in NaNs, then there are no
# valid pixels and fit gets rejected.
# However, if we only cover the central quarter (containing all the
# real signal!) then we get a dodgy fit back.
nanbox_radius = fitting_boxsize / 2
boxsize_proportion = 0.5
nanbox_radius *= boxsize_proportion
nandata[int(x0 - nanbox_radius):int(x0 + nanbox_radius + 1),
int(y0 - nanbox_radius):int(y0 + nanbox_radius + 1)] = float('nan')
# Dump image data for manual inspection:
# import astropy.io.fits as fits
# # output_data = self.image.rawdata
# output_data = nandata
# hdu = fits.PrimaryHDU((output_data).transpose())
# hdu.writeto('/tmp/nandata.fits',clobber=True)
nan_image = ImageData(nandata, beam=self.image.beam,
wcs=self.image.wcs)
results = nan_image.fit_fixed_positions(
positions=[self.bright_src_posn],
boxsize=fitting_boxsize,
threshold=None
)
print(results)
self.assertFalse(results)
class TestSimpleImageSourceFind(unittest.TestCase):
"""Now lets test drive the routines which find new sources"""
@requires_data(GRB120422A)
def testSingleSourceExtraction(self):
"""
Single source extraction
From visual inspection we only expect a single source in the image,
at around 5 or 6 sigma detection level."""
ew_sys_err, ns_sys_err = 0.0, 0.0
known_result = (
136.89603241069054, 14.022184792492785, # RA, DEC
5.341819139061954e-4, 1.3428186757078464e-3, # Err, Err
7.226590529214518e-4, 1.0918184742211533e-4, # Peak flux, err
6.067963179204716e-4, 1.7037685531724465e-4,
# Integrated flux, err
6.192259965962862, 25.516190123153514,
# Significance level, Beam semimajor-axis width (arcsec)
10.718798843620489, 178.62899212789304,
# Beam semiminor-axis width (arcsec), Beam parallactic angle
ew_sys_err, ns_sys_err,
5.181697175052841, # error_radius
1, # fit_type
0.59184643302, # chisq
0.67199741142, # reduced chisq
)
self.image = accessors.sourcefinder_image_from_accessor(
FitsImage(GRB120422A))
results = self.image.extract(det=5, anl=3)
results = [result.serialize(ew_sys_err, ns_sys_err) for result in
results]
# Our modified kappa,sigma clipper gives a slightly lower noise
# which catches an extra noise peak at the 5 sigma level.
self.assertEqual(len(results), 2)
r = results[1]
self.assertEqual(len(r), len(known_result))
for i in range(len(r)):
self.assertAlmostEqual(r[i], known_result[i], places=0)
@requires_data(GRB120422A)
def testForceSourceShape(self):
"""
Force source shape to beam
This image contains a single source (with parameters as listed under
testSingleSourceExtraction(), above). Here we force the lengths of the
major/minor axes to be held constant when fitting.
"""
self.image = accessors.sourcefinder_image_from_accessor(
FitsImage(GRB120422A))
results = self.image.extract(det=5, anl=3, force_beam=True)
self.assertEqual(results[0].smaj.value, self.image.beam[0])
self.assertEqual(results[0].smin.value, self.image.beam[1])
@requires_data(os.path.join(DATAPATH, 'SWIFT_554620-130504.fits'))
@requires_data(os.path.join(DATAPATH, 'SWIFT_554620-130504.image'))
def testWcsConversionConsistency(self):
"""
Check that extracting a source from FITS and CASA versions of the
same dataset gives the same results (especially, RA and Dec).
"""
fits_image = accessors.sourcefinder_image_from_accessor(
FitsImage(os.path.join(DATAPATH, 'SWIFT_554620-130504.fits')))
# Abuse the KAT7 CasaImage class here, since we just want to access
# the pixel data and the WCS:
casa_image = accessors.sourcefinder_image_from_accessor(
accessors.kat7casaimage.Kat7CasaImage(
os.path.join(DATAPATH, 'SWIFT_554620-130504.image')))
ew_sys_err, ns_sys_err = 0.0, 0.0
fits_results = fits_image.extract(det=5, anl=3)
fits_results = [result.serialize(ew_sys_err, ns_sys_err) for result in
fits_results]
casa_results = casa_image.extract(det=5, anl=3)
casa_results = [result.serialize(ew_sys_err, ns_sys_err) for result in
casa_results]
# Our modified kappa,sigma clipper gives a slightly lower noise
# which catches two extra noise peaks at the 5 sigma level.
self.assertEqual(len(fits_results), 3)
self.assertEqual(len(casa_results), 3)
fits_src = fits_results[0]
casa_src = casa_results[0]
self.assertEqual(len(fits_src), len(casa_src))
for idx, _ in enumerate(fits_src):
self.assertAlmostEqual(fits_src[idx], casa_src[idx], places=5)
@requires_data(GRB120422A)
def testNoLabelledIslandsCase(self):
"""
If an image is in fact very boring and flat/empty, then we may not even
locate any labelled islands, if the analysis threshold is set high enough.
(We reproduce this test case, even though GRB120422A-120429 has a
source in the image, just by setting the thresholds very high -
this avoids requiring additional data).
"""
self.image = accessors.sourcefinder_image_from_accessor(
FitsImage(GRB120422A))
results = self.image.extract(det=5e10, anl=5e10)
results = [result.serialize() for result in results]
self.assertEqual(len(results), 0)
class TestMaskedSource(unittest.TestCase):
"""
Source is masked
Check that we don't find sources when they fall within a masked region
of the image.
"""
@requires_data(GRB120422A)
def testWholeSourceMasked(self):
"""
Source in masked region
"""
self.image = accessors.sourcefinder_image_from_accessor(
FitsImage(GRB120422A))
self.image.data[250:280, 250:280] = np.ma.masked
results = self.image.extract(det=5, anl=3)
self.assertFalse(results)
@requires_data(GRB120422A)
def testPartialSourceMasked(self):
"""
Part of source masked
Tip of major axis is around 267, 264
"""
self.image = accessors.sourcefinder_image_from_accessor(
FitsImage(GRB120422A))
self.image.data[266:269, 263:266] = np.ma.masked
# Our modified kappa,sigma clipper gives a slightly lower noise
# which catches an extra noise peak at the 5 sigma level.
self.image.data[42:50, 375:386] = np.ma.masked
results = self.image.extract(det=5, anl=3)
self.assertFalse(results)
class TestMaskedBackground(unittest.TestCase):
# We force the mask by setting the usable region << grid size.
@requires_data(fits_file)
def testMaskedBackgroundForcedFit(self):
"""
Background at forced fit is masked
"""
self.image = accessors.sourcefinder_image_from_accessor(
accessors.open(fits_file), radius=1.0)
result = self.image.fit_to_point(256, 256, 10, 0, None)
self.assertFalse(result)
@requires_data(fits_file)
def testMaskedBackgroundBlind(self):
self.image = accessors.sourcefinder_image_from_accessor(
accessors.open(fits_file), radius=1.0)
result = self.image.extract(det=10.0, anl=3.0)
self.assertFalse(result)
class TestFailureModes(unittest.TestCase):
"""
If we get pathological data we should probably throw an exception
and let the calling code decide what to do.
"""
def testFlatImage(self):
sfimage = accessors.sourcefinder_image_from_accessor(
SyntheticImage(data=np.zeros((512, 512))))
self.assertTrue(np.ma.max(sfimage.data) == np.ma.min(sfimage.data),
msg="Data should be flat")
with self.assertRaises(RuntimeError):
sfimage.extract(det=5, anl=3)
| {
"content_hash": "d5e37da14f5b4dc0a655c8417d782d4c",
"timestamp": "",
"source": "github",
"line_count": 448,
"max_line_length": 89,
"avg_line_length": 40.75892857142857,
"alnum_prop": 0.5963855421686747,
"repo_name": "transientskp/pyse",
"id": "5f83fcd781caa72c2d3e2f9f155a1a7172b8714d",
"size": "18260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_image.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "368"
},
{
"name": "Python",
"bytes": "277081"
},
{
"name": "Shell",
"bytes": "463"
}
],
"symlink_target": ""
} |
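`TestNumpySubroutines.testBoxSlicing` above documents the contract of `ImageData.box_slice_about_pixel`: return a window centred on the requested pixel, with the radius rounded down. A small stand-alone NumPy sketch of that idea; this is a hypothetical re-implementation for illustration, not sourcefinder's own code:

# Hypothetical illustration of a centred box slice, mirroring the contract
# described in TestNumpySubroutines.testBoxSlicing (not sourcefinder's code).
import numpy as np


def box_slice_about_pixel(x, y, radius):
    """Return a (row, column) slice pair centred on (x, y), radius rounded down."""
    r = int(radius)
    return slice(y - r, y + r + 1), slice(x - r, x + r + 1)


a = np.arange(1, 101).reshape(10, 10)
x, y = 3, 3

print(a[box_slice_about_pixel(x, y, 0.9)])        # [[34]] -- rounds down to a single pixel
print(a[box_slice_about_pixel(x, y, 1)].shape)    # (3, 3), centred on a[3, 3] == 34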
from core import *
| {
"content_hash": "0abf8f657310478c4a5c3bcb33c8bb55",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 18,
"avg_line_length": 19,
"alnum_prop": 0.7368421052631579,
"repo_name": "breuleux/descr",
"id": "5f7bf5b224078fe840877dc1348a2275cd47b7b8",
"size": "20",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "descr/terminal/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65167"
}
],
"symlink_target": ""
} |
"""Unit tests for rbtools.diffs.tools.base.diff_file_result.DiffFileResult.
Version Added:
4.0
"""
import inspect
import io
from rbtools.testing import TestCase
from rbtools.diffs.tools.base.diff_file_result import DiffFileResult
class DiffFileResultTests(TestCase):
"""Unit tests for DiffFileResult."""
def test_has_differences_with_text_changes(self):
"""Testing DiffFileResult.has_differences with text diff and
changes
"""
diff_result = DiffFileResult(orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(b'...'),
has_text_differences=True)
self.assertTrue(diff_result.has_differences)
def test_has_differences_with_text_no_changes(self):
"""Testing DiffFileResult.has_differences with text diff and no
changes
"""
diff_result = DiffFileResult(orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(b''),
has_text_differences=False)
self.assertFalse(diff_result.has_differences)
def test_has_differences_with_binary(self):
"""Testing DiffFileResult.has_differences with binary diff"""
diff_result = DiffFileResult(orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(b''),
is_binary=True,
has_text_differences=False)
self.assertTrue(diff_result.has_differences)
def test_orig_file_header(self):
"""Testing DiffFileResult.orig_file_header"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\n'
b'+++ modified-file\txxx\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.orig_file_header,
b'--- orig-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 18)
self.assertEqual(diff_result._line_offset_cache, [(0, 18)])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(diff_result.orig_file_header,
b'--- orig-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 18)
self.assertEqual(diff_result._line_offset_cache, [(0, 18)])
def test_orig_file_header_with_crlf(self):
"""Testing DiffFileResult.orig_file_header with CRLF"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\n'
b'+++ modified-file\txxx\r\n'
b'@@ -1 +1 @@\r\n'
b'- foo\r\n'
b'+ bar\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.orig_file_header,
b'--- orig-file\txxx\r\n')
self.assertEqual(diff_result.diff.tell(), 19)
self.assertEqual(diff_result._line_offset_cache, [(0, 19)])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(diff_result.orig_file_header,
b'--- orig-file\txxx\r\n')
self.assertEqual(diff_result.diff.tell(), 19)
self.assertEqual(diff_result._line_offset_cache, [(0, 19)])
def test_orig_file_header_with_crcrlf(self):
"""Testing DiffFileResult.orig_file_header with CRCRLF"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\r\n'
b'+++ modified-file\txxx\r\r\n'
b'@@ -1 +1 @@\r\n'
b'- foo\r\r\n'
b'+ bar\r\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.orig_file_header,
b'--- orig-file\txxx\r\r\n')
self.assertEqual(diff_result.diff.tell(), 20)
self.assertEqual(diff_result._line_offset_cache, [(0, 20)])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(diff_result.orig_file_header,
b'--- orig-file\txxx\r\r\n')
self.assertEqual(diff_result.diff.tell(), 20)
self.assertEqual(diff_result._line_offset_cache, [(0, 20)])
def test_orig_file_header_with_no_text_diff(self):
"""Testing DiffFileResult.orig_file_header with no text differences"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
has_text_differences=False,
is_binary=True,
diff=io.BytesIO(
b'Binary files orig-file and modified-file differ\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.orig_file_header, b'')
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result._line_offset_cache, [])
def test_orig_file_header_with_no_header(self):
"""Testing DiffFileResult.orig_file_header with no '---' header"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
has_text_differences=True,
diff=io.BytesIO(
b'Something else\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.orig_file_header, b'')
self.assertEqual(diff_result.diff.tell(), 15)
self.assertEqual(diff_result._line_offset_cache, [(0, 15)])
def test_orig_file_header_with_out_of_bounds(self):
"""Testing DiffFileResult.orig_file_header with out-of-bounds line"""
diff_result = DiffFileResult(orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO())
self.assertEqual(diff_result.orig_file_header, b'')
self.assertEqual(diff_result.diff.tell(), 0)
def test_parsed_orig_file_header_with_tab(self):
"""Testing DiffFileResult.parsed_orig_file_header with tab separator"""
diff_result = DiffFileResult(
orig_path='orig file',
modified_path='modified file',
diff=io.BytesIO(
b'--- orig file\txxx yyy zzz\n'
b'+++ modified file\txxx yyy zzz\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.parsed_orig_file_header,
{
'extra': b'xxx yyy zzz',
'marker': b'---',
'path': b'orig file',
})
self.assertEqual(diff_result.diff.tell(), 26)
self.assertEqual(diff_result._line_offset_cache, [(0, 26)])
def test_parsed_orig_file_header_with_spaces(self):
"""Testing DiffFileResult.parsed_orig_file_header with two-space
separator
"""
diff_result = DiffFileResult(
orig_path='orig file',
modified_path='modified file',
diff=io.BytesIO(
b'--- orig file xxx yyy zzz\n'
b'+++ modified file xxx yyy zzz\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.parsed_orig_file_header,
{
'extra': b'xxx yyy zzz',
'marker': b'---',
'path': b'orig file',
})
self.assertEqual(diff_result.diff.tell(), 27)
self.assertEqual(diff_result._line_offset_cache, [(0, 27)])
def test_parsed_orig_file_header_with_no_separator(self):
"""Testing DiffFileResult.parsed_orig_file_header with no
distinguishable separator
"""
diff_result = DiffFileResult(
orig_path='orig file',
modified_path='modified file',
diff=io.BytesIO(
b'--- orig file xxx yyy zzz\n'
b'+++ modified file xxx yyy zzz\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.parsed_orig_file_header,
{
'extra': b'',
'marker': b'---',
'path': b'orig file xxx yyy zzz',
})
self.assertEqual(diff_result.diff.tell(), 26)
self.assertEqual(diff_result._line_offset_cache, [(0, 26)])
def test_modified_file_header(self):
"""Testing DiffFileResult.modified_file_header"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\n'
b'+++ modified-file\txxx\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 40)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 40)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
])
def test_modified_file_header_with_crlf(self):
"""Testing DiffFileResult.modified_file_header with CRLF"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\n'
b'+++ modified-file\txxx\r\n'
b'@@ -1 +1 @@\r\n'
b'- foo\r\n'
b'+ bar\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\r\n')
self.assertEqual(diff_result.diff.tell(), 42)
self.assertEqual(diff_result._line_offset_cache, [
(0, 19),
(19, 23),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\r\n')
self.assertEqual(diff_result.diff.tell(), 42)
self.assertEqual(diff_result._line_offset_cache, [
(0, 19),
(19, 23),
])
def test_modified_file_header_with_crcrlf(self):
"""Testing DiffFileResult.modified_file_header with CRCRLF"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\r\n'
b'+++ modified-file\txxx\r\r\n'
b'@@ -1 +1 @@\r\n'
b'- foo\r\r\n'
b'+ bar\r\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\r\r\n')
self.assertEqual(diff_result.diff.tell(), 44)
self.assertEqual(diff_result._line_offset_cache, [
(0, 20),
(20, 24),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\r\r\n')
self.assertEqual(diff_result.diff.tell(), 44)
self.assertEqual(diff_result._line_offset_cache, [
(0, 20),
(20, 24),
])
def test_modified_file_header_with_no_text_diff(self):
"""Testing DiffFileResult.modified_file_header with no text
differences
"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
has_text_differences=False,
is_binary=True,
diff=io.BytesIO(
b'Binary files orig-file and modified-file differ\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.modified_file_header, b'')
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result._line_offset_cache, [])
def test_modified_file_header_with_no_header(self):
"""Testing DiffFileResult.modified_file_header with no '+++' header"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
has_text_differences=True,
diff=io.BytesIO(
b'--- file1\txxx\n'
b'Something else\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(diff_result.modified_file_header, b'')
self.assertEqual(diff_result.diff.tell(), 29)
self.assertEqual(diff_result._line_offset_cache, [
(0, 14),
(14, 15),
])
def test_modified_file_header_with_out_of_bounds(self):
"""Testing DiffFileResult.modified_file_header with out-of-bounds
line
"""
diff_result = DiffFileResult(orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO())
self.assertEqual(diff_result.modified_file_header, b'')
self.assertEqual(diff_result.diff.tell(), 0)
def test_modified_file_header_after_orig_header(self):
"""Testing DiffFileResult.modified_file_header after orig_file_header
"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\n'
b'+++ modified-file\txxx\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
# Start by fetching the first header.
self.assertEqual(diff_result.orig_file_header,
b'--- orig-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 18)
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 40)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 40)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
])
def test_parsed_modified_file_header_with_tab(self):
"""Testing DiffFileResult.parsed_modified_file_header with tab
separator
"""
diff_result = DiffFileResult(
orig_path='orig file',
modified_path='modified file',
diff=io.BytesIO(
b'--- orig file\txxx yyy zzz\n'
b'+++ modified file\txxx yyy zzz\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.parsed_modified_file_header,
{
'extra': b'xxx yyy zzz',
'marker': b'+++',
'path': b'modified file',
})
self.assertEqual(diff_result.diff.tell(), 56)
self.assertEqual(diff_result._line_offset_cache, [
(0, 26),
(26, 30),
])
def test_parsed_modified_file_header_with_spaces(self):
"""Testing DiffFileResult.parsed_modified_file_header with two-space
separator
"""
diff_result = DiffFileResult(
orig_path='orig file',
modified_path='modified file',
diff=io.BytesIO(
b'--- orig file xxx yyy zzz\n'
b'+++ modified file xxx yyy zzz\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.parsed_modified_file_header,
{
'extra': b'xxx yyy zzz',
'marker': b'+++',
'path': b'modified file',
})
self.assertEqual(diff_result.diff.tell(), 58)
self.assertEqual(diff_result._line_offset_cache, [
(0, 27),
(27, 31),
])
def test_parsed_modified_file_header_with_no_separator(self):
"""Testing DiffFileResult.parsed_modified_file_header with no
distinguishable separator
"""
diff_result = DiffFileResult(
orig_path='orig file',
modified_path='modified file',
diff=io.BytesIO(
b'--- orig file xxx yyy zzz\n'
b'+++ modified file xxx yyy zzz\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.parsed_modified_file_header,
{
'extra': b'',
'marker': b'+++',
'path': b'modified file xxx yyy zzz',
})
self.assertEqual(diff_result.diff.tell(), 56)
self.assertEqual(diff_result._line_offset_cache, [
(0, 26),
(26, 30),
])
def test_hunks(self):
"""Testing DiffFileResult.hunks"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\n'
b'+++ modified-file\txxx\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.hunks,
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n')
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
diff_result.hunks,
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n')
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
def test_hunks_with_crlf(self):
"""Testing DiffFileResult.hunks with CRLF"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\n'
b'+++ modified-file\txxx\r\n'
b'@@ -1 +1 @@\r\n'
b'- foo\r\n'
b'+ bar\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.hunks,
b'@@ -1 +1 @@\r\n'
b'- foo\r\n'
b'+ bar\r\n')
self.assertEqual(diff_result.diff.tell(), 69)
self.assertEqual(diff_result._line_offset_cache, [
(0, 19),
(19, 23),
(42, 13),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
diff_result.hunks,
b'@@ -1 +1 @@\r\n'
b'- foo\r\n'
b'+ bar\r\n')
self.assertEqual(diff_result.diff.tell(), 69)
self.assertEqual(diff_result._line_offset_cache, [
(0, 19),
(19, 23),
(42, 13),
])
def test_hunks_with_crcrlf(self):
"""Testing DiffFileResult.hunks with CRCRLF"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\r\n'
b'+++ modified-file\txxx\r\r\n'
b'@@ -1 +1 @@\r\r\n'
b'- foo\r\r\n'
b'+ bar\r\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.hunks,
b'@@ -1 +1 @@\r\r\n'
b'- foo\r\r\n'
b'+ bar\r\r\n')
self.assertEqual(diff_result.diff.tell(), 74)
self.assertEqual(diff_result._line_offset_cache, [
(0, 20),
(20, 24),
(44, 14),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
diff_result.hunks,
b'@@ -1 +1 @@\r\r\n'
b'- foo\r\r\n'
b'+ bar\r\r\n')
self.assertEqual(diff_result.diff.tell(), 74)
self.assertEqual(diff_result._line_offset_cache, [
(0, 20),
(20, 24),
(44, 14),
])
def test_hunks_with_no_text_diff(self):
"""Testing DiffFileResult.hunks with no text diff"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
has_text_differences=False,
is_binary=True,
diff=io.BytesIO(
b'Binary files orig-file and modified-file differ\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
diff_result.hunks,
b'Binary files orig-file and modified-file differ\n')
self.assertEqual(diff_result.diff.tell(), 48)
self.assertEqual(diff_result._line_offset_cache, [])
def test_hunks_with_out_of_bounds(self):
"""Testing DiffFileResult.hunks with out-of-bounds
line
"""
diff_result = DiffFileResult(orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO())
self.assertEqual(diff_result.hunks, b'')
self.assertEqual(diff_result.diff.tell(), 0)
def test_hunks_after_orig_header(self):
"""Testing DiffFileResult.hunks after orig_file_header
"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\n'
b'+++ modified-file\txxx\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
# Start by fetching the first header.
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 40)
self.assertEqual(
diff_result.hunks,
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n')
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
diff_result.hunks,
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n')
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
def test_iter_hunk_lines(self):
"""Testing DiffFileResult.iter_hunk_lines"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\n'
b'+++ modified-file\txxx\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
lines = diff_result.iter_hunk_lines()
self.assertTrue(inspect.isgenerator(lines))
self.assertEqual(
list(lines),
[
b'@@ -1 +1 @@',
b'- foo',
b'+ bar',
])
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
list(diff_result.iter_hunk_lines()),
[
b'@@ -1 +1 @@',
b'- foo',
b'+ bar',
])
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
def test_iter_hunk_lines_with_keep_newlines(self):
"""Testing DiffFileResult.iter_hunk_lines with keep_newlines=True"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\n'
b'+++ modified-file\txxx\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
lines = diff_result.iter_hunk_lines(keep_newlines=True)
self.assertTrue(inspect.isgenerator(lines))
self.assertEqual(
list(lines),
[
b'@@ -1 +1 @@\n',
b'- foo\n',
b'+ bar\n',
])
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
list(diff_result.iter_hunk_lines(keep_newlines=True)),
[
b'@@ -1 +1 @@\n',
b'- foo\n',
b'+ bar\n',
])
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
def test_iter_hunk_lines_with_crlf(self):
"""Testing DiffFileResult.iter_hunk_lines with CRLF"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\n'
b'+++ modified-file\txxx\r\n'
b'@@ -1 +1 @@\r\n'
b'- foo\r\n'
b'+ bar\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
list(diff_result.iter_hunk_lines()),
[
b'@@ -1 +1 @@',
b'- foo',
b'+ bar',
])
self.assertEqual(diff_result.diff.tell(), 69)
self.assertEqual(diff_result._line_offset_cache, [
(0, 19),
(19, 23),
(42, 13),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
list(diff_result.iter_hunk_lines()),
[
b'@@ -1 +1 @@',
b'- foo',
b'+ bar',
])
self.assertEqual(diff_result.diff.tell(), 69)
self.assertEqual(diff_result._line_offset_cache, [
(0, 19),
(19, 23),
(42, 13),
])
def test_iter_hunk_lines_with_crlf_and_keep_newlines(self):
"""Testing DiffFileResult.iter_hunk_lines with CRLF and
keep_newlines=True
"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\n'
b'+++ modified-file\txxx\r\n'
b'@@ -1 +1 @@\r\n'
b'- foo\r\n'
b'+ bar\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
list(diff_result.iter_hunk_lines(keep_newlines=True)),
[
b'@@ -1 +1 @@\r\n',
b'- foo\r\n',
b'+ bar\r\n',
])
self.assertEqual(diff_result.diff.tell(), 69)
self.assertEqual(diff_result._line_offset_cache, [
(0, 19),
(19, 23),
(42, 13),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
list(diff_result.iter_hunk_lines(keep_newlines=True)),
[
b'@@ -1 +1 @@\r\n',
b'- foo\r\n',
b'+ bar\r\n',
])
self.assertEqual(diff_result.diff.tell(), 69)
self.assertEqual(diff_result._line_offset_cache, [
(0, 19),
(19, 23),
(42, 13),
])
def test_iter_hunk_lines_with_crcrlf(self):
"""Testing DiffFileResult.iter_hunk_lines with CRCRLF"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\r\n'
b'+++ modified-file\txxx\r\r\n'
b'@@ -1 +1 @@\r\r\n'
b'- foo\r\r\n'
b'+ bar\r\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
list(diff_result.iter_hunk_lines()),
[
b'@@ -1 +1 @@',
b'- foo',
b'+ bar',
])
self.assertEqual(diff_result.diff.tell(), 74)
self.assertEqual(diff_result._line_offset_cache, [
(0, 20),
(20, 24),
(44, 14),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
list(diff_result.iter_hunk_lines()),
[
b'@@ -1 +1 @@',
b'- foo',
b'+ bar',
])
self.assertEqual(diff_result.diff.tell(), 74)
self.assertEqual(diff_result._line_offset_cache, [
(0, 20),
(20, 24),
(44, 14),
])
def test_iter_hunk_lines_with_crcrlf_and_keep_newlines(self):
"""Testing DiffFileResult.iter_hunk_lines with CRCRLF and
keep_newlines=True
"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\r\r\n'
b'+++ modified-file\txxx\r\r\n'
b'@@ -1 +1 @@\r\r\n'
b'- foo\r\r\n'
b'+ bar\r\r\n'
))
self.assertEqual(diff_result.diff.tell(), 0)
self.assertEqual(
list(diff_result.iter_hunk_lines(keep_newlines=True)),
[
b'@@ -1 +1 @@\r\n',
b'- foo\r\n',
b'+ bar\r\n',
])
self.assertEqual(diff_result.diff.tell(), 74)
self.assertEqual(diff_result._line_offset_cache, [
(0, 20),
(20, 24),
(44, 14),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
list(diff_result.iter_hunk_lines(keep_newlines=True)),
[
b'@@ -1 +1 @@\r\n',
b'- foo\r\n',
b'+ bar\r\n',
])
self.assertEqual(diff_result.diff.tell(), 74)
self.assertEqual(diff_result._line_offset_cache, [
(0, 20),
(20, 24),
(44, 14),
])
def test_iter_hunk_lines_with_out_of_bounds(self):
"""Testing DiffFileResult.iter_hunk_lines with out-of-bounds
line
"""
diff_result = DiffFileResult(orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO())
self.assertEqual(list(diff_result.iter_hunk_lines()), [])
self.assertEqual(diff_result.diff.tell(), 0)
def test_iter_hunk_lines_after_orig_header(self):
"""Testing DiffFileResult.iter_hunk_lines after orig_file_header
"""
diff_result = DiffFileResult(
orig_path='orig-file',
modified_path='modified-file',
diff=io.BytesIO(
b'--- orig-file\txxx\n'
b'+++ modified-file\txxx\n'
b'@@ -1 +1 @@\n'
b'- foo\n'
b'+ bar\n'
))
# Start by fetching the first header.
self.assertEqual(diff_result.modified_file_header,
b'+++ modified-file\txxx\n')
self.assertEqual(diff_result.diff.tell(), 40)
self.assertEqual(
list(diff_result.iter_hunk_lines()),
[
b'@@ -1 +1 @@',
b'- foo',
b'+ bar',
])
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
# Since we're seeking, reading, and caching, check again.
self.assertEqual(
list(diff_result.iter_hunk_lines()),
[
b'@@ -1 +1 @@',
b'- foo',
b'+ bar',
])
self.assertEqual(diff_result.diff.tell(), 64)
self.assertEqual(diff_result._line_offset_cache, [
(0, 18),
(18, 22),
(40, 12),
])
| {
"content_hash": "cbb831a7f082b8f1c5d2bc62a960ef5f",
"timestamp": "",
"source": "github",
"line_count": 1052,
"max_line_length": 79,
"avg_line_length": 33.39923954372624,
"alnum_prop": 0.484375,
"repo_name": "reviewboard/rbtools",
"id": "67dce89edbc82bb5d40ed019fdb0c56c37a9feb1",
"size": "35136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rbtools/diffs/tests/test_diff_file_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11069"
},
{
"name": "HTML",
"bytes": "1000"
},
{
"name": "Python",
"bytes": "2298293"
},
{
"name": "Shell",
"bytes": "5491"
}
],
"symlink_target": ""
} |
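A recurring assertion in the tests above is that reading a header advances `diff.tell()` by the exact byte length of the line and records an `(offset, length)` pair. That bookkeeping is easy to reproduce in isolation; the sketch below (a hypothetical helper, not `DiffFileResult` itself) shows where numbers such as 18, 22 and 40 come from:

# Hypothetical sketch of the (offset, length) bookkeeping the tests assert on.
import io

diff = io.BytesIO(
    b'--- orig-file\txxx\n'
    b'+++ modified-file\txxx\n'
    b'@@ -1 +1 @@\n'
    b'- foo\n'
    b'+ bar\n'
)

line_offsets = []          # parallels DiffFileResult._line_offset_cache in spirit

for _ in range(2):         # read the two file headers
    offset = diff.tell()
    line = diff.readline()
    line_offsets.append((offset, len(line)))

print(line_offsets)        # [(0, 18), (18, 22)] -- lengths include the newline
print(diff.tell())         # 40, matching the assertions above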
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault(
        "DJANGO_SETTINGS_MODULE",
        "config.settings.development")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
| {
"content_hash": "12ab43368bb2c3ea18322bea21af5c6e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 23.272727272727273,
"alnum_prop": 0.671875,
"repo_name": "Alexx-G/django-project-template",
"id": "1881c3618b81330318e2b1e8f840a01d656b2cf9",
"size": "278",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "81"
},
{
"name": "Python",
"bytes": "5061"
}
],
"symlink_target": ""
} |
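The key detail in this manage.py is `os.environ.setdefault`: the development settings module is only a fallback, so an already exported `DJANGO_SETTINGS_MODULE` still wins. A tiny stand-alone demonstration of that behaviour with plain `os.environ` (the production settings path is a hypothetical example):

# Demonstrates why setdefault is used above: it never overrides an existing value.
import os

os.environ.pop('DJANGO_SETTINGS_MODULE', None)             # start from a clean slate
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.development')
print(os.environ['DJANGO_SETTINGS_MODULE'])                # config.settings.development

os.environ['DJANGO_SETTINGS_MODULE'] = 'config.settings.production'   # hypothetical export
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.development')
print(os.environ['DJANGO_SETTINGS_MODULE'])                # the existing value is kept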
from test_framework import MonetaTestFramework
from monetarpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def get_sub_array_from_array(object_array, to_match):
'''
Finds and returns a sub array from an array of arrays.
to_match should be a unique idetifier of a sub array
'''
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
return item
return []
def check_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found in object_array
"""
if should_not_find == True:
expected = { }
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects was matched %s"%(str(to_match)))
class ReceivedByTest(MonetaTestFramework):
def run_test(self):
'''
listreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
        #Check not listed in listreceivedbyaddress because it has 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{ },
True)
        #Bury Tx under 10 blocks so it will be returned by listreceivedbyaddress
self.nodes[1].setgenerate(True, 10)
self.sync_all()
check_array_result(self.nodes[1].listreceivedbyaddress(),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confirmations < 10
check_array_result(self.nodes[1].listreceivedbyaddress(5),
{"address":addr},
{"address":addr, "account":"", "amount":Decimal("0.1"), "confirmations":10, "txids":[txid,]})
        #With min confirmations > 10, should not find Tx
check_array_result(self.nodes[1].listreceivedbyaddress(11),{"address":addr},{ },True)
#Empty Tx
addr = self.nodes[1].getnewaddress()
check_array_result(self.nodes[1].listreceivedbyaddress(0,True),
{"address":addr},
{"address":addr, "account":"", "amount":0, "confirmations":0, "txids":[]})
'''
getreceivedbyaddress Test
'''
# Send from node 0 to 1
addr = self.nodes[1].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
#Check balance is 0 because of 0 confirmations
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
#Check balance is 0.1
balance = self.nodes[1].getreceivedbyaddress(addr,0)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
        #Bury Tx under 10 blocks so it will be returned by the default getreceivedbyaddress
self.nodes[1].setgenerate(True, 10)
self.sync_all()
balance = self.nodes[1].getreceivedbyaddress(addr)
if balance != Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaddress, %0.2f"%(balance))
'''
listreceivedbyaccount + getreceivedbyaccount Test
'''
#set pre-state
addrArr = self.nodes[1].getnewaddress()
account = self.nodes[1].getaccount(addrArr)
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(),{"account":account})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
balance_by_account = rec_by_accountArr = self.nodes[1].getreceivedbyaccount(account)
txid = self.nodes[0].sendtoaddress(addr, 0.1)
self.sync_all()
# listreceivedbyaccount should return received_by_account_json because of 0 confirmations
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
received_by_account_json)
        # getreceivedbyaccount should return the same balance because of 0 confirmations
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account:
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
self.nodes[1].setgenerate(True, 10)
self.sync_all()
# listreceivedbyaccount should return updated account balance
check_array_result(self.nodes[1].listreceivedbyaccount(),
{"account":account},
{"account":received_by_account_json["account"], "amount":(received_by_account_json["amount"] + Decimal("0.1"))})
        # getreceivedbyaccount should return the updated balance
balance = self.nodes[1].getreceivedbyaccount(account)
if balance != balance_by_account + Decimal("0.1"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
#Create a new account named "mynewaccount" that has a 0 balance
self.nodes[1].getaccountaddress("mynewaccount")
received_by_account_json = get_sub_array_from_array(self.nodes[1].listreceivedbyaccount(0,True),{"account":"mynewaccount"})
if len(received_by_account_json) == 0:
raise AssertionError("No accounts found in node")
# Test includeempty of listreceivedbyaccount
if received_by_account_json["amount"] != Decimal("0.0"):
raise AssertionError("Wrong balance returned by listreceivedbyaccount, %0.2f"%(received_by_account_json["amount"]))
# Test getreceivedbyaccount for 0 amount accounts
balance = self.nodes[1].getreceivedbyaccount("mynewaccount")
if balance != Decimal("0.0"):
raise AssertionError("Wrong balance returned by getreceivedbyaccount, %0.2f"%(balance))
if __name__ == '__main__':
ReceivedByTest().main()
| {
"content_hash": "a4f8de15890769217d0aed97151bf60c",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 139,
"avg_line_length": 44.71875,
"alnum_prop": 0.610062893081761,
"repo_name": "habibmasuro/moneta-0.10.0",
"id": "7e4a6002c3e9c3d86e40aa60ef5e897cad08f959",
"size": "7407",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/receivedby.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "Batchfile",
"bytes": "570"
},
{
"name": "C",
"bytes": "344750"
},
{
"name": "C++",
"bytes": "3685223"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "17983"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2099"
},
{
"name": "Makefile",
"bytes": "61678"
},
{
"name": "Objective-C",
"bytes": "2020"
},
{
"name": "Objective-C++",
"bytes": "7244"
},
{
"name": "Protocol Buffer",
"bytes": "2304"
},
{
"name": "Python",
"bytes": "211715"
},
{
"name": "QMake",
"bytes": "2018"
},
{
"name": "Shell",
"bytes": "42211"
}
],
"symlink_target": ""
} |
import logging
import os
from collections import defaultdict, deque
from copy import deepcopy
from random import shuffle
from sys import exit
from threading import Lock, Thread, Event
logging.basicConfig(level=logging.INFO, format='%(name)s %(threadName)s: %(message)s')
LOG = logging.getLogger('scheduler')
LOG.setLevel(level=logging.DEBUG)
class Scheduler(object):
"""Schedules the submission of workloads across one of more clients.
Args:
query_executors (list of QueryExecutor): the objects should be initialized.
shuffle (boolean): If True, change the order of execution of queries in a workload.
By default, the queries are executed sorted by query name.
num_clients (int): Number of concurrent clients.
impalads (list of str): A list of impalads to connect to. Ignored when the executor
is hive.
Attributes:
query_executors (list of QueryExecutor): initialized query executors
shuffle (boolean): shuffle query executors
iterations (int): number of iterations ALL query executors will run
query_iterations (int): number of times each query executor will execute
    impalads (list of str): list of impalads for execution. It is rotated after each execution.
num_clients (int): Number of concurrent clients
"""
def __init__(self, **kwargs):
self.query_executors = kwargs.get('query_executors')
self.shuffle = kwargs.get('shuffle', False)
self.iterations = kwargs.get('iterations', 1)
self.query_iterations = kwargs.get('query_iterations', 1)
self.impalads = kwargs.get('impalads')
self.num_clients = kwargs.get('num_clients', 1)
self.__exit = Event()
self.__results = list()
self.__result_dict_lock = Lock()
self.__thread_name = "[%s " % self.query_executors[0].query.db + "Thread %d]"
self.__threads = []
self.__create_workload_threads()
@property
def results(self):
"""Return execution results."""
return self.__results
def __create_workload_threads(self):
"""Create workload threads.
    Each workload thread is analogous to a client name, and is identified by a unique ID,
the workload that's being run and the table formats it's being run on."""
for thread_num in xrange(self.num_clients):
thread = Thread(target=self.__run_queries, args=[thread_num],
name=self.__thread_name % thread_num)
thread.daemon = True
self.__threads.append(thread)
def __get_next_impalad(self):
"""Maintains a rotating list of impalads"""
self.impalads.rotate(-1)
return self.impalads[-1]
def __run_queries(self, thread_num):
"""This method is run by every thread concurrently.
Args:
thread_num (int): Thread number. Used for setting the client name in the result.
"""
# each thread gets its own copy of query_executors
query_executors = deepcopy(sorted(self.query_executors, key=lambda x: x.query.name))
for j in xrange(self.iterations):
# Randomize the order of execution for each iteration if specified.
if self.shuffle: shuffle(query_executors)
results = defaultdict(list)
workload_time_sec = 0
for query_executor in query_executors:
query_name = query_executor.query.name
LOG.info("Running Query: %s" % query_name)
for i in xrange(self.query_iterations):
if self.__exit.isSet():
LOG.error("Another thread failed, exiting.")
exit(1)
try:
query_executor.prepare(self.__get_next_impalad())
query_executor.execute()
# QueryExecutor only throws an exception if the query fails and abort_on_error
# is set to True. If abort_on_error is False, then the exception is logged on
# the console and execution moves on to the next query.
except Exception as e:
LOG.error("Query %s Failed: %s" % (query_name, str(e)))
self.__exit.set()
finally:
LOG.info("%s query iteration %d finished in %.2f seconds" % (query_name, i+1,
query_executor.result.time_taken))
result = query_executor.result
result.client_name = thread_num + 1
self.__results.append(result)
workload_time_sec += query_executor.result.time_taken
if self.query_iterations == 1:
LOG.info("Workload iteration %d finished in %s seconds" % (j+1, workload_time_sec))
def run(self):
"""Run the query pipelines concurrently"""
for thread_num, t in enumerate(self.__threads):
LOG.info("Starting %s" % self.__thread_name % thread_num)
t.start()
for thread_num,t in enumerate(self.__threads):
t.join()
LOG.info("Finished %s" % self.__thread_name % thread_num)
| {
"content_hash": "135284465f7676933d6fc4adea43dc47",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 96,
"avg_line_length": 40.76724137931034,
"alnum_prop": 0.6641996193698456,
"repo_name": "henryr/Impala",
"id": "bc98c587980141fe2dfb01ece9274223afe3db7a",
"size": "5583",
"binary": false,
"copies": "10",
"ref": "refs/heads/cdh5-trunk",
"path": "tests/common/scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Bison",
"bytes": "78633"
},
{
"name": "C",
"bytes": "15836"
},
{
"name": "C++",
"bytes": "5841728"
},
{
"name": "CMake",
"bytes": "89740"
},
{
"name": "CSS",
"bytes": "86925"
},
{
"name": "Groff",
"bytes": "1633"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "3270730"
},
{
"name": "PLpgSQL",
"bytes": "393"
},
{
"name": "Python",
"bytes": "1642846"
},
{
"name": "SQLPL",
"bytes": "3253"
},
{
"name": "Shell",
"bytes": "143698"
},
{
"name": "Thrift",
"bytes": "240077"
}
],
"symlink_target": ""
} |
"""The File Upload integration."""
from __future__ import annotations
import asyncio
from collections.abc import Iterator
from contextlib import contextmanager
from dataclasses import dataclass
from pathlib import Path
import shutil
import tempfile
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import raise_if_invalid_filename
from homeassistant.util.ulid import ulid_hex
DOMAIN = "file_upload"
# If increased, change upload view to streaming
# https://docs.aiohttp.org/en/stable/web_quickstart.html#file-uploads
MAX_SIZE = 1024 * 1024 * 10
TEMP_DIR_NAME = f"home-assistant-{DOMAIN}"
@contextmanager
def process_uploaded_file(hass: HomeAssistant, file_id: str) -> Iterator[Path]:
"""Get an uploaded file.
File is removed at the end of the context.
"""
if DOMAIN not in hass.data:
raise ValueError("File does not exist")
file_upload_data: FileUploadData = hass.data[DOMAIN]
if not file_upload_data.has_file(file_id):
raise ValueError("File does not exist")
try:
yield file_upload_data.file_path(file_id)
finally:
file_upload_data.files.pop(file_id)
shutil.rmtree(file_upload_data.file_dir(file_id))
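# Illustrative caller sketch (editorial; a hypothetical config-flow handler, not
# part of this integration). A file_id returned by the upload view is resolved to
# a real path for the duration of the block and cleaned up afterwards:
#
#   with process_uploaded_file(hass, file_id) as file_path:
#       contents = await hass.async_add_executor_job(file_path.read_bytes)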
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up File Upload."""
hass.http.register_view(FileUploadView)
return True
@dataclass(frozen=True)
class FileUploadData:
"""File upload data."""
temp_dir: Path
files: dict[str, str]
@classmethod
async def create(cls, hass: HomeAssistant) -> FileUploadData:
"""Initialize the file upload data."""
def _create_temp_dir() -> Path:
"""Create temporary directory."""
temp_dir = Path(tempfile.gettempdir()) / TEMP_DIR_NAME
# If it exists, it's an old one and Home Assistant didn't shut down correctly.
if temp_dir.exists():
shutil.rmtree(temp_dir)
temp_dir.mkdir(0o700)
return temp_dir
temp_dir = await hass.async_add_executor_job(_create_temp_dir)
def cleanup_unused_files(ev: Event) -> None:
"""Clean up unused files."""
shutil.rmtree(temp_dir)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_unused_files)
return cls(temp_dir, {})
def has_file(self, file_id: str) -> bool:
"""Return if file exists."""
return file_id in self.files
def file_dir(self, file_id: str) -> Path:
"""Return the file directory."""
return self.temp_dir / file_id
def file_path(self, file_id: str) -> Path:
"""Return the file path."""
return self.file_dir(file_id) / self.files[file_id]
class FileUploadView(HomeAssistantView):
"""HTTP View to upload files."""
url = "/api/file_upload"
name = "api:file_upload"
_upload_lock: asyncio.Lock | None = None
@callback
def _get_upload_lock(self) -> asyncio.Lock:
"""Get upload lock."""
if self._upload_lock is None:
self._upload_lock = asyncio.Lock()
return self._upload_lock
async def post(self, request: web.Request) -> web.Response:
"""Upload a file."""
async with self._get_upload_lock():
return await self._upload_file(request)
async def _upload_file(self, request: web.Request) -> web.Response:
"""Handle uploaded file."""
# Increase max payload
request._client_max_size = MAX_SIZE # pylint: disable=protected-access
data = await request.post()
file_field = data.get("file")
if not isinstance(file_field, web.FileField):
raise vol.Invalid("Expected a file")
try:
raise_if_invalid_filename(file_field.filename)
except ValueError as err:
raise web.HTTPBadRequest from err
hass: HomeAssistant = request.app["hass"]
file_id = ulid_hex()
if DOMAIN not in hass.data:
hass.data[DOMAIN] = await FileUploadData.create(hass)
file_upload_data: FileUploadData = hass.data[DOMAIN]
file_dir = file_upload_data.file_dir(file_id)
def _sync_work() -> None:
file_dir.mkdir()
# MyPy forgets about the isinstance check because we're in a function scope
assert isinstance(file_field, web.FileField)
with (file_dir / file_field.filename).open("wb") as target_fileobj:
shutil.copyfileobj(file_field.file, target_fileobj)
await hass.async_add_executor_job(_sync_work)
file_upload_data.files[file_id] = file_field.filename
return self.json({"file_id": file_id})
@RequestDataValidator({vol.Required("file_id"): str})
async def delete(self, request: web.Request, data: dict[str, str]) -> web.Response:
"""Delete a file."""
hass: HomeAssistant = request.app["hass"]
if DOMAIN not in hass.data:
raise web.HTTPNotFound()
file_id = data["file_id"]
file_upload_data: FileUploadData = hass.data[DOMAIN]
if file_upload_data.files.pop(file_id, None) is None:
raise web.HTTPNotFound()
await hass.async_add_executor_job(
lambda: shutil.rmtree(file_upload_data.file_dir(file_id))
)
return self.json_message("File deleted")
| {
"content_hash": "b87d4de37d47815e8b2492bcbb882019",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 90,
"avg_line_length": 31.126373626373628,
"alnum_prop": 0.6457193292144748,
"repo_name": "w1ll1am23/home-assistant",
"id": "9f548e1445939e5dda7c3357e9acbb6d6f2de949",
"size": "5665",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/file_upload/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52277012"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from pymongo import MongoClient, MongoReplicaSetClient
from tapiriik.settings import MONGO_HOST, MONGO_REPLICA_SET, MONGO_CLIENT_OPTIONS, REDIS_HOST
# MongoDB
client_class = MongoClient if not MONGO_REPLICA_SET else MongoReplicaSetClient
if MONGO_REPLICA_SET:
MONGO_CLIENT_OPTIONS["replicaSet"] = MONGO_REPLICA_SET
_connection = client_class(host=MONGO_HOST, **MONGO_CLIENT_OPTIONS)
db = _connection["tapiriik"]
cachedb = _connection["tapiriik_cache"]
tzdb = _connection["tapiriik_tz"]
# The main db currently has an unfortunate lock contention rate
ratelimit = _connection["tapiriik_ratelimit"]
# Redis
if REDIS_HOST:
import redis as redis_client
redis = redis_client.Redis(host=REDIS_HOST)
else:
redis = None # Must be defined
def close_connections():
try:
_connection.close()
except:
pass
import atexit
atexit.register(close_connections)
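# Illustrative access patterns (editorial; the collection and key names below are
# hypothetical, not defined by this module):
#   db.users.find_one({"_id": user_id})
#   if redis is not None:
#       redis.set("some:key", "value")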
| {
"content_hash": "151f6e849c1d2bf1d07f23a8ff3c3209",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 93,
"avg_line_length": 26.8125,
"alnum_prop": 0.7727272727272727,
"repo_name": "gavioto/tapiriik",
"id": "86b8a51528dc1f95592d3a021be3a8b4709ff5bd",
"size": "858",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tapiriik/database/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23791"
},
{
"name": "HTML",
"bytes": "66893"
},
{
"name": "JavaScript",
"bytes": "48483"
},
{
"name": "Python",
"bytes": "609011"
},
{
"name": "Shell",
"bytes": "1120"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
"""
python %prog [options] <in_schema.xsd> <out_schema.xsd>
Synopsis:
Prepare schema document. Replace include and import elements.
Examples:
python %prog myschema.xsd
python %prog myschema.xsd newschema.xsd
python %prog -f myschema.xsd newschema.xsd
cat infile.xsd | python %prog > outfile.xsd
"""
#
# Imports
import sys
import os
import urllib.request, urllib.error, urllib.parse
import copy
from optparse import OptionParser, Values
import itertools
from copy import deepcopy
from lxml import etree
#
# Globals and constants
#
# Do not modify the following VERSION comments.
# Used by updateversion.py.
##VERSION##
VERSION = '2.12a'
##VERSION##
Namespaces = {'xs': 'http://www.w3.org/2001/XMLSchema'}
Xsd_namespace_uri = 'http://www.w3.org/2001/XMLSchema'
CatalogDict = {}
# the base url to use for all relative paths in the catalog
CatalogBaseUrl = None
def load_catalog(catalogpath):
global CatalogBaseUrl
if catalogpath:
CatalogBaseUrl = os.path.split(catalogpath)[0]
catalog = etree.parse(open(catalogpath))
for elements in catalog.getroot().findall(
"{urn:oasis:names:tc:entity:xmlns:xml:catalog}public"):
CatalogDict[elements.get("publicId")] = elements.get("uri")
#
# Functions for external use
def process_include_files(
infile, outfile, inpath='', catalogpath=None,
fixtypenames=None):
load_catalog(catalogpath)
options = Values({
'force': False,
'fixtypenames': fixtypenames,
})
prep_schema_doc(infile, outfile, inpath, options)
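# Example library call (editorial; the file names are hypothetical):
#   process_include_files('myschema.xsd', 'myschema_flat.xsd',
#                         inpath='myschema.xsd')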
def get_all_root_file_paths(infile, inpath='', catalogpath=None):
load_catalog(catalogpath)
doc1 = etree.parse(infile)
root1 = doc1.getroot()
rootPaths = []
params = Params()
params.parent_url = infile
params.base_url = os.path.split(inpath)[0]
get_root_file_paths(root1, params, rootPaths)
rootPaths.append(inpath)
return rootPaths
#
# Classes
class Params(object):
members = ('base_url', 'already_processed', 'parent_url', )
def __init__(self):
self.base_url = None
self.already_processed = set()
self.parent_url = None
def __setattr__(self, name, value):
if name not in self.members:
raise AttributeError('Class %s has no set-able attribute "%s"' % (
self.__class__.__name__, name, ))
self.__dict__[name] = value
class SchemaIOError(IOError):
pass
class RaiseComplexTypesError(Exception):
pass
#
# Functions for internal use and testing
def clear_includes_and_imports(node):
namespace = node.nsmap[node.prefix]
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
repl = etree.Comment(etree.tostring(child))
repl.tail = '\n'
node.replace(child, repl)
def get_ref_info(node, params):
# first look for the schema location in the catalog, if not
# there, then see if it's specified in the node
namespace = node.get('namespace')
url = None
baseUrl = None
if namespace in CatalogDict:
url = CatalogDict[namespace]
# setup the base url in case the path
# in the catalog was a relative path
baseUrl = CatalogBaseUrl
if not url:
url = node.get('schemaLocation')
if not url:
msg = '*** Warning: missing "schemaLocation" attribute in %s\n' % (
params.parent_url, )
sys.stderr.write(msg)
return (None, None)
# Uncomment the next lines to help track down missing schemaLocation etc.
# print '(resolve_ref) url: %s\n parent-url: %s' % (
# url, params.parent_url, )
if not baseUrl:
baseUrl = params.base_url
if baseUrl and not (
url.startswith('/') or
url.startswith('http:') or
url.startswith('ftp:')):
locn = '%s/%s' % (baseUrl, url, )
schema_name = locn
else:
locn = url
schema_name = url
return locn, schema_name
def resolve_ref(node, params, options):
content = None
locn, schema_name = get_ref_info(node, params)
if locn is not None and not (
locn.startswith('/') or
locn.startswith('http:') or
locn.startswith('ftp:')):
schema_name = os.path.abspath(locn)
if locn is not None:
if schema_name not in params.already_processed:
params.already_processed.add(schema_name)
## print 'trace --'
## print ' url: : %s' % (url, )
## print ' base : %s' % (params.base_url, )
## print ' parent : %s' % (params.parent_url, )
## print ' locn : %s' % (locn, )
## print ' schema_name : %s\n' % (schema_name, )
if locn.startswith('http:') or locn.startswith('ftp:'):
try:
urlfile = urllib.request.urlopen(locn)
content = urlfile.read()
urlfile.close()
params.parent_url = locn
params.base_url = os.path.split(locn)[0]
except urllib.error.HTTPError:
msg = "Can't find file %s referenced in %s." % (
locn, params.parent_url, )
raise SchemaIOError(msg)
else:
if os.path.exists(locn):
infile = open(locn, 'rb')
content = infile.read()
infile.close()
params.parent_url = locn
params.base_url = os.path.split(locn)[0]
if content is None:
msg = "Can't find file %s referenced in %s." % (
locn, params.parent_url, )
raise SchemaIOError(msg)
## if content is None:
## msg = "Can't find file %s referenced in %s." % (
## locn, params.parent_url, )
## raise SchemaIOError(msg)
return content
def collect_inserts(node, params, inserts, options):
namespace = node.nsmap[node.prefix]
roots = []
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
aux_roots = collect_inserts_aux(child, params, inserts, options)
roots.extend(aux_roots)
return roots
def collect_inserts_aux(child, params, inserts, options):
roots = []
save_base_url = params.base_url
string_content = resolve_ref(child, params, options)
if string_content is not None:
root = etree.fromstring(string_content, base_url=params.base_url)
roots.append(root)
for child1 in root:
if not isinstance(child1, etree._Comment):
namespace = child1.nsmap[child1.prefix]
if (child1.tag != '{%s}include' % (namespace, ) and
                        child1.tag != '{%s}import' % (namespace, )):
comment = etree.Comment(etree.tostring(child))
comment.tail = '\n'
inserts.append(comment)
inserts.append(child1)
insert_roots = collect_inserts(root, params, inserts, options)
roots.extend(insert_roots)
params.base_url = save_base_url
return roots
def get_root_file_paths(node, params, rootPaths):
namespace = node.nsmap[node.prefix]
child_iter1 = node.iterfind('{%s}include' % (namespace, ))
child_iter2 = node.iterfind('{%s}import' % (namespace, ))
for child in itertools.chain(child_iter1, child_iter2):
get_root_file_paths_aux(child, params, rootPaths)
def get_root_file_paths_aux(child, params, rootPaths):
save_base_url = params.base_url
path, _ = get_ref_info(child, params)
string_content = resolve_ref(child, params, None)
if string_content is not None:
root = etree.fromstring(string_content, base_url=params.base_url)
get_root_file_paths(root, params, rootPaths)
if path is not None and path not in rootPaths:
rootPaths.append(path)
params.base_url = save_base_url
def make_file(outFileName, options):
outFile = None
if (not options.force) and os.path.exists(outFileName):
reply = input('File %s exists. Overwrite? (y/n): ' % outFileName)
if reply == 'y':
outFile = open(outFileName, 'w')
else:
outFile = open(outFileName, 'w')
return outFile
def prep_schema_doc(infile, outfile, inpath, options):
doc1 = etree.parse(infile)
root1 = doc1.getroot()
params = Params()
params.parent_url = infile
params.base_url = os.path.split(inpath)[0]
inserts = []
collect_inserts(root1, params, inserts, options)
root2 = copy.copy(root1)
clear_includes_and_imports(root2)
for insert_node in inserts:
root2.append(insert_node)
process_groups(root2)
raise_anon_complextypes(root2)
fix_type_names(root2, options)
doc2 = etree.ElementTree(root2)
doc2.write(outfile)
return doc2
def prep_schema(inpath, outpath, options):
if inpath:
infile = open(inpath, 'r')
else:
infile = sys.stdin
if outpath:
outfile = make_file(outpath, options)
else:
outfile = sys.stdout
if outfile is None:
return
prep_schema_doc(infile, outfile, inpath, options)
if inpath:
infile.close()
if outpath:
outfile.close()
def process_groups(root):
# Get all the xs:group definitions at top level.
defs = root.xpath('./xs:group', namespaces=Namespaces)
defs = [node for node in defs if node.get('name') is not None]
# Get all the xs:group references (below top level).
refs = root.xpath('./*//xs:group', namespaces=Namespaces)
refs = [node for node in refs if node.get('ref') is not None]
# Create a dictionary of the named model groups (definitions).
def_dict = {}
for node in defs:
def_dict[trim_prefix(node.get('name'))] = node
replace_group_defs(def_dict, refs)
def fix_type_names(root, options):
fixnamespec = options.fixtypenames
if fixnamespec:
namespecs = fixnamespec.split(';')
else:
namespecs = []
for namespec in namespecs:
names = namespec.split(':')
if len(names) == 2:
oldname = names[0]
newname = names[1]
elif len(names) == 1:
oldname = names[0]
newname = '%sxx' % (oldname, )
else:
continue
# Change the name (name attribute) of the complexType.
pat = './/%s:complexType[@name="%s"]' % (
root.prefix, oldname)
elements = xpath_find(root, pat)
if len(elements) < 1:
sys.stderr.write(
"\nWarning: fix-type-names can't find complexType '%s'. "
"Exiting.\n\n" % (oldname, ))
sys.exit(1)
        if len(elements) > 1:
sys.stderr.write(
"Warning: fix-type-names found more than "
"one complexType '%s'. "
"Changing first." % (oldname, ))
element = elements[0]
element.set('name', newname)
# Change the reference (type attribute) of child elements.
pat = './/%s:element' % (root.prefix, )
elements = xpath_find(root, pat)
for element in elements:
typename = element.get('type')
if not typename:
continue
names = typename.split(':')
if len(names) == 2:
typename = names[1]
elif len(names) == 1:
typename = names[0]
else:
continue
if typename != oldname:
continue
if not element.getchildren():
element.set('type', newname)
# Change the extensions ('base' attribute) that refer to the old type.
pat = './/%s:extension' % (root.prefix, )
elements = xpath_find(root, pat)
for element in elements:
typename = element.get('base')
if not typename:
continue
names = typename.split(':')
if len(names) == 2:
typename = names[1]
elif len(names) == 1:
typename = names[0]
else:
continue
if typename != oldname:
continue
element.set('base', newname)
def xpath_find(node, pat):
namespaces = {node.prefix: node.nsmap[node.prefix]}
elements = node.xpath(pat, namespaces=namespaces)
return elements
def replace_group_defs(def_dict, refs):
for ref_node in refs:
name = trim_prefix(ref_node.get('ref'))
if name is None:
continue
def_node = def_dict.get(name)
if def_node is not None:
content = def_node.xpath(
'./xs:sequence|./xs:choice|./xs:all',
namespaces=Namespaces)
if content:
content = content[0]
parent = ref_node.getparent()
for node in content:
new_node = deepcopy(node)
# Copy minOccurs and maxOccurs attributes to new node.
value = ref_node.get('minOccurs')
if value is not None:
new_node.set('minOccurs', value)
value = ref_node.get('maxOccurs')
if value is not None:
new_node.set('maxOccurs', value)
ref_node.addprevious(new_node)
parent.remove(ref_node)
def raise_anon_complextypes(root):
""" Raise each anonymous complexType to top level and give it a name.
Rename if necessary to prevent duplicates.
"""
def_names = collect_type_names(root)
def_count = 0
# Find all complexTypes below top level.
# Raise them to top level and name them.
# Re-name if there is a duplicate (simpleType, complexType, or
# previous renamed type).
# Change the parent (xs:element) so the "type" attribute refers to
# the raised and renamed type.
# Collect the new types.
el = etree.Comment(text="Raised anonymous complexType definitions")
el.tail = "\n\n"
root.append(el)
prefix = root.prefix
if prefix:
pattern = './*/*//%s:complexType|./*/*//%s:simpleType' % (
prefix, prefix, )
element_tag = '{%s}element' % (root.nsmap[prefix], )
else:
pattern = './*/*//complexType|./*/*//simpleType'
element_tag = 'element'
defs = root.xpath(pattern, namespaces=Namespaces)
for node in defs:
parent = node.getparent()
if parent.tag != element_tag:
continue
name = parent.get('name')
if not name:
continue
type_name = '%sType' % (name, )
type_name, def_count = unique_name(type_name, def_names, def_count)
def_names.add(type_name)
parent.set('type', type_name)
node.set('name', type_name)
# Move the complexType node to top level.
root.append(node)
#
# Collect the names of all currently defined types (complexType,
# simpleType, element).
def collect_type_names(node):
prefix = node.prefix
if prefix is not None and prefix.strip():
pattern = './/%s:complexType|.//%s:simpleType|.//%s:element' % (
prefix, prefix, prefix)
# Must make sure that we have a namespace dictionary that does *not*
# have a key None.
namespaces = {prefix: node.nsmap[prefix]}
elements = node.xpath(pattern, namespaces=namespaces)
else:
pattern = './/complexType|.//simpleType|.//element'
elements = node.xpath(pattern)
names = [
el.attrib['name'] for el in elements if
'name' in el.attrib and el.getchildren()
]
names = set(names)
return names
def unique_name(type_name, def_names, def_count):
orig_type_name = type_name
while True:
if type_name not in def_names:
return type_name, def_count
def_count += 1
type_name = '%s%d' % (orig_type_name, def_count, )
def trim_prefix(name):
names = name.split(':')
if len(names) == 1:
return names[0]
elif len(names) == 2:
return names[1]
else:
return None
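# Behaviour sketch for trim_prefix (editorial):
#   trim_prefix('element')     -> 'element'
#   trim_prefix('xs:element')  -> 'element'
#   trim_prefix('a:b:c')       -> None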
USAGE_TEXT = __doc__
def usage(parser):
parser.print_help()
sys.exit(1)
def main():
parser = OptionParser(USAGE_TEXT)
parser.add_option(
"-f", "--force", action="store_true",
dest="force", default=False,
help="force overwrite without asking")
(options, args) = parser.parse_args()
if len(args) == 2:
inpath = args[0]
outpath = args[1]
elif len(args) == 1:
inpath = args[0]
outpath = None
elif len(args) == 0:
inpath = None
outpath = None
else:
usage(parser)
prep_schema(inpath, outpath, options)
if __name__ == "__main__":
#import pdb; pdb.set_trace()
main()
| {
"content_hash": "e2f2c05eb2d560966280ead93d9cde58",
"timestamp": "",
"source": "github",
"line_count": 542,
"max_line_length": 78,
"avg_line_length": 31.833948339483396,
"alnum_prop": 0.5740697809203663,
"repo_name": "ricksladkey/generateDS",
"id": "a0902de74cb6a000027b783410a6b4b3103253cd",
"size": "17254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process_includes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "321"
},
{
"name": "C++",
"bytes": "665"
},
{
"name": "Python",
"bytes": "2717835"
},
{
"name": "Shell",
"bytes": "695"
}
],
"symlink_target": ""
} |
import unittest
if False:
import os, sys, imp
sys.path.append(os.path.realpath(os.path.dirname(__file__)+"/../../../"))
imp.load_module('electroncash', *imp.find_module('lib'))
imp.load_module('electroncash_gui', *imp.find_module('gui/qt'))
imp.load_module('electroncash_plugins', *imp.find_module('plugins'))
from plugins.fusion import encrypt
def fastslowcase(testmethod):
""" method -> class decorator to run with pycryptodomex's fast AES enabled/disabled """
class _TestClass(unittest.TestCase):
def test_slow(self):
saved = encrypt.AES
encrypt.AES = None
try:
testmethod(self)
finally:
encrypt.AES = saved
def test_fast(self):
if not encrypt.AES:
self.skipTest("accelerated AES library not available")
testmethod(self)
_TestClass.__name__ = testmethod.__name__
return _TestClass
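# Editorial note: applying @fastslowcase to a plain test function (as done below)
# replaces it with a unittest.TestCase subclass whose test_fast/test_slow methods
# run the same body with the accelerated AES backend enabled and disabled.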
@fastslowcase
def TestNormal(self):
Apriv = bytes.fromhex('0000000000000000000000000000000000000000000000000000000000000005')
Apub = bytes.fromhex('022f8bde4d1a07209355b4a7250a5c5128e88b84bddc619ab7cba8d569b240efe4')
# short message
msg12 = b'test message'
assert len(msg12) == 12
e12 = encrypt.encrypt(msg12, Apub)
self.assertEqual(len(e12), 65) # since it's only 12 bytes, it and length fit into one block
e12 = encrypt.encrypt(msg12, Apub, pad_to_length = 16)
self.assertEqual(len(e12), 65)
d12, k = encrypt.decrypt(e12, Apriv)
self.assertEqual(d12, msg12)
d12 = encrypt.decrypt_with_symmkey(e12, k)
self.assertEqual(d12, msg12)
# tweak the nonce point's oddness bit
e12_bad = bytearray(e12) ; e12_bad[0] ^= 1
with self.assertRaises(encrypt.DecryptionFailed):
encrypt.decrypt(e12_bad, Apriv)
d12 = encrypt.decrypt_with_symmkey(e12_bad, k) # works because it doesn't care about nonce point
self.assertEqual(d12, msg12)
# tweak the hmac
e12_bad = bytearray(e12) ; e12_bad[-1] ^= 1
with self.assertRaises(encrypt.DecryptionFailed):
encrypt.decrypt(e12_bad, Apriv)
with self.assertRaises(encrypt.DecryptionFailed):
encrypt.decrypt_with_symmkey(e12_bad, k)
# tweak the message
e12_bad = bytearray(e12) ; e12_bad[35] ^= 1
with self.assertRaises(encrypt.DecryptionFailed):
encrypt.decrypt(e12_bad, Apriv)
with self.assertRaises(encrypt.DecryptionFailed):
encrypt.decrypt_with_symmkey(e12_bad, k)
# drop a byte
e12_bad = bytearray(e12) ; e12_bad.pop()
with self.assertRaises(encrypt.DecryptionFailed):
encrypt.decrypt(e12_bad, Apriv)
with self.assertRaises(encrypt.DecryptionFailed):
encrypt.decrypt_with_symmkey(e12_bad, k)
msg13 = msg12 + b'!'
e13 = encrypt.encrypt(msg13, Apub)
self.assertEqual(len(e13), 81) # need another block
with self.assertRaises(ValueError):
encrypt.encrypt(msg13, Apub, pad_to_length = 16)
e13 = encrypt.encrypt(msg13, Apub, pad_to_length = 32)
self.assertEqual(len(e13), 81)
encrypt.decrypt(e13, Apriv)
msgbig = b'a'*1234
ebig = encrypt.encrypt(msgbig, Apub)
self.assertEqual(len(ebig), 33 + (1234+4+10) + 16)
dbig, k = encrypt.decrypt(ebig, Apriv)
self.assertEqual(dbig, msgbig)
self.assertEqual(len(encrypt.encrypt(b'', Apub)), 65)
self.assertEqual(len(encrypt.encrypt(b'', Apub, pad_to_length = 1248)), 1297)
with self.assertRaises(ValueError):
encrypt.encrypt(b'', Apub, pad_to_length = 0)
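# Editorial size note, inferred from the assertions above: an encrypted blob is a
# 33-byte compressed nonce point, the AES-CBC ciphertext, and a 16-byte HMAC tag.
# A 12-byte message fits one 16-byte block (33 + 16 + 16 = 65), while the
# 1234-byte message appears to gain a 4-byte length prefix plus 10 bytes of
# padding to reach a 16-byte multiple (33 + 1248 + 16 = 1297).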
| {
"content_hash": "ce85164415f1c8b5f8f117b1a87029aa",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 101,
"avg_line_length": 36.56701030927835,
"alnum_prop": 0.6630955737242741,
"repo_name": "fyookball/electrum",
"id": "27031afb7a4a5b5e3a23cad231873e5a311ee667",
"size": "3708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/fusion/tests/test_encrypt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "842"
},
{
"name": "NSIS",
"bytes": "7309"
},
{
"name": "Objective-C",
"bytes": "415997"
},
{
"name": "Python",
"bytes": "2365528"
},
{
"name": "Shell",
"bytes": "26389"
}
],
"symlink_target": ""
} |
from ..constructs import Instruction
class Trie(object):
BUCKET_LEN = 1
BUCKET_MASK = (2**BUCKET_LEN)-1
def __init__(self):
self.children = [None for _ in range(2**Trie.BUCKET_LEN)]
self.value = None
def __setitem__(self, key, value):
assert type(value) == Instruction
node = self
for bucket in [(key >> i) & Trie.BUCKET_MASK for \
i in range(64, -1, -Trie.BUCKET_LEN)]:
if not node.children[bucket]:
node.children[bucket] = Trie()
node = node.children[bucket]
node.value = value
def __getitem__(self, item):
if type(item) in (int, long):
node = self
for bucket in [(item >> i) & Trie.BUCKET_MASK for \
i in range(64, -1, -Trie.BUCKET_LEN)]:
if not node.children[bucket]:
raise KeyError()
node = node.children[bucket]
return node.value
elif type(item) == slice:
start = item.start
stop = item.stop
if start is None:
start = 0
if stop is None:
                # 64-bit max address. Seems big enough for practical purposes
stop = 0xFFFFFFFFFFFFFFFF
uncommon_bits = (stop ^ start).bit_length()
node = self
for bucket in [(start >> i) & Trie.BUCKET_MASK for \
i in range(64, uncommon_bits, -Trie.BUCKET_LEN)]:
if not node.children[bucket]:
raise KeyError()
node = node.children[bucket]
return [v for v in iter(node) if start <= v.address < stop][::item.step]
def __iter__(self):
if self.value:
yield self.value
for child in filter(None, self.children):
for v in child:
yield v
def __contains__(self, item):
node = self
for bucket in [(item >> i) & Trie.BUCKET_MASK for \
i in range(64, -1, -Trie.BUCKET_LEN)]:
if not node.children[bucket]:
return False
node = node.children[bucket]
return True
def __delitem__(self, key):
node = self
for bucket in [(key >> i) & Trie.BUCKET_MASK for \
i in range(64, -1, -Trie.BUCKET_LEN)]:
if not node.children[bucket]:
raise KeyError()
node = node.children[bucket]
node.value = None
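# Illustrative traversal sketch (editorial; Instruction objects come from
# ..constructs and their constructor is not shown here):
#   t = Trie()
#   t[0x401000] = some_instruction   # keys are walked 1 bit at a time, MSB first
#   0x401000 in t                    # -> True
#   t[0x401000:0x402000]             # -> values with 0x401000 <= address < 0x402000
#   del t[0x401000]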
| {
"content_hash": "a3f78b9e1aa61bfdd526ac45f51bb8b6",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 84,
"avg_line_length": 33.421052631578945,
"alnum_prop": 0.494488188976378,
"repo_name": "isislab/dispatch",
"id": "e6de4d7db0a9316b047ba425c2692a42d9442369",
"size": "2540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dispatch/util/trie.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2241"
},
{
"name": "Python",
"bytes": "112692"
}
],
"symlink_target": ""
} |
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import json
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/registry -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_adm_registry
short_description: Module to manage openshift registry
description:
- Manage openshift registry programmatically.
options:
state:
description:
- The desired action when managing openshift registry
- present - update or create the registry
- absent - tear down the registry service and deploymentconfig
    - list - returns the current representation of a registry
required: false
default: False
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- The name of the registry
required: false
default: None
aliases: []
namespace:
description:
- The selector when filtering on node labels
required: false
default: None
aliases: []
images:
description:
- The image to base this registry on - ${component} will be replaced with --type
required: 'openshift3/ose-${component}:${version}'
default: None
aliases: []
latest_images:
description:
- If true, attempt to use the latest image for the registry instead of the latest release.
required: false
default: False
aliases: []
labels:
description:
- A set of labels to uniquely identify the registry and its components.
required: false
default: None
aliases: []
enforce_quota:
description:
- If set, the registry will refuse to write blobs if they exceed quota limits
required: False
default: False
aliases: []
mount_host:
description:
- If set, the registry volume will be created as a host-mount at this path.
required: False
default: False
aliases: []
ports:
description:
- A comma delimited list of ports or port pairs to expose on the registry pod. The default is set for 5000.
required: False
default: [5000]
aliases: []
replicas:
description:
- The replication factor of the registry; commonly 2 when high availability is desired.
required: False
default: 1
aliases: []
selector:
description:
- Selector used to filter nodes on deployment. Used to run registries on a specific set of nodes.
required: False
default: None
aliases: []
service_account:
description:
- Name of the service account to use to run the registry pod.
required: False
default: 'registry'
aliases: []
tls_certificate:
description:
- An optional path to a PEM encoded certificate (which may contain the private key) for serving over TLS
required: false
default: None
aliases: []
tls_key:
description:
- An optional path to a PEM encoded private key for serving over TLS
required: false
default: None
aliases: []
volume_mounts:
description:
- The volume mounts for the registry.
required: false
default: None
aliases: []
daemonset:
description:
- Use a daemonset instead of a deployment config.
required: false
default: False
aliases: []
edits:
description:
- A list of modifications to make on the deploymentconfig
required: false
default: None
aliases: []
env_vars:
description:
- A dictionary of modifications to make on the deploymentconfig. e.g. FOO: BAR
required: false
default: None
aliases: []
force:
description:
- Force a registry update.
required: false
default: False
aliases: []
author:
- "Kenny Woodson <[email protected]>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: create a secure registry
oc_adm_registry:
name: docker-registry
service_account: registry
replicas: 2
namespace: default
selector: type=infra
images: "registry.ops.openshift.com/openshift3/ose-${component}:${version}"
env_vars:
REGISTRY_CONFIGURATION_PATH: /etc/registryconfig/config.yml
REGISTRY_HTTP_TLS_CERTIFICATE: /etc/secrets/registry.crt
REGISTRY_HTTP_TLS_KEY: /etc/secrets/registry.key
REGISTRY_HTTP_SECRET: supersecret
volume_mounts:
- path: /etc/secrets
name: dockercerts
type: secret
secret_name: registry-secret
- path: /etc/registryconfig
name: dockersecrets
type: secret
secret_name: docker-registry-config
edits:
- key: spec.template.spec.containers[0].livenessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.template.spec.containers[0].readinessProbe.httpGet.scheme
value: HTTPS
action: put
- key: spec.strategy.rollingParams
value:
intervalSeconds: 1
maxSurge: 50%
maxUnavailable: 50%
timeoutSeconds: 600
updatePeriodSeconds: 1
action: put
- key: spec.template.spec.containers[0].resources.limits.memory
value: 2G
action: update
- key: spec.template.spec.containers[0].resources.requests.memory
value: 1G
action: update
register: registryout
'''
# -*- -*- -*- End included fragment: doc/registry -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
@staticmethod
def remove_entry(data, key, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
data.clear()
return True
elif key == '' and isinstance(data, list):
del data[:]
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
        ''' Add an item to a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
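    # Behaviour sketch (editorial):
    #   Yedit.get_entry({'a': {'b': ['c', 'd']}}, 'a.b[1]')  ->  'd'
    #   Yedit.get_entry({'a': {'b': 'c'}}, 'a.x')            ->  None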
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
yfd.write(contents)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, self.filename + '.orig')
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.safe_load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
        # Only update the root path (entire document) when it's a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.load(invalue)
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
# There is a special case where '' will turn into None after yaml loading it so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
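        # each edit is a dict of the form
        #   {'key': 'a.b.c', 'value': ..., 'action': 'update'|'append'|<default: put>,
        #    'value_type': '', 'index': None, 'curr_value': None, 'curr_value_format': None}
        # where only 'key' and 'value' are required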
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                        'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key']) or {}
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
            # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
    # Use shutil.which if it is available, otherwise fall back to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned as a list
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(contents)
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
        with open(inc_file) as inc_fd:
            Utils._write(tmpfile, inc_fd.read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
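        # delete=False: the file must outlive this call; removal is handled later
        # via atexit.register(Utils.cleanup, [...])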
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
# By default "oc version in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
# "v3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = version[1:4]
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
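        # only options flagged include=True with a truthy (or integer) value become CLI flags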
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
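    # Yedit-style dotted paths into the DeploymentConfig document used by the helpers below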
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key and result['value'] == value:
return True
return False
def exists_env_key(self, key):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
        '''return a single environment variable '''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
        '''return the environment variables '''
return self.get(DeploymentConfig.env_path) or []
def delete_env_var(self, keys):
'''delete a list of keys '''
if not isinstance(keys, list):
keys = [keys]
env_vars_array = self.get_env_vars()
modified = False
        for key in keys:
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            if idx is not None:
                modified = True
                del env_vars_array[idx]
if modified:
return True
return False
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
        if idx is not None:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
'''return volume mount information '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
del exist_volumes[del_idx]
modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
            del exist_volume_mounts[del_idx]
modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
'''place an env in the env var list'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
        if update_idx is not None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
'''place an env in the env var list'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
def needs_update_volume(self, volume, volume_mount):
''' verify a volume update is needed '''
exist_volume = self.find_volume_by_name(volume)
exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
results = []
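        # collect per-field equality checks; an update is needed if any of them fails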
results.append(exist_volume['name'] == volume['name'])
if 'secret' in volume:
results.append('secret' in exist_volume)
results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
results.append(exist_volume_mount['name'] == volume_mount['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'emptyDir' in volume:
results.append(exist_volume_mount['name'] == volume['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'persistentVolumeClaim' in volume:
pvc = 'persistentVolumeClaim'
results.append(pvc in exist_volume)
if results[-1]:
results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
if 'claimSize' in volume[pvc]:
results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
elif 'hostpath' in volume:
results.append('hostPath' in exist_volume)
results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
return not current_reps == replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/secret.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class SecretConfig(object):
''' Handle secret options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
kubeconfig,
secrets=None,
stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
self.create_dict()
def create_dict(self):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
self.data['data'] = {}
if self.secrets:
for key, value in self.secrets.items():
self.data['data'][key] = value
# pylint: disable=too-many-instance-attributes
class Secret(Yedit):
''' Class to wrap the oc command line tools '''
secret_path = "data"
kind = 'secret'
def __init__(self, content):
'''secret constructor'''
super(Secret, self).__init__(content=content)
self._secrets = None
@property
def secrets(self):
'''secret property getter'''
if self._secrets is None:
self._secrets = self.get_secrets()
return self._secrets
@secrets.setter
    def secrets(self, value):
        '''secret property setter'''
        self._secrets = value
def get_secrets(self):
''' returns all of the defined secrets '''
return self.get(Secret.secret_path) or {}
def add_secret(self, key, value):
''' add a secret '''
if self.secrets:
self.secrets[key] = value
else:
self.put(Secret.secret_path, {key: value})
return True
def delete_secret(self, key):
''' delete secret'''
try:
del self.secrets[key]
except KeyError as _:
return False
return True
def find_secret(self, key):
''' find secret'''
rval = None
try:
rval = self.secrets[key]
except KeyError as _:
return None
return {'key': key, 'value': rval}
def update_secret(self, key, value):
''' update a secret'''
if key in self.secrets:
self.secrets[key] = value
else:
self.add_secret(key, value)
return True
# -*- -*- -*- End included fragment: lib/secret.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/service.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class ServiceConfig(object):
''' Handle service options '''
# pylint: disable=too-many-arguments
def __init__(self,
sname,
namespace,
ports,
selector=None,
labels=None,
cluster_ip=None,
portal_ip=None,
session_affinity=None,
service_type=None,
external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
self.ports = ports
self.selector = selector
self.labels = labels
self.cluster_ip = cluster_ip
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
self.external_ips = external_ips
self.data = {}
self.create_dict()
def create_dict(self):
''' instantiates a service dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Service'
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
self.data['metadata']['labels'] = {}
for lab, lab_value in self.labels.items():
self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
self.data['spec']['ports'] = self.ports
else:
self.data['spec']['ports'] = []
if self.selector:
self.data['spec']['selector'] = self.selector
self.data['spec']['sessionAffinity'] = self.session_affinity or 'None'
if self.cluster_ip:
self.data['spec']['clusterIP'] = self.cluster_ip
if self.portal_ip:
self.data['spec']['portalIP'] = self.portal_ip
if self.service_type:
self.data['spec']['type'] = self.service_type
if self.external_ips:
self.data['spec']['externalIPs'] = self.external_ips
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
port_path = "spec.ports"
portal_ip = "spec.portalIP"
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
super(Service, self).__init__(content=content)
def get_ports(self):
''' get a list of ports '''
return self.get(Service.port_path) or []
def get_selector(self):
''' get the service selector'''
return self.get(Service.selector_path) or {}
def add_ports(self, inc_ports):
''' add a port object to the ports list '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get_ports()
if not ports:
self.put(Service.port_path, inc_ports)
else:
ports.extend(inc_ports)
return True
def find_ports(self, inc_port):
''' find a specific port '''
for port in self.get_ports():
if port['port'] == inc_port['port']:
return port
return None
def delete_ports(self, inc_ports):
''' remove a port from a service '''
if not isinstance(inc_ports, list):
inc_ports = [inc_ports]
ports = self.get(Service.port_path) or []
if not ports:
return True
removed = False
for inc_port in inc_ports:
port = self.find_ports(inc_port)
if port:
ports.remove(port)
removed = True
return removed
def add_cluster_ip(self, sip):
'''add cluster ip'''
self.put(Service.cluster_ip, sip)
def add_portal_ip(self, pip):
        '''add portal ip'''
self.put(Service.portal_ip, pip)
def get_external_ips(self):
''' get a list of external_ips '''
return self.get(Service.external_ips) or []
def add_external_ips(self, inc_external_ips):
''' add an external_ip to the external_ips list '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get_external_ips()
if not external_ips:
self.put(Service.external_ips, inc_external_ips)
else:
external_ips.extend(inc_external_ips)
return True
def find_external_ips(self, inc_external_ip):
''' find a specific external IP '''
val = None
try:
idx = self.get_external_ips().index(inc_external_ip)
val = self.get_external_ips()[idx]
except ValueError:
pass
return val
def delete_external_ips(self, inc_external_ips):
''' remove an external IP from a service '''
if not isinstance(inc_external_ips, list):
inc_external_ips = [inc_external_ips]
external_ips = self.get(Service.external_ips) or []
if not external_ips:
return True
removed = False
for inc_external_ip in inc_external_ips:
external_ip = self.find_external_ips(inc_external_ip)
if external_ip:
external_ips.remove(external_ip)
removed = True
return removed
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
class Volume(object):
''' Class to represent an openshift volume object'''
volume_mounts_path = {"pod": "spec.containers[0].volumeMounts",
"dc": "spec.template.spec.containers[0].volumeMounts",
"rc": "spec.template.spec.containers[0].volumeMounts",
}
volumes_path = {"pod": "spec.volumes",
"dc": "spec.template.spec.volumes",
"rc": "spec.template.spec.volumes",
}
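    # maps the resource kind (pod/dc/rc) to the Yedit path of its volumes / volumeMounts array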
@staticmethod
def create_volume_structure(volume_info):
''' return a properly structured volume '''
volume_mount = None
volume = {'name': volume_info['name']}
volume_type = volume_info['type'].lower()
if volume_type == 'secret':
volume['secret'] = {}
volume[volume_info['type']] = {'secretName': volume_info['secret_name']}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'emptydir':
volume['emptyDir'] = {}
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
elif volume_type == 'pvc' or volume_type == 'persistentvolumeclaim':
volume['persistentVolumeClaim'] = {}
volume['persistentVolumeClaim']['claimName'] = volume_info['claimName']
volume['persistentVolumeClaim']['claimSize'] = volume_info['claimSize']
elif volume_type == 'hostpath':
volume['hostPath'] = {}
volume['hostPath']['path'] = volume_info['path']
elif volume_type == 'configmap':
volume['configMap'] = {}
volume['configMap']['name'] = volume_info['configmap_name']
volume_mount = {'mountPath': volume_info['path'],
'name': volume_info['name']}
return (volume, volume_mount)
# -*- -*- -*- End included fragment: lib/volume.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_version.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCVersion(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
config,
debug):
''' Constructor for OCVersion '''
super(OCVersion, self).__init__(None, config)
self.debug = debug
def get(self):
'''get and return version information '''
results = {}
version_results = self._version()
if version_results['returncode'] == 0:
filtered_vers = Utils.filter_versions(version_results['results'])
custom_vers = Utils.add_custom_versions(filtered_vers)
results['returncode'] = version_results['returncode']
results.update(filtered_vers)
results.update(custom_vers)
return results
raise OpenShiftCLIError('Problem detecting openshift version.')
@staticmethod
def run_ansible(params):
'''run the idempotent ansible code'''
oc_version = OCVersion(params['kubeconfig'], params['debug'])
if params['state'] == 'list':
#pylint: disable=protected-access
result = oc_version.get()
return {'state': params['state'],
'results': result,
'changed': False}
# -*- -*- -*- End included fragment: class/oc_version.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_adm_registry.py -*- -*- -*-
class RegistryException(Exception):
''' Registry Exception Class '''
pass
class RegistryConfig(OpenShiftCLIConfig):
''' RegistryConfig is a DTO for the registry. '''
def __init__(self, rname, namespace, kubeconfig, registry_options):
super(RegistryConfig, self).__init__(rname, namespace, kubeconfig, registry_options)
class Registry(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
volume_mount_path = 'spec.template.spec.containers[0].volumeMounts'
volume_path = 'spec.template.spec.volumes'
env_path = 'spec.template.spec.containers[0].env'
def __init__(self,
registry_config,
verbose=False):
''' Constructor for Registry
        a registry consists of at least the following parts
- dc/docker-registry
- svc/docker-registry
Parameters:
:registry_config:
:verbose:
'''
super(Registry, self).__init__(registry_config.namespace, registry_config.kubeconfig, verbose)
self.version = OCVersion(registry_config.kubeconfig, verbose)
self.svc_ip = None
self.portal_ip = None
self.config = registry_config
self.verbose = verbose
self.registry_parts = [{'kind': 'dc', 'name': self.config.name},
{'kind': 'svc', 'name': self.config.name},
]
self.__prepared_registry = None
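        # lazily populated by the prepared_registry property via prepare_registry()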
self.volume_mounts = []
self.volumes = []
if self.config.config_options['volume_mounts']['value']:
for volume in self.config.config_options['volume_mounts']['value']:
volume_info = {'secret_name': volume.get('secret_name', None),
'name': volume.get('name', None),
'type': volume.get('type', None),
'path': volume.get('path', None),
'claimName': volume.get('claim_name', None),
'claimSize': volume.get('claim_size', None),
}
vol, vol_mount = Volume.create_volume_structure(volume_info)
self.volumes.append(vol)
self.volume_mounts.append(vol_mount)
self.dconfig = None
self.svc = None
@property
def deploymentconfig(self):
''' deploymentconfig property '''
return self.dconfig
@deploymentconfig.setter
def deploymentconfig(self, config):
''' setter for deploymentconfig property '''
self.dconfig = config
@property
def service(self):
''' service property '''
return self.svc
@service.setter
def service(self, config):
''' setter for service property '''
self.svc = config
@property
def prepared_registry(self):
''' prepared_registry property '''
if not self.__prepared_registry:
results = self.prepare_registry()
if not results or ('returncode' in results and results['returncode'] != 0):
raise RegistryException('Could not perform registry preparation. {}'.format(results))
self.__prepared_registry = results
return self.__prepared_registry
@prepared_registry.setter
def prepared_registry(self, data):
''' setter method for prepared_registry attribute '''
self.__prepared_registry = data
def get(self):
''' return the self.registry_parts '''
self.deploymentconfig = None
self.service = None
rval = 0
for part in self.registry_parts:
result = self._get(part['kind'], name=part['name'])
if result['returncode'] == 0 and part['kind'] == 'dc':
self.deploymentconfig = DeploymentConfig(result['results'][0])
elif result['returncode'] == 0 and part['kind'] == 'svc':
self.service = Service(result['results'][0])
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'deploymentconfig': self.deploymentconfig, 'service': self.service}
def exists(self):
'''does the object exist?'''
if self.deploymentconfig and self.service:
return True
return False
def delete(self, complete=True):
'''return all pods '''
parts = []
for part in self.registry_parts:
if not complete and part['kind'] == 'svc':
continue
parts.append(self._delete(part['kind'], part['name']))
# Clean up returned results
rval = 0
for part in parts:
# pylint: disable=invalid-sequence-index
if 'returncode' in part and part['returncode'] != 0:
rval = part['returncode']
return {'returncode': rval, 'results': parts}
def prepare_registry(self):
''' prepare a registry for instantiation '''
options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
cmd.extend(['--dry-run=True', '-o', 'json'])
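        # the dry run prints the objects that would be created as a JSON List,
        # which is parsed and modified below before the real create/replace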
results = self.openshift_cmd(cmd, oadm=True, output=True, output_type='json')
# probably need to parse this
# pylint thinks results is a string
# pylint: disable=no-member
if results['returncode'] != 0 and 'items' not in results['results']:
raise RegistryException('Could not perform registry preparation. {}'.format(results))
service = None
deploymentconfig = None
# pylint: disable=invalid-sequence-index
for res in results['results']['items']:
if res['kind'] == 'DeploymentConfig':
deploymentconfig = DeploymentConfig(res)
elif res['kind'] == 'Service':
service = Service(res)
# Verify we got a service and a deploymentconfig
if not service or not deploymentconfig:
return results
# results will need to get parsed here and modifications added
deploymentconfig = DeploymentConfig(self.add_modifications(deploymentconfig))
# modify service ip
if self.svc_ip:
service.put('spec.clusterIP', self.svc_ip)
if self.portal_ip:
service.put('spec.portalIP', self.portal_ip)
# the dry-run doesn't apply the selector correctly
if self.service:
service.put('spec.selector', self.service.get_selector())
# need to create the service and the deploymentconfig
service_file = Utils.create_tmp_file_from_contents('service', service.yaml_dict)
deployment_file = Utils.create_tmp_file_from_contents('deploymentconfig', deploymentconfig.yaml_dict)
return {"service": service,
"service_file": service_file,
"service_update": False,
"deployment": deploymentconfig,
"deployment_file": deployment_file,
"deployment_update": False}
def create(self):
'''Create a registry'''
results = []
self.needs_update()
# if the object is none, then we need to create it
# if the object needs an update, then we should call replace
# Handle the deploymentconfig
if self.deploymentconfig is None:
results.append(self._create(self.prepared_registry['deployment_file']))
elif self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
# Handle the service
if self.service is None:
results.append(self._create(self.prepared_registry['service_file']))
elif self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
# pylint: disable=invalid-sequence-index
if 'returncode' in result and result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def update(self):
'''run update for the registry. This performs a replace if required'''
# Store the current service IP
if self.service:
svcip = self.service.get('spec.clusterIP')
if svcip:
self.svc_ip = svcip
portip = self.service.get('spec.portalIP')
if portip:
self.portal_ip = portip
results = []
if self.prepared_registry['deployment_update']:
results.append(self._replace(self.prepared_registry['deployment_file']))
if self.prepared_registry['service_update']:
results.append(self._replace(self.prepared_registry['service_file']))
# Clean up returned results
rval = 0
for result in results:
if result['returncode'] != 0:
rval = result['returncode']
return {'returncode': rval, 'results': results}
def add_modifications(self, deploymentconfig):
''' update a deployment config with changes '''
        # The REGISTRY_HTTP_SECRET environment variable is autogenerated, so carry the
        # value over from the existing in-memory deploymentconfig;
        # the modifications below will overwrite it if needed
if self.deploymentconfig:
result = self.deploymentconfig.get_env_var('REGISTRY_HTTP_SECRET')
if result:
deploymentconfig.update_env_var('REGISTRY_HTTP_SECRET', result['value'])
# Currently we know that our deployment of a registry requires a few extra modifications
# Modification 1
# we need specific environment variables to be set
for key, value in self.config.config_options['env_vars'].get('value', {}).items():
if not deploymentconfig.exists_env_key(key):
deploymentconfig.add_env_value(key, value)
else:
deploymentconfig.update_env_var(key, value)
# Modification 2
# we need specific volume variables to be set
for volume in self.volumes:
deploymentconfig.update_volume(volume)
for vol_mount in self.volume_mounts:
deploymentconfig.update_volume_mount(vol_mount)
# Modification 3
# Edits
edit_results = []
for edit in self.config.config_options['edits'].get('value', []):
if edit['action'] == 'put':
edit_results.append(deploymentconfig.put(edit['key'],
edit['value']))
if edit['action'] == 'update':
edit_results.append(deploymentconfig.update(edit['key'],
edit['value'],
edit.get('index', None),
edit.get('curr_value', None)))
if edit['action'] == 'append':
edit_results.append(deploymentconfig.append(edit['key'],
edit['value']))
if edit_results and not any([res[0] for res in edit_results]):
return None
return deploymentconfig.yaml_dict
def needs_update(self):
''' check to see if we need to update '''
exclude_list = ['clusterIP', 'portalIP', 'type', 'protocol']
if self.service is None or \
not Utils.check_def_equal(self.prepared_registry['service'].yaml_dict,
self.service.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['service_update'] = True
exclude_list = ['dnsPolicy',
'terminationGracePeriodSeconds',
'restartPolicy', 'timeoutSeconds',
'livenessProbe', 'readinessProbe',
'terminationMessagePath',
'securityContext',
'imagePullPolicy',
                        'protocol',  # ports.protocol: TCP
'type', # strategy: {'type': 'rolling'}
'defaultMode', # added on secrets
'activeDeadlineSeconds', # added in 1.5 for timeouts
]
if self.deploymentconfig is None or \
not Utils.check_def_equal(self.prepared_registry['deployment'].yaml_dict,
self.deploymentconfig.yaml_dict,
exclude_list,
debug=self.verbose):
self.prepared_registry['deployment_update'] = True
return self.prepared_registry['deployment_update'] or self.prepared_registry['service_update'] or False
# In the future, we would like to break out each ansible state into a function.
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run idempotent ansible code'''
registry_options = {'images': {'value': params['images'], 'include': True},
'latest_images': {'value': params['latest_images'], 'include': True},
'labels': {'value': params['labels'], 'include': True},
'ports': {'value': ','.join(params['ports']), 'include': True},
'replicas': {'value': params['replicas'], 'include': True},
'selector': {'value': params['selector'], 'include': True},
'service_account': {'value': params['service_account'], 'include': True},
'mount_host': {'value': params['mount_host'], 'include': True},
'env_vars': {'value': params['env_vars'], 'include': False},
'volume_mounts': {'value': params['volume_mounts'], 'include': False},
'edits': {'value': params['edits'], 'include': False},
'tls_key': {'value': params['tls_key'], 'include': True},
'tls_certificate': {'value': params['tls_certificate'], 'include': True},
}
# Do not always pass the daemonset and enforce-quota parameters because they are not understood
# by old versions of oc.
# Default value is false. So, it's safe to not pass an explicit false value to oc versions which
# understand these parameters.
if params['daemonset']:
registry_options['daemonset'] = {'value': params['daemonset'], 'include': True}
if params['enforce_quota']:
registry_options['enforce_quota'] = {'value': params['enforce_quota'], 'include': True}
rconfig = RegistryConfig(params['name'],
params['namespace'],
params['kubeconfig'],
registry_options)
ocregistry = Registry(rconfig, params['debug'])
api_rval = ocregistry.get()
state = params['state']
########
# get
########
if state == 'list':
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': False, 'results': api_rval, 'state': state}
########
# Delete
########
if state == 'absent':
if not ocregistry.exists():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete.'}
# Unsure as to why this is angry with the return type.
# pylint: disable=redefined-variable-type
api_rval = ocregistry.delete()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
if state == 'present':
########
# Create
########
if not ocregistry.exists():
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'}
api_rval = ocregistry.create()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
########
# Update
########
if not params['force'] and not ocregistry.needs_update():
return {'changed': False, 'state': state}
if check_mode:
return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
api_rval = ocregistry.update()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'results': api_rval, 'state': state}
return {'failed': True, 'msg': 'Unknown state passed. %s' % state}
# -*- -*- -*- End included fragment: class/oc_adm_registry.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_adm_registry.py -*- -*- -*-
def main():
'''
ansible oc module for registry
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, required=True, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
service_account=dict(default='registry', type='str'),
mount_host=dict(default=None, type='str'),
volume_mounts=dict(default=None, type='list'),
env_vars=dict(default={}, type='dict'),
edits=dict(default=[], type='list'),
enforce_quota=dict(default=False, type='bool'),
force=dict(default=False, type='bool'),
daemonset=dict(default=False, type='bool'),
tls_key=dict(default=None, type='str'),
tls_certificate=dict(default=None, type='str'),
),
supports_check_mode=True,
)
results = Registry.run_ansible(module.params, module.check_mode)
if 'failed' in results:
module.fail_json(**results)
module.exit_json(**results)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_adm_registry.py -*- -*- -*-
| {
"content_hash": "07fefed3f875876e0d2b42e10fc169c5",
"timestamp": "",
"source": "github",
"line_count": 2714,
"max_line_length": 118,
"avg_line_length": 34.16691230655859,
"alnum_prop": 0.5456976781805045,
"repo_name": "twiest/openshift-tools",
"id": "c00eee381b696fe4611af9f9f9e8573cdee069ea",
"size": "93891",
"binary": false,
"copies": "7",
"ref": "refs/heads/stg",
"path": "openshift/installer/vendored/openshift-ansible-3.6.173.0.59/roles/lib_openshift/library/oc_adm_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Go",
"bytes": "382164"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "102550"
},
{
"name": "JavaScript",
"bytes": "1580"
},
{
"name": "Makefile",
"bytes": "3324"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "27786029"
},
{
"name": "Shell",
"bytes": "1378677"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
} |
"""
==============================
Test for qplotutils.chart.view
==============================
Autogenerated package stub.
"""
import unittest
import logging
import sys
import os
import numpy as np
from qtpy.QtCore import *
from qtpy.QtGui import *
from qtpy.QtOpenGL import *
from qtpy.QtWidgets import *
from qplotutils.chart.view import *
__author__ = "Philipp Baust"
__copyright__ = "Copyright 2019, Philipp Baust"
__credits__ = []
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Philipp Baust"
__email__ = "[email protected]"
__status__ = "Development"
_log = logging.getLogger(__name__)
class ChartAreaTests(unittest.TestCase):
app = None
@classmethod
def setUpClass(cls):
        ChartAreaTests.app = QApplication.instance() or QApplication([])
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartArea() # TODO: may fail!
class ChartAxisTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartAxis() # TODO: may fail!
class ChartLabelTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartLabel() # TODO: may fail!
class ChartLegendTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartLegend() # TODO: may fail!
class ChartViewTests(unittest.TestCase):
app = None
@classmethod
def setUpClass(cls):
        cls.app = QApplication.instance() or QApplication([])
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartView() # TODO: may fail!
class ChartWidgetTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ChartWidget() # TODO: may fail!
class HorizontalAxisTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = HorizontalAxis() # TODO: may fail!
class ScaleBoxTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = ScaleBox() # TODO: may fail!
class SecondaryHorizontalAxisTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = SecondaryHorizontalAxis([0,1], [0, 100]) # TODO: may fail!
class SecondaryVerticalAxisTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = SecondaryVerticalAxis([0,1], [0, 100]) # TODO: may fail!
class StyleTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = Style() # TODO: may fail!
class VerticalAxisTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = VerticalAxis() # TODO: may fail!
class VerticalChartLabelTests(unittest.TestCase):
def setUp(self):
""" Autogenerated. """
pass
def test_instantiate(self):
""" Autogenerated. """
obj = VerticalChartLabel() # TODO: may fail! | {
"content_hash": "14608cb4f3801a80a09f0dcd7577ce52",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 73,
"avg_line_length": 20.705882352941178,
"alnum_prop": 0.5648243801652892,
"repo_name": "unrza72/qplotutils",
"id": "4928957a2d20d839d131cb9540f6d2936874d3a9",
"size": "3919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/qplotutils/chart/test_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "983"
},
{
"name": "Python",
"bytes": "294832"
},
{
"name": "Shell",
"bytes": "271"
}
],
"symlink_target": ""
} |
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
typos={'feature':'features','sources':'source','targets':'target','include':'includes','export_include':'export_includes','define':'defines','importpath':'includes','installpath':'install_path',}
meths_typos=['__call__','program','shlib','stlib','objects']
from waflib import Logs,Build,Node,Task,TaskGen,ConfigSet,Errors,Utils
import waflib.Tools.ccroot
def check_same_targets(self):
mp=Utils.defaultdict(list)
uids={}
def check_task(tsk):
if not isinstance(tsk,Task.Task):
return
for node in tsk.outputs:
mp[node].append(tsk)
try:
uids[tsk.uid()].append(tsk)
except:
uids[tsk.uid()]=[tsk]
for g in self.groups:
for tg in g:
try:
for tsk in tg.tasks:
check_task(tsk)
except AttributeError:
check_task(tg)
dupe=False
for(k,v)in mp.items():
if len(v)>1:
dupe=True
msg='* Node %r is created by more than once%s. The task generators are:'%(k,Logs.verbose==1 and" (full message on 'waf -v -v')"or"")
Logs.error(msg)
for x in v:
if Logs.verbose>1:
Logs.error(' %d. %r'%(1+v.index(x),x.generator))
else:
Logs.error(' %d. %r in %r'%(1+v.index(x),x.generator.name,getattr(x.generator,'path',None)))
if not dupe:
for(k,v)in uids.items():
if len(v)>1:
Logs.error('* Several tasks use the same identifier. Please check the information on\n http://waf.googlecode.com/git/docs/apidocs/Task.html#waflib.Task.Task.uid')
for tsk in v:
Logs.error(' - object %r (%r) defined in %r'%(tsk.__class__.__name__,tsk,tsk.generator))
def check_invalid_constraints(self):
feat=set([])
for x in list(TaskGen.feats.values()):
feat.union(set(x))
for(x,y)in TaskGen.task_gen.prec.items():
feat.add(x)
feat.union(set(y))
ext=set([])
for x in TaskGen.task_gen.mappings.values():
ext.add(x.__name__)
invalid=ext&feat
if invalid:
Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method'%list(invalid))
for cls in list(Task.classes.values()):
for x in('before','after'):
for y in Utils.to_list(getattr(cls,x,[])):
if not Task.classes.get(y,None):
Logs.error('Erroneous order constraint %r=%r on task class %r'%(x,y,cls.__name__))
def replace(m):
oldcall=getattr(Build.BuildContext,m)
def call(self,*k,**kw):
ret=oldcall(self,*k,**kw)
for x in typos:
if x in kw:
err=True
Logs.error('Fix the typo %r -> %r on %r'%(x,typos[x],ret))
return ret
setattr(Build.BuildContext,m,call)
def enhance_lib():
for m in meths_typos:
replace(m)
old_ant_glob=Node.Node.ant_glob
def ant_glob(self,*k,**kw):
if k:
lst=Utils.to_list(k[0])
for pat in lst:
if'..'in pat.split('/'):
Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'"%k[0])
return old_ant_glob(self,*k,**kw)
Node.Node.ant_glob=ant_glob
old=Task.is_before
def is_before(t1,t2):
ret=old(t1,t2)
if ret and old(t2,t1):
Logs.error('Contradictory order constraints in classes %r %r'%(t1,t2))
return ret
Task.is_before=is_before
def check_err_features(self):
lst=self.to_list(self.features)
if'shlib'in lst:
Logs.error('feature shlib -> cshlib, dshlib or cxxshlib')
for x in('c','cxx','d','fc'):
if not x in lst and lst and lst[0]in[x+y for y in('program','shlib','stlib')]:
Logs.error('%r features is probably missing %r'%(self,x))
TaskGen.feature('*')(check_err_features)
def check_err_order(self):
if not hasattr(self,'rule'):
for x in('before','after','ext_in','ext_out'):
if hasattr(self,x):
Logs.warn('Erroneous order constraint %r on non-rule based task generator %r'%(x,self))
else:
for x in('before','after'):
for y in self.to_list(getattr(self,x,[])):
if not Task.classes.get(y,None):
Logs.error('Erroneous order constraint %s=%r on %r'%(x,y,self))
TaskGen.feature('*')(check_err_order)
def check_compile(self):
check_invalid_constraints(self)
try:
ret=self.orig_compile()
finally:
check_same_targets(self)
return ret
Build.BuildContext.orig_compile=Build.BuildContext.compile
Build.BuildContext.compile=check_compile
def use_rec(self,name,**kw):
try:
y=self.bld.get_tgen_by_name(name)
except Errors.WafError:
pass
else:
idx=self.bld.get_group_idx(self)
odx=self.bld.get_group_idx(y)
if odx>idx:
msg="Invalid 'use' across build groups:"
if Logs.verbose>1:
msg+='\n target %r\n uses:\n %r'%(self,y)
else:
msg+=" %r uses %r (try 'waf -v -v' for the full error)"%(self.name,name)
raise Errors.WafError(msg)
self.orig_use_rec(name,**kw)
TaskGen.task_gen.orig_use_rec=TaskGen.task_gen.use_rec
TaskGen.task_gen.use_rec=use_rec
def getattri(self,name,default=None):
if name=='append'or name=='add':
raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique')
elif name=='prepend':
raise Errors.WafError('env.prepend does not exist: use env.prepend_value')
if name in self.__slots__:
return object.__getattr__(self,name,default)
else:
return self[name]
ConfigSet.ConfigSet.__getattr__=getattri
def options(opt):
enhance_lib()
def configure(conf):
pass
| {
"content_hash": "a59cff99fd5c46902e242a0db008351a",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 195,
"avg_line_length": 34.83221476510067,
"alnum_prop": 0.666281310211946,
"repo_name": "dproc/trex_odp_porting_integration",
"id": "be501299f9ec2d1f18e181dd4bd5abb720a24e27",
"size": "5335",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "linux_odp/.waf-1.6.8-3e3391c5f23fbabad81e6d17c63a1b1e/waflib/Tools/errcheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9616073"
},
{
"name": "C++",
"bytes": "3147123"
},
{
"name": "CMake",
"bytes": "8882"
},
{
"name": "HTML",
"bytes": "4523"
},
{
"name": "JavaScript",
"bytes": "1234"
},
{
"name": "Makefile",
"bytes": "129776"
},
{
"name": "Python",
"bytes": "2740100"
},
{
"name": "Shell",
"bytes": "3026"
}
],
"symlink_target": ""
} |
""" Cloud API asynchronous "PDF To Text" job example.
Allows to avoid timeout errors when processing huge or scanned PDF documents.
"""
import os
import requests # pip install requests
import time
import datetime
# The authentication key (API Key).
# Get your own by registering at https://app.pdf.co
API_KEY = "*****************************************"
# Base URL for PDF.co Web API requests
BASE_URL = "https://api.pdf.co/v1"
# URL of web page to convert to PDF document.
SourceUrl = "http://en.wikipedia.org/wiki/Main_Page"
# Destination PDF file name
DestinationFile = ".\\result.pdf"
# (!) Run the conversion as an asynchronous job
Async = True
def main(args = None):
convertLinkToPDF(SourceUrl, DestinationFile)
def convertLinkToPDF(uploadedFileUrl, destinationFile):
"""Converts Link To PDF using PDF.co Web API"""
# Prepare requests params as JSON
# See documentation: https://apidocs.pdf.co
parameters = {}
parameters["async"] = Async
parameters["name"] = os.path.basename(destinationFile)
parameters["url"] = uploadedFileUrl
# Prepare URL for 'URL To PDF' API request
url = "{}/pdf/convert/from/url".format(BASE_URL)
# Execute request and get response as JSON
response = requests.post(url, data=parameters, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
if json["error"] == False:
# Asynchronous job ID
jobId = json["jobId"]
# URL of the result file
resultFileUrl = json["url"]
# Check the job status in a loop.
# If you don't want to pause the main thread you can rework the code
# to use a separate thread for the status checking and completion.
while True:
status = checkJobStatus(jobId) # Possible statuses: "working", "failed", "aborted", "success".
# Display timestamp and status (for demo purposes)
print(datetime.datetime.now().strftime("%H:%M.%S") + ": " + status)
if status == "success":
# Download result file
r = requests.get(resultFileUrl, stream=True)
if (r.status_code == 200):
with open(destinationFile, 'wb') as file:
for chunk in r:
file.write(chunk)
print(f"Result file saved as \"{destinationFile}\" file.")
else:
print(f"Request error: {response.status_code} {response.reason}")
break
elif status == "working":
# Pause for a few seconds
time.sleep(3)
else:
print(status)
break
else:
# Show service reported error
print(json["message"])
else:
print(f"Request error: {response.status_code} {response.reason}")
def checkJobStatus(jobId):
"""Checks server job status"""
url = f"{BASE_URL}/job/check?jobid={jobId}"
response = requests.get(url, headers={ "x-api-key": API_KEY })
if (response.status_code == 200):
json = response.json()
return json["status"]
else:
print(f"Request error: {response.status_code} {response.reason}")
return None
if __name__ == '__main__':
    main()
| {
"content_hash": "cc83d6477ecac9c0ae9c34d4ac37b182",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 110,
"avg_line_length": 34.68,
"alnum_prop": 0.5628604382929643,
"repo_name": "bytescout/ByteScout-SDK-SourceCode",
"id": "67ac1a64851f93367116969a3518bf424e0e7e51",
"size": "3468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PDF.co Web API/PDF from URL/Python/Convert Web Page To PDF From Link Asynchronously/ConvertWebPageToPdfFromLinkAsynchronously.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP.NET",
"bytes": "364116"
},
{
"name": "Apex",
"bytes": "243500"
},
{
"name": "Batchfile",
"bytes": "151832"
},
{
"name": "C",
"bytes": "224568"
},
{
"name": "C#",
"bytes": "12909855"
},
{
"name": "C++",
"bytes": "440474"
},
{
"name": "CSS",
"bytes": "56817"
},
{
"name": "Classic ASP",
"bytes": "46655"
},
{
"name": "Dockerfile",
"bytes": "776"
},
{
"name": "Gherkin",
"bytes": "3386"
},
{
"name": "HTML",
"bytes": "17276296"
},
{
"name": "Java",
"bytes": "1483408"
},
{
"name": "JavaScript",
"bytes": "3033610"
},
{
"name": "PHP",
"bytes": "838746"
},
{
"name": "Pascal",
"bytes": "398090"
},
{
"name": "PowerShell",
"bytes": "715204"
},
{
"name": "Python",
"bytes": "703542"
},
{
"name": "QMake",
"bytes": "880"
},
{
"name": "TSQL",
"bytes": "3080"
},
{
"name": "VBA",
"bytes": "383773"
},
{
"name": "VBScript",
"bytes": "1504410"
},
{
"name": "Visual Basic .NET",
"bytes": "9489450"
}
],
"symlink_target": ""
} |
''' author @ esilgard '''
#
# Copyright (c) 2014-2016 Fred Hutchinson Cancer Research Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import sys
import make_datetime
import global_strings as gb
__version__ = 'cyto_parser1.0'
## header names expected to be coming from the Amalga Import ##
REQUIRED_HEADER_SET = set([gb.SET_ID, gb.OBSERVATION_VALUE, gb.FILLER_ORDER_NO, gb.MRN_CAPS])
def parse(obx_file):
'''
this is a basic parser and sectioner for Amalga pathology reports
input = "obx_file" = a tab delimited text version of an Amalga obx table
output = "cytogenetics_dictionary" = a dictionary of \
{unique MRN_CAPSs:{unique FILLER_ORDER_NOs:{(section order, section heading, \
character onset of section):{row num/SET_ID:texts}}}}
**to work correctly first line must contain the expected headers**
--returns a tuple of output, return_type
'''
cytogenetics_dictionary = {}
section = 'NULL'
section_order = 0
specimen = None
try:
obx = open(obx_file, 'rU').readlines()
obx = [re.sub(r'[\r\n]', '', a).split('\t') for a in obx]
header_set = set(obx[0])
if set(header_set) >= (REQUIRED_HEADER_SET):
headers = dict((k, v) for v, k in enumerate(obx[0]))
try:
# sort records by MRN, acc, and then setid, ignore null lines
obx = sorted([y for y in obx[1:] if (y[headers.get(gb.MRN_CAPS)] != 'NULL' \
and y[headers.get(gb.MRN_CAPS)] != gb.MRN_CAPS and \
y[headers.get(gb.FILLER_ORDER_NO)] != 'NULL' and \
y[headers.get(gb.SET_ID)] != 'NULL')], key=lambda x: \
(x[headers.get(gb.MRN_CAPS)], x[headers.get(gb.FILLER_ORDER_NO)], \
int(x[headers.get(gb.SET_ID)])))
chars_onset=0
for line in obx:
mrn = line[headers.get(gb.MRN_CAPS)]
acc = line[headers.get(gb.FILLER_ORDER_NO)]
index = line[headers.get(gb.SET_ID)]
if index == '1':
section_order = 0
chars_onset = 0
text = line[headers.get(gb.OBSERVATION_VALUE)]
if gb.FILLER_ORDER_NO in line:
pass # ignore duplicate header lines
elif text == 'NULL' or text == 'None':
# maintain readability of fully constituted text by keeping empty 'NULL' lines
cytogenetics_dictionary[mrn] = cytogenetics_dictionary.get(mrn, {})
cytogenetics_dictionary[mrn][acc] = cytogenetics_dictionary[mrn].get(acc, {})
cytogenetics_dictionary[mrn][acc][(-1, 'FullText', 0, None)] = \
cytogenetics_dictionary[mrn][acc].get\
((-1, 'FullText', 0, None), '') + '\n'
chars_onset += 1
else:
## grab acc dictionary
cytogenetics_dictionary[mrn] = cytogenetics_dictionary.get(mrn, {})
cytogenetics_dictionary[mrn][acc] = cytogenetics_dictionary[mrn].get(acc, {})
if index == '1':
chars_onset = 0
# create a specimen source dictionary for each labeled specimen
#(in the same format as the regular pathology section dictionary
# catch NULL or empty string specimenSources
if not line[headers.get(gb.SPECIMEN_SOURCE)] or \
line[headers.get(gb.SPECIMEN_SOURCE)] == 'NULL':
specimen_dictionary = {}
else:
try:
specimen_dictionary = dict((x.split(')')[0], x.split(')')[1].replace('(',' ')) \
for x in line[headers.get(gb.SPECIMEN_SOURCE)].strip('"').split('~'))
except:
specimen_dictionary = {'NULL': 'NULL'}
cytogenetics_dictionary[mrn][acc][(0, gb.SPECIMEN_SOURCE, 0, None)] = {}
cytogenetics_dictionary[mrn][acc][(0, gb.SPECIMEN_SOURCE, 0, None)][0] = specimen_dictionary
# match general section header patterns
                        # (this section header matching is purposely broader than the pathology parser)
section_header = re.match(r'[\*\" ]*([A-Za-z ]+)[\*:]+', text)
# reassign the section variable if you find a section pattern match
# reset specimen and increment section order
if section_header:
section = section_header.group(1).strip()
section_order += 1
specimen = ''
specimen_header = re.match(r'[\s\"]{,4}([,A-Z\- and&]+?)[\s]*(FS)?((-[A-Z])[\s]*FS)?[\s]*[)].*', text)
if specimen_header:
specimen = '' ## reset specimen if there is a new specimen header match
specimen_match = specimen_header.group(1).replace(' ', '')
## catch specimens listed in interop consults eg 'AFS-EFS: negative..'
if specimen_header.group(4) and '-' in specimen_header.group(4):
specimen_match = specimen_match + specimen_header.group(4)
for each in specimen_dictionary.keys():
if each and re.search(r'[' + specimen_match + ']', each):
specimen += each
cytogenetics_dictionary[mrn][acc][(section_order, section, chars_onset, specimen)] = \
cytogenetics_dictionary[mrn][acc].get((section_order,section,chars_onset,specimen), {})
cytogenetics_dictionary[mrn][acc]\
[(section_order, section, chars_onset, specimen)][index] = text
cytogenetics_dictionary[mrn][acc][(-1, 'FullText', 0, None)] = \
cytogenetics_dictionary[mrn][acc].get((-1, 'FullText', 0, None), '') + text + '\n'
# do we want received date? or collected?
if 'RECEIVED' in text and 'CASE' in text:
received_date = re.match(r'.*RECEIVED:[ ]+([A-Z][a-z]+)[ ]+([\d]+)[ ]+([\d]{4}).*', text)
if received_date:
cytogenetics_dictionary[mrn][acc][(-1, 'Date', 0, None)] = \
(make_datetime.get((received_date.group(3), received_date.group(1), \
received_date.group(2)), '%Y,%b,%d'), received_date.start(1)+chars_onset, received_date.end(3)+chars_onset)
else:
cytogenetics_dictionary[mrn][acc][(-1, 'Date', 0, None)] = (None, 0, 0)
chars_onset += len(text) + 1
return cytogenetics_dictionary, dict
except RuntimeError:
return ({gb.ERR_TYPE: 'Exception', gb.ERR_STR: "FATAL ERROR: " + \
str(sys.exc_info()[0]) + "," + str(sys.exc_info()[1]) + \
" trouble parsing " + str(obx_file) + " -- program aborted"}, Exception)
else:
return ({gb.ERR_TYPE: 'Exception', gb.ERR_STR: "FATAL ERROR: " + str(sys.exc_info()[0]) \
+ "," + str(sys.exc_info()[1]) + " required headers not found in inital \
line of " + str(obx_file) + " -- must include " + ','.join\
(REQUIRED_HEADER_SET - header_set) + " -- program aborted"}, Exception)
except EnvironmentError:
return ({gb.ERR_TYPE: 'Exception', gb.ERR_STR: "FATAL ERROR: " + str(sys.exc_info()[0]) + \
"," + str(sys.exc_info()[1]) + " -- could not find input file " + str(obx_file) + \
" -- program aborted"}, Exception)
| {
"content_hash": "183aa19f989f06bb9ab9e677b0874e42",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 139,
"avg_line_length": 58.73856209150327,
"alnum_prop": 0.49371314120396126,
"repo_name": "esilgard/argos_nlp",
"id": "9f39bcbe7f51b69a0d0b91bfce11abe29c548c9a",
"size": "8987",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "fhcrc_cytogenetics/parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "519"
},
{
"name": "Python",
"bytes": "158922"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserKey.last_used'
db.add_column('sshkey_userkey', 'last_used', self.gf('django.db.models.fields.DateTimeField')(null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'UserKey.last_used'
db.delete_column('sshkey_userkey', 'last_used')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_sshkey.userkey': {
'Meta': {'unique_together': "[('user', 'name')]", 'object_name': 'UserKey', 'db_table': "'sshkey_userkey'"},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'fingerprint': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '47', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'last_used': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['django_sshkey']
| {
"content_hash": "67e77cd43d4c79813dc294296cf5326e",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 182,
"avg_line_length": 66.7,
"alnum_prop": 0.5564360676804455,
"repo_name": "ClemsonSoCUnix/django-sshkey",
"id": "68903ce03b8cba91b56da051af5b845b82528643",
"size": "4687",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "django_sshkey/south_migrations/0002_auto__add_field_userkey_last_used.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1936"
},
{
"name": "Python",
"bytes": "85419"
},
{
"name": "Shell",
"bytes": "8229"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
from unittest import skipIf
import numpy as np
try:
import pandas as pd
is_pandas = True
except ImportError as e:
is_pandas = False
class TestBokehJSONEncoder(unittest.TestCase):
def setUp(self):
from bokeh._json_encoder import BokehJSONEncoder
self.encoder = BokehJSONEncoder()
def test_fail(self):
self.assertRaises(TypeError, self.encoder.default, {'testing': 1})
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_panda_series(self):
s = pd.Series([1, 3, 5, 6, 8])
self.assertEqual(self.encoder.default(s), [1, 3, 5, 6, 8])
def test_numpyarray(self):
a = np.arange(5)
self.assertEqual(self.encoder.default(a), [0, 1, 2, 3, 4])
def test_numpyint(self):
npint = np.asscalar(np.int64(1))
self.assertEqual(self.encoder.default(npint), 1)
self.assertIsInstance(self.encoder.default(npint), int)
def test_numpyfloat(self):
npfloat = np.float64(1.33)
self.assertEqual(self.encoder.default(npfloat), 1.33)
self.assertIsInstance(self.encoder.default(npfloat), float)
def test_numpybool_(self):
nptrue = np.bool_(True)
self.assertEqual(self.encoder.default(nptrue), True)
self.assertIsInstance(self.encoder.default(nptrue), bool)
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_pd_timestamp(self):
ts = pd.tslib.Timestamp('April 28, 1948')
self.assertEqual(self.encoder.default(ts), -684115200000)
class TestSerializeJson(unittest.TestCase):
def setUp(self):
from bokeh._json_encoder import serialize_json
from json import loads
self.serialize = serialize_json
self.deserialize = loads
def test_with_basic(self):
self.assertEqual(self.serialize({'test': [1, 2, 3]}), '{"test": [1, 2, 3]}')
def test_with_np_array(self):
a = np.arange(5)
self.assertEqual(self.serialize(a), '[0, 1, 2, 3, 4]')
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_with_pd_series(self):
s = pd.Series([0, 1, 2, 3, 4])
self.assertEqual(self.serialize(s), '[0, 1, 2, 3, 4]')
def test_nans_and_infs(self):
arr = np.array([np.nan, np.inf, -np.inf, 0])
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_nans_and_infs_pandas(self):
arr = pd.Series(np.array([np.nan, np.inf, -np.inf, 0]))
serialized = self.serialize(arr)
deserialized = self.deserialize(serialized)
assert deserialized[0] == 'NaN'
assert deserialized[1] == 'Infinity'
assert deserialized[2] == '-Infinity'
assert deserialized[3] == 0
@skipIf(not is_pandas, "pandas does not work in PyPy.")
def test_datetime_types(self):
"""should convert to millis
"""
idx = pd.date_range('2001-1-1', '2001-1-5')
df = pd.DataFrame({'vals' :idx}, index=idx)
serialized = self.serialize({'vals' : df.vals,
'idx' : df.index})
deserialized = self.deserialize(serialized)
baseline = {u'vals': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000],
u'idx': [978307200000,
978393600000,
978480000000,
978566400000,
978652800000]
}
assert deserialized == baseline
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "a9bad01f8cd391a7b36fd7fa37953016",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 84,
"avg_line_length": 34.23275862068966,
"alnum_prop": 0.5817174515235457,
"repo_name": "gpfreitas/bokeh",
"id": "a351bb8baa8ce60d634978fe4c3b2b23076cfe48",
"size": "3971",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bokeh/tests/test_json_encoder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "413470"
},
{
"name": "CoffeeScript",
"bytes": "2117773"
},
{
"name": "HTML",
"bytes": "72852"
},
{
"name": "JavaScript",
"bytes": "7337"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1560447"
},
{
"name": "Shell",
"bytes": "18109"
}
],
"symlink_target": ""
} |
import numpy as np
from jax.util import safe_map, safe_zip
from jax.core import Primitive
from jax.interpreters import ad, xla, batching, numpy_eval
from jax.lax import dynamic_update_slice_p
map = safe_map
zip = safe_zip
inplace_dynamic_update_slice_p = Primitive('inplace_dynamic_update_slice')
inplace_dynamic_update_slice_p.def_impl(dynamic_update_slice_p.impl)
inplace_dynamic_update_slice_p.def_abstract_eval(dynamic_update_slice_p.abstract_eval)
for rules in [xla.translations, ad.primitive_jvps, ad.primitive_transposes,
batching.primitive_batchers]:
rules[inplace_dynamic_update_slice_p] = rules[dynamic_update_slice_p]
def _numpy_inplace_dynamic_update_slice(operand, update, *start_indices):
slices = tuple(map(slice, start_indices, np.add(start_indices, update.shape)))
operand[slices] = update
return operand
numpy_eval.np_impl[inplace_dynamic_update_slice_p] = \
_numpy_inplace_dynamic_update_slice
def inplace_dynamic_update_slice(operand, update, start_indices):
return inplace_dynamic_update_slice_p.bind(operand, update, *start_indices)
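# Usage sketch (shapes and values are illustrative, not from the original module):
#   >>> import jax.numpy as jnp
#   >>> operand = jnp.zeros((4, 4))
#   >>> update = jnp.ones((2, 2))
#   >>> result = inplace_dynamic_update_slice(operand, update, (1, 1))
# The primitive reuses the impl/abstract-eval and the xla/ad/batching rules of
# lax.dynamic_update_slice_p; only the numpy_eval rule above differs, writing the
# update into the operand array in place.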
| {
"content_hash": "bd303d698db87f71c350bb313e61d1b6",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 86,
"avg_line_length": 41.76923076923077,
"alnum_prop": 0.7707182320441989,
"repo_name": "j-towns/fastar",
"id": "f10ebf7713d70345c61827454dab1e807016ca19",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fastar/numpy_eval_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Haskell",
"bytes": "3069"
},
{
"name": "Python",
"bytes": "64620"
}
],
"symlink_target": ""
} |
import rgplot
| {
"content_hash": "0b5fe660760c0be6929838f2fd9ddbe6",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 13,
"avg_line_length": 14,
"alnum_prop": 0.8571428571428571,
"repo_name": "vjuranek/rg-offline-plotting",
"id": "a08f452881e2e561a36ca7367631555bf4f12b85",
"size": "14",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/test/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12211"
},
{
"name": "R",
"bytes": "191"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
import frappe
import hashlib
from frappe.model.db_schema import DbManager
from frappe.installer import get_root_connection
from frappe.database import Database
import os
from markdown2 import markdown
from bs4 import BeautifulSoup
import jinja2.exceptions
from six import text_type
def sync():
# make table
print('Syncing help database...')
help_db = HelpDatabase()
help_db.make_database()
help_db.connect()
help_db.make_table()
help_db.sync_pages()
help_db.build_index()
@frappe.whitelist()
def get_help(text):
return HelpDatabase().search(text)
@frappe.whitelist()
def get_help_content(path):
return HelpDatabase().get_content(path)
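# Usage sketch (arguments are illustrative): the whitelisted endpoints above can be
# exercised directly, e.g. from a bench console, assuming the help database has
# already been synced.
#   >>> get_help("user permissions")
#   >>> get_help_content("/frappe/index")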
def get_improve_page_html(app_name, target):
docs_config = frappe.get_module(app_name + ".config.docs")
source_link = docs_config.source_link
branch = getattr(docs_config, "branch", "develop")
html = '''<div class="page-container">
<div class="page-content">
<div class="edit-container text-center">
<i class="fa fa-smile text-muted"></i>
<a class="edit text-muted" href="{source_link}/blob/{branch}/{target}">
Improve this page
</a>
</div>
</div>
</div>'''.format(source_link=source_link, app_name=app_name, target=target, branch=branch)
return html
class HelpDatabase(object):
def __init__(self):
self.global_help_setup = frappe.conf.get('global_help_setup')
if self.global_help_setup:
bench_name = os.path.basename(os.path.abspath(frappe.get_app_path('frappe')).split('/apps/')[0])
self.help_db_name = hashlib.sha224(bench_name).hexdigest()[:15]
def make_database(self):
'''make database for global help setup'''
if not self.global_help_setup:
return
dbman = DbManager(get_root_connection())
dbman.drop_database(self.help_db_name)
# make database
if not self.help_db_name in dbman.get_database_list():
try:
dbman.create_user(self.help_db_name, self.help_db_name)
except Exception as e:
# user already exists
if e.args[0] != 1396: raise
dbman.create_database(self.help_db_name)
dbman.grant_all_privileges(self.help_db_name, self.help_db_name)
dbman.flush_privileges()
def connect(self):
if self.global_help_setup:
self.db = Database(user=self.help_db_name, password=self.help_db_name)
else:
self.db = frappe.db
def make_table(self):
if not 'help' in self.db.get_tables():
self.db.sql('''create table help(
path varchar(255),
content text,
title text,
intro text,
full_path text,
fulltext(title),
fulltext(content),
index (path))
COLLATE=utf8mb4_unicode_ci
ENGINE=MyISAM
CHARACTER SET=utf8mb4''')
def search(self, words):
self.connect()
return self.db.sql('''
select title, intro, path from help where title like %s union
select title, intro, path from help where match(content) against (%s) limit 10''', ('%'+words+'%', words))
def get_content(self, path):
self.connect()
query = '''select title, content from help
where path like "{path}%" order by path desc limit 1'''
result = None
if not path.endswith('index'):
result = self.db.sql(query.format(path=os.path.join(path, 'index')))
if not result:
result = self.db.sql(query.format(path=path))
return {'title':result[0][0], 'content':result[0][1]} if result else {}
def sync_pages(self):
self.db.sql('truncate help')
doc_contents = '<ol>'
apps = os.listdir('../apps') if self.global_help_setup else frappe.get_installed_apps()
for app in apps:
docs_folder = '../apps/{app}/{app}/docs/user'.format(app=app)
self.out_base_path = '../apps/{app}/{app}/docs'.format(app=app)
if os.path.exists(docs_folder):
app_name = getattr(frappe.get_module(app), '__title__', None) or app.title()
doc_contents += '<li><a data-path="/{app}/index">{app_name}</a></li>'.format(
app=app, app_name=app_name)
for basepath, folders, files in os.walk(docs_folder):
files = self.reorder_files(files)
for fname in files:
if fname.rsplit('.', 1)[-1] in ('md', 'html'):
fpath = os.path.join(basepath, fname)
with open(fpath, 'r') as f:
try:
content = frappe.render_template(text_type(f.read(), 'utf-8'),
{'docs_base_url': '/assets/{app}_docs'.format(app=app)})
relpath = self.get_out_path(fpath)
relpath = relpath.replace("user", app)
content = markdown(content)
title = self.make_title(basepath, fname, content)
intro = self.make_intro(content)
content = self.make_content(content, fpath, relpath)
self.db.sql('''insert into help(path, content, title, intro, full_path)
values (%s, %s, %s, %s, %s)''', (relpath, content, title, intro, fpath))
except jinja2.exceptions.TemplateSyntaxError:
print("Invalid Jinja Template for {0}. Skipping".format(fpath))
doc_contents += "</ol>"
self.db.sql('''insert into help(path, content, title, intro, full_path) values (%s, %s, %s, %s, %s)''',
('/documentation/index', doc_contents, 'Documentation', '', ''))
def make_title(self, basepath, filename, html):
if '<h1>' in html:
title = html.split("<h1>", 1)[1].split("</h1>", 1)[0]
elif 'index' in filename:
title = basepath.rsplit('/', 1)[-1].title().replace("-", " ")
else:
title = filename.rsplit('.', 1)[0].title().replace("-", " ")
return title
def make_intro(self, html):
intro = ""
if '<p>' in html:
intro = html.split('<p>', 1)[1].split('</p>', 1)[0]
if 'Duration' in html:
intro = "Help Video: " + intro
return intro
def make_content(self, html, path, relpath):
if '<h1>' in html:
html = html.split('</h1>', 1)[1]
if '{next}' in html:
html = html.replace('{next}', '')
target = path.split('/', 3)[-1]
app_name = path.split('/', 3)[2]
html += get_improve_page_html(app_name, target)
soup = BeautifulSoup(html, 'html.parser')
self.fix_links(soup, app_name)
self.fix_images(soup, app_name)
parent = self.get_parent(relpath)
if parent:
parent_tag = soup.new_tag('a')
parent_tag.string = parent['title']
parent_tag['class'] = 'parent-link'
parent_tag['data-path'] = parent['path']
soup.find().insert_before(parent_tag)
return soup.prettify()
def fix_links(self, soup, app_name):
for link in soup.find_all('a'):
if link.has_attr('href'):
url = link['href']
if '/user' in url:
data_path = url[url.index('/user'):]
if '.' in data_path:
data_path = data_path[: data_path.rindex('.')]
if data_path:
link['data-path'] = data_path.replace("user", app_name)
def fix_images(self, soup, app_name):
for img in soup.find_all('img'):
if img.has_attr('src'):
url = img['src']
if '/docs/' in url:
img['src'] = url.replace('/docs/', '/assets/{0}_docs/'.format(app_name))
def build_index(self):
for data in self.db.sql('select path, full_path, content from help'):
self.make_index(data[0], data[1], data[2])
def make_index(self, original_path, full_path, content):
'''Make index from index.txt'''
if '{index}' in content:
path = os.path.dirname(full_path)
files = []
# get files from index.txt
index_path = os.path.join(path, "index.txt")
if os.path.exists(index_path):
with open(index_path, 'r') as f:
files = f.read().splitlines()
# files not in index.txt
for f in os.listdir(path):
if not os.path.isdir(os.path.join(path, f)):
name, extn = f.rsplit('.', 1)
if name not in files \
and name != 'index' and extn in ('md', 'html'):
files.append(name)
links_html = "<ol class='index-links'>"
for line in files:
fpath = os.path.join(os.path.dirname(original_path), line)
title = self.db.sql('select title from help where path like %s',
os.path.join(fpath, 'index') + '%')
if not title:
title = self.db.sql('select title from help where path like %s',
fpath + '%')
if title:
title = title[0][0]
links_html += "<li><a data-path='{fpath}'> {title} </a></li>".format(
fpath=fpath, title=title)
# else:
# bad entries in .txt files
# print fpath
links_html += "</ol>"
html = content.replace('{index}', links_html)
self.db.sql('update help set content=%s where path=%s', (html, original_path))
def get_out_path(self, path):
return '/' + os.path.relpath(path, self.out_base_path)
def get_parent(self, child_path):
if 'index' in child_path:
child_path = child_path[: child_path.rindex('index')]
if child_path[-1] == '/':
child_path = child_path[:-1]
child_path = child_path[: child_path.rindex('/')]
out = None
if child_path:
parent_path = child_path + "/index"
out = self.get_content(parent_path)
#if parent is documentation root
else:
parent_path = "/documentation/index"
out = {}
out['title'] = "Documentation"
if not out:
return None
out['path'] = parent_path
return out
def reorder_files(self, files):
pos = 0
if 'index.md' in files:
pos = files.index('index.md')
elif 'index.html' in files:
pos = files.index('index.html')
if pos:
files[0], files[pos] = files[pos], files[0]
return files
| {
"content_hash": "edfba66c884b1979b824a893476d1fa0",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 109,
"avg_line_length": 30.738255033557046,
"alnum_prop": 0.6354803493449782,
"repo_name": "tmimori/frappe",
"id": "45971820c0c91f8d35cdaba0834fc93548b9c0a2",
"size": "9288",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/utils/help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "237768"
},
{
"name": "HTML",
"bytes": "133546"
},
{
"name": "JavaScript",
"bytes": "1331599"
},
{
"name": "Python",
"bytes": "1091219"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.node import PipeUnderground
log = logging.getLogger(__name__)
class TestPipeUnderground(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_pipeunderground(self):
pyidf.validation_level = ValidationLevel.error
obj = PipeUnderground()
# alpha
var_name = "Name"
obj.name = var_name
# object-list
var_construction_name = "object-list|Construction Name"
obj.construction_name = var_construction_name
# node
var_fluid_inlet_node_name = "node|Fluid Inlet Node Name"
obj.fluid_inlet_node_name = var_fluid_inlet_node_name
# node
var_fluid_outlet_node_name = "node|Fluid Outlet Node Name"
obj.fluid_outlet_node_name = var_fluid_outlet_node_name
# alpha
var_sun_exposure = "SunExposed"
obj.sun_exposure = var_sun_exposure
# real
var_pipe_inside_diameter = 0.0001
obj.pipe_inside_diameter = var_pipe_inside_diameter
# real
var_pipe_length = 0.0001
obj.pipe_length = var_pipe_length
# alpha
var_soil_material_name = "Soil Material Name"
obj.soil_material_name = var_soil_material_name
# alpha
var_undisturbed_ground_temperature_model_type = "Site:GroundTemperature:Undisturbed:FiniteDifference"
obj.undisturbed_ground_temperature_model_type = var_undisturbed_ground_temperature_model_type
# object-list
var_undisturbed_ground_temperature_model_name = "object-list|Undisturbed Ground Temperature Model Name"
obj.undisturbed_ground_temperature_model_name = var_undisturbed_ground_temperature_model_name
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.pipeundergrounds[0].name, var_name)
self.assertEqual(idf2.pipeundergrounds[0].construction_name, var_construction_name)
self.assertEqual(idf2.pipeundergrounds[0].fluid_inlet_node_name, var_fluid_inlet_node_name)
self.assertEqual(idf2.pipeundergrounds[0].fluid_outlet_node_name, var_fluid_outlet_node_name)
self.assertEqual(idf2.pipeundergrounds[0].sun_exposure, var_sun_exposure)
self.assertAlmostEqual(idf2.pipeundergrounds[0].pipe_inside_diameter, var_pipe_inside_diameter)
self.assertAlmostEqual(idf2.pipeundergrounds[0].pipe_length, var_pipe_length)
self.assertEqual(idf2.pipeundergrounds[0].soil_material_name, var_soil_material_name)
self.assertEqual(idf2.pipeundergrounds[0].undisturbed_ground_temperature_model_type, var_undisturbed_ground_temperature_model_type)
        self.assertEqual(idf2.pipeundergrounds[0].undisturbed_ground_temperature_model_name, var_undisturbed_ground_temperature_model_name)
| {
"content_hash": "69f8f8e91f575fbc3255043ab6fb37fd",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 139,
"avg_line_length": 42.36486486486486,
"alnum_prop": 0.6889952153110048,
"repo_name": "rbuffat/pyidf",
"id": "2e951d342ca9df4737920641062aed282e12cb49",
"size": "3135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pipeunderground.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
} |
from django.conf import settings
from allauth.account.adapter import DefaultAccountAdapter
from allauth.account.models import EmailAddress
class MyAccountAdapter(DefaultAccountAdapter):
def pre_social_login(self, request, sociallogin):
"""
Invoked just after a user successfully authenticates via a
social provider, but before the login is actually processed
(and before the pre_social_login signal is emitted).
We're trying to solve different use cases:
- social account already exists, just go on
- social account has no email or email is unknown, just go on
- social account's email exists, link social account to existing user
"""
# Ignore existing social accounts, just do this stuff for new ones
if sociallogin.is_existing:
return
# some social logins don't have an email address, e.g. facebook accounts
# with mobile numbers only, but allauth takes care of this case so just
# ignore it
if 'email' not in sociallogin.account.extra_data:
return
# check if given email address already exists.
# Note: __iexact is used to ignore cases
try:
email = sociallogin.account.extra_data['email'].lower()
email_address = EmailAddress.objects.get(email__iexact=email)
# if it does not, let allauth take care of this new social account
except EmailAddress.DoesNotExist:
return
# if it does, connect this new social login to the existing user
user = email_address.user
sociallogin.connect(request, user)
def join_social_account(self, request, sociallogin):
if sociallogin.is_existing:
            print(sociallogin)
return
def get_login_redirect_url(self, request):
path = "/accounts/dashboard/{username}/"
return path.format(username=request.user.username)
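# Wiring sketch (an assumption, not shown in this file): allauth discovers the
# adapter through settings, e.g.
#   ACCOUNT_ADAPTER = "playlab.adapter.MyAccountAdapter"
# Note that in stock allauth the pre_social_login hook lives on the social account
# adapter (configured via SOCIALACCOUNT_ADAPTER), so a project relying on the
# email-linking logic above would also need an adapter registered under that setting.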
| {
"content_hash": "0fdf5edaf16a68e52653586005c6e2d6",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 80,
"avg_line_length": 40.04081632653061,
"alnum_prop": 0.663098878695209,
"repo_name": "c24b/playlab",
"id": "6520070f55ad1281630c486f1b2f2a95511497d5",
"size": "1990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "playlab/adapter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "112952"
},
{
"name": "HTML",
"bytes": "70400"
},
{
"name": "JavaScript",
"bytes": "99591"
},
{
"name": "Python",
"bytes": "18480"
}
],
"symlink_target": ""
} |
__credits__ = ["Daniel McDonald", "Greg Caporaso", "Doug Wendel",
"Jai Ram Rideout"]
| {
"content_hash": "977f8edb1965566c03e194da08d6bd7e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 65,
"avg_line_length": 50,
"alnum_prop": 0.56,
"repo_name": "biocore/pyqi",
"id": "87fe705947355ce3e5afe5bf02d0e8553bff9737",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyqi/interfaces/optparse/config/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "198067"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
"""
.. module:: utilities
:platform: Unix
   :synopsis: Helpful functions for ScatterBrane
.. moduleauthor:: Katherine Rosenfeld <[email protected]>
.. moduleauthor:: Michael Johnson
"""
from __future__ import print_function
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage.filters import gaussian_filter
from astropy.io import fits
def smoothImage(img,dx,fwhm):
'''
Returns Image smoothed by a gaussian kernel.
:param img: ``(n, n)``
numpy array
:param dx: scalar
Pixel scale in microarcseconds
:param fwhm: scalar
Gaussian full width at half maximum in microarcseconds
'''
return gaussian_filter(img,fwhm/(2*np.sqrt(np.log(4)))/dx)
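# Usage sketch (illustrative values): blur a point source sampled on a 2
# microarcsecond grid with a 10 microarcsecond FWHM beam.
#   >>> img = np.zeros((64, 64)); img[32, 32] = 1.
#   >>> blurred = smoothImage(img, dx=2., fwhm=10.)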
def getCoherenceLength(theta,wavelength=1.3e-3,magnification=0.448):
'''
:param theta: scalar
FWHM of scattering kernel at 1 cm in milli-arcseconds.
:param wavelength: (optional) scalar
Observing wavelength in meters
:param magnification: (optional) scalar
Magnification factor (scatterer-observer)/(source-scatterer).
:returns: scalar
Coherence length in km.
'''
#return (wavelength*1e-3)*np.sqrt(np.log(4))/(np.pi*np.sqrt(1+magnification)**2*np.radians(1e-3/3600*theta*(wavelength*1e2)**2))
return (wavelength*1e-3)*np.sqrt(np.log(4))/(np.pi*(1+magnification)*np.radians(1e-3/3600*theta*(wavelength*1e2)**2))
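# Usage sketch (kernel size is illustrative): phase coherence length in km for a
# kernel with a 1.3 mas FWHM at 1 cm, observed at the default 1.3 mm wavelength.
#   >>> r0 = getCoherenceLength(1.3)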
def ensembleSmooth(img,dx,brane,return_kernel=False):
'''
Generates ensemble averaged image given scattering kernel parameters.
:param img: ``(n, n)``
numpy array
:param dx: scalar
Pixel scale in microarcseconds
:param brane: Brane object.
:param return_kernel: (optional) bool
    Return tuple with uv kernel (:func:`numpy.fft.rfft2` format). See :func:`getUVKernel` for an alternate method.
'''
nx = img.shape[0]
# scattering kernel parameters in wavelengths
sigma_maj = brane.wavelength*np.sqrt(np.log(4)) / (np.pi*(1.+brane.m)*brane.r0) / (2*np.sqrt(np.log(4)))
sigma_min = sigma_maj / brane.anisotropy
  v = np.dot(np.transpose([np.fft.fftfreq(nx,d=dx*np.radians(1.)/(3600*1e6))]),[np.ones(nx//2 + 1)])
u = np.dot(np.transpose([np.ones(nx)]),[np.fft.rfftfreq(nx,d=dx*np.radians(1.)/(3600*1e6))])
# rotate
if brane.pa != None:
theta = np.radians(90-brane.pa)
else:
theta = np.radians(0.)
u_ = np.cos(theta)*u - np.sin(theta)*v
v = np.sin(theta)*u + np.cos(theta)*v
# rotate
G = np.exp(-2*np.pi**2*(u_**2*sigma_maj**2 + v**2*sigma_min**2))
V = np.fft.rfft2(img)
if return_kernel:
return (np.fft.irfft2(V*G,s=img.shape),G)
else:
return np.fft.irfft2(V*G,s=img.shape)
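# Usage sketch (assumes `b` is a configured scatterbrane Brane instance and `img`,
# `dx` are the source image and its pixel scale in microarcseconds):
#   >>> ens = ensembleSmooth(img, dx, b)
#   >>> ens, G = ensembleSmooth(img, dx, b, return_kernel=True)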
def getUVKernel(u,v,brane):
'''
  Get ensemble kernel in visibility plane for specified uv points. See :func:`ensembleSmooth` for an alternate method.
:param u: ``(n, )``
Samples of u in units of wavelengths.
:param v: ``(n, )``
Samples of v in units of wavelengths.
:param brane: Brane object
:returns: ``(n, )`` Ensemble kernel complex visibility
'''
# scattering kernel parameters in wavelengths
sigma_maj = brane.wavelength*np.sqrt(np.log(4)) / (np.pi*(1.+brane.m)*brane.r0) / (2*np.sqrt(np.log(4)))
sigma_min = sigma_maj / brane.anisotropy
# rotate
if brane.pa != None:
theta = np.radians(90-brane.pa)
else:
theta = np.radians(0.)
u_ = np.cos(theta)*u - np.sin(theta)*v
v_ = np.sin(theta)*u + np.cos(theta)*v
# rotate and return
return np.exp(-2*np.pi**2*(u_**2*sigma_maj**2 + v_**2*sigma_min**2))
def loadSettings(filename):
'''
Loads simulation settings from a file generated by :func:`Brane.save_settings`.
:param filename: string
File name that contains simulation settings.
:returns: A dictionary with simulation settings.
'''
return dict(np.genfromtxt(filename,\
dtype=[('a','|S10'),('f','float')],delimiter='\t',autostrip=True))
def regrid(a,inx,idx,onx,odx):
'''
Regrids array with a new resolution and pixel number.
:param a: ``(n, n)``
Input numpy image
:param inx: int
Number of input pixels on a side
:param idx: scalar
Input resolution element
:param onx: int
Number of output pixels on a side
:param odx: scalar
Output resolution element
:returns: Array regridded to the new resolution and field of view.
'''
x = idx * (np.arange(inx) - 0.5 * (inx - 1))
f = RectBivariateSpline(x,x,a)
x_ = odx * (np.arange(onx) - 0.5 * (onx - 1))
xx_,yy_ = np.meshgrid(x_,x_,indexing='xy')
m = f.ev(yy_.flatten(),xx_.flatten()).reshape((onx,onx))
return m*(odx/idx)**2
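# Usage sketch (sizes are illustrative): interpolate a 128-pixel map with 1
# microarcsecond pixels onto a 64-pixel grid with 2 microarcsecond pixels; pixel
# values are rescaled by (odx/idx)**2.
#   >>> coarse = regrid(fine, inx=128, idx=1., onx=64, odx=2.)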
def writefits(m,dx,dest='image.fits',obsra=266.4168370833333,obsdec=-29.00781055555555,freq=230e9):
'''
Write fits file with header. Defaults are set for Sgr A* at 1.3mm.
:param m: ``(n, n)``
numpy image array
:param dx: scalar
Pixel size in microarcseconds
:param dest: (optional) string
Output fits file name
:param obsra: (optional) scalar
Source right ascension
:param obsdec: (optional) scalar
    Source declination
  :param freq: (optional) scalar
    Observing frequency in Hz
'''
hdu = fits.PrimaryHDU(m)
hdu.header['CDELT1'] = -1*dx*np.radians(1.)/(3600.*1e6)
hdu.header['CDELT2'] = dx*np.radians(1.)/(3600.*1e6)
hdu.header['OBSRA'] = obsra
hdu.header['OBSDEC'] = obsdec
hdu.header['FREQ'] = freq
hdu.writeto(dest,clobber=True)
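# Usage sketch (the file name is hypothetical): write a model image with 2
# microarcsecond pixels using the default Sgr A* pointing and 230 GHz frequency.
#   >>> writefits(img, dx=2., dest='model.fits')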
def FTElementFast(img,dx,baseline):
'''
Return complex visibility.
:param img: ``(n, n)``
numpy image array
:param dx: scalar
Pixel size in microarcseconds
:param baseline: ``(2, )``
(u,v) point in wavelengths
  .. note:: To shift the center, try multiplying by :math:`\\mathrm{exp}(\\pi i u n_x\\Delta_x)` and watch out for the axis orientation.
'''
nx = img.shape[-1]
du = 1./(nx * dx * np.radians(1.)/(3600*1e6))
ind = np.arange(nx)
return np.sum(img * np.dot(\
np.transpose([np.exp(-2j*np.pi/du/nx*baseline[1]*np.flipud(ind))]),\
[np.exp(-2j*np.pi/du/nx*baseline[0]*ind)]))
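# Usage sketch (baseline is illustrative): complex visibility of `img` (pixel scale
# `dx` in microarcseconds) at a single (u,v) point, here one gigalambda in u.
#   >>> vis = FTElementFast(img, dx=2., baseline=(1e9, 0.))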
| {
"content_hash": "f2fb37230de161ebe2fb3bc20451a8ea",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 132,
"avg_line_length": 32.58201058201058,
"alnum_prop": 0.6295875284183177,
"repo_name": "krosenfeld/scatterbrane",
"id": "318109b52f59f97f58544d578401e4239d639a0e",
"size": "6159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scatterbrane/utilities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40405"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use `airflow.providers.databricks.hooks.databricks`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.databricks.hooks.databricks import ( # noqa
CANCEL_RUN_ENDPOINT, GET_RUN_ENDPOINT, RESTART_CLUSTER_ENDPOINT, RUN_LIFE_CYCLE_STATES, RUN_NOW_ENDPOINT,
START_CLUSTER_ENDPOINT, SUBMIT_RUN_ENDPOINT, TERMINATE_CLUSTER_ENDPOINT, USER_AGENT_HEADER,
DatabricksHook, RunState,
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.databricks.hooks.databricks`.",
DeprecationWarning, stacklevel=2
)
| {
"content_hash": "684cf857b774e98b61bc1889501dedb0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 109,
"avg_line_length": 39.86666666666667,
"alnum_prop": 0.7725752508361204,
"repo_name": "wileeam/airflow",
"id": "3c610e316c21aef4add7d0c700646411e19ba55a",
"size": "1385",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/databricks_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17179"
},
{
"name": "HTML",
"bytes": "148281"
},
{
"name": "JavaScript",
"bytes": "25233"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "9763694"
},
{
"name": "Shell",
"bytes": "221331"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
"""
Exercise 1: find the paths of all files starting from a given folder
python3 23_03_es1.py <starting_folder>
"""
import os
import sys
import pathlib
import stat
def insert_dict(dictlink, key, value):
"""
    Insert a new element into the dictionary, respecting the default
"""
if key not in dictlink:
dictlink[key] = list()
dictlink[key].append(value)
def main():
"""
    Main function: parses the input, computes the output, and does everything itself
"""
if len(sys.argv) < 2:
print("Please provide the directory to analyze")
return
elif len(sys.argv) > 2:
print("Too many arguments, please provide only the directory")
return
elif not pathlib.Path(sys.argv[1]).is_dir():
print("Provide a valid path, please")
return
rootdir = pathlib.Path(sys.argv[1])
allfiles = [str(pathfile) for pathfile in rootdir.glob('**/*')]
dictlinks = dict()
for pathfile in allfiles:
inodenum = os.stat(pathfile, follow_symlinks=True)[stat.ST_INO]
if os.path.islink(pathfile):
insert_dict(dictlinks, inodenum, "s " + pathfile)
else:
insert_dict(dictlinks, inodenum, "h " + pathfile)
for keyval in dictlinks:
for pathval in sorted(dictlinks[keyval]):
print(pathval)
print("##########################")
if __name__ == '__main__':
main()
| {
"content_hash": "bd912e94cf8a374f1e9904da880609cd",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 28.959183673469386,
"alnum_prop": 0.6102889358703312,
"repo_name": "sb00nk/PythonExercises",
"id": "212dd8a58dfb985a587916989bfe2380afe6a785",
"size": "1419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "23_03_es1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19044"
}
],
"symlink_target": ""
} |
import re
from enum import Enum
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
from azure.cli.core.azclierror import (
ArgumentUsageError,
BadRequestError,
InvalidArgumentValueError,
RequiredArgumentMissingError,
ResourceNotFoundError,
UnclassifiedUserFault
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import sdk_no_wait
from azure.cli.core.profiles._shared import AZURE_API_PROFILES, ResourceType
from azure.mgmt.iothub.models import (IotHubSku,
AccessRights,
ArmIdentity,
CertificateDescription,
CertificateProperties,
CertificateVerificationDescription,
CloudToDeviceProperties,
IotHubDescription,
IotHubSkuInfo,
SharedAccessSignatureAuthorizationRule,
IotHubProperties,
EventHubProperties,
EventHubConsumerGroupBodyDescription,
EventHubConsumerGroupName,
FailoverInput,
FeedbackProperties,
ManagedIdentity,
MessagingEndpointProperties,
OperationInputs,
EnrichmentProperties,
RoutingEventHubProperties,
RoutingServiceBusQueueEndpointProperties,
RoutingServiceBusTopicEndpointProperties,
RoutingStorageContainerProperties,
RouteProperties,
RoutingMessage,
StorageEndpointProperties,
TestRouteInput,
TestAllRoutesInput)
from azure.mgmt.iothubprovisioningservices.models import (CertificateBodyDescription,
ProvisioningServiceDescription,
IotDpsPropertiesDescription,
IotHubDefinitionDescription,
IotDpsSkuInfo,
IotDpsSku,
OperationInputs as DpsOperationInputs,
SharedAccessSignatureAuthorizationRuleAccessRightsDescription,
VerificationCodeRequest)
from azure.mgmt.iotcentral.models import (AppSkuInfo,
App)
from azure.cli.command_modules.iot._constants import SYSTEM_ASSIGNED_IDENTITY
from azure.cli.command_modules.iot.shared import EndpointType, EncodingFormat, RenewKeyType, AuthenticationType, IdentityType
from azure.cli.command_modules.iot._client_factory import resource_service_factory
from azure.cli.command_modules.iot._client_factory import iot_hub_service_factory
from azure.cli.command_modules.iot._utils import open_certificate, generate_key
logger = get_logger(__name__)
# Identity types
SYSTEM_ASSIGNED = 'SystemAssigned'
NONE_IDENTITY = 'None'
# CUSTOM TYPE
class KeyType(Enum):
primary = 'primary'
secondary = 'secondary'
# This is a work around to simplify the permission parameter for access policy creation, and also align with the other
# command modules.
# The original AccessRights enum is a combination of the four basic access rights below.
# In order to avoid asking for comma- & space-separated strings from the user, a space-separated list is supported for
# assigning multiple permissions.
# The underlying IoT SDK should handle this, but it doesn't right now. Remove this workaround once it is fixed in the IoT SDK.
class SimpleAccessRights(Enum):
registry_read = AccessRights.registry_read.value
registry_write = AccessRights.registry_write.value
service_connect = AccessRights.service_connect.value
device_connect = AccessRights.device_connect.value
# CUSTOM METHODS FOR DPS
def iot_dps_list(client, resource_group_name=None):
if resource_group_name is None:
return client.iot_dps_resource.list_by_subscription()
return client.iot_dps_resource.list_by_resource_group(resource_group_name)
def iot_dps_get(client, dps_name, resource_group_name=None):
if resource_group_name is None:
return _get_iot_dps_by_name(client, dps_name, resource_group_name)
return client.iot_dps_resource.get(dps_name, resource_group_name)
def iot_dps_create(cmd, client, dps_name, resource_group_name, location=None, sku=IotDpsSku.s1.value, unit=1, tags=None, enable_data_residency=None):
cli_ctx = cmd.cli_ctx
_check_dps_name_availability(client.iot_dps_resource, dps_name)
location = _ensure_location(cli_ctx, resource_group_name, location)
dps_property = IotDpsPropertiesDescription(enable_data_residency=enable_data_residency)
dps_description = ProvisioningServiceDescription(location=location,
properties=dps_property,
sku=IotDpsSkuInfo(name=sku, capacity=unit),
tags=tags)
return client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps_description)
def iot_dps_update(client, dps_name, parameters, resource_group_name=None, tags=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
if tags is not None:
parameters.tags = tags
return client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, parameters)
def iot_dps_delete(client, dps_name, resource_group_name=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
return client.iot_dps_resource.begin_delete(dps_name, resource_group_name)
# DPS policy methods
def iot_dps_policy_list(client, dps_name, resource_group_name=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
return client.iot_dps_resource.list_keys(dps_name, resource_group_name)
def iot_dps_policy_get(client, dps_name, access_policy_name, resource_group_name=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
return client.iot_dps_resource.list_keys_for_key_name(dps_name, access_policy_name, resource_group_name)
def iot_dps_policy_create(
cmd,
client,
dps_name,
access_policy_name,
rights,
resource_group_name=None,
primary_key=None,
secondary_key=None,
no_wait=False
):
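    """Create a shared access policy on a Device Provisioning Service instance.

    The new policy is appended to the existing authorization policies and the whole DPS
    resource is updated; with --no-wait the raw LRO poller is returned instead of the
    refreshed policy.
    """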
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
dps_access_policies = []
dps_access_policies.extend(iot_dps_policy_list(client, dps_name, resource_group_name))
if _does_policy_exist(dps_access_policies, access_policy_name):
raise BadRequestError("Access policy {} already exists.".format(access_policy_name))
dps = iot_dps_get(client, dps_name, resource_group_name)
access_policy_rights = _convert_rights_to_access_rights(rights)
dps_access_policies.append(SharedAccessSignatureAuthorizationRuleAccessRightsDescription(
key_name=access_policy_name, rights=access_policy_rights, primary_key=primary_key, secondary_key=secondary_key))
dps.properties.authorization_policies = dps_access_policies
if no_wait:
return client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps)
LongRunningOperation(cmd.cli_ctx)(client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps))
return iot_dps_policy_get(client, dps_name, access_policy_name, resource_group_name)
def iot_dps_policy_update(
cmd,
client,
dps_name,
access_policy_name,
resource_group_name=None,
primary_key=None,
secondary_key=None,
rights=None,
no_wait=False
):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
dps_access_policies = []
dps_access_policies.extend(iot_dps_policy_list(client, dps_name, resource_group_name))
if not _does_policy_exist(dps_access_policies, access_policy_name):
raise ResourceNotFoundError("Access policy {} doesn't exist.".format(access_policy_name))
for policy in dps_access_policies:
if policy.key_name == access_policy_name:
if primary_key is not None:
policy.primary_key = primary_key
if secondary_key is not None:
policy.secondary_key = secondary_key
if rights is not None:
policy.rights = _convert_rights_to_access_rights(rights)
dps = iot_dps_get(client, dps_name, resource_group_name)
dps.properties.authorization_policies = dps_access_policies
if no_wait:
return client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps)
LongRunningOperation(cmd.cli_ctx)(client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps))
return iot_dps_policy_get(client, dps_name, access_policy_name, resource_group_name)
def iot_dps_policy_delete(cmd, client, dps_name, access_policy_name, resource_group_name=None, no_wait=False):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
dps_access_policies = []
dps_access_policies.extend(iot_dps_policy_list(client, dps_name, resource_group_name))
if not _does_policy_exist(dps_access_policies, access_policy_name):
raise ResourceNotFoundError("Access policy {0} doesn't exist.".format(access_policy_name))
updated_policies = [p for p in dps_access_policies if p.key_name.lower() != access_policy_name.lower()]
dps = iot_dps_get(client, dps_name, resource_group_name)
dps.properties.authorization_policies = updated_policies
if no_wait:
return client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps)
LongRunningOperation(cmd.cli_ctx)(client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps))
return iot_dps_policy_list(client, dps_name, resource_group_name)
# DPS linked hub methods
def iot_dps_linked_hub_list(client, dps_name, resource_group_name=None):
dps = iot_dps_get(client, dps_name, resource_group_name)
return dps.properties.iot_hubs
def iot_dps_linked_hub_get(cmd, client, dps_name, linked_hub, resource_group_name=None):
if '.' not in linked_hub:
hub_client = iot_hub_service_factory(cmd.cli_ctx)
linked_hub = _get_iot_hub_hostname(hub_client, linked_hub)
dps = iot_dps_get(client, dps_name, resource_group_name)
for hub in dps.properties.iot_hubs:
if hub.name == linked_hub:
return hub
raise ResourceNotFoundError("Linked hub '{0}' does not exist. Use 'iot dps linked-hub show to see all linked hubs.".format(linked_hub))
def iot_dps_linked_hub_create(
cmd,
client,
dps_name,
hub_name=None,
hub_resource_group=None,
connection_string=None,
location=None,
resource_group_name=None,
apply_allocation_policy=None,
allocation_weight=None,
no_wait=False
):
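    """Link an IoT hub to a Device Provisioning Service instance.

    Either a hub name or a hub connection string must be supplied; the missing pieces
    (connection string, hub location) are resolved from the hub resource before the
    linked-hub entry is appended to the DPS properties.
    """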
if not any([connection_string, hub_name]):
raise RequiredArgumentMissingError("Please provide the IoT Hub name or connection string.")
if not connection_string:
# Get the connection string for the hub
hub_client = iot_hub_service_factory(cmd.cli_ctx)
connection_string = iot_hub_show_connection_string(
hub_client, hub_name=hub_name, resource_group_name=hub_resource_group
)['connectionString']
if not location:
# Parse out hub name from connection string if needed
if not hub_name:
try:
hub_name = re.search(r"hostname=(.[^\;\.]+)?", connection_string, re.IGNORECASE).group(1)
except AttributeError:
raise InvalidArgumentValueError("Please provide a valid IoT Hub connection string.")
hub_client = iot_hub_service_factory(cmd.cli_ctx)
location = iot_hub_get(cmd, hub_client, hub_name=hub_name, resource_group_name=hub_resource_group).location
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
dps = iot_dps_get(client, dps_name, resource_group_name)
dps.properties.iot_hubs.append(IotHubDefinitionDescription(connection_string=connection_string,
location=location,
apply_allocation_policy=apply_allocation_policy,
allocation_weight=allocation_weight))
if no_wait:
return client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps)
LongRunningOperation(cmd.cli_ctx)(client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps))
return iot_dps_linked_hub_list(client, dps_name, resource_group_name)
def iot_dps_linked_hub_update(cmd, client, dps_name, linked_hub, resource_group_name=None, apply_allocation_policy=None,
allocation_weight=None, no_wait=False):
if '.' not in linked_hub:
hub_client = iot_hub_service_factory(cmd.cli_ctx)
linked_hub = _get_iot_hub_hostname(hub_client, linked_hub)
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
dps_linked_hubs = []
dps_linked_hubs.extend(iot_dps_linked_hub_list(client, dps_name, resource_group_name))
if not _is_linked_hub_existed(dps_linked_hubs, linked_hub):
raise ResourceNotFoundError("Access policy {0} doesn't exist.".format(linked_hub))
for hub in dps_linked_hubs:
if hub.name == linked_hub:
if apply_allocation_policy is not None:
hub.apply_allocation_policy = apply_allocation_policy
if allocation_weight is not None:
hub.allocation_weight = allocation_weight
dps = iot_dps_get(client, dps_name, resource_group_name)
dps.properties.iot_hubs = dps_linked_hubs
if no_wait:
return client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps)
LongRunningOperation(cmd.cli_ctx)(client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps))
return iot_dps_linked_hub_get(cmd, client, dps_name, linked_hub, resource_group_name)
def iot_dps_linked_hub_delete(cmd, client, dps_name, linked_hub, resource_group_name=None, no_wait=False):
if '.' not in linked_hub:
hub_client = iot_hub_service_factory(cmd.cli_ctx)
linked_hub = _get_iot_hub_hostname(hub_client, linked_hub)
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
dps_linked_hubs = []
dps_linked_hubs.extend(iot_dps_linked_hub_list(client, dps_name, resource_group_name))
if not _is_linked_hub_existed(dps_linked_hubs, linked_hub):
raise ResourceNotFoundError("Linked hub {0} doesn't exist.".format(linked_hub))
updated_hubs = [p for p in dps_linked_hubs if p.name.lower() != linked_hub.lower()]
dps = iot_dps_get(client, dps_name, resource_group_name)
dps.properties.iot_hubs = updated_hubs
if no_wait:
return client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps)
LongRunningOperation(cmd.cli_ctx)(client.iot_dps_resource.begin_create_or_update(resource_group_name, dps_name, dps))
return iot_dps_linked_hub_list(client, dps_name, resource_group_name)
# DPS certificate methods
def iot_dps_certificate_list(client, dps_name, resource_group_name=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
return client.dps_certificate.list(resource_group_name, dps_name)
def iot_dps_certificate_get(client, dps_name, certificate_name, resource_group_name=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
return client.dps_certificate.get(certificate_name, resource_group_name, dps_name)
def iot_dps_certificate_create(client, dps_name, certificate_name, certificate_path, resource_group_name=None, is_verified=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
cert_list = client.dps_certificate.list(resource_group_name, dps_name)
for cert in cert_list.value:
if cert.name == certificate_name:
raise CLIError("Certificate '{0}' already exists. Use 'iot dps certificate update'"
" to update an existing certificate.".format(certificate_name))
certificate = open_certificate(certificate_path)
if not certificate:
raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
cert_description = CertificateBodyDescription(certificate=certificate, is_verified=is_verified)
return client.dps_certificate.create_or_update(resource_group_name, dps_name, certificate_name, cert_description)
def iot_dps_certificate_update(client, dps_name, certificate_name, certificate_path, etag, resource_group_name=None, is_verified=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
cert_list = client.dps_certificate.list(resource_group_name, dps_name)
for cert in cert_list.value:
if cert.name == certificate_name:
certificate = open_certificate(certificate_path)
if not certificate:
raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
cert_description = CertificateBodyDescription(certificate=certificate, is_verified=is_verified)
return client.dps_certificate.create_or_update(resource_group_name, dps_name, certificate_name, cert_description, etag)
raise CLIError("Certificate '{0}' does not exist. Use 'iot dps certificate create' to create a new certificate."
.format(certificate_name))
def iot_dps_certificate_delete(client, dps_name, certificate_name, etag, resource_group_name=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
return client.dps_certificate.delete(resource_group_name, etag, dps_name, certificate_name)
def iot_dps_certificate_gen_code(client, dps_name, certificate_name, etag, resource_group_name=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
return client.dps_certificate.generate_verification_code(certificate_name, etag, resource_group_name, dps_name)
def iot_dps_certificate_verify(client, dps_name, certificate_name, certificate_path, etag, resource_group_name=None):
resource_group_name = _ensure_dps_resource_group_name(client, resource_group_name, dps_name)
certificate = open_certificate(certificate_path)
if not certificate:
raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
request = VerificationCodeRequest(certificate=certificate)
return client.dps_certificate.verify_certificate(certificate_name, etag, resource_group_name, dps_name, request)
# CUSTOM METHODS
def iot_hub_certificate_list(client, hub_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.certificates.list_by_iot_hub(resource_group_name, hub_name)
def iot_hub_certificate_get(client, hub_name, certificate_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.certificates.get(resource_group_name, hub_name, certificate_name)
def iot_hub_certificate_create(client, hub_name, certificate_name, certificate_path, resource_group_name=None, is_verified=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
# Get list of certs
cert_list = client.certificates.list_by_iot_hub(resource_group_name, hub_name)
for cert in cert_list.value:
if cert.name == certificate_name:
raise CLIError("Certificate '{0}' already exists. Use 'iot hub certificate update'"
" to update an existing certificate.".format(certificate_name))
certificate = open_certificate(certificate_path)
if not certificate:
raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
cert_properties = CertificateProperties(certificate=certificate, is_verified=is_verified)
if AZURE_API_PROFILES["latest"][ResourceType.MGMT_IOTHUB] in client.profile.label:
cert_description = CertificateDescription(properties=cert_properties)
return client.certificates.create_or_update(resource_group_name, hub_name, certificate_name, cert_description)
return client.certificates.create_or_update(resource_group_name, hub_name, certificate_name, cert_properties)
def iot_hub_certificate_update(client, hub_name, certificate_name, certificate_path, etag, resource_group_name=None, is_verified=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
cert_list = client.certificates.list_by_iot_hub(resource_group_name, hub_name)
for cert in cert_list.value:
if cert.name == certificate_name:
certificate = open_certificate(certificate_path)
if not certificate:
raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
cert_properties = CertificateProperties(certificate=certificate, is_verified=is_verified)
if AZURE_API_PROFILES["latest"][ResourceType.MGMT_IOTHUB] in client.profile.label:
cert_description = CertificateDescription(properties=cert_properties)
return client.certificates.create_or_update(resource_group_name, hub_name, certificate_name, cert_description, etag)
return client.certificates.create_or_update(resource_group_name, hub_name, certificate_name, cert_properties, etag)
raise CLIError("Certificate '{0}' does not exist. Use 'iot hub certificate create' to create a new certificate."
.format(certificate_name))
def iot_hub_certificate_delete(client, hub_name, certificate_name, etag, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.certificates.delete(resource_group_name, hub_name, certificate_name, etag)
def iot_hub_certificate_gen_code(client, hub_name, certificate_name, etag, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.certificates.generate_verification_code(resource_group_name, hub_name, certificate_name, etag)
def iot_hub_certificate_verify(client, hub_name, certificate_name, certificate_path, etag, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
certificate = open_certificate(certificate_path)
if not certificate:
raise CLIError("Error uploading certificate '{0}'.".format(certificate_path))
certificate_verify_body = CertificateVerificationDescription(certificate=certificate)
return client.certificates.verify(resource_group_name, hub_name, certificate_name, etag, certificate_verify_body)
def iot_hub_create(cmd, client, hub_name, resource_group_name, location=None,
sku=IotHubSku.s1.value,
unit=1,
partition_count=4,
retention_day=1,
c2d_ttl=1,
c2d_max_delivery_count=10,
disable_local_auth=None,
disable_device_sas=None,
disable_module_sas=None,
enable_data_residency=None,
feedback_lock_duration=5,
feedback_ttl=1,
feedback_max_delivery_count=10,
enable_fileupload_notifications=False,
fileupload_notification_lock_duration=5,
fileupload_notification_max_delivery_count=10,
fileupload_notification_ttl=1,
fileupload_storage_connectionstring=None,
fileupload_storage_container_name=None,
fileupload_sas_ttl=1,
fileupload_storage_authentication_type=None,
fileupload_storage_container_uri=None,
fileupload_storage_identity=None,
min_tls_version=None,
tags=None,
system_identity=None,
user_identities=None,
identity_role=None,
identity_scopes=None):
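    """Create an IoT hub with the requested SKU, messaging, file-upload and identity settings.

    File-upload notification arguments are validated up front, and identity-based file upload
    requires the corresponding system- or user-assigned identity flags. When a role and scopes
    are supplied, role assignment for the system-assigned identity runs in a callback once the
    create operation completes.
    """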
from datetime import timedelta
cli_ctx = cmd.cli_ctx
if enable_fileupload_notifications:
if not fileupload_storage_connectionstring or not fileupload_storage_container_name:
raise RequiredArgumentMissingError('Please specify storage endpoint (storage connection string and storage container name).')
    if fileupload_storage_connectionstring and not fileupload_storage_container_name:
        raise RequiredArgumentMissingError('Please specify the storage container name.')
    if fileupload_storage_container_name and not fileupload_storage_connectionstring:
        raise RequiredArgumentMissingError('Please specify the storage connection string.')
identity_based_file_upload = fileupload_storage_authentication_type and fileupload_storage_authentication_type == AuthenticationType.IdentityBased.value
if not identity_based_file_upload and fileupload_storage_identity:
raise RequiredArgumentMissingError('In order to set a fileupload storage identity, please set file upload storage authentication (--fsa) to IdentityBased')
if identity_based_file_upload or fileupload_storage_identity:
# Not explicitly setting fileupload_storage_identity assumes system-assigned managed identity for file upload
if fileupload_storage_identity in [None, SYSTEM_ASSIGNED_IDENTITY] and not system_identity:
raise ArgumentUsageError('System managed identity [--mi-system-assigned] must be enabled in order to use managed identity for file upload')
if fileupload_storage_identity and fileupload_storage_identity != SYSTEM_ASSIGNED_IDENTITY and not user_identities:
raise ArgumentUsageError('User identity [--mi-user-assigned] must be added in order to use it for file upload')
location = _ensure_location(cli_ctx, resource_group_name, location)
sku = IotHubSkuInfo(name=sku, capacity=unit)
event_hub_dic = {}
event_hub_dic['events'] = EventHubProperties(retention_time_in_days=retention_day,
partition_count=partition_count)
    feedback_properties = FeedbackProperties(lock_duration_as_iso8601=timedelta(seconds=feedback_lock_duration),
                                             ttl_as_iso8601=timedelta(hours=feedback_ttl),
                                             max_delivery_count=feedback_max_delivery_count)
    cloud_to_device_properties = CloudToDeviceProperties(max_delivery_count=c2d_max_delivery_count,
                                                         default_ttl_as_iso8601=timedelta(hours=c2d_ttl),
                                                         feedback=feedback_properties)
msg_endpoint_dic = {}
msg_endpoint_dic['fileNotifications'] = MessagingEndpointProperties(max_delivery_count=fileupload_notification_max_delivery_count,
ttl_as_iso8601=timedelta(hours=fileupload_notification_ttl),
lock_duration_as_iso8601=timedelta(seconds=fileupload_notification_lock_duration))
storage_endpoint_dic = {}
storage_endpoint_dic['$default'] = StorageEndpointProperties(
sas_ttl_as_iso8601=timedelta(hours=fileupload_sas_ttl),
connection_string=fileupload_storage_connectionstring if fileupload_storage_connectionstring else '',
container_name=fileupload_storage_container_name if fileupload_storage_container_name else '',
authentication_type=fileupload_storage_authentication_type if fileupload_storage_authentication_type else None,
container_uri=fileupload_storage_container_uri if fileupload_storage_container_uri else '',
identity=ManagedIdentity(user_assigned_identity=fileupload_storage_identity) if fileupload_storage_identity else None)
properties = IotHubProperties(event_hub_endpoints=event_hub_dic,
messaging_endpoints=msg_endpoint_dic,
storage_endpoints=storage_endpoint_dic,
cloud_to_device=cloud_to_device_properties,
min_tls_version=min_tls_version,
enable_data_residency=enable_data_residency,
disable_local_auth=disable_local_auth,
disable_device_sas=disable_device_sas,
disable_module_sas=disable_module_sas)
properties.enable_file_upload_notifications = enable_fileupload_notifications
hub_description = IotHubDescription(location=location,
sku=sku,
properties=properties,
tags=tags)
if (system_identity or user_identities):
hub_description.identity = _build_identity(system=bool(system_identity), identities=user_identities)
if bool(identity_role) ^ bool(identity_scopes):
raise RequiredArgumentMissingError('At least one scope (--scopes) and one role (--role) required for system-assigned managed identity role assignment')
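    # Role assignment for the system-assigned identity has to wait until the hub exists;
    # this callback runs once the create LRO finishes and assigns the requested role at each scope.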
def identity_assignment(lro):
try:
from azure.cli.core.commands.arm import assign_identity
instance = lro.resource().as_dict()
identity = instance.get("identity")
if identity:
principal_id = identity.get("principal_id")
if principal_id:
hub_description.identity.principal_id = principal_id
for scope in identity_scopes:
assign_identity(cmd.cli_ctx, lambda: hub_description, lambda hub: hub_description, identity_role=identity_role, identity_scope=scope)
except CloudError as e:
raise e
create = client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub_description, polling=True)
if identity_role and identity_scopes:
create.add_done_callback(identity_assignment)
return create
def iot_hub_get(cmd, client, hub_name, resource_group_name=None):
cli_ctx = cmd.cli_ctx
if resource_group_name is None:
return _get_iot_hub_by_name(client, hub_name)
if not _ensure_resource_group_existence(cli_ctx, resource_group_name):
raise CLIError("Resource group '{0}' could not be found.".format(resource_group_name))
name_availability = client.iot_hub_resource.check_name_availability(OperationInputs(name=hub_name))
if name_availability is not None and name_availability.name_available:
raise CLIError("An IotHub '{0}' under resource group '{1}' was not found."
.format(hub_name, resource_group_name))
return client.iot_hub_resource.get(resource_group_name, hub_name)
def iot_hub_list(client, resource_group_name=None):
if resource_group_name is None:
return client.iot_hub_resource.list_by_subscription()
return client.iot_hub_resource.list_by_resource_group(resource_group_name)
def update_iot_hub_custom(instance,
sku=None,
unit=None,
retention_day=None,
c2d_ttl=None,
c2d_max_delivery_count=None,
disable_local_auth=None,
disable_device_sas=None,
disable_module_sas=None,
feedback_lock_duration=None,
feedback_ttl=None,
feedback_max_delivery_count=None,
enable_fileupload_notifications=None,
fileupload_notification_lock_duration=None,
fileupload_notification_max_delivery_count=None,
fileupload_notification_ttl=None,
fileupload_storage_connectionstring=None,
fileupload_storage_container_name=None,
fileupload_sas_ttl=None,
fileupload_storage_authentication_type=None,
fileupload_storage_container_uri=None,
fileupload_storage_identity=None,
tags=None):
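    """Apply the provided (non-None) settings to the given IoT hub instance in place and return it.

    File-upload storage arguments are only reconciled against the '$default' storage endpoint
    when at least one of them is supplied.
    """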
from datetime import timedelta
if tags is not None:
instance.tags = tags
if sku is not None:
instance.sku.name = sku
if unit is not None:
instance.sku.capacity = unit
if retention_day is not None:
instance.properties.event_hub_endpoints['events'].retention_time_in_days = retention_day
if c2d_ttl is not None:
instance.properties.cloud_to_device.default_ttl_as_iso8601 = timedelta(hours=c2d_ttl)
if c2d_max_delivery_count is not None:
instance.properties.cloud_to_device.max_delivery_count = c2d_max_delivery_count
if feedback_lock_duration is not None:
duration = timedelta(seconds=feedback_lock_duration)
instance.properties.cloud_to_device.feedback.lock_duration_as_iso8601 = duration
if feedback_ttl is not None:
instance.properties.cloud_to_device.feedback.ttl_as_iso8601 = timedelta(hours=feedback_ttl)
if feedback_max_delivery_count is not None:
instance.properties.cloud_to_device.feedback.max_delivery_count = feedback_max_delivery_count
if enable_fileupload_notifications is not None:
instance.properties.enable_file_upload_notifications = enable_fileupload_notifications
if fileupload_notification_lock_duration is not None:
lock_duration = timedelta(seconds=fileupload_notification_lock_duration)
instance.properties.messaging_endpoints['fileNotifications'].lock_duration_as_iso8601 = lock_duration
if fileupload_notification_max_delivery_count is not None:
count = fileupload_notification_max_delivery_count
instance.properties.messaging_endpoints['fileNotifications'].max_delivery_count = count
if fileupload_notification_ttl is not None:
ttl = timedelta(hours=fileupload_notification_ttl)
instance.properties.messaging_endpoints['fileNotifications'].ttl_as_iso8601 = ttl
# only bother with $default storage endpoint checking if modifying fileupload params
if any([
fileupload_storage_connectionstring, fileupload_storage_container_name, fileupload_sas_ttl,
fileupload_storage_authentication_type, fileupload_storage_container_uri, fileupload_storage_identity]):
default_storage_endpoint = instance.properties.storage_endpoints.get('$default', None)
# no default storage endpoint, either recreate with existing params or throw an error
if not default_storage_endpoint:
if not all([fileupload_storage_connectionstring, fileupload_storage_container_name]):
raise UnclassifiedUserFault('This hub has no default storage endpoint for file upload.\n'
'Please recreate your default storage endpoint by running '
'`az iot hub update --name {hub_name} --fcs {storage_connection_string} --fc {storage_container_name}`')
default_storage_endpoint = StorageEndpointProperties(container_name=fileupload_storage_container_name, connection_string=fileupload_storage_connectionstring)
# if setting a fileupload storage identity or changing fileupload to identity-based
if fileupload_storage_identity or fileupload_storage_authentication_type == AuthenticationType.IdentityBased.value:
_validate_fileupload_identity(instance, fileupload_storage_identity)
instance.properties.storage_endpoints['$default'] = _process_fileupload_args(
default_storage_endpoint,
fileupload_storage_connectionstring,
fileupload_storage_container_name,
fileupload_sas_ttl,
fileupload_storage_authentication_type,
fileupload_storage_container_uri,
fileupload_storage_identity,
)
# sas token authentication switches
if disable_local_auth is not None:
instance.properties.disable_local_auth = disable_local_auth
if disable_device_sas is not None:
instance.properties.disable_device_sas = disable_device_sas
if disable_module_sas is not None:
instance.properties.disable_module_sas = disable_module_sas
return instance
def iot_hub_update(client, hub_name, parameters, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, parameters, {'IF-MATCH': parameters.etag}, polling=True)
def iot_hub_delete(client, hub_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.begin_delete(resource_group_name, hub_name, polling=True)
# pylint: disable=inconsistent-return-statements
def iot_hub_show_connection_string(client, hub_name=None, resource_group_name=None, policy_name='iothubowner',
key_type=KeyType.primary.value, show_all=False):
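    """Return the connection string for a hub, or one entry per hub when no hub name is given.

    With show_all every shared access policy is included; otherwise only the requested policy
    (iothubowner by default) with the selected key.
    """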
if hub_name is None:
hubs = iot_hub_list(client, resource_group_name)
if hubs is None:
raise CLIError("No IoT Hub found.")
def conn_str_getter(h):
return _get_hub_connection_string(client, h.name, h.additional_properties['resourcegroup'], policy_name, key_type, show_all)
return [{'name': h.name, 'connectionString': conn_str_getter(h)} for h in hubs]
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
conn_str = _get_hub_connection_string(client, hub_name, resource_group_name, policy_name, key_type, show_all)
return {'connectionString': conn_str if show_all else conn_str[0]}
def _get_hub_connection_string(client, hub_name, resource_group_name, policy_name, key_type, show_all):
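    # Builds 'HostName=<host>;SharedAccessKeyName=<policy>;SharedAccessKey=<key>' strings
    # for either all policies (show_all) or the single named policy.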
policies = []
if show_all:
policies.extend(iot_hub_policy_list(client, hub_name, resource_group_name))
else:
policies.append(iot_hub_policy_get(client, hub_name, policy_name, resource_group_name))
hostname = _get_iot_hub_hostname(client, hub_name)
conn_str_template = 'HostName={};SharedAccessKeyName={};SharedAccessKey={}'
return [conn_str_template.format(hostname,
p.key_name,
p.secondary_key if key_type == KeyType.secondary else p.primary_key) for p in policies]
def iot_hub_sku_list(client, hub_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.get_valid_skus(resource_group_name, hub_name)
def iot_hub_consumer_group_create(client, hub_name, consumer_group_name, resource_group_name=None, event_hub_name='events'):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
consumer_group_body = EventHubConsumerGroupBodyDescription(properties=EventHubConsumerGroupName(name=consumer_group_name))
    # Work around a breaking change in the track 1 SDK: older versions of
    # create_event_hub_consumer_group do not accept the consumer_group_body argument.
from azure.cli.core.util import get_arg_list
create_cg_op = client.iot_hub_resource.create_event_hub_consumer_group
if "consumer_group_body" not in get_arg_list(create_cg_op):
return create_cg_op(resource_group_name, hub_name, event_hub_name, consumer_group_name)
return create_cg_op(resource_group_name, hub_name, event_hub_name, consumer_group_name, consumer_group_body=consumer_group_body)
def iot_hub_consumer_group_list(client, hub_name, resource_group_name=None, event_hub_name='events'):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.list_event_hub_consumer_groups(resource_group_name, hub_name, event_hub_name)
def iot_hub_consumer_group_get(client, hub_name, consumer_group_name, resource_group_name=None, event_hub_name='events'):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.get_event_hub_consumer_group(resource_group_name, hub_name, event_hub_name, consumer_group_name)
def iot_hub_consumer_group_delete(client, hub_name, consumer_group_name, resource_group_name=None, event_hub_name='events'):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.delete_event_hub_consumer_group(resource_group_name, hub_name, event_hub_name, consumer_group_name)
def iot_hub_identity_assign(cmd, client, hub_name, system_identity=None, user_identities=None, identity_role=None, identity_scopes=None, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
def getter():
return iot_hub_get(cmd, client, hub_name, resource_group_name)
def setter(hub):
if user_identities and not hub.identity.user_assigned_identities:
hub.identity.user_assigned_identities = {}
if user_identities:
for identity in user_identities:
hub.identity.user_assigned_identities[identity] = hub.identity.user_assigned_identities.get(identity, {}) if hub.identity.user_assigned_identities else {}
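        # Keep SystemAssigned if it was already enabled or explicitly requested; the resulting
        # identity type reflects the combination of system and user-assigned identities.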
has_system_identity = hub.identity.type in [IdentityType.system_assigned_user_assigned.value, IdentityType.system_assigned.value]
if system_identity or has_system_identity:
hub.identity.type = IdentityType.system_assigned_user_assigned.value if hub.identity.user_assigned_identities else IdentityType.system_assigned.value
else:
hub.identity.type = IdentityType.user_assigned.value if hub.identity.user_assigned_identities else IdentityType.none.value
poller = client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
return LongRunningOperation(cmd.cli_ctx)(poller)
if bool(identity_role) ^ bool(identity_scopes):
raise RequiredArgumentMissingError('At least one scope (--scopes) and one role (--role) required for system-managed identity role assignment.')
if not system_identity and not user_identities:
raise RequiredArgumentMissingError('No identities provided to assign. Please provide system (--system) or user-assigned identities (--user).')
if identity_role and identity_scopes:
from azure.cli.core.commands.arm import assign_identity
for scope in identity_scopes:
hub = assign_identity(cmd.cli_ctx, getter, setter, identity_role=identity_role, identity_scope=scope)
return hub.identity
result = setter(getter())
return result.identity
def iot_hub_identity_show(cmd, client, hub_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
return hub.identity
def iot_hub_identity_remove(cmd, client, hub_name, system_identity=None, user_identities=None, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
hub_identity = hub.identity
if not system_identity and user_identities is None:
raise RequiredArgumentMissingError('No identities provided to remove. Please provide system (--system) or user-assigned identities (--user).')
# Turn off system managed identity
if system_identity:
if hub_identity.type not in [
IdentityType.system_assigned.value,
IdentityType.system_assigned_user_assigned.value
]:
raise ArgumentUsageError('Hub {} is not currently using a system-assigned identity'.format(hub_name))
        hub_identity.type = IdentityType.user_assigned.value if hub.identity.type in [IdentityType.user_assigned.value, IdentityType.system_assigned_user_assigned.value] else IdentityType.none.value
if user_identities:
# loop through user_identities to remove
for identity in user_identities:
            if not hub_identity.user_assigned_identities or identity not in hub_identity.user_assigned_identities:
raise ArgumentUsageError('Hub {0} is not currently using a user-assigned identity with id: {1}'.format(hub_name, identity))
del hub_identity.user_assigned_identities[identity]
if not hub_identity.user_assigned_identities:
del hub_identity.user_assigned_identities
elif isinstance(user_identities, list):
del hub_identity.user_assigned_identities
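    # Recompute the identity type from whatever identities remain after removal.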
if hub_identity.type in [
IdentityType.system_assigned.value,
IdentityType.system_assigned_user_assigned.value
]:
hub_identity.type = IdentityType.system_assigned_user_assigned.value if getattr(hub_identity, 'user_assigned_identities', None) else IdentityType.system_assigned.value
else:
hub_identity.type = IdentityType.user_assigned.value if getattr(hub_identity, 'user_assigned_identities', None) else IdentityType.none.value
hub.identity = hub_identity
if not getattr(hub.identity, 'user_assigned_identities', None):
hub.identity.user_assigned_identities = None
poller = client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
lro = LongRunningOperation(cmd.cli_ctx)(poller)
return lro.identity
def iot_hub_policy_list(client, hub_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.list_keys(resource_group_name, hub_name)
def iot_hub_policy_get(client, hub_name, policy_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.get_keys_for_key_name(resource_group_name, hub_name, policy_name)
def iot_hub_policy_create(cmd, client, hub_name, policy_name, permissions, resource_group_name=None):
rights = _convert_perms_to_access_rights(permissions)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
policies = []
policies.extend(iot_hub_policy_list(client, hub_name, hub.additional_properties['resourcegroup']))
if _does_policy_exist(policies, policy_name):
raise CLIError("Policy {0} already existed.".format(policy_name))
policies.append(SharedAccessSignatureAuthorizationRule(key_name=policy_name, rights=rights))
hub.properties.authorization_policies = policies
return client.iot_hub_resource.begin_create_or_update(hub.additional_properties['resourcegroup'], hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_policy_delete(cmd, client, hub_name, policy_name, resource_group_name=None):
import copy
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
policies = iot_hub_policy_list(client, hub_name, hub.additional_properties['resourcegroup'])
if not _does_policy_exist(copy.deepcopy(policies), policy_name):
raise CLIError("Policy {0} not found.".format(policy_name))
updated_policies = [p for p in policies if p.key_name.lower() != policy_name.lower()]
hub.properties.authorization_policies = updated_policies
return client.iot_hub_resource.begin_create_or_update(hub.additional_properties['resourcegroup'], hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_policy_key_renew(cmd, client, hub_name, policy_name, regenerate_key, resource_group_name=None, no_wait=False):
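    """Regenerate the primary or secondary key of a shared access policy, or swap the two keys,
    then write the updated policy list back to the hub.
    """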
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
policies = []
policies.extend(iot_hub_policy_list(client, hub_name, hub.additional_properties['resourcegroup']))
if not _does_policy_exist(policies, policy_name):
raise CLIError("Policy {0} not found.".format(policy_name))
updated_policies = [p for p in policies if p.key_name.lower() != policy_name.lower()]
requested_policy = [p for p in policies if p.key_name.lower() == policy_name.lower()]
if regenerate_key == RenewKeyType.Primary.value:
requested_policy[0].primary_key = generate_key()
if regenerate_key == RenewKeyType.Secondary.value:
requested_policy[0].secondary_key = generate_key()
if regenerate_key == RenewKeyType.Swap.value:
temp = requested_policy[0].primary_key
requested_policy[0].primary_key = requested_policy[0].secondary_key
requested_policy[0].secondary_key = temp
updated_policies.append(SharedAccessSignatureAuthorizationRule(key_name=requested_policy[0].key_name,
rights=requested_policy[0].rights,
primary_key=requested_policy[0].primary_key,
secondary_key=requested_policy[0].secondary_key))
hub.properties.authorization_policies = updated_policies
if no_wait:
return client.iot_hub_resource.begin_create_or_update(hub.additional_properties['resourcegroup'], hub_name, hub, {'IF-MATCH': hub.etag})
LongRunningOperation(cmd.cli_ctx)(client.iot_hub_resource.begin_create_or_update(hub.additional_properties['resourcegroup'], hub_name, hub, {'IF-MATCH': hub.etag}))
return iot_hub_policy_get(client, hub_name, policy_name, resource_group_name)
def _does_policy_exist(policies, policy_name):
policy_set = {p.key_name.lower() for p in policies}
return policy_name.lower() in policy_set
def iot_hub_get_quota_metrics(client, hub_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
    quota_metrics = []
    quota_metrics.extend(client.iot_hub_resource.get_quota_metrics(resource_group_name, hub_name))
    for quota_metric in quota_metrics:
        if quota_metric.name == 'TotalDeviceCount':
            quota_metric.max_value = 'Unlimited'
    return quota_metrics
def iot_hub_get_stats(client, hub_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
return client.iot_hub_resource.get_stats(resource_group_name, hub_name)
def validate_authentication_type_input(endpoint_type, connection_string=None, authentication_type=None, endpoint_uri=None, entity_path=None):
    is_key_based = (AuthenticationType.KeyBased.value == authentication_type) or (authentication_type is None)
    has_connection_string = (connection_string is not None)
    if is_key_based and not has_connection_string:
        raise CLIError("Please provide a connection string '--connection-string/-c'")
    if is_key_based:
        return
    # Identity-based authentication needs an endpoint uri; non-storage endpoints also need an entity path.
    has_endpoint_uri = (endpoint_uri is not None)
    has_endpoint_uri_and_path = has_endpoint_uri and (entity_path is not None)
    if EndpointType.AzureStorageContainer.value == endpoint_type.lower():
        if not has_endpoint_uri:
            raise CLIError("Please provide an endpoint uri '--endpoint-uri'")
    elif not has_endpoint_uri_and_path:
        raise CLIError("Please provide an endpoint uri '--endpoint-uri' and entity path '--entity-path'")
def iot_hub_routing_endpoint_create(cmd, client, hub_name, endpoint_name, endpoint_type,
endpoint_resource_group, endpoint_subscription_id,
connection_string=None, container_name=None, encoding=None,
resource_group_name=None, batch_frequency=300, chunk_size_window=300,
file_name_format='{iothub}/{partition}/{YYYY}/{MM}/{DD}/{HH}/{mm}',
authentication_type=None, endpoint_uri=None, entity_path=None,
identity=None):
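    """Append a routing endpoint (Event Hub, Service Bus queue/topic, or storage container)
    to the hub's routing configuration and update the hub.

    A ManagedIdentity object is attached only when a user-assigned identity is specified;
    system-assigned or absent identities rely on the endpoint's authentication_type alone.
    """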
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
if identity and authentication_type != AuthenticationType.IdentityBased.value:
raise ArgumentUsageError("In order to use an identity for authentication, you must select --auth-type as 'identityBased'")
if EndpointType.EventHub.value == endpoint_type.lower():
hub.properties.routing.endpoints.event_hubs.append(
RoutingEventHubProperties(
connection_string=connection_string,
name=endpoint_name,
subscription_id=endpoint_subscription_id,
resource_group=endpoint_resource_group,
authentication_type=authentication_type,
endpoint_uri=endpoint_uri,
entity_path=entity_path,
identity=ManagedIdentity(user_assigned_identity=identity) if identity and identity not in [IdentityType.none.value, SYSTEM_ASSIGNED_IDENTITY] else None
)
)
elif EndpointType.ServiceBusQueue.value == endpoint_type.lower():
hub.properties.routing.endpoints.service_bus_queues.append(
RoutingServiceBusQueueEndpointProperties(
connection_string=connection_string,
name=endpoint_name,
subscription_id=endpoint_subscription_id,
resource_group=endpoint_resource_group,
authentication_type=authentication_type,
endpoint_uri=endpoint_uri,
entity_path=entity_path,
identity=ManagedIdentity(user_assigned_identity=identity) if identity and identity not in [IdentityType.none.value, SYSTEM_ASSIGNED_IDENTITY] else None
)
)
elif EndpointType.ServiceBusTopic.value == endpoint_type.lower():
hub.properties.routing.endpoints.service_bus_topics.append(
RoutingServiceBusTopicEndpointProperties(
connection_string=connection_string,
name=endpoint_name,
subscription_id=endpoint_subscription_id,
resource_group=endpoint_resource_group,
authentication_type=authentication_type,
endpoint_uri=endpoint_uri,
entity_path=entity_path,
identity=ManagedIdentity(user_assigned_identity=identity) if identity and identity not in [IdentityType.none.value, SYSTEM_ASSIGNED_IDENTITY] else None
)
)
elif EndpointType.AzureStorageContainer.value == endpoint_type.lower():
if not container_name:
raise CLIError("Container name is required.")
hub.properties.routing.endpoints.storage_containers.append(
RoutingStorageContainerProperties(
connection_string=connection_string,
name=endpoint_name,
subscription_id=endpoint_subscription_id,
resource_group=endpoint_resource_group,
container_name=container_name,
encoding=encoding.lower() if encoding else EncodingFormat.AVRO.value,
file_name_format=file_name_format,
batch_frequency_in_seconds=batch_frequency,
max_chunk_size_in_bytes=(chunk_size_window * 1048576),
authentication_type=authentication_type,
endpoint_uri=endpoint_uri,
identity=ManagedIdentity(user_assigned_identity=identity) if identity and identity not in [IdentityType.none.value, SYSTEM_ASSIGNED_IDENTITY] else None
)
)
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_routing_endpoint_list(cmd, client, hub_name, endpoint_type=None, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
if not endpoint_type:
return hub.properties.routing.endpoints
if EndpointType.EventHub.value == endpoint_type.lower():
return hub.properties.routing.endpoints.event_hubs
if EndpointType.ServiceBusQueue.value == endpoint_type.lower():
return hub.properties.routing.endpoints.service_bus_queues
if EndpointType.ServiceBusTopic.value == endpoint_type.lower():
return hub.properties.routing.endpoints.service_bus_topics
if EndpointType.AzureStorageContainer.value == endpoint_type.lower():
return hub.properties.routing.endpoints.storage_containers
def iot_hub_routing_endpoint_show(cmd, client, hub_name, endpoint_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
for event_hub in hub.properties.routing.endpoints.event_hubs:
if event_hub.name.lower() == endpoint_name.lower():
return event_hub
for service_bus_queue in hub.properties.routing.endpoints.service_bus_queues:
if service_bus_queue.name.lower() == endpoint_name.lower():
return service_bus_queue
for service_bus_topic in hub.properties.routing.endpoints.service_bus_topics:
if service_bus_topic.name.lower() == endpoint_name.lower():
return service_bus_topic
for storage_container in hub.properties.routing.endpoints.storage_containers:
if storage_container.name.lower() == endpoint_name.lower():
return storage_container
raise CLIError("No endpoint found.")
def iot_hub_routing_endpoint_delete(cmd, client, hub_name, endpoint_name=None, endpoint_type=None, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
hub.properties.routing.endpoints = _delete_routing_endpoints(endpoint_name, endpoint_type, hub.properties.routing.endpoints)
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_route_create(cmd, client, hub_name, route_name, source_type, endpoint_name, enabled=None, condition=None,
resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
hub.properties.routing.routes.append(
RouteProperties(
source=source_type,
name=route_name,
endpoint_names=endpoint_name.split(),
condition=('true' if condition is None else condition),
is_enabled=(True if enabled is None else enabled)
)
)
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_route_list(cmd, client, hub_name, source_type=None, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
if source_type:
return [route for route in hub.properties.routing.routes if route.source.lower() == source_type.lower()]
return hub.properties.routing.routes
def iot_hub_route_show(cmd, client, hub_name, route_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
for route in hub.properties.routing.routes:
if route.name.lower() == route_name.lower():
return route
raise CLIError("No route found.")
def iot_hub_route_delete(cmd, client, hub_name, route_name=None, source_type=None, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
if not route_name and not source_type:
hub.properties.routing.routes = []
if route_name:
hub.properties.routing.routes = [route for route in hub.properties.routing.routes
if route.name.lower() != route_name.lower()]
if source_type:
hub.properties.routing.routes = [route for route in hub.properties.routing.routes
if route.source.lower() != source_type.lower()]
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_route_update(cmd, client, hub_name, route_name, source_type=None, endpoint_name=None, enabled=None,
condition=None, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
updated_route = next((route for route in hub.properties.routing.routes
if route.name.lower() == route_name.lower()), None)
if updated_route:
updated_route.source = updated_route.source if source_type is None else source_type
updated_route.endpoint_names = updated_route.endpoint_names if endpoint_name is None else endpoint_name.split()
updated_route.condition = updated_route.condition if condition is None else condition
updated_route.is_enabled = updated_route.is_enabled if enabled is None else enabled
else:
raise CLIError("No route found.")
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_hub_route_test(cmd, client, hub_name, route_name=None, source_type=None, body=None, app_properties=None,
system_properties=None, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
route_message = RoutingMessage(
body=body,
app_properties=app_properties,
system_properties=system_properties
)
if route_name:
route = iot_hub_route_show(cmd, client, hub_name, route_name, resource_group_name)
test_route_input = TestRouteInput(
message=route_message,
twin=None,
route=route
)
return client.iot_hub_resource.test_route(hub_name, resource_group_name, test_route_input)
test_all_routes_input = TestAllRoutesInput(
routing_source=source_type,
message=route_message,
twin=None
)
return client.iot_hub_resource.test_all_routes(hub_name, resource_group_name, test_all_routes_input)
def iot_message_enrichment_create(cmd, client, hub_name, key, value, endpoints, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
if hub.properties.routing.enrichments is None:
hub.properties.routing.enrichments = []
hub.properties.routing.enrichments.append(EnrichmentProperties(key=key, value=value, endpoint_names=endpoints))
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
def iot_message_enrichment_update(cmd, client, hub_name, key, value, endpoints, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
to_update = next((endpoint for endpoint in hub.properties.routing.enrichments if endpoint.key == key), None)
if to_update:
to_update.key = key
to_update.value = value
to_update.endpoint_names = endpoints
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
raise CLIError('No message enrichment with that key exists')
def iot_message_enrichment_delete(cmd, client, hub_name, key, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
to_remove = next((endpoint for endpoint in hub.properties.routing.enrichments if endpoint.key == key), None)
if to_remove:
hub.properties.routing.enrichments.remove(to_remove)
return client.iot_hub_resource.begin_create_or_update(resource_group_name, hub_name, hub, {'IF-MATCH': hub.etag})
raise CLIError('No message enrichment with that key exists')
def iot_message_enrichment_list(cmd, client, hub_name, resource_group_name=None):
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
return hub.properties.routing.enrichments
def iot_hub_devicestream_show(cmd, client, hub_name, resource_group_name=None):
from azure.cli.core.commands.client_factory import get_mgmt_service_client
resource_group_name = _ensure_hub_resource_group_name(client, resource_group_name, hub_name)
# DeviceStreams property is still in preview, so until GA we need to use a preview API-version
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_IOTHUB, api_version='2019-07-01-preview')
hub = client.iot_hub_resource.get(resource_group_name, hub_name)
return hub.properties.device_streams
def iot_hub_manual_failover(cmd, client, hub_name, resource_group_name=None, no_wait=False):
hub = iot_hub_get(cmd, client, hub_name, resource_group_name)
resource_group_name = hub.additional_properties['resourcegroup']
failover_region = next(x.location for x in hub.properties.locations if x.role.lower() == 'secondary')
failover_input = FailoverInput(failover_region=failover_region)
if no_wait:
return client.iot_hub.begin_manual_failover(hub_name, resource_group_name, failover_input)
LongRunningOperation(cmd.cli_ctx)(client.iot_hub.begin_manual_failover(hub_name, resource_group_name, failover_input))
return iot_hub_get(cmd, client, hub_name, resource_group_name)
def _get_iot_hub_by_name(client, hub_name):
all_hubs = iot_hub_list(client)
if all_hubs is None:
raise CLIError("No IoT Hub found in current subscription.")
try:
target_hub = next(x for x in all_hubs if hub_name.lower() == x.name.lower())
except StopIteration:
raise CLIError("No IoT Hub found with name {} in current subscription.".format(hub_name))
return target_hub
def _get_iot_hub_hostname(client, hub_name):
# Intermediate fix to support domains beyond azure-devices.net properly
hub = _get_iot_hub_by_name(client, hub_name)
return hub.properties.host_name
def _ensure_resource_group_existence(cli_ctx, resource_group_name):
resource_group_client = resource_service_factory(cli_ctx).resource_groups
return resource_group_client.check_existence(resource_group_name)
def _ensure_hub_resource_group_name(client, resource_group_name, hub_name):
if resource_group_name is None:
return _get_iot_hub_by_name(client, hub_name).additional_properties['resourcegroup']
return resource_group_name
# Convert permission list to AccessRights from IoT SDK.
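# For example, the permissions ['serviceconnect', 'registryread'] are de-duplicated, sorted and
# joined into 'registryread_serviceconnect', which maps to AccessRights.registry_read_service_connect.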
def _convert_perms_to_access_rights(perm_list):
perm_set = set(perm_list) # remove duplicate
sorted_perm_list = sorted(perm_set)
perm_key = '_'.join(sorted_perm_list)
access_rights_mapping = {
'registryread': AccessRights.registry_read,
'registrywrite': AccessRights.registry_write,
'serviceconnect': AccessRights.service_connect,
'deviceconnect': AccessRights.device_connect,
'registryread_registrywrite': AccessRights.registry_read_registry_write,
'registryread_serviceconnect': AccessRights.registry_read_service_connect,
'deviceconnect_registryread': AccessRights.registry_read_device_connect,
'registrywrite_serviceconnect': AccessRights.registry_write_service_connect,
'deviceconnect_registrywrite': AccessRights.registry_write_device_connect,
'deviceconnect_serviceconnect': AccessRights.service_connect_device_connect,
'registryread_registrywrite_serviceconnect': AccessRights.registry_read_registry_write_service_connect,
'deviceconnect_registryread_registrywrite': AccessRights.registry_read_registry_write_device_connect,
'deviceconnect_registryread_serviceconnect': AccessRights.registry_read_service_connect_device_connect,
'deviceconnect_registrywrite_serviceconnect': AccessRights.registry_write_service_connect_device_connect,
'deviceconnect_registryread_registrywrite_serviceconnect': AccessRights.registry_read_registry_write_service_connect_device_connect
}
return access_rights_mapping[perm_key]
def _is_linked_hub_existed(hubs, hub_name):
hub_set = {h.name.lower() for h in hubs}
return hub_name.lower() in hub_set
def _get_iot_dps_by_name(client, dps_name, resource_group=None):
all_dps = iot_dps_list(client, resource_group)
if all_dps is None:
raise CLIError("No DPS found in current subscription.")
try:
target_dps = next(x for x in all_dps if dps_name.lower() == x.name.lower())
except StopIteration:
raise CLIError("No DPS found with name {} in current subscription.".format(dps_name))
return target_dps
def _ensure_dps_resource_group_name(client, resource_group_name, dps_name):
if resource_group_name is None:
return _get_iot_dps_by_name(client, dps_name).additional_properties['resourcegroup']
return resource_group_name
def _check_dps_name_availability(iot_dps_resource, dps_name):
name_availability = iot_dps_resource.check_provisioning_service_name_availability(DpsOperationInputs(name=dps_name))
if name_availability is not None and not name_availability.name_available:
raise CLIError(name_availability.message)
def _convert_rights_to_access_rights(right_list):
right_set = set(right_list) # remove duplicate
return ",".join(list(right_set))
def _delete_routing_endpoints(endpoint_name, endpoint_type, endpoints):
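    """Remove routing endpoints: by type (clears every endpoint of that type), by name
    (removes the matching endpoint wherever it lives), or all endpoints when neither is given.
    """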
if endpoint_type:
if EndpointType.ServiceBusQueue.value == endpoint_type.lower():
endpoints.service_bus_queues = []
elif EndpointType.ServiceBusTopic.value == endpoint_type.lower():
endpoints.service_bus_topics = []
elif EndpointType.AzureStorageContainer.value == endpoint_type.lower():
endpoints.storage_containers = []
elif EndpointType.EventHub.value == endpoint_type.lower():
endpoints.event_hubs = []
if endpoint_name:
if any(e.name.lower() == endpoint_name.lower() for e in endpoints.service_bus_queues):
sbq_endpoints = [e for e in endpoints.service_bus_queues if e.name.lower() != endpoint_name.lower()]
endpoints.service_bus_queues = sbq_endpoints
elif any(e.name.lower() == endpoint_name.lower() for e in endpoints.service_bus_topics):
sbt_endpoints = [e for e in endpoints.service_bus_topics if e.name.lower() != endpoint_name.lower()]
endpoints.service_bus_topics = sbt_endpoints
elif any(e.name.lower() == endpoint_name.lower() for e in endpoints.storage_containers):
sc_endpoints = [e for e in endpoints.storage_containers if e.name.lower() != endpoint_name.lower()]
endpoints.storage_containers = sc_endpoints
elif any(e.name.lower() == endpoint_name.lower() for e in endpoints.event_hubs):
eh_endpoints = [e for e in endpoints.event_hubs if e.name.lower() != endpoint_name.lower()]
endpoints.event_hubs = eh_endpoints
if not endpoint_type and not endpoint_name:
endpoints.service_bus_queues = []
endpoints.service_bus_topics = []
endpoints.storage_containers = []
endpoints.event_hubs = []
return endpoints
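# Summary of the branches above: a given endpoint_type clears every endpoint of
# that type, a given endpoint_name removes the matching endpoint from whichever
# list contains it, and passing neither clears all four endpoint lists.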
def iot_central_app_create(
cmd, client, app_name, resource_group_name, subdomain, sku="ST2",
location=None, template=None, display_name=None, no_wait=False, mi_system_assigned=False
):
cli_ctx = cmd.cli_ctx
location = _ensure_location(cli_ctx, resource_group_name, location)
display_name = _ensure_display_name(app_name, display_name)
appSku = AppSkuInfo(name=sku)
appid = {"type": "SystemAssigned"} if mi_system_assigned else None
app = App(subdomain=subdomain,
location=location,
display_name=display_name,
sku=appSku,
template=template,
identity=appid)
return sdk_no_wait(no_wait, client.apps.begin_create_or_update, resource_group_name, app_name, app)
def iot_central_app_get(client, app_name, resource_group_name=None):
if resource_group_name is None:
return _get_iot_central_app_by_name(client, app_name)
return client.apps.get(resource_group_name, app_name)
def iot_central_app_delete(client, app_name, resource_group_name, no_wait=False):
return sdk_no_wait(no_wait, client.apps.begin_delete, resource_group_name, app_name)
def iot_central_app_list(client, resource_group_name=None):
if resource_group_name is None:
return client.apps.list_by_subscription()
return client.apps.list_by_resource_group(resource_group_name)
def iot_central_app_update(client, app_name, parameters, resource_group_name):
return client.apps.begin_create_or_update(resource_group_name, app_name, parameters)
def iot_central_app_assign_identity(client, app_name, system_assigned=False, resource_group_name=None):
app = iot_central_app_get(client, app_name, resource_group_name)
if system_assigned:
app.identity.type = SYSTEM_ASSIGNED
poller = iot_central_app_update(client, app_name, app, resource_group_name)
return poller.result().identity
def iot_central_app_remove_identity(client, app_name, system_assigned=False, resource_group_name=None):
app = iot_central_app_get(client, app_name, resource_group_name)
if system_assigned and (app.identity.type.upper() == SYSTEM_ASSIGNED.upper()):
app.identity.type = NONE_IDENTITY
poller = iot_central_app_update(client, app_name, app, resource_group_name)
return poller.result().identity
def iot_central_app_show_identity(client, app_name, resource_group_name=None):
app = iot_central_app_get(client, app_name, resource_group_name)
return app.identity
def _ensure_location(cli_ctx, resource_group_name, location):
"""Check to see if a location was provided. If not,
fall back to the resource group location.
:param object cli_ctx: CLI Context
:param str resource_group_name: Resource group name
:param str location: Location to create the resource
"""
if location is None:
resource_group_client = resource_service_factory(cli_ctx).resource_groups
return resource_group_client.get(resource_group_name).location
return location
def _ensure_display_name(app_name, display_name):
if not display_name or display_name.isspace():
return app_name
return display_name
def _get_iot_central_app_by_name(client, app_name):
"""Search the current subscription for an app with the given name.
:param object client: IoTCentralClient
:param str app_name: App name to search for
"""
all_apps = iot_central_app_list(client)
if all_apps is None:
raise CLIError(
"No IoT Central application found in current subscription.")
try:
target_app = next(
x for x in all_apps if app_name.lower() == x.name.lower())
except StopIteration:
raise CLIError(
"No IoT Central application found with name {} in current subscription.".format(app_name))
return target_app
def get_private_link_resource(client, name=None, connection_id=None, resource_group_name=None, group_id=None):
if resource_group_name and name and group_id:
return client.private_links.get(resource_group_name=resource_group_name,
resource_name=name,
group_id=group_id)
if connection_id:
id_list = connection_id.split('/')
resource_group_name = id_list[id_list.index('resourceGroups') + 1]
name = id_list[id_list.index('iotApps') + 1]
group_id = id_list[id_list.index('privateLinkResources') + 1]
return client.private_links.get(resource_group_name=resource_group_name,
resource_name=name,
group_id=group_id)
raise RequiredArgumentMissingError(
"Must provide private link resource ID or resource name, resource group, and group id.")
def list_private_link_resource(client, app_name=None, connection_id=None, resource_group_name=None, source_type=None):
if app_name and resource_group_name and source_type:
if source_type.lower() != 'microsoft.iotcentral/iotapps':
raise InvalidArgumentValueError(
"Resource type must be Microsoft.IoTCentral/iotApps")
elif connection_id:
id_list = connection_id.split('/')
if id_list[id_list.index('providers') + 1].lower() != 'microsoft.iotcentral':
raise InvalidArgumentValueError(
"Type must be Microsoft.IoTCentral/iotApps")
resource_group_name = id_list[id_list.index('resourceGroups') + 1]
app_name = id_list[id_list.index('iotApps') + 1]
else:
raise RequiredArgumentMissingError(
"Must provide private endpoint connection resource ID or resource name, resource group, and resource type.")
return client.private_links.list(resource_group_name, app_name)
def show_private_endpoint_connection(client, resource_group_name=None, connection_id=None, account_name=None, private_endpoint_connection_name=None):
return get_private_endpoint_connection(client=client,
resource_group_name=resource_group_name,
connection_id=connection_id,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
return_args=False)
def list_private_endpoint_connection(client, resource_group_name=None, connection_id=None, account_name=None):
if connection_id:
id_list = connection_id.split('/')
if id_list[id_list.index('providers') + 1].lower() != 'microsoft.iotcentral':
raise InvalidArgumentValueError(
"Type must be Microsoft.IoTCentral/iotApps")
resource_group_name = id_list[id_list.index('resourceGroups') + 1]
account_name = id_list[id_list.index('iotApps') + 1]
if resource_group_name is None or account_name is None:
raise RequiredArgumentMissingError(
"Must provide private endpoint connection resource ID or resource name, resource group, and resource type.")
return client.private_endpoint_connections.list(resource_group_name, account_name)
def get_private_endpoint_connection(client, resource_group_name=None, connection_id=None, account_name=None, private_endpoint_connection_name=None, return_args=False):
if resource_group_name and account_name and private_endpoint_connection_name:
output = client.private_endpoint_connections.get(resource_group_name=resource_group_name,
resource_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name)
if return_args is False:
return output
return [output, resource_group_name, account_name, private_endpoint_connection_name]
if connection_id:
id_list = connection_id.split('/')
resource_group_name = id_list[id_list.index('resourceGroups') + 1]
account_name = id_list[id_list.index('iotApps') + 1]
private_endpoint_connection_name = id_list[id_list.index('privateEndpointConnections') + 1]
output = client.private_endpoint_connections.get(resource_group_name=resource_group_name,
resource_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name)
if return_args is False:
return output
return [output, resource_group_name, account_name, private_endpoint_connection_name]
raise RequiredArgumentMissingError(
"Account name, resource group, and private endpoint connection name are required unless id is specified.")
def _update_private_endpoint_connection_status(client, resource_group_name, account_name, connection_id, private_endpoint_connection_name, is_approved=True, description=None): # pylint: disable=unused-argument
from azure.core.exceptions import HttpResponseError
getInfoArr = get_private_endpoint_connection(client,
resource_group_name=resource_group_name,
connection_id=connection_id,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
return_args=True)
private_endpoint_connection = getInfoArr[0]
rg = getInfoArr[1]
acc_name = getInfoArr[2]
pec_name = getInfoArr[3]
old_status = private_endpoint_connection.private_link_service_connection_state.status
new_status = "Approved" if is_approved else "Rejected"
private_endpoint_connection.private_link_service_connection_state.status = new_status
private_endpoint_connection.private_link_service_connection_state.description = description
try:
return client.private_endpoint_connections.begin_create(resource_group_name=rg,
resource_name=acc_name,
private_endpoint_connection=private_endpoint_connection,
private_endpoint_connection_name=pec_name)
except HttpResponseError as ex:
if ex.response.status_code == 400:
if new_status == "Approved" and old_status == "Rejected":
raise CLIError(ex.response, "You cannot approve the connection request after rejection. Please create "
"a new connection for approval.")
if new_status == "Approved" and old_status == "Approved":
raise CLIError(ex.response, "Your connection is already approved. No need to approve again.")
raise ex
def approve_private_endpoint_connection(client, resource_group_name=None, account_name=None, private_endpoint_connection_name=None, connection_id=None, description=None):
return _update_private_endpoint_connection_status(client,
resource_group_name=resource_group_name,
account_name=account_name,
connection_id=connection_id,
private_endpoint_connection_name=private_endpoint_connection_name,
description=description)
def reject_private_endpoint_connection(client, resource_group_name=None, account_name=None, private_endpoint_connection_name=None, connection_id=None, description=None):
return _update_private_endpoint_connection_status(client,
resource_group_name=resource_group_name,
account_name=account_name,
connection_id=connection_id,
is_approved=False,
private_endpoint_connection_name=private_endpoint_connection_name,
description=description)
def delete_private_endpoint_connection(client, resource_group_name=None, account_name=None, private_endpoint_connection_name=None, connection_id=None):
getInfoArr = get_private_endpoint_connection(client,
resource_group_name=resource_group_name,
connection_id=connection_id,
account_name=account_name,
private_endpoint_connection_name=private_endpoint_connection_name,
return_args=True)
rg = getInfoArr[1]
acc_name = getInfoArr[2]
pec_name = getInfoArr[3]
# private_endpoint_connection.private_link_service_connection_state.status = new_status
# private_endpoint_connection.private_link_service_connection_state.description = description
return client.private_endpoint_connections.begin_delete(resource_group_name=rg,
resource_name=acc_name,
private_endpoint_connection_name=pec_name)
def _process_fileupload_args(
default_storage_endpoint,
fileupload_storage_connectionstring=None,
fileupload_storage_container_name=None,
fileupload_sas_ttl=None,
fileupload_storage_authentication_type=None,
fileupload_storage_container_uri=None,
fileupload_storage_identity=None,
):
from datetime import timedelta
if fileupload_storage_authentication_type and fileupload_storage_authentication_type == AuthenticationType.IdentityBased.value:
default_storage_endpoint.authentication_type = AuthenticationType.IdentityBased.value
if fileupload_storage_container_uri:
default_storage_endpoint.container_uri = fileupload_storage_container_uri
elif fileupload_storage_authentication_type and fileupload_storage_authentication_type == AuthenticationType.KeyBased.value:
default_storage_endpoint.authentication_type = AuthenticationType.KeyBased.value
default_storage_endpoint.identity = None
elif fileupload_storage_authentication_type is not None:
default_storage_endpoint.authentication_type = None
default_storage_endpoint.container_uri = None
# TODO - remove connection string and set containerURI once fileUpload SAS URL is enabled
if fileupload_storage_connectionstring is not None and fileupload_storage_container_name is not None:
default_storage_endpoint.connection_string = fileupload_storage_connectionstring
default_storage_endpoint.container_name = fileupload_storage_container_name
elif fileupload_storage_connectionstring is not None:
        raise RequiredArgumentMissingError('Please also specify the storage container name.')
elif fileupload_storage_container_name is not None:
        raise RequiredArgumentMissingError('Please also specify the storage connection string.')
if fileupload_sas_ttl is not None:
default_storage_endpoint.sas_ttl_as_iso8601 = timedelta(hours=fileupload_sas_ttl)
# Fix for identity/authentication-type params missing on hybrid profile api
if hasattr(default_storage_endpoint, 'authentication_type'):
# If we are now (or will be) using fsa=identity AND we've set a new identity
if default_storage_endpoint.authentication_type == AuthenticationType.IdentityBased.value and fileupload_storage_identity:
# setup new fsi
default_storage_endpoint.identity = ManagedIdentity(
user_assigned_identity=fileupload_storage_identity) if fileupload_storage_identity not in [IdentityType.none.value, SYSTEM_ASSIGNED_IDENTITY] else None
# otherwise - let them know they need identity-based auth enabled
elif fileupload_storage_identity:
raise ArgumentUsageError('In order to set a file upload storage identity, you must set the file upload storage authentication type (--fsa) to IdentityBased')
return default_storage_endpoint
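# Summary: identity-based auth updates container_uri (and may attach a
# ManagedIdentity below), key-based auth clears any identity, the connection
# string and container name must be supplied together, and a storage identity
# without identity-based auth raises ArgumentUsageError.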
def _validate_fileupload_identity(instance, fileupload_storage_identity):
instance_identity = _get_hub_identity_type(instance)
# if hub has no identity
if not instance_identity or instance_identity == IdentityType.none.value:
raise ArgumentUsageError('Hub has no identity assigned, please assign a system or user-assigned managed identity to use for file-upload with `az iot hub identity assign`')
has_system_identity = instance_identity in [IdentityType.system_assigned.value, IdentityType.system_assigned_user_assigned.value]
has_user_identity = instance_identity in [IdentityType.user_assigned.value, IdentityType.system_assigned_user_assigned.value]
# if changing storage identity to '[system]'
if fileupload_storage_identity in [None, SYSTEM_ASSIGNED_IDENTITY]:
if not has_system_identity:
raise ArgumentUsageError('System managed identity must be enabled in order to use managed identity for file upload')
# if changing to user identity and hub has no user identities
elif fileupload_storage_identity and not has_user_identity:
raise ArgumentUsageError('User identity {} must be added to hub in order to use it for file upload'.format(fileupload_storage_identity))
def _get_hub_identity_type(instance):
identity = getattr(instance, 'identity', {})
return getattr(identity, 'type', None)
def _build_identity(system=False, identities=None):
identity_type = IdentityType.none.value
if not (system or identities):
return ArmIdentity(type=identity_type)
if system:
identity_type = IdentityType.system_assigned.value
user_identities = list(identities) if identities else None
if user_identities and identity_type == IdentityType.system_assigned.value:
identity_type = IdentityType.system_assigned_user_assigned.value
elif user_identities:
identity_type = IdentityType.user_assigned.value
identity = ArmIdentity(type=identity_type)
if user_identities:
identity.user_assigned_identities = {i: {} for i in user_identities} # pylint: disable=not-an-iterable
return identity
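# Example: _build_identity(system=True, identities=['<user-identity-resource-id>'])
# returns an ArmIdentity of type system_assigned_user_assigned whose
# user_assigned_identities dict is keyed by the (placeholder) resource id.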
| {
"content_hash": "05499451ea105c2ed3fde1d3f30b88ed",
"timestamp": "",
"source": "github",
"line_count": 1660,
"max_line_length": 210,
"avg_line_length": 55.76144578313253,
"alnum_prop": 0.6766669547556285,
"repo_name": "yugangw-msft/azure-cli",
"id": "678605da12485347c5b123681ed2df29534c1807",
"size": "93037",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/iot/custom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
"""
AUTO-GENERATED BY `scripts/generate_protocol.py` using `data/browser_protocol.json`
and `data/js_protocol.json` as inputs! Please do not modify this file.
"""
import logging
from typing import Any, Optional, Union
from chromewhip.helpers import PayloadMixin, BaseEvent, ChromeTypeBase
log = logging.getLogger(__name__)
from chromewhip.protocol import page as Page
class Testing(PayloadMixin):
""" Testing domain is a dumping ground for the capabilities requires for browser or app testing that do not fit other
domains.
"""
@classmethod
def generateTestReport(cls,
message: Union['str'],
group: Optional['str'] = None,
):
"""Generates a report for testing.
:param message: Message to be displayed in the report.
:type message: str
:param group: Specifies the endpoint group to deliver the report to.
:type group: str
"""
return (
cls.build_send_payload("generateTestReport", {
"message": message,
"group": group,
}),
None
)
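# Illustrative call: Testing.generateTestReport(message='hello') simply returns
# the built "generateTestReport" payload paired with None.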
| {
"content_hash": "6f1ad6947b0b4384eef63da5f3b27d6d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 121,
"avg_line_length": 32.27777777777778,
"alnum_prop": 0.6075731497418244,
"repo_name": "chuckus/chromewhip",
"id": "010839aeddb6fc580438c259bcecbea9c1b0722e",
"size": "1215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromewhip/protocol/testing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1951"
},
{
"name": "JavaScript",
"bytes": "261"
},
{
"name": "Makefile",
"bytes": "229"
},
{
"name": "Python",
"bytes": "2227857"
},
{
"name": "Shell",
"bytes": "2787"
}
],
"symlink_target": ""
} |
from mistral.db.v2.sqlalchemy import models
from mistral.engine import tasks
from mistral.tests.unit import base
from mistral.workflow import states
# TODO(rakhmerov): This test is a legacy of the previous 'with-items'
# implementation, when most of its logic lived in the with_items.py module.
# It makes sense to add more tests for the various methods of WithItemsTask.
class WithItemsTaskTest(base.BaseTest):
@staticmethod
def get_action_ex(accepted, state, index):
return models.ActionExecution(
accepted=accepted,
state=state,
runtime_context={'index': index}
)
def test_get_next_indices(self):
# Task execution for running 6 items with concurrency=3.
task_ex = models.TaskExecution(
spec={
'action': 'myaction'
},
runtime_context={
'with_items': {
'capacity': 3,
'count': 6
}
},
action_executions=[],
workflow_executions=[]
)
task = tasks.WithItemsTask(None, None, None, {}, task_ex)
# Set 3 items: 2 success and 1 error unaccepted.
task_ex.action_executions += [
self.get_action_ex(True, states.SUCCESS, 0),
self.get_action_ex(True, states.SUCCESS, 1),
self.get_action_ex(False, states.ERROR, 2)
]
        # Then call _get_next_indexes() and expect [2, 3, 4].
indexes = task._get_next_indexes()
self.assertListEqual([2, 3, 4], indexes)
| {
"content_hash": "c12de4cde106045dcf044d0fdeb7ae6e",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 71,
"avg_line_length": 32.5625,
"alnum_prop": 0.581573896353167,
"repo_name": "StackStorm/mistral",
"id": "9ba5dda565b9de6571b3252cfc148db853e335dc",
"size": "2172",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mistral/tests/unit/engine/test_with_items_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1494"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2249335"
},
{
"name": "Shell",
"bytes": "31326"
}
],
"symlink_target": ""
} |
"""The gateway tests for the august platform."""
from unittest.mock import MagicMock, patch
from august.authenticator_common import AuthenticationState
from homeassistant.components.august.const import DOMAIN
from homeassistant.components.august.gateway import AugustGateway
from tests.components.august.mocks import _mock_august_authentication, _mock_get_config
async def test_refresh_access_token(hass):
"""Test token refreshes."""
await _patched_refresh_access_token(hass, "new_token", 5678)
@patch("homeassistant.components.august.gateway.ApiAsync.async_get_operable_locks")
@patch("homeassistant.components.august.gateway.AuthenticatorAsync.async_authenticate")
@patch("homeassistant.components.august.gateway.AuthenticatorAsync.should_refresh")
@patch(
"homeassistant.components.august.gateway.AuthenticatorAsync.async_refresh_access_token"
)
async def _patched_refresh_access_token(
hass,
new_token,
new_token_expire_time,
refresh_access_token_mock,
should_refresh_mock,
authenticate_mock,
async_get_operable_locks_mock,
):
authenticate_mock.side_effect = MagicMock(
return_value=_mock_august_authentication(
"original_token", 1234, AuthenticationState.AUTHENTICATED
)
)
august_gateway = AugustGateway(hass)
mocked_config = _mock_get_config()
await august_gateway.async_setup(mocked_config[DOMAIN])
await august_gateway.async_authenticate()
should_refresh_mock.return_value = False
await august_gateway.async_refresh_access_token_if_needed()
refresh_access_token_mock.assert_not_called()
should_refresh_mock.return_value = True
refresh_access_token_mock.return_value = _mock_august_authentication(
new_token, new_token_expire_time, AuthenticationState.AUTHENTICATED
)
await august_gateway.async_refresh_access_token_if_needed()
refresh_access_token_mock.assert_called()
assert august_gateway.access_token == new_token
assert august_gateway.authentication.access_token_expires == new_token_expire_time
| {
"content_hash": "a14edeb48cae907e2ac82a1888376ebb",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 91,
"avg_line_length": 38.81132075471698,
"alnum_prop": 0.7578998541565386,
"repo_name": "partofthething/home-assistant",
"id": "ced073600082da14ffcb9818acd17b942156dc03",
"size": "2057",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/august/test_gateway.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
import array
import binascii
import zmq
import struct
port = 25332
zmqContext = zmq.Context()
zmqSubSocket = zmqContext.socket(zmq.SUB)
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "hashbrick")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "hashtx")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "rawbrick")
zmqSubSocket.setsockopt(zmq.SUBSCRIBE, "rawtx")
zmqSubSocket.connect("tcp://127.0.0.1:%i" % port)
try:
while True:
msg = zmqSubSocket.recv_multipart()
topic = str(msg[0])
body = msg[1]
sequence = "Unknown";
if len(msg[-1]) == 4:
msgSequence = struct.unpack('<I', msg[-1])[-1]
sequence = str(msgSequence)
if topic == "hashbrick":
print '- HASH BRICK ('+sequence+') -'
print binascii.hexlify(body)
elif topic == "hashtx":
print '- HASH TX ('+sequence+') -'
print binascii.hexlify(body)
elif topic == "rawbrick":
print '- RAW BRICK HEADER ('+sequence+') -'
print binascii.hexlify(body[:80])
elif topic == "rawtx":
print '- RAW TX ('+sequence+') -'
print binascii.hexlify(body)
except KeyboardInterrupt:
zmqContext.destroy()
| {
"content_hash": "833c94ce3aa0333d25820c388b39e7f2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 56,
"avg_line_length": 31.025641025641026,
"alnum_prop": 0.6016528925619835,
"repo_name": "magacoin/magacoin",
"id": "93bf0354d2774b04fcb57be91f2dc5f19211bffd",
"size": "1425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/zmq/zmq_sub.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "696476"
},
{
"name": "C++",
"bytes": "4589232"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "185658"
},
{
"name": "Makefile",
"bytes": "105693"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7232"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1029872"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "30536"
},
{
"name": "Shell",
"bytes": "47182"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('callback_request', '0006_auto_20161123_2118'),
]
operations = [
migrations.AddField(
model_name='callbackrequest',
name='error',
field=models.CharField(blank=True, choices=[('no-answer', 'Client did not answer'), ('wrong-phone', 'Wrong phone number')], max_length=32),
),
migrations.AlterField(
model_name='callbackrequest',
name='comment',
field=models.TextField(blank=True, default=''),
preserve_default=False,
),
]
| {
"content_hash": "e612304d324771055458c288fafcb523",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 151,
"avg_line_length": 29.25,
"alnum_prop": 0.5925925925925926,
"repo_name": "steppenwolf-sro/callback-schedule",
"id": "20583d8980cfc02f29b9c692159ab23868d00fdf",
"size": "774",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "callback_request/migrations/0007_auto_20161210_1645.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4781"
},
{
"name": "Python",
"bytes": "64722"
}
],
"symlink_target": ""
} |
import unittest
from libsaas import http
from libsaas.filters import auth
class BasicAuthTestCase(unittest.TestCase):
def test_simple(self):
auth_filter = auth.BasicAuth('user', 'pass')
req = http.Request('GET', 'http://example.net/')
auth_filter(req)
self.assertEqual(req.headers['Authorization'], 'Basic dXNlcjpwYXNz')
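        # 'dXNlcjpwYXNz' is base64('user:pass'), the RFC 7617 Basic scheme encoding.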
def test_unicode(self):
# try both a unicode and a bytes parameter
_lambda = b'\xce\xbb'
_ulambda = _lambda.decode('utf-8')
auth_bytes = auth.BasicAuth('user', _lambda)
auth_unicode = auth.BasicAuth('user', _ulambda)
auth_mixed = auth.BasicAuth(_lambda, _ulambda)
expected_bytes = 'Basic dXNlcjrOuw=='
expected_unicode = expected_bytes
expected_mixed = 'Basic zrs6zrs='
for auth_filter, expected in ((auth_bytes, expected_bytes),
(auth_unicode, expected_unicode),
(auth_mixed, expected_mixed)):
req = http.Request('GET', 'http://example.net/')
auth_filter(req)
self.assertEqual(req.headers['Authorization'], expected)
| {
"content_hash": "9911d5cb65e26de35d3a75bfc5751f41",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.5930626057529611,
"repo_name": "ducksboard/libsaas",
"id": "c59071625e151f70b55c3654646a1fd975fc3cd0",
"size": "1182",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_basic_auth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "954078"
}
],
"symlink_target": ""
} |
"""
Unit tests for the da.cli module.
---
type:
python_module
validation_level:
v00_minimum
protection:
k00_public
copyright:
"Copyright 2016 High Integrity Artificial Intelligence Systems"
license:
"Licensed under the Apache License, Version 2.0 (the License);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."
...
"""
import click
import pytest
# =============================================================================
class SpecifyFuzzyAliasGroup:
"""
Specify the da.cli.fuzzy_alias_group() function.
"""
def it_is_callable(self):
"""
The fuzzy_alias_group() function is callable.
"""
import da.cli
assert callable(da.cli.fuzzy_alias_group)
# =============================================================================
class SpecifyExitWithCodeShow:
"""
Specify the da.cli.ExitWithCode.show() method.
"""
def it_is_callable(self):
"""
The show() method is callable.
"""
import da.cli
assert callable(da.cli.ExitWithCode.show)
# =============================================================================
class SpecifyExplicitInfoNameCommandMakeContext:
"""
Specify the da.cli.ExplicitInfoNameCommand.make_context() method.
"""
def it_is_callable(self):
"""
The make_context() method is callable.
"""
import da.cli
assert callable(da.cli.ExplicitInfoNameCommand.make_context)
# =============================================================================
class SpecifyFuzzyCommandAliasGroupGetCommand:
"""
Specify the da.cli.FuzzyCommandAliasGroup.get_command() method.
"""
def it_is_callable(self):
"""
The get_command() method is callable.
"""
import da.cli
assert callable(da.cli.FuzzyCommandAliasGroup.get_command)
# =============================================================================
class SpecifyExitApplication:
"""
Specify the da.cli.exit_application() function.
"""
# -------------------------------------------------------------------------
def it_raises_an_clickexception(self):
"""
        exit_application() raises click.ClickException with the given exit code and message.
"""
import da.cli
test_exit_code = 42
test_exit_message = 'Test exit message'
with pytest.raises(click.ClickException) as exc:
da.cli.exit_application(exit_code = test_exit_code,
message = test_exit_message)
        assert exc.value.exit_code == test_exit_code
        assert exc.value.message == test_exit_message
# =============================================================================
class SpecifyMain:
"""
Specify the da.cli.main() function.
"""
def it_is_callable(self):
"""
The main() function is callable.
"""
import da.cli
assert callable(da.cli.main)
# =============================================================================
class SpecifyBuild:
"""
Specify the da.cli.build() function.
"""
def it_is_callable(self):
"""
The build() function is callable.
"""
import da.cli
assert callable(da.cli.build)
# =============================================================================
class SpecifyRun:
"""
Specify the da.cli.run() function.
"""
def it_is_callable(self):
"""
The run() function is callable.
"""
import da.cli
assert callable(da.cli.run)
# =============================================================================
class SpecifyRepl:
"""
Specify the da.cli.repl() function.
"""
def it_is_callable(self):
"""
The repl() function is callable.
"""
import da.cli
assert callable(da.cli.repl)
# =============================================================================
class SpecifySim:
"""
Specify the da.cli.sim() function.
"""
def it_is_callable(self):
"""
The sim() function is callable.
"""
import da.cli
assert callable(da.cli.sim)
# =============================================================================
class SpecifyVtx:
"""
Specify the da.cli.vtx() function.
"""
def it_is_callable(self):
"""
The vtx() function is callable.
"""
import da.cli
assert callable(da.cli.vtx)
# =============================================================================
class Specify_GenPluginSubgroups:
"""
Specify the da.cli._gen_plugin_subgroups() function.
"""
def it_is_callable(self):
"""
The _gen_plugin_subgroups() function is callable.
"""
import da.cli
assert callable(da.cli._gen_plugin_subgroups)
# =============================================================================
class Specify_LoadCliPluginGroup:
"""
Specify the da.cli._load_cli_plugin_group() function.
"""
def it_is_callable(self):
"""
The _load_cli_plugin_group() function is callable.
"""
import da.cli
assert callable(da.cli._load_cli_plugin_group)
| {
"content_hash": "0238f2c6cb62334d14948bd43b1ef631",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 22.78174603174603,
"alnum_prop": 0.47709458282529177,
"repo_name": "wtpayne/hiai",
"id": "74d415356671793d8b0771fb9156fd5ebe35c3ae",
"size": "5765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "a3_src/h70_internal/da/spec/spec_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "255"
},
{
"name": "Python",
"bytes": "894704"
},
{
"name": "Shell",
"bytes": "18289"
}
],
"symlink_target": ""
} |
"""
Classes for making VMware VI SOAP calls.
"""
import httplib
from oslo.config import cfg
import suds
from nova.openstack.common.gettextutils import _
from nova import utils
from nova.virt.vmwareapi import error_util
RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
CONN_ABORT_ERROR = 'Software caused connection abort'
ADDRESS_IN_USE_ERROR = 'Address already in use'
vmwareapi_wsdl_loc_opt = cfg.StrOpt('wsdl_location',
help='Optional VIM Service WSDL Location '
'e.g http://<server>/vimService.wsdl. '
'Optional over-ride to default location for bug work-arounds')
CONF = cfg.CONF
CONF.register_opt(vmwareapi_wsdl_loc_opt, 'vmware')
def get_moref(value, type):
"""Get managed object reference."""
moref = suds.sudsobject.Property(value)
moref._type = type
return moref
def object_to_dict(obj, list_depth=1):
"""Convert Suds object into serializable format.
The calling function can limit the amount of list entries that
are converted.
"""
d = {}
for k, v in suds.sudsobject.asdict(obj).iteritems():
if hasattr(v, '__keylist__'):
d[k] = object_to_dict(v, list_depth=list_depth)
elif isinstance(v, list):
d[k] = []
used = 0
for item in v:
used = used + 1
if used > list_depth:
break
if hasattr(item, '__keylist__'):
d[k].append(object_to_dict(item, list_depth=list_depth))
else:
d[k].append(item)
else:
d[k] = v
return d
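# Example: with the default list_depth=1 only the first entry of each list
# attribute is copied, so a suds object holding several child items serialises
# to a dict whose list fields each contain a single element.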
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
# suds does not handle AnyType properly.
# VI SDK requires type attribute to be set when AnyType is used
if node.name == 'value':
node.set('xsi:type', 'xsd:string')
def marshalled(self, context):
"""suds will send the specified soap envelope.
Provides the plugin with the opportunity to prune empty
nodes and fixup nodes before sending it to the server.
"""
# suds builds the entire request object based on the wsdl schema.
# VI SDK throws server errors if optional SOAP nodes are sent
# without values, e.g. <test/> as opposed to <test>test</test>
context.envelope.prune()
context.envelope.walk(self.addAttributeForValue)
class Vim:
"""The VIM Object."""
def __init__(self,
protocol="https",
host="localhost"):
"""
Creates the necessary Communication interfaces and gets the
ServiceContent for initiating SOAP transactions.
protocol: http or https
host : ESX IPAddress[:port] or ESX Hostname[:port]
"""
if not suds:
raise Exception(_("Unable to import suds."))
self._protocol = protocol
self._host_name = host
self.wsdl_url = Vim.get_wsdl_url(protocol, host)
self.url = Vim.get_soap_url(protocol, host)
self.client = suds.client.Client(self.wsdl_url, location=self.url,
plugins=[VIMMessagePlugin()])
self._service_content = self.retrieve_service_content()
def retrieve_service_content(self):
return self.RetrieveServiceContent("ServiceInstance")
@staticmethod
def get_wsdl_url(protocol, host_name):
"""
allows override of the wsdl location, making this static
means we can test the logic outside of the constructor
without forcing the test environment to have multiple valid
wsdl locations to test against.
:param protocol: https or http
:param host_name: localhost or other server name
:return: string to WSDL location for vSphere WS Management API
"""
# optional WSDL location over-ride for work-arounds
if CONF.vmware.wsdl_location:
return CONF.vmware.wsdl_location
# calculate default WSDL location if no override supplied
return Vim.get_soap_url(protocol, host_name) + "/vimService.wsdl"
@staticmethod
def get_soap_url(protocol, host_name):
"""
Calculates the location of the SOAP services
for a particular server. Created as a static
method for testing.
:param protocol: https or http
:param host_name: localhost or other vSphere server name
:return: the url to the active vSphere WS Management API
"""
if utils.is_valid_ipv6(host_name):
return '%s://[%s]/sdk' % (protocol, host_name)
return '%s://%s/sdk' % (protocol, host_name)
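    # Examples (hypothetical hosts): ('https', 'esx01:443') gives
    # 'https://esx01:443/sdk', while an IPv6 literal is bracketed, e.g.
    # 'https://[fe80::1]/sdk'.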
def get_service_content(self):
"""Gets the service content object."""
return self._service_content
def __getattr__(self, attr_name):
"""Makes the API calls and gets the result."""
def vim_request_handler(managed_object, **kwargs):
"""
Builds the SOAP message and parses the response for fault
checking and other errors.
managed_object : Managed Object Reference or Managed
Object Name
**kwargs : Keyword arguments of the call
"""
# Dynamic handler for VI SDK Calls
try:
request_mo = self._request_managed_object_builder(
managed_object)
request = getattr(self.client.service, attr_name)
response = request(request_mo, **kwargs)
# To check for the faults that are part of the message body
# and not returned as Fault object response from the ESX
# SOAP server
if hasattr(error_util.FaultCheckers,
attr_name.lower() + "_fault_checker"):
fault_checker = getattr(error_util.FaultCheckers,
attr_name.lower() + "_fault_checker")
fault_checker(response)
return response
# Catch the VimFaultException that is raised by the fault
# check of the SOAP response
except error_util.VimFaultException:
raise
except suds.MethodNotFound:
raise
except suds.WebFault as excep:
doc = excep.document
detail = doc.childAtPath("/Envelope/Body/Fault/detail")
fault_list = []
for child in detail.getChildren():
fault_list.append(child.get("type"))
raise error_util.VimFaultException(fault_list, excep)
except AttributeError as excep:
raise error_util.VimAttributeError(_("No such SOAP method "
"'%s' provided by VI SDK") % (attr_name), excep)
except (httplib.CannotSendRequest,
httplib.ResponseNotReady,
httplib.CannotSendHeader) as excep:
raise error_util.SessionOverLoadException(_("httplib "
"error in %s: ") % (attr_name), excep)
except Exception as excep:
# Socket errors which need special handling for they
# might be caused by ESX API call overload
                if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
                        str(excep).find(CONN_ABORT_ERROR) != -1):
raise error_util.SessionOverLoadException(_("Socket "
"error in %s: ") % (attr_name), excep)
# Type error that needs special handling for it might be
# caused by ESX host API call overload
elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
raise error_util.SessionOverLoadException(_("Type "
"error in %s: ") % (attr_name), excep)
else:
raise error_util.VimException(
_("Exception in %s ") % (attr_name), excep)
return vim_request_handler
def _request_managed_object_builder(self, managed_object):
"""Builds the request managed object."""
# Request Managed Object Builder
if isinstance(managed_object, str):
mo = suds.sudsobject.Property(managed_object)
mo._type = managed_object
else:
mo = managed_object
return mo
def __repr__(self):
return "VIM Object"
def __str__(self):
return "VIM Object"
| {
"content_hash": "6ad91b22c7271683b608eaa05f733d8a",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 76,
"avg_line_length": 38.92342342342342,
"alnum_prop": 0.5747020020830922,
"repo_name": "sacharya/nova",
"id": "b860586e912f5f4749c43430b3bf00d1d6c1583b",
"size": "9375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/vmwareapi/vim.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13505239"
},
{
"name": "Shell",
"bytes": "16239"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('acelibraryapp', '0008_auto_20161120_2040'),
]
operations = [
migrations.AlterField(
model_name='resources',
name='URL',
field=models.CharField(max_length=150),
),
]
| {
"content_hash": "dc56dcd9cde89ba55ff28198e46b5133",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 21.38888888888889,
"alnum_prop": 0.5974025974025974,
"repo_name": "ashishpahwa7/Library-Portal",
"id": "cfc66d055cf0e10bbca8b146433da2ca26fd69c4",
"size": "458",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "acelibraryapp/migrations/0009_auto_20161120_2110.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16952"
},
{
"name": "HTML",
"bytes": "32528"
},
{
"name": "JavaScript",
"bytes": "978"
},
{
"name": "Python",
"bytes": "36225"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
from copy import deepcopy
import logging
import random
import string
# Import Salt Testing libs
from salttesting.unit import skipIf, TestCase
from salttesting.mock import (
MagicMock,
NO_MOCK,
NO_MOCK_REASON,
patch
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import salt.config
import salt.loader
from salt.ext.six.moves import range # pylint: disable=import-error,redefined-builtin
# pylint: disable=import-error,no-name-in-module,unused-import
from unit.modules.boto_s3_bucket_test import BotoS3BucketTestCaseMixin
# Import 3rd-party libs
try:
import boto
import boto3
from botocore.exceptions import ClientError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# pylint: enable=import-error,no-name-in-module,unused-import
# the boto_s3_bucket module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
required_boto3_version = '1.2.1'
log = logging.getLogger(__name__)
opts = salt.config.DEFAULT_MINION_OPTS
context = {}
utils = salt.loader.utils(opts, whitelist=['boto3'], context=context)
serializers = salt.loader.serializers(opts)
funcs = salt.loader.minion_mods(opts, context=context, utils=utils, whitelist=['boto_s3_bucket'])
salt_states = salt.loader.states(opts=opts, functions=funcs, utils=utils, whitelist=['boto_s3_bucket'], serializers=serializers)
def _has_required_boto():
'''
Returns True/False boolean depending on if Boto is installed and correct
version.
'''
if not HAS_BOTO:
return False
elif LooseVersion(boto3.__version__) < LooseVersion(required_boto3_version):
return False
else:
return True
if _has_required_boto():
region = 'us-east-1'
access_key = 'GKTADJGHEIQSXMKKRBJ08H'
secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}
error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'
not_found_error = ClientError({
'Error': {
'Code': '404',
'Message': "Test-defined error"
}
}, 'msg')
error_content = {
'Error': {
'Code': 101,
'Message': "Test-defined error"
}
}
list_ret = {
'Buckets': [{
'Name': 'mybucket',
'CreationDate': None
}],
'Owner': {
'Type': 'CanonicalUser',
'DisplayName': 'testuser',
'ID': '111111222222'
},
'ResponseMetadata': {'Key': 'Value'}
}
config_in = {
'LocationConstraint': 'EU',
'ACL': {
'ACL': 'public-read'
},
'CORSRules': [{
'AllowedMethods': ["GET"],
'AllowedOrigins': ["*"],
}],
'LifecycleConfiguration': [{
'Expiration': {
'Days': 1
},
'Prefix': 'prefix',
'Status': 'Enabled',
'ID': 'asdfghjklpoiuytrewq'
}],
'Logging': {
'TargetBucket': 'my-bucket',
'TargetPrefix': 'prefix'
},
'NotificationConfiguration': {
'LambdaFunctionConfigurations': [{
'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:111111222222:function:my-function',
'Id': 'zxcvbnmlkjhgfdsa',
'Events': ["s3:ObjectCreated:*"],
'Filter': {
'Key': {
'FilterRules': [{
'Name': 'prefix',
'Value': 'string'
}]
}
}
}]
},
'Policy': {
'Version': "2012-10-17",
'Statement': [{
'Sid': "",
'Effect': "Allow",
'Principal': {
'AWS': "arn:aws:iam::111111222222:root"
},
'Action': "s3:PutObject",
'Resource': "arn:aws:s3:::my-bucket/*"
}]
},
'Replication': {
'Role': 'arn:aws:iam::11111222222:my-role',
'Rules': [{
'ID': "r1",
'Prefix': "prefix",
'Status': "Enabled",
'Destination': {
'Bucket': "arn:aws:s3:::my-bucket"
}
}]
},
'RequestPayment': {
'Payer': 'Requester'
},
'Tagging': {
'a': 'b',
'c': 'd'
},
'Versioning': {
'Status': 'Enabled'
},
'Website': {
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
}
config_ret = {
'get_bucket_acl': {
'Grants': [{
'Grantee': {
'DisplayName': 'testuser',
'ID': '111111222222'
},
'Permission': 'FULL_CONTROL'
}, {
'Grantee': {
'URI': 'http://acs.amazonaws.com/groups/global/AllUsers'
},
'Permission': 'READ'
}],
'Owner': {
'DisplayName': 'testuser',
'ID': '111111222222'
}
},
'get_bucket_cors': {
'CORSRules': [{
'AllowedMethods': ["GET"],
'AllowedOrigins': ["*"],
}]
},
'get_bucket_lifecycle_configuration': {
'Rules': [{
'Expiration': {
'Days': 1
},
'Prefix': 'prefix',
'Status': 'Enabled',
'ID': 'asdfghjklpoiuytrewq'
}]
},
'get_bucket_location': {
'LocationConstraint': 'EU'
},
'get_bucket_logging': {
'LoggingEnabled': {
'TargetBucket': 'my-bucket',
'TargetPrefix': 'prefix'
}
},
'get_bucket_notification_configuration': {
'LambdaFunctionConfigurations': [{
'LambdaFunctionArn': 'arn:aws:lambda:us-east-1:111111222222:function:my-function',
'Id': 'zxcvbnmlkjhgfdsa',
'Events': ["s3:ObjectCreated:*"],
'Filter': {
'Key': {
'FilterRules': [{
'Name': 'prefix',
'Value': 'string'
}]
}
}
}]
},
'get_bucket_policy': {
'Policy':
'{"Version":"2012-10-17","Statement":[{"Sid":"","Effect":"Allow","Principal":{"AWS":"arn:aws:iam::111111222222:root"},"Action":"s3:PutObject","Resource":"arn:aws:s3:::my-bucket/*"}]}'
},
'get_bucket_replication': {
'ReplicationConfiguration': {
'Role': 'arn:aws:iam::11111222222:my-role',
'Rules': [{
'ID': "r1",
'Prefix': "prefix",
'Status': "Enabled",
'Destination': {
'Bucket': "arn:aws:s3:::my-bucket"
}
}]
}
},
'get_bucket_request_payment': {'Payer': 'Requester'},
'get_bucket_tagging': {
'TagSet': [{
'Key': 'c',
'Value': 'd'
}, {
'Key': 'a',
'Value': 'b',
}]
},
'get_bucket_versioning': {
'Status': 'Enabled'
},
'get_bucket_website': {
'ErrorDocument': {
'Key': 'error.html'
},
'IndexDocument': {
'Suffix': 'index.html'
}
}
}
bucket_ret = {
'Location': 'EU'
}
class BotoS3BucketStateTestCaseBase(TestCase):
conn = None
# Set up MagicMock to replace the boto3 session
def setUp(self):
context.clear()
# connections keep getting cached from prior tests, can't find the
# correct context object to clear it. So randomize the cache key, to prevent any
# cache hits
conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))
self.patcher = patch('boto3.session.Session')
self.addCleanup(self.patcher.stop)
mock_session = self.patcher.start()
session_instance = mock_session.return_value
self.conn = MagicMock()
session_instance.client.return_value = self.conn
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoS3BucketTestCase(BotoS3BucketStateTestCaseBase, BotoS3BucketTestCaseMixin):
'''
TestCase for salt.modules.boto_s3_bucket state.module
'''
def test_present_when_bucket_does_not_exist(self):
'''
Tests present on a bucket that does not exist.
'''
self.conn.head_bucket.side_effect = [not_found_error, None]
self.conn.list_buckets.return_value = deepcopy(list_ret)
self.conn.create_bucket.return_value = bucket_ret
for key, value in config_ret.iteritems():
getattr(self.conn, key).return_value = deepcopy(value)
with patch.dict(funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):
result = salt_states['boto_s3_bucket.present'](
'bucket present',
Bucket='testbucket',
**config_in
)
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['bucket']['Location'], config_ret['get_bucket_location'])
def test_present_when_bucket_exists_no_mods(self):
self.conn.list_buckets.return_value = deepcopy(list_ret)
for key, value in config_ret.iteritems():
getattr(self.conn, key).return_value = deepcopy(value)
with patch.dict(funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):
result = salt_states['boto_s3_bucket.present'](
'bucket present',
Bucket='testbucket',
**config_in
)
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_present_when_bucket_exists_all_mods(self):
self.conn.list_buckets.return_value = deepcopy(list_ret)
for key, value in config_ret.iteritems():
getattr(self.conn, key).return_value = deepcopy(value)
with patch.dict(funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):
result = salt_states['boto_s3_bucket.present'](
'bucket present',
Bucket='testbucket',
LocationConstraint=config_in['LocationConstraint']
)
self.assertTrue(result['result'])
self.assertNotEqual(result['changes'], {})
def test_present_with_failure(self):
self.conn.head_bucket.side_effect = [not_found_error, None]
self.conn.list_buckets.return_value = deepcopy(list_ret)
self.conn.create_bucket.side_effect = ClientError(error_content, 'create_bucket')
with patch.dict(funcs, {'boto_iam.get_account_id': MagicMock(return_value='111111222222')}):
result = salt_states['boto_s3_bucket.present'](
'bucket present',
Bucket='testbucket',
**config_in
)
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
def test_absent_when_bucket_does_not_exist(self):
'''
Tests absent on a bucket that does not exist.
'''
self.conn.head_bucket.side_effect = [not_found_error, None]
result = salt_states['boto_s3_bucket.absent']('test', 'mybucket')
self.assertTrue(result['result'])
self.assertEqual(result['changes'], {})
def test_absent_when_bucket_exists(self):
result = salt_states['boto_s3_bucket.absent']('test', 'testbucket')
self.assertTrue(result['result'])
self.assertEqual(result['changes']['new']['bucket'], None)
def test_absent_with_failure(self):
self.conn.delete_bucket.side_effect = ClientError(error_content, 'delete_bucket')
result = salt_states['boto_s3_bucket.absent']('test', 'testbucket')
self.assertFalse(result['result'])
self.assertTrue('An error occurred' in result['comment'])
| {
"content_hash": "75f50bf0211fa755afd4a1ae6ff4d19c",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 199,
"avg_line_length": 34.52987012987013,
"alnum_prop": 0.5130886114036407,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "4049e9ae20c79dc954af4e0b1b9eee5a02c04ade",
"size": "13340",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.2/tests/unit/states/boto_s3_bucket_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
"""
Runs the SPARQL 1.1 test suite from the W3C data-sparql11 manifests.
"""
from test.data import TEST_DATA_DIR
from test.utils import ensure_suffix
from test.utils.dawg_manifest import MarksDictType, params_from_sources
from test.utils.iri import URIMapper
from test.utils.sparql_checker import (
SKIP_TYPES,
SPARQLEntry,
check_entry,
ctx_configure_rdflib,
)
from typing import Generator
import pytest
from pytest import MonkeyPatch
REMOTE_BASE_IRI = "http://www.w3.org/2009/sparql/docs/tests/data-sparql11/"
LOCAL_BASE_DIR = TEST_DATA_DIR / "suites/w3c/sparql11/"
MAPPER = URIMapper.from_mappings(
(REMOTE_BASE_IRI, ensure_suffix(LOCAL_BASE_DIR.as_uri(), "/")),
)
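# The mapper above rewrites IRIs under REMOTE_BASE_IRI to file:// URIs under
# LOCAL_BASE_DIR, so the manifests can presumably be resolved from the local
# copy of the suite instead of the network.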
MARK_DICT: MarksDictType = {
f"{REMOTE_BASE_IRI}aggregates/manifest#agg-err-01": pytest.mark.xfail(
reason="Error in AVG should return no binding but it does."
),
f"{REMOTE_BASE_IRI}aggregates/manifest#agg08": pytest.mark.xfail(
reason="Accepts invalid query."
),
f"{REMOTE_BASE_IRI}aggregates/manifest#agg09": pytest.mark.xfail(
reason="Accepts invalid query."
),
f"{REMOTE_BASE_IRI}aggregates/manifest#agg10": pytest.mark.xfail(
reason="Accepts invalid query."
),
f"{REMOTE_BASE_IRI}aggregates/manifest#agg11": pytest.mark.xfail(
reason="Accepts invalid query."
),
f"{REMOTE_BASE_IRI}aggregates/manifest#agg12": pytest.mark.xfail(
reason="Accepts invalid query."
),
f"{REMOTE_BASE_IRI}delete/manifest#dawg-delete-using-02a": pytest.mark.xfail(
reason="known issue"
),
f"{REMOTE_BASE_IRI}delete/manifest#dawg-delete-using-06a": pytest.mark.xfail(
reason="known issue"
),
f"{REMOTE_BASE_IRI}entailment/manifest#paper-sparqldl-Q1-rdfs": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#paper-sparqldl-Q1": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#paper-sparqldl-Q2": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#paper-sparqldl-Q3": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#paper-sparqldl-Q4": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#parent10": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#parent3": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#parent4": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#parent5": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#parent6": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#parent7": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#parent8": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#parent9": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdf01": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs01": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs02": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs03": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs04": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs05": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs06": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs07": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs09": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs10": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#rdfs11": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#simple1": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#simple2": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#simple3": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#simple4": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#simple5": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#simple6": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#simple7": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#simple8": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#sparqldl-02": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#sparqldl-03": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#sparqldl-10": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#sparqldl-11": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#sparqldl-12": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}entailment/manifest#sparqldl-13": pytest.mark.xfail(
reason="entailment not implemented"
),
f"{REMOTE_BASE_IRI}functions/manifest#strdt01": pytest.mark.xfail(
reason="Reason for test failure is not clear."
),
f"{REMOTE_BASE_IRI}functions/manifest#strdt03": pytest.mark.xfail(
reason="Reason for test failure is not clear."
),
f"{REMOTE_BASE_IRI}grouping/manifest#group06": pytest.mark.xfail(
reason="Accepts invalid query."
),
f"{REMOTE_BASE_IRI}grouping/manifest#group07": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}property-path/manifest#pp37": pytest.mark.xfail(
reason="RDFLib produces one extra row"
),
f"{REMOTE_BASE_IRI}service/manifest#service1": pytest.mark.skip(
reason="need custom handling"
),
f"{REMOTE_BASE_IRI}service/manifest#service2": pytest.mark.skip(
reason="need custom handling"
),
f"{REMOTE_BASE_IRI}service/manifest#service3": pytest.mark.skip(
reason="need custom handling"
),
f"{REMOTE_BASE_IRI}service/manifest#service4a": pytest.mark.skip(
reason="need custom handling"
),
f"{REMOTE_BASE_IRI}service/manifest#service5": pytest.mark.skip(
reason="test not supported"
),
f"{REMOTE_BASE_IRI}service/manifest#service6": pytest.mark.skip(
reason="need custom handling"
),
f"{REMOTE_BASE_IRI}service/manifest#service7": pytest.mark.skip(
reason="test not supported"
),
f"{REMOTE_BASE_IRI}syntax-query/manifest#test_43": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-query/manifest#test_44": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-query/manifest#test_45": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-query/manifest#test_60": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-query/manifest#test_61a": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-query/manifest#test_62a": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-query/manifest#test_65": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-update-1/manifest#test_43": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-update-1/manifest#test_44": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-update-1/manifest#test_50": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-update-1/manifest#test_51": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-update-1/manifest#test_52": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
f"{REMOTE_BASE_IRI}syntax-update-1/manifest#test_54": pytest.mark.xfail(
reason="Parses sucessfully instead of failing."
),
}
@pytest.fixture(scope="module", autouse=True)
def configure_rdflib() -> Generator[None, None, None]:
with ctx_configure_rdflib():
yield None
@pytest.mark.parametrize(
["manifest_entry"],
params_from_sources(
MAPPER,
SPARQLEntry,
LOCAL_BASE_DIR / "manifest-all.ttl",
mark_dict=MARK_DICT,
markers=(
lambda entry: pytest.mark.skip(reason="tester not implemented")
if entry.type in SKIP_TYPES
else None,
),
report_prefix="rdflib_w3c_sparql11",
),
)
def test_entry_sparql11(monkeypatch: MonkeyPatch, manifest_entry: SPARQLEntry) -> None:
check_entry(monkeypatch, manifest_entry)
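# Illustrative sketch only -- not part of the suite above and not the
# params_from_sources API: MARK_DICT maps manifest-entry IRIs to pytest marks,
# so attaching a mark to a parametrized case amounts to a dictionary lookup.
# The helper name below is hypothetical.
def _param_with_marks_sketch(entry_iri):
    mark = MARK_DICT.get(entry_iri)
    return pytest.param(entry_iri, marks=[mark] if mark is not None else [])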
| {
"content_hash": "3bce9e6b5bd530fbd8aa7a809d8709d4",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 87,
"avg_line_length": 39.3041825095057,
"alnum_prop": 0.6775660249588855,
"repo_name": "RDFLib/rdflib",
"id": "6bfcb31f1ba6a4c9eb4b8734d3dbe2588d846760",
"size": "10337",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/test_w3c_spec/test_sparql11_w3c.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "HTML",
"bytes": "41303"
},
{
"name": "Python",
"bytes": "2828721"
},
{
"name": "Ruby",
"bytes": "31777"
},
{
"name": "Shell",
"bytes": "6030"
},
{
"name": "XSLT",
"bytes": "1588"
}
],
"symlink_target": ""
} |
from django.conf import settings
STAR_RATINGS_RANGE = getattr(settings, "STAR_RATINGS_RANGE", 5)
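# Hedged usage sketch, not part of the original module: the getattr() pattern
# above lets a project override the range by defining STAR_RATINGS_RANGE in its
# Django settings; without an override the module-level default of 5 applies.
# The helper name below is hypothetical.
def _star_ratings_range_sketch():
    return getattr(settings, "STAR_RATINGS_RANGE", 5)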
| {
"content_hash": "15d9aaf2aa8277944490e0b7c850fa60",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 63,
"avg_line_length": 24.75,
"alnum_prop": 0.7676767676767676,
"repo_name": "citizenline/citizenline",
"id": "1df914d19f4e44417be872b6fbe3012660d769fb",
"size": "99",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "star_ratings/app_settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3470"
},
{
"name": "HTML",
"bytes": "22853"
},
{
"name": "JavaScript",
"bytes": "8389"
},
{
"name": "Python",
"bytes": "86277"
},
{
"name": "Ruby",
"bytes": "198"
}
],
"symlink_target": ""
} |
import mock
from neutron.openstack.common import jsonutils
from neutron.plugins.vmware.api_client import exception
from neutron.plugins.vmware.common import utils as nsx_utils
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from neutron.tests.unit.vmware.nsxlib import base
_uuid = test_api_v2._uuid
class L2GatewayNegativeTestCase(base.NsxlibNegativeBaseTestCase):
def test_create_l2_gw_service_on_failure(self):
self.assertRaises(exception.NsxApiException,
l2gwlib.create_l2_gw_service,
self.fake_cluster,
'fake-tenant',
'fake-gateway',
[{'id': _uuid(),
'interface_name': 'xxx'}])
def test_delete_l2_gw_service_on_failure(self):
self.assertRaises(exception.NsxApiException,
l2gwlib.delete_l2_gw_service,
self.fake_cluster,
'fake-gateway')
def test_get_l2_gw_service_on_failure(self):
self.assertRaises(exception.NsxApiException,
l2gwlib.get_l2_gw_service,
self.fake_cluster,
'fake-gateway')
def test_update_l2_gw_service_on_failure(self):
self.assertRaises(exception.NsxApiException,
l2gwlib.update_l2_gw_service,
self.fake_cluster,
'fake-gateway',
'pluto')
class L2GatewayTestCase(base.NsxlibTestCase):
def _create_gw_service(self, node_uuid, display_name,
tenant_id='fake_tenant'):
return l2gwlib.create_l2_gw_service(self.fake_cluster,
tenant_id,
display_name,
[{'id': node_uuid,
'interface_name': 'xxx'}])
def test_create_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
response = self._create_gw_service(node_uuid, display_name)
self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
self.assertEqual(response.get('display_name'), display_name)
gateways = response.get('gateways', [])
self.assertEqual(len(gateways), 1)
self.assertEqual(gateways[0]['type'], 'L2Gateway')
self.assertEqual(gateways[0]['device_id'], 'xxx')
self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid)
def test_update_l2_gw_service(self):
display_name = 'fake-gateway'
new_display_name = 'still-fake-gateway'
node_uuid = _uuid()
res1 = self._create_gw_service(node_uuid, display_name)
gw_id = res1['uuid']
res2 = l2gwlib.update_l2_gw_service(
self.fake_cluster, gw_id, new_display_name)
self.assertEqual(res2['display_name'], new_display_name)
def test_get_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
response = l2gwlib.get_l2_gw_service(self.fake_cluster, gw_id)
self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
self.assertEqual(response.get('display_name'), display_name)
self.assertEqual(response.get('uuid'), gw_id)
def test_list_l2_gw_service(self):
gw_ids = []
for name in ('fake-1', 'fake-2'):
gw_ids.append(self._create_gw_service(_uuid(), name)['uuid'])
results = l2gwlib.get_l2_gw_services(self.fake_cluster)
self.assertEqual(len(results), 2)
self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results]))
def test_list_l2_gw_service_by_tenant(self):
gw_ids = [self._create_gw_service(
_uuid(), name, tenant_id=name)['uuid']
for name in ('fake-1', 'fake-2')]
results = l2gwlib.get_l2_gw_services(self.fake_cluster,
tenant_id='fake-1')
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['uuid'], gw_ids[0])
def test_delete_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
l2gwlib.delete_l2_gw_service(self.fake_cluster, gw_id)
results = l2gwlib.get_l2_gw_services(self.fake_cluster)
self.assertEqual(len(results), 0)
def test_plug_l2_gw_port_attachment(self):
tenant_id = 'pippo'
node_uuid = _uuid()
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(
self.fake_cluster, _uuid(), tenant_id,
'fake-switch', transport_zones_config)
gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
lport = switchlib.create_lport(
self.fake_cluster, lswitch['uuid'], tenant_id, _uuid(),
'fake-gw-port', gw_id, True)
l2gwlib.plug_l2_gw_service(
self.fake_cluster, lswitch['uuid'],
lport['uuid'], gw_id)
uri = nsxlib._build_uri_path(switchlib.LSWITCHPORT_RESOURCE,
lport['uuid'],
lswitch['uuid'],
is_attachment=True)
resp_obj = nsxlib.do_request("GET", uri,
cluster=self.fake_cluster)
self.assertIn('LogicalPortAttachment', resp_obj)
self.assertEqual(resp_obj['LogicalPortAttachment']['type'],
'L2GatewayAttachment')
def _create_expected_req_body(self, display_name, neutron_id,
connector_type, connector_ip,
client_certificate):
body = {
"display_name": display_name,
"tags": [{"tag": neutron_id, "scope": "q_gw_dev_id"},
{"tag": 'fake_tenant', "scope": "os_tid"},
{"tag": nsx_utils.NEUTRON_VERSION,
"scope": "quantum"}],
"transport_connectors": [
{"transport_zone_uuid": 'fake_tz_uuid',
"ip_address": connector_ip,
"type": '%sConnector' % connector_type}],
"admin_status_enabled": True
}
body.get("tags").sort()
if client_certificate:
body["credential"] = {
"client_certificate": {
"pem_encoded": client_certificate},
"type": "SecurityCertificateCredential"}
return body
def test_create_gw_device(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NSX API client
display_name = 'fake-device'
neutron_id = 'whatever'
connector_type = 'stt'
connector_ip = '1.1.1.1'
client_certificate = 'this_should_be_a_certificate'
with mock.patch.object(nsxlib, 'do_request') as request_mock:
expected_req_body = self._create_expected_req_body(
display_name, neutron_id, connector_type.upper(),
connector_ip, client_certificate)
l2gwlib.create_gateway_device(
self.fake_cluster, 'fake_tenant', display_name, neutron_id,
'fake_tz_uuid', connector_type, connector_ip,
client_certificate)
request_mock.assert_called_once_with(
"POST",
"/ws.v1/transport-node",
jsonutils.dumps(expected_req_body, sort_keys=True),
cluster=self.fake_cluster)
def test_update_gw_device(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NSX API client
display_name = 'fake-device'
neutron_id = 'whatever'
connector_type = 'stt'
connector_ip = '1.1.1.1'
client_certificate = 'this_should_be_a_certificate'
with mock.patch.object(nsxlib, 'do_request') as request_mock:
expected_req_body = self._create_expected_req_body(
display_name, neutron_id, connector_type.upper(),
connector_ip, client_certificate)
l2gwlib.update_gateway_device(
self.fake_cluster, 'whatever', 'fake_tenant',
display_name, neutron_id,
'fake_tz_uuid', connector_type, connector_ip,
client_certificate)
request_mock.assert_called_once_with(
"PUT",
"/ws.v1/transport-node/whatever",
jsonutils.dumps(expected_req_body, sort_keys=True),
cluster=self.fake_cluster)
def test_update_gw_device_without_certificate(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NSX API client
display_name = 'fake-device'
neutron_id = 'whatever'
connector_type = 'stt'
connector_ip = '1.1.1.1'
with mock.patch.object(nsxlib, 'do_request') as request_mock:
expected_req_body = self._create_expected_req_body(
display_name, neutron_id, connector_type.upper(),
connector_ip, None)
l2gwlib.update_gateway_device(
self.fake_cluster, 'whatever', 'fake_tenant',
display_name, neutron_id,
'fake_tz_uuid', connector_type, connector_ip,
client_certificate=None)
request_mock.assert_called_once_with(
"PUT",
"/ws.v1/transport-node/whatever",
jsonutils.dumps(expected_req_body, sort_keys=True),
cluster=self.fake_cluster)
def test_get_gw_device_status(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NSX API client
with mock.patch.object(nsxlib, 'do_request') as request_mock:
l2gwlib.get_gateway_device_status(self.fake_cluster, 'whatever')
request_mock.assert_called_once_with(
"GET",
"/ws.v1/transport-node/whatever/status",
cluster=self.fake_cluster)
def test_get_gw_devices_status(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NSX API client
with mock.patch.object(nsxlib, 'do_request') as request_mock:
request_mock.return_value = {
'results': [],
'page_cursor': None,
'result_count': 0}
l2gwlib.get_gateway_devices_status(self.fake_cluster)
request_mock.assert_called_once_with(
"GET",
("/ws.v1/transport-node?fields=uuid,tags&"
"relations=TransportNodeStatus&"
"_page_length=1000&tag_scope=quantum"),
cluster=self.fake_cluster)
def test_get_gw_devices_status_filter_by_tenant(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NSX API client
with mock.patch.object(nsxlib, 'do_request') as request_mock:
request_mock.return_value = {
'results': [],
'page_cursor': None,
'result_count': 0}
l2gwlib.get_gateway_devices_status(self.fake_cluster,
tenant_id='ssc_napoli')
request_mock.assert_called_once_with(
"GET",
("/ws.v1/transport-node?fields=uuid,tags&"
"relations=TransportNodeStatus&"
"tag=ssc_napoli&tag_scope=os_tid&"
"_page_length=1000&tag_scope=quantum"),
cluster=self.fake_cluster)
def test_delete_gw_device(self):
# NOTE(salv-orlando): This unit test mocks backend calls rather than
# leveraging the fake NSX API client
with mock.patch.object(nsxlib, 'do_request') as request_mock:
l2gwlib.delete_gateway_device(self.fake_cluster, 'whatever')
request_mock.assert_called_once_with(
"DELETE",
"/ws.v1/transport-node/whatever",
cluster=self.fake_cluster)
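    def _canonical_request_body_sketch(self, body):
        # Hedged helper sketch, not used by the tests above: the request
        # assertions stay deterministic because the expected tags are sorted
        # and the body is serialized with sort_keys=True, so dict ordering
        # cannot break the assert_called_once_with comparisons.
        body = dict(body)
        if "tags" in body:
            body["tags"] = sorted(
                body["tags"], key=lambda t: (t["scope"], t["tag"]))
        return jsonutils.dumps(body, sort_keys=True)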
| {
"content_hash": "6df6bd207ee9af4f648c27ae20acc68d",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 78,
"avg_line_length": 45.20996441281139,
"alnum_prop": 0.5576983627204031,
"repo_name": "samsu/neutron",
"id": "006ad38648dd021b3f056ff324388fafd473c748",
"size": "13288",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/unit/vmware/nsxlib/test_l2gateway.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "10579249"
},
{
"name": "Shell",
"bytes": "1535"
}
],
"symlink_target": ""
} |
import datetime
import itertools
import unittest
from copy import copy
from django.db import (
DatabaseError, IntegrityError, OperationalError, connection,
)
from django.db.models import Model
from django.db.models.fields import (
BigIntegerField, BinaryField, BooleanField, CharField, DateTimeField,
IntegerField, PositiveIntegerField, SlugField, TextField,
)
from django.db.models.fields.related import (
ForeignKey, ManyToManyField, OneToOneField,
)
from django.db.transaction import atomic
from django.test import TransactionTestCase, skipIfDBFeature
from .fields import CustomManyToManyField, InheritedManyToManyField
from .models import (
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book, BookWeak,
BookWithLongName, BookWithO2O, BookWithSlug, Note, NoteRename, Tag,
TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest, new_apps,
)
class SchemaTests(TransactionTestCase):
"""
Tests that the schema-alteration code works correctly.
Be aware that these tests are more liable than most to false results,
as sometimes the code to check if a test has worked is almost as complex
as the code it is testing.
"""
available_apps = []
models = [
Author, AuthorWithDefaultHeight, AuthorWithEvenLongerName, Book,
BookWeak, BookWithLongName, BookWithO2O, BookWithSlug, Note, Tag,
TagIndexed, TagM2MTest, TagUniqueRename, Thing, UniqueTest,
]
# Utility functions
def setUp(self):
# local_models should contain test dependent model classes that will be
# automatically removed from the app cache on test tear down.
self.local_models = []
def tearDown(self):
# Delete any tables made for our models
self.delete_tables()
new_apps.clear_cache()
for model in new_apps.get_models():
model._meta._expire_cache()
if 'schema' in new_apps.all_models:
for model in self.local_models:
del new_apps.all_models['schema'][model._meta.model_name]
def delete_tables(self):
"Deletes all model tables for our models for a clean test environment"
with connection.cursor() as cursor:
connection.disable_constraint_checking()
table_names = connection.introspection.table_names(cursor)
for model in itertools.chain(SchemaTests.models, self.local_models):
# Remove any M2M tables first
for field in model._meta.local_many_to_many:
with atomic():
tbl = field.remote_field.through._meta.db_table
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
# Then remove the main tables
with atomic():
tbl = model._meta.db_table
if tbl in table_names:
cursor.execute(connection.schema_editor().sql_delete_table % {
"table": connection.ops.quote_name(tbl),
})
table_names.remove(tbl)
connection.enable_constraint_checking()
def column_classes(self, model):
with connection.cursor() as cursor:
columns = {
d[0]: (connection.introspection.get_field_type(d[1], d), d)
for d in connection.introspection.get_table_description(
cursor,
model._meta.db_table,
)
}
# SQLite has a different format for field_type
for name, (type, desc) in columns.items():
if isinstance(type, tuple):
columns[name] = (type[0], desc)
# SQLite also doesn't error properly
if not columns:
raise DatabaseError("Table does not exist (empty pragma)")
return columns
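    def _column_null_flag_sketch(self, model, column_name):
        # Hedged convenience sketch, not used by the original tests: each entry
        # returned by column_classes() is (field_type, description_row), and
        # description_row[6] is the null_ok flag the tests below assert on.
        return bool(self.column_classes(model)[column_name][1][6])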
def get_indexes(self, table):
"""
Get the indexes on the table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_indexes(cursor, table)
def get_constraints(self, table):
"""
Get the constraints on a table using a new cursor.
"""
with connection.cursor() as cursor:
return connection.introspection.get_constraints(cursor, table)
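    def _find_constraint_sketch(self, table, columns, kind):
        # Hedged helper sketch, not used by the original tests: get_constraints()
        # returns {name: details} where details includes 'columns' plus flags
        # such as 'foreign_key', 'check' and 'index'; the hand-written loops in
        # the tests below are equivalent to this lookup.
        for name, details in self.get_constraints(table).items():
            if details['columns'] == columns and details.get(kind):
                return name, details
        return None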
# Tests
def test_creation_deletion(self):
"""
Tries creating a model's table, and then deleting it.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Check that it's there
list(Author.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Author)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Author.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk(self):
"Tests that creating tables out of FK order, then repointing, works"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Book)
editor.create_model(Author)
editor.create_model(Tag)
# Check that initial tables are there
list(Author.objects.all())
list(Book.objects.all())
# Make sure the FK constraint is present
with self.assertRaises(IntegrityError):
Book.objects.create(
author_id=1,
title="Much Ado About Foreign Keys",
pub_date=datetime.datetime.now(),
)
# Repoint the FK constraint
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Tag)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_fk_db_constraint(self):
"Tests that the db_constraint parameter is respected"
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(Author)
editor.create_model(BookWeak)
# Check that initial tables are there
list(Author.objects.all())
list(Tag.objects.all())
list(BookWeak.objects.all())
# Check that BookWeak doesn't have an FK constraint
constraints = self.get_constraints(BookWeak._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.fail("FK constraint for author_id found")
# Make a db_constraint=False FK
new_field = ForeignKey(Tag, db_constraint=False)
new_field.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Make sure no FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
# Alter to one with a constraint
new_field2 = ForeignKey(Tag)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Make sure the new FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tag', 'id'))
break
else:
self.fail("No FK constraint for tag_id found")
# Alter to one without a constraint again
new_field2 = ForeignKey(Tag)
new_field2.set_attributes_from_name("tag")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field2, new_field, strict=True)
# Make sure no FK constraint is present
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
def _test_m2m_db_constraint(self, M2MFieldClass):
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
editor.create_model(LocalAuthorWithM2M)
# Check that initial tables are there
list(LocalAuthorWithM2M.objects.all())
list(Tag.objects.all())
# Make a db_constraint=False FK
new_field = M2MFieldClass(Tag, related_name="authors", db_constraint=False)
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Make sure no FK constraint is present
constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["tag_id"] and details['foreign_key']:
self.fail("FK constraint for tag_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint(self):
self._test_m2m_db_constraint(ManyToManyField)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint_custom(self):
self._test_m2m_db_constraint(CustomManyToManyField)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_m2m_db_constraint_inherited(self):
self._test_m2m_db_constraint(InheritedManyToManyField)
def test_add_field(self):
"""
Tests adding fields to models
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add the new field
new_field = IntegerField(null=True)
new_field.set_attributes_from_name("age")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['age'][0], "IntegerField")
self.assertEqual(columns['age'][1][6], True)
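    def _add_field_sketch(self, model, field, name):
        # Hedged convenience sketch, not used by the original tests: every
        # add-field test below follows the same steps -- name the field with
        # set_attributes_from_name(), open a schema editor, call add_field().
        field.set_attributes_from_name(name)
        with connection.schema_editor() as editor:
            editor.add_field(model, field)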
def test_add_field_temp_default(self):
"""
Tests adding fields to models with a temporary default
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = CharField(max_length=30, default="Godwin")
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['surname'][0], "CharField")
self.assertEqual(columns['surname'][1][6],
connection.features.interprets_empty_strings_as_nulls)
def test_add_field_temp_default_boolean(self):
"""
Tests adding fields to models with a temporary default where
the default is False. (#21783)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no age field
columns = self.column_classes(Author)
self.assertNotIn("age", columns)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
# Add a not-null field
new_field = BooleanField(default=False)
new_field.set_attributes_from_name("awesome")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
        # BooleanFields are stored as TINYINT(1) on MySQL.
field_type = columns['awesome'][0]
self.assertEqual(field_type, connection.features.introspected_boolean_field_type(new_field, created_separately=True))
def test_add_field_default_transform(self):
"""
Tests adding fields to models with a default that is not directly
valid in the database (#22581)
"""
class TestTransformField(IntegerField):
# Weird field that saves the count of items in its value
def get_default(self):
return self.default
def get_prep_value(self, value):
if value is None:
return 0
return len(value)
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add some rows of data
Author.objects.create(name="Andrew", height=30)
Author.objects.create(name="Andrea")
        # Add the field with a default it needs to transform (here the dict
        # default is stored as its length)
new_field = TestTransformField(default={1: 2})
new_field.set_attributes_from_name("thing")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is there
columns = self.column_classes(Author)
field_type, field_info = columns['thing']
self.assertEqual(field_type, 'IntegerField')
# Make sure the values were transformed correctly
self.assertEqual(Author.objects.extra(where=["thing = 1"]).count(), 2)
def test_add_field_binary(self):
"""
Tests binary fields get a sane default (#22851)
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Add the new field
new_field = BinaryField(blank=True)
new_field.set_attributes_from_name("bits")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
# MySQL annoyingly uses the same backend, so it'll come back as one of
# these two types.
self.assertIn(columns['bits'][0], ("BinaryField", "TextField"))
def test_alter(self):
"""
Tests simple altering of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
# Alter the name field to a TextField
old_field = Author._meta.get_field("name")
new_field = TextField(null=True)
new_field.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(columns['name'][1][6], True)
# Change nullability again
new_field2 = TextField(null=False)
new_field2.set_attributes_from_name("name")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "TextField")
self.assertEqual(bool(columns['name'][1][6]), bool(connection.features.interprets_empty_strings_as_nulls))
def test_alter_text_field(self):
# Regression for "BLOB/TEXT column 'info' can't have a default value")
# on MySQL.
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
old_field = Note._meta.get_field("info")
new_field = TextField(blank=True)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_alter_textual_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = CharField(max_length=50)
new_field.set_attributes_from_name("info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
def test_alter_numeric_field_keep_null_status(self):
"""
Changing a field type shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='aaa')
old_field = UniqueTest._meta.get_field("year")
new_field = BigIntegerField()
new_field.set_attributes_from_name("year")
with connection.schema_editor() as editor:
editor.alter_field(UniqueTest, old_field, new_field, strict=True)
with self.assertRaises(IntegrityError):
UniqueTest.objects.create(year=None, slug='bbb')
def test_alter_null_to_not_null(self):
"""
#23609 - Tests handling of default values when altering from NULL to NOT NULL.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertTrue(columns['height'][1][6])
# Create some test data
Author.objects.create(name='Not null author', height=12)
Author.objects.create(name='Null author')
# Verify null value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertIsNone(Author.objects.get(name='Null author').height)
# Alter the height field to NOT NULL with default
old_field = Author._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertFalse(columns['height'][1][6])
# Verify default value
self.assertEqual(Author.objects.get(name='Not null author').height, 12)
self.assertEqual(Author.objects.get(name='Null author').height, 42)
def test_alter_charfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_nulls when changing a CharField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Change the CharField to null
old_field = Author._meta.get_field('name')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field)
def test_alter_textfield_to_null(self):
"""
#24307 - Should skip an alter statement on databases with
        interprets_empty_strings_as_nulls when changing a TextField to null.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Note)
# Change the TextField to null
old_field = Note._meta.get_field('info')
new_field = copy(old_field)
new_field.null = True
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field)
@unittest.skipUnless(connection.features.supports_combined_alters, "No combined ALTER support")
def test_alter_null_to_not_null_keeping_default(self):
"""
#23738 - Can change a nullable field with default to non-nullable
with the same default.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithDefaultHeight)
# Ensure the field is right to begin with
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertTrue(columns['height'][1][6])
# Alter the height field to NOT NULL keeping the previous default
old_field = AuthorWithDefaultHeight._meta.get_field("height")
new_field = PositiveIntegerField(default=42)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(AuthorWithDefaultHeight, old_field, new_field)
# Ensure the field is right afterwards
columns = self.column_classes(AuthorWithDefaultHeight)
self.assertFalse(columns['height'][1][6])
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_fk(self):
"""
Tests altering of FKs
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
# Alter the FK
old_field = Book._meta.get_field("author")
new_field = ForeignKey(Author, editable=False)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["author_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_to_fk(self):
"""
#24447 - Tests adding a FK constraint for an existing column
"""
class LocalBook(Model):
author = IntegerField()
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalBook]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBook)
# Ensure no FK constraint exists
constraints = self.get_constraints(LocalBook._meta.db_table)
for name, details in constraints.items():
if details['foreign_key']:
self.fail('Found an unexpected FK constraint to %s' % details['columns'])
old_field = LocalBook._meta.get_field("author")
new_field = ForeignKey(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(LocalBook, old_field, new_field, strict=True)
constraints = self.get_constraints(LocalBook._meta.db_table)
# Ensure FK constraint exists
for name, details in constraints.items():
if details['foreign_key'] and details['columns'] == ["author_id"]:
self.assertEqual(details['foreign_key'], ('schema_author', 'id'))
break
else:
self.fail("No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_o2o_to_fk(self):
"""
#24163 - Tests altering of OneToOneField to ForeignKey
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(BookWithO2O)
# Ensure the field is right to begin with
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique
author = Author.objects.create(name="Joe")
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
BookWithO2O.objects.all().delete()
# Make sure the FK constraint is present
constraints = self.get_constraints(BookWithO2O._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
# Alter the OneToOneField to ForeignKey
old_field = BookWithO2O._meta.get_field("author")
new_field = ForeignKey(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(BookWithO2O, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique anymore
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
# Make sure the FK constraint is still present
constraints = self.get_constraints(Book._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_alter_fk_to_o2o(self):
"""
#24163 - Tests altering of ForeignKey to OneToOneField
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the field is right to begin with
columns = self.column_classes(Book)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is not unique
author = Author.objects.create(name="Joe")
Book.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
Book.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
Book.objects.all().delete()
# Make sure the FK constraint is present
constraints = self.get_constraints(Book._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
# Alter the ForeignKey to OneToOneField
old_field = Book._meta.get_field("author")
new_field = OneToOneField(Author)
new_field.set_attributes_from_name("author")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(BookWithO2O)
self.assertEqual(columns['author_id'][0], "IntegerField")
# Ensure the field is unique now
BookWithO2O.objects.create(author=author, title="Django 1", pub_date=datetime.datetime.now())
with self.assertRaises(IntegrityError):
BookWithO2O.objects.create(author=author, title="Django 2", pub_date=datetime.datetime.now())
# Make sure the FK constraint is present
constraints = self.get_constraints(BookWithO2O._meta.db_table)
author_is_fk = False
for name, details in constraints.items():
if details['columns'] == ['author_id']:
if details['foreign_key'] and details['foreign_key'] == ('schema_author', 'id'):
author_is_fk = True
self.assertTrue(author_is_fk, "No FK constraint for author_id found")
def test_alter_implicit_id_to_explicit(self):
"""
Should be able to convert an implicit "id" field to an explicit "id"
primary key field.
"""
with connection.schema_editor() as editor:
editor.create_model(Author)
old_field = Author._meta.get_field("id")
new_field = IntegerField(primary_key=True)
new_field.set_attributes_from_name("id")
new_field.model = Author
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# This will fail if DROP DEFAULT is inadvertently executed on this
# field which drops the id sequence, at least on PostgreSQL.
Author.objects.create(name='Foo')
def test_rename(self):
"""
        Tests simple renaming of fields
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the field is right to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
self.assertNotIn("display_name", columns)
# Alter the name field's name
old_field = Author._meta.get_field("name")
new_field = CharField(max_length=254)
new_field.set_attributes_from_name("display_name")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
# Ensure the field is right afterwards
columns = self.column_classes(Author)
self.assertEqual(columns['display_name'][0], "CharField")
self.assertNotIn("name", columns)
@skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_rename_keep_null_status(self):
"""
Renaming a field shouldn't affect the not null status.
"""
with connection.schema_editor() as editor:
editor.create_model(Note)
with self.assertRaises(IntegrityError):
Note.objects.create(info=None)
old_field = Note._meta.get_field("info")
new_field = TextField()
new_field.set_attributes_from_name("detail_info")
with connection.schema_editor() as editor:
editor.alter_field(Note, old_field, new_field, strict=True)
columns = self.column_classes(Note)
self.assertEqual(columns['detail_info'][0], "TextField")
self.assertNotIn("info", columns)
with self.assertRaises(IntegrityError):
NoteRename.objects.create(detail_info=None)
def _test_m2m_create(self, M2MFieldClass):
"""
Tests M2M fields on models during creation
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [
LocalBookWithM2M,
LocalBookWithM2M._meta.get_field('tags').remote_field.through,
]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2M)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalBookWithM2M._meta.get_field("tags").remote_field.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
def test_m2m_create(self):
self._test_m2m_create(ManyToManyField)
def test_m2m_create_custom(self):
self._test_m2m_create(CustomManyToManyField)
def test_m2m_create_inherited(self):
self._test_m2m_create(InheritedManyToManyField)
def _test_m2m_create_through(self, M2MFieldClass):
"""
Tests M2M fields on models during creation with through models
"""
class LocalTagThrough(Model):
book = ForeignKey("schema.LocalBookWithM2MThrough")
tag = ForeignKey("schema.TagM2MTest")
class Meta:
app_label = 'schema'
apps = new_apps
class LocalBookWithM2MThrough(Model):
tags = M2MFieldClass("TagM2MTest", related_name="books", through=LocalTagThrough)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalTagThrough, LocalBookWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalTagThrough)
editor.create_model(TagM2MTest)
editor.create_model(LocalBookWithM2MThrough)
# Ensure there is now an m2m table there
columns = self.column_classes(LocalTagThrough)
self.assertEqual(columns['book_id'][0], "IntegerField")
self.assertEqual(columns['tag_id'][0], "IntegerField")
def test_m2m_create_through(self):
self._test_m2m_create_through(ManyToManyField)
def test_m2m_create_through_custom(self):
self._test_m2m_create_through(CustomManyToManyField)
def test_m2m_create_through_inherited(self):
self._test_m2m_create_through(InheritedManyToManyField)
def _test_m2m(self, M2MFieldClass):
"""
Tests adding/removing M2M fields on models
"""
class LocalAuthorWithM2M(Model):
name = CharField(max_length=255)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorWithM2M]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorWithM2M)
editor.create_model(TagM2MTest)
# Create an M2M field
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors")
new_field.contribute_to_class(LocalAuthorWithM2M, "tags")
self.local_models += [new_field.remote_field.through]
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.remote_field.through)
# Add the field
with connection.schema_editor() as editor:
editor.add_field(LocalAuthorWithM2M, new_field)
# Ensure there is now an m2m table there
columns = self.column_classes(new_field.remote_field.through)
self.assertEqual(columns['tagm2mtest_id'][0], "IntegerField")
# "Alter" the field. This should not rename the DB table to itself.
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2M, new_field, new_field)
# Remove the M2M table again
with connection.schema_editor() as editor:
editor.remove_field(LocalAuthorWithM2M, new_field)
# Ensure there's no m2m table there
self.assertRaises(DatabaseError, self.column_classes, new_field.remote_field.through)
def test_m2m(self):
self._test_m2m(ManyToManyField)
def test_m2m_custom(self):
self._test_m2m(CustomManyToManyField)
def test_m2m_inherited(self):
self._test_m2m(InheritedManyToManyField)
def _test_m2m_through_alter(self, M2MFieldClass):
"""
Tests altering M2Ms with explicit through models (should no-op)
"""
class LocalAuthorTag(Model):
author = ForeignKey("schema.LocalAuthorWithM2MThrough")
tag = ForeignKey("schema.TagM2MTest")
class Meta:
app_label = 'schema'
apps = new_apps
class LocalAuthorWithM2MThrough(Model):
name = CharField(max_length=255)
tags = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [LocalAuthorTag, LocalAuthorWithM2MThrough]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(LocalAuthorTag)
editor.create_model(LocalAuthorWithM2MThrough)
editor.create_model(TagM2MTest)
# Ensure the m2m table is there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
# "Alter" the field's blankness. This should not actually do anything.
old_field = LocalAuthorWithM2MThrough._meta.get_field("tags")
new_field = M2MFieldClass("schema.TagM2MTest", related_name="authors", through=LocalAuthorTag)
new_field.contribute_to_class(LocalAuthorWithM2MThrough, "tags")
with connection.schema_editor() as editor:
editor.alter_field(LocalAuthorWithM2MThrough, old_field, new_field)
# Ensure the m2m table is still there
self.assertEqual(len(self.column_classes(LocalAuthorTag)), 3)
def test_m2m_through_alter(self):
self._test_m2m_through_alter(ManyToManyField)
def test_m2m_through_alter_custom(self):
self._test_m2m_through_alter(CustomManyToManyField)
def test_m2m_through_alter_inherited(self):
self._test_m2m_through_alter(InheritedManyToManyField)
def _test_m2m_repoint(self, M2MFieldClass):
"""
Tests repointing M2M fields
"""
class LocalBookWithM2M(Model):
author = ForeignKey(Author)
title = CharField(max_length=100, db_index=True)
pub_date = DateTimeField()
tags = M2MFieldClass("TagM2MTest", related_name="books")
class Meta:
app_label = 'schema'
apps = new_apps
self.local_models = [
LocalBookWithM2M,
LocalBookWithM2M._meta.get_field('tags').remote_field.through,
]
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(LocalBookWithM2M)
editor.create_model(TagM2MTest)
editor.create_model(UniqueTest)
# Ensure the M2M exists and points to TagM2MTest
constraints = self.get_constraints(LocalBookWithM2M._meta.get_field("tags").remote_field.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["tagm2mtest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_tagm2mtest', 'id'))
break
else:
self.fail("No FK constraint for tagm2mtest_id found")
# Repoint the M2M
old_field = LocalBookWithM2M._meta.get_field("tags")
new_field = M2MFieldClass(UniqueTest)
new_field.contribute_to_class(LocalBookWithM2M, "uniques")
self.local_models += [new_field.remote_field.through]
with connection.schema_editor() as editor:
editor.alter_field(LocalBookWithM2M, old_field, new_field)
# Ensure old M2M is gone
self.assertRaises(DatabaseError, self.column_classes, LocalBookWithM2M._meta.get_field("tags").remote_field.through)
# Ensure the new M2M exists and points to UniqueTest
constraints = self.get_constraints(new_field.remote_field.through._meta.db_table)
if connection.features.supports_foreign_keys:
for name, details in constraints.items():
if details['columns'] == ["uniquetest_id"] and details['foreign_key']:
self.assertEqual(details['foreign_key'], ('schema_uniquetest', 'id'))
break
else:
self.fail("No FK constraint for uniquetest_id found")
def test_m2m_repoint(self):
self._test_m2m_repoint(ManyToManyField)
def test_m2m_repoint_custom(self):
self._test_m2m_repoint(CustomManyToManyField)
def test_m2m_repoint_inherited(self):
self._test_m2m_repoint(InheritedManyToManyField)
@unittest.skipUnless(connection.features.supports_column_check_constraints, "No check constraints")
def test_check_constraints(self):
"""
Tests creating/deleting CHECK constraints
"""
# Create the tables
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the constraint exists
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
# Alter the column to remove it
old_field = Author._meta.get_field("height")
new_field = IntegerField(null=True, blank=True)
new_field.set_attributes_from_name("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
self.fail("Check constraint for height found")
# Alter the column to re-add it
new_field2 = Author._meta.get_field("height")
with connection.schema_editor() as editor:
editor.alter_field(Author, new_field, new_field2, strict=True)
constraints = self.get_constraints(Author._meta.db_table)
for name, details in constraints.items():
if details['columns'] == ["height"] and details['check']:
break
else:
self.fail("No check constraint for height found")
def test_unique(self):
"""
Tests removing and adding unique constraints to a single column.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the field is unique to begin with
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be non-unique
old_field = Tag._meta.get_field("slug")
new_field = SlugField(unique=False)
new_field.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, old_field, new_field, strict=True)
# Ensure the field is no longer unique
Tag.objects.create(title="foo", slug="foo")
Tag.objects.create(title="bar", slug="foo")
Tag.objects.all().delete()
# Alter the slug field to be unique
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field, new_field2, strict=True)
# Ensure the field is unique again
Tag.objects.create(title="foo", slug="foo")
self.assertRaises(IntegrityError, Tag.objects.create, title="bar", slug="foo")
Tag.objects.all().delete()
# Rename the field
new_field3 = SlugField(unique=True)
new_field3.set_attributes_from_name("slug2")
with connection.schema_editor() as editor:
editor.alter_field(Tag, new_field2, new_field3, strict=True)
# Ensure the field is still unique
TagUniqueRename.objects.create(title="foo", slug2="foo")
self.assertRaises(IntegrityError, TagUniqueRename.objects.create, title="bar", slug2="foo")
Tag.objects.all().delete()
def test_unique_together(self):
"""
Tests removing and adding unique_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(UniqueTest)
# Ensure the fields are unique to begin with
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2011, slug="foo")
UniqueTest.objects.create(year=2011, slug="bar")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter the model to its non-unique-together companion
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, UniqueTest._meta.unique_together, [])
# Ensure the fields are no longer unique
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.create(year=2012, slug="foo")
UniqueTest.objects.all().delete()
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_unique_together(UniqueTest, [], UniqueTest._meta.unique_together)
# Ensure the fields are unique again
UniqueTest.objects.create(year=2012, slug="foo")
self.assertRaises(IntegrityError, UniqueTest.objects.create, year=2012, slug="foo")
UniqueTest.objects.all().delete()
def test_unique_together_with_fk(self):
"""
Tests removing and adding unique_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there is no unique_together constraint to begin with
self.assertEqual(Book._meta.unique_together, ())
# Add the unique_together constraint
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_unique_together(Book, [['author', 'title']], [])
def test_index_together(self):
"""
Tests removing and adding index_together constraints on a model.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
        # Ensure there's no index on the slug/title columns first
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter the model to add an index
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [], [("slug", "title")])
# Ensure there is now an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
# Alter it back
new_field2 = SlugField(unique=True)
new_field2.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_index_together(Tag, [("slug", "title")], [])
# Ensure there's no index
self.assertEqual(
False,
any(
c["index"]
for c in self.get_constraints("schema_tag").values()
if c['columns'] == ["slug", "title"]
),
)
def test_index_together_with_fk(self):
"""
Tests removing and adding index_together constraints that include
a foreign key.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
        # Ensure there's no index_together to begin with
        self.assertEqual(Book._meta.index_together, ())
        # Add the index_together constraint
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [], [['author', 'title']])
# Alter it back
with connection.schema_editor() as editor:
editor.alter_index_together(Book, [['author', 'title']], [])
def test_create_index_together(self):
"""
Tests creating models with index_together already defined
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(TagIndexed)
# Ensure there is an index
self.assertEqual(
True,
any(
c["index"]
for c in self.get_constraints("schema_tagindexed").values()
if c['columns'] == ["slug", "title"]
),
)
def test_db_table(self):
"""
Tests renaming of the table
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure the table is there to begin with
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_author", "schema_otherauthor")
# Ensure the table is there afterwards
Author._meta.db_table = "schema_otherauthor"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
# Alter the table again
with connection.schema_editor() as editor:
editor.alter_db_table(Author, "schema_otherauthor", "schema_author")
# Ensure the table is still there
Author._meta.db_table = "schema_author"
columns = self.column_classes(Author)
self.assertEqual(columns['name'][0], "CharField")
def test_indexes(self):
"""
Tests creation/altering of indexes
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
editor.create_model(Book)
# Ensure the table is there and has the right index
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to remove the index
old_field = Book._meta.get_field("title")
new_field = CharField(max_length=100, db_index=False)
new_field.set_attributes_from_name("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, old_field, new_field, strict=True)
# Ensure the table is there and has no index
self.assertNotIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Alter to re-add the index
new_field2 = Book._meta.get_field("title")
with connection.schema_editor() as editor:
editor.alter_field(Book, new_field, new_field2, strict=True)
# Ensure the table is there and has the index again
self.assertIn(
"title",
self.get_indexes(Book._meta.db_table),
)
# Add a unique column, verify that creates an implicit index
new_field3 = BookWithSlug._meta.get_field("slug")
with connection.schema_editor() as editor:
editor.add_field(Book, new_field3)
self.assertIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
# Remove the unique, check the index goes with it
new_field4 = CharField(max_length=20, unique=False)
new_field4.set_attributes_from_name("slug")
with connection.schema_editor() as editor:
editor.alter_field(BookWithSlug, new_field3, new_field4, strict=True)
self.assertNotIn(
"slug",
self.get_indexes(Book._meta.db_table),
)
def test_primary_key(self):
"""
Tests altering of the primary key
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Tag)
# Ensure the table is there and has the right PK
self.assertTrue(
self.get_indexes(Tag._meta.db_table)['id']['primary_key'],
)
# Alter to change the PK
id_field = Tag._meta.get_field("id")
old_field = Tag._meta.get_field("slug")
new_field = SlugField(primary_key=True)
new_field.set_attributes_from_name("slug")
new_field.model = Tag
with connection.schema_editor() as editor:
editor.remove_field(Tag, id_field)
editor.alter_field(Tag, old_field, new_field)
# Ensure the PK changed
self.assertNotIn(
'id',
self.get_indexes(Tag._meta.db_table),
)
self.assertTrue(
self.get_indexes(Tag._meta.db_table)['slug']['primary_key'],
)
def test_context_manager_exit(self):
"""
Ensures transaction is correctly closed when an error occurs
inside a SchemaEditor context.
"""
class SomeError(Exception):
pass
try:
with connection.schema_editor():
raise SomeError
except SomeError:
self.assertFalse(connection.in_atomic_block)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_foreign_key_index_long_names_regression(self):
"""
Regression test for #21497.
        Only affects databases that support foreign keys.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Find the properly shortened column name
column_name = connection.ops.quote_name("author_foreign_key_with_really_long_field_name_id")
column_name = column_name[1:-1].lower() # unquote, and, for Oracle, un-upcase
# Ensure the table is there and has an index on the column
self.assertIn(
column_name,
self.get_indexes(BookWithLongName._meta.db_table),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_add_foreign_key_long_names(self):
"""
Regression test for #23009.
        Only affects databases that support foreign keys.
"""
# Create the initial tables
with connection.schema_editor() as editor:
editor.create_model(AuthorWithEvenLongerName)
editor.create_model(BookWithLongName)
# Add a second FK, this would fail due to long ref name before the fix
new_field = ForeignKey(AuthorWithEvenLongerName, related_name="something")
new_field.set_attributes_from_name("author_other_really_long_named_i_mean_so_long_fk")
with connection.schema_editor() as editor:
editor.add_field(BookWithLongName, new_field)
def test_creation_deletion_reserved_names(self):
"""
Tries creating a model's table, and then deleting it when it has a
SQL reserved name.
"""
# Create the table
with connection.schema_editor() as editor:
try:
editor.create_model(Thing)
except OperationalError as e:
self.fail("Errors when applying initial migration for a model "
"with a table named after a SQL reserved word: %s" % e)
# Check that it's there
list(Thing.objects.all())
# Clean up that table
with connection.schema_editor() as editor:
editor.delete_model(Thing)
# Check that it's gone
self.assertRaises(
DatabaseError,
lambda: list(Thing.objects.all()),
)
@unittest.skipUnless(connection.features.supports_foreign_keys, "No FK support")
def test_remove_constraints_capital_letters(self):
"""
#23065 - Constraint names must be quoted if they contain capital letters.
"""
def get_field(*args, **kwargs):
kwargs['db_column'] = "CamelCase"
field = kwargs.pop('field_class', IntegerField)(*args, **kwargs)
field.set_attributes_from_name("CamelCase")
return field
model = Author
field = get_field()
table = model._meta.db_table
column = field.column
with connection.schema_editor() as editor:
editor.create_model(model)
editor.add_field(model, field)
editor.execute(
editor.sql_create_index % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseIndex"),
"columns": editor.quote_name(column),
"extra": "",
}
)
editor.alter_field(model, get_field(db_index=True), field)
editor.execute(
editor.sql_create_unique % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseUniqConstraint"),
"columns": editor.quote_name(field.column),
}
)
editor.alter_field(model, get_field(unique=True), field)
editor.execute(
editor.sql_create_fk % {
"table": editor.quote_name(table),
"name": editor.quote_name("CamelCaseFKConstraint"),
"column": editor.quote_name(column),
"to_table": editor.quote_name(table),
"to_column": editor.quote_name(model._meta.auto_field.column),
}
)
editor.alter_field(model, get_field(Author, field_class=ForeignKey), field)
def test_add_field_use_effective_default(self):
"""
#23987 - effective_default() should be used as the field default when
adding a new field.
"""
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField to ensure default will be used from effective_default
new_field = CharField(max_length=15, blank=True)
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], None if connection.features.interprets_empty_strings_as_nulls else '')
def test_add_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Ensure there's no surname field
columns = self.column_classes(Author)
self.assertNotIn("surname", columns)
# Create a row
Author.objects.create(name='Anonymous1')
# Add new CharField with a default
new_field = CharField(max_length=15, blank=True, default='surname default')
new_field.set_attributes_from_name("surname")
with connection.schema_editor() as editor:
editor.add_field(Author, new_field)
# Ensure field was added with the right default
with connection.cursor() as cursor:
cursor.execute("SELECT surname FROM schema_author;")
item = cursor.fetchall()[0]
self.assertEqual(item[0], 'surname default')
# And that the default is no longer set in the database.
field = next(
f for f in connection.introspection.get_table_description(cursor, "schema_author")
if f.name == "surname"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
def test_alter_field_default_dropped(self):
# Create the table
with connection.schema_editor() as editor:
editor.create_model(Author)
# Create a row
Author.objects.create(name='Anonymous1')
self.assertEqual(Author.objects.get().height, None)
old_field = Author._meta.get_field('height')
# The default from the new field is used in updating existing rows.
new_field = IntegerField(blank=True, default=42)
new_field.set_attributes_from_name('height')
with connection.schema_editor() as editor:
editor.alter_field(Author, old_field, new_field)
self.assertEqual(Author.objects.get().height, 42)
# The database default should be removed.
with connection.cursor() as cursor:
field = next(
f for f in connection.introspection.get_table_description(cursor, "schema_author")
if f.name == "height"
)
if connection.features.can_introspect_default:
self.assertIsNone(field.default)
| {
"content_hash": "4f9fc5bb0ea1712c25b0c7fee8491ce6",
"timestamp": "",
"source": "github",
"line_count": 1508,
"max_line_length": 125,
"avg_line_length": 43.28978779840849,
"alnum_prop": 0.6144513717620748,
"repo_name": "shtouff/django",
"id": "5128c9ec1e1960d945d9d78b7af11d233c628ab7",
"size": "65281",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/schema/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43177"
},
{
"name": "HTML",
"bytes": "171768"
},
{
"name": "JavaScript",
"bytes": "105066"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "10907314"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudsearch2 import exceptions
from boto.compat import json
class CloudSearchConnection(AWSQueryConnection):
"""
Amazon CloudSearch Configuration Service
You use the Amazon CloudSearch configuration service to create,
configure, and manage search domains. Configuration service
requests are submitted using the AWS Query protocol. AWS Query
requests are HTTP or HTTPS requests submitted via HTTP GET or POST
with a query parameter named Action.
The endpoint for configuration service requests is region-
    specific: cloudsearch.<region>.amazonaws.com. For example,
cloudsearch.us-east-1.amazonaws.com. For a current list of
supported regions and endpoints, see `Regions and Endpoints`_.
"""
APIVersion = "2013-01-01"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com"
ResponseError = JSONResponseError
_faults = {
"InvalidTypeException": exceptions.InvalidTypeException,
"LimitExceededException": exceptions.LimitExceededException,
"InternalException": exceptions.InternalException,
"DisabledOperationException": exceptions.DisabledOperationException,
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"BaseException": exceptions.BaseException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(CloudSearchConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
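    # Every public method below follows the same pattern: build a flat params
    # dict (nested structures are flattened by build_complex_param), then call
    # _make_request(), which POSTs the Action with ContentType=JSON and
    # returns the decoded JSON response body.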
def build_suggesters(self, domain_name):
"""
Indexes the search suggestions.
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='BuildSuggesters',
verb='POST',
path='/', params=params)
def create_domain(self, domain_name):
"""
Creates a new search domain. For more information, see
`Creating a Search Domain`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A name for the domain you are creating. Allowed
characters are a-z (lower-case letters), 0-9, and hyphen (-).
Domain names must start with a letter or number and be at least 3
and no more than 28 characters long.
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='CreateDomain',
verb='POST',
path='/', params=params)
def define_analysis_scheme(self, domain_name, analysis_scheme):
"""
Configures an analysis scheme for a domain. An analysis scheme
defines language-specific text processing options for a `text`
field. For more information, see `Configuring Analysis
Schemes`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type analysis_scheme: dict
:param analysis_scheme: Configuration information for an analysis
scheme. Each analysis scheme has a unique name and specifies the
language of the text to be processed. The following options can be
configured for an analysis scheme: `Synonyms`, `Stopwords`,
`StemmingDictionary`, and `AlgorithmicStemming`.
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'AnalysisScheme',
analysis_scheme)
return self._make_request(
action='DefineAnalysisScheme',
verb='POST',
path='/', params=params)
def define_expression(self, domain_name, expression):
"""
Configures an `Expression` for the search domain. Used to
create new expressions and modify existing ones. If the
expression exists, the new configuration replaces the old one.
For more information, see `Configuring Expressions`_ in the
Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type expression: dict
:param expression: A named expression that can be evaluated at search
time. Can be used for sorting and filtering search results and
constructing other expressions.
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'Expression',
expression)
return self._make_request(
action='DefineExpression',
verb='POST',
path='/', params=params)
def define_index_field(self, domain_name, index_field):
"""
Configures an `IndexField` for the search domain. Used to
create new fields and modify existing ones. You must specify
the name of the domain you are configuring and an index field
configuration. The index field configuration specifies a
unique name, the index field type, and the options you want to
configure for the field. The options you can specify depend on
the `IndexFieldType`. If the field exists, the new
configuration replaces the old one. For more information, see
`Configuring Index Fields`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type index_field: dict
:param index_field: The index field and field options you want to
configure.
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'IndexField',
index_field)
return self._make_request(
action='DefineIndexField',
verb='POST',
path='/', params=params)
def define_suggester(self, domain_name, suggester):
"""
Configures a suggester for a domain. A suggester enables you
to display possible matches before users finish typing their
queries. When you configure a suggester, you must specify the
name of the text field you want to search for possible matches
and a unique name for the suggester. For more information, see
`Getting Search Suggestions`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type suggester: dict
:param suggester: Configuration information for a search suggester.
Each suggester has a unique name and specifies the text field you
want to use for suggestions. The following options can be
configured for a suggester: `FuzzyMatching`, `SortExpression`.
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'Suggester',
suggester)
return self._make_request(
action='DefineSuggester',
verb='POST',
path='/', params=params)
def delete_analysis_scheme(self, domain_name, analysis_scheme_name):
"""
Deletes an analysis scheme. For more information, see
`Configuring Analysis Schemes`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type analysis_scheme_name: string
:param analysis_scheme_name: The name of the analysis scheme you want
to delete.
"""
params = {
'DomainName': domain_name,
'AnalysisSchemeName': analysis_scheme_name,
}
return self._make_request(
action='DeleteAnalysisScheme',
verb='POST',
path='/', params=params)
def delete_domain(self, domain_name):
"""
Permanently deletes a search domain and all of its data. Once
a domain has been deleted, it cannot be recovered. For more
information, see `Deleting a Search Domain`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to permanently
delete.
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='DeleteDomain',
verb='POST',
path='/', params=params)
def delete_expression(self, domain_name, expression_name):
"""
Removes an `Expression` from the search domain. For more
information, see `Configuring Expressions`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type expression_name: string
:param expression_name: The name of the `Expression` to delete.
"""
params = {
'DomainName': domain_name,
'ExpressionName': expression_name,
}
return self._make_request(
action='DeleteExpression',
verb='POST',
path='/', params=params)
def delete_index_field(self, domain_name, index_field_name):
"""
Removes an `IndexField` from the search domain. For more
information, see `Configuring Index Fields`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type index_field_name: string
        :param index_field_name: The name of the index field you want to
remove from the domain's indexing options.
"""
params = {
'DomainName': domain_name,
'IndexFieldName': index_field_name,
}
return self._make_request(
action='DeleteIndexField',
verb='POST',
path='/', params=params)
def delete_suggester(self, domain_name, suggester_name):
"""
Deletes a suggester. For more information, see `Getting Search
Suggestions`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type suggester_name: string
:param suggester_name: Specifies the name of the suggester you want to
delete.
"""
params = {
'DomainName': domain_name,
'SuggesterName': suggester_name,
}
return self._make_request(
action='DeleteSuggester',
verb='POST',
path='/', params=params)
def describe_analysis_schemes(self, domain_name,
analysis_scheme_names=None, deployed=None):
"""
Gets the analysis schemes configured for a domain. An analysis
scheme defines language-specific text processing options for a
`text` field. Can be limited to specific analysis schemes by
name. By default, shows all analysis schemes and includes any
pending changes to the configuration. Set the `Deployed`
option to `True` to show the active configuration and exclude
pending changes. For more information, see `Configuring
Analysis Schemes`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type analysis_scheme_names: list
:param analysis_scheme_names: The analysis schemes you want to
describe.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration (
`True`) or include any pending changes ( `False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if analysis_scheme_names is not None:
self.build_list_params(params,
analysis_scheme_names,
'AnalysisSchemeNames.member')
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeAnalysisSchemes',
verb='POST',
path='/', params=params)
def describe_availability_options(self, domain_name, deployed=None):
"""
Gets the availability options configured for a domain. By
default, shows the configuration with any pending changes. Set
the `Deployed` option to `True` to show the active
configuration and exclude pending changes. For more
information, see `Configuring Availability Options`_ in the
Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration (
`True`) or include any pending changes ( `False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeAvailabilityOptions',
verb='POST',
path='/', params=params)
def describe_domains(self, domain_names=None):
"""
Gets information about the search domains owned by this
account. Can be limited to specific domains. Shows all domains
by default. For more information, see `Getting Information
about a Search Domain`_ in the Amazon CloudSearch Developer
Guide .
:type domain_names: list
:param domain_names: The names of the domains you want to include in
the response.
"""
params = {}
if domain_names is not None:
self.build_list_params(params,
domain_names,
'DomainNames.member')
return self._make_request(
action='DescribeDomains',
verb='POST',
path='/', params=params)
def describe_expressions(self, domain_name, expression_names=None,
deployed=None):
"""
Gets the expressions configured for the search domain. Can be
limited to specific expressions by name. By default, shows all
expressions and includes any pending changes to the
configuration. Set the `Deployed` option to `True` to show the
active configuration and exclude pending changes. For more
information, see `Configuring Expressions`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type expression_names: list
:param expression_names: Limits the `DescribeExpressions` response to
the specified expressions. If not specified, all expressions are
shown.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration (
`True`) or include any pending changes ( `False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if expression_names is not None:
self.build_list_params(params,
expression_names,
'ExpressionNames.member')
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeExpressions',
verb='POST',
path='/', params=params)
def describe_index_fields(self, domain_name, field_names=None,
deployed=None):
"""
Gets information about the index fields configured for the
search domain. Can be limited to specific fields by name. By
default, shows all fields and includes any pending changes to
the configuration. Set the `Deployed` option to `True` to show
the active configuration and exclude pending changes. For more
information, see `Getting Domain Information`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type field_names: list
:param field_names: A list of the index fields you want to describe. If
not specified, information is returned for all configured index
fields.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration (
`True`) or include any pending changes ( `False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if field_names is not None:
self.build_list_params(params,
field_names,
'FieldNames.member')
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeIndexFields',
verb='POST',
path='/', params=params)
def describe_scaling_parameters(self, domain_name):
"""
Gets the scaling parameters configured for a domain. A
domain's scaling parameters specify the desired search
instance type and replication count. For more information, see
`Configuring Scaling Options`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='DescribeScalingParameters',
verb='POST',
path='/', params=params)
def describe_service_access_policies(self, domain_name, deployed=None):
"""
Gets information about the access policies that control access
to the domain's document and search endpoints. By default,
shows the configuration with any pending changes. Set the
`Deployed` option to `True` to show the active configuration
and exclude pending changes. For more information, see
`Configuring Access for a Search Domain`_ in the Amazon
CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration (
`True`) or include any pending changes ( `False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeServiceAccessPolicies',
verb='POST',
path='/', params=params)
def describe_suggesters(self, domain_name, suggester_names=None,
deployed=None):
"""
Gets the suggesters configured for a domain. A suggester
enables you to display possible matches before users finish
typing their queries. Can be limited to specific suggesters by
name. By default, shows all suggesters and includes any
pending changes to the configuration. Set the `Deployed`
option to `True` to show the active configuration and exclude
pending changes. For more information, see `Getting Search
Suggestions`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: The name of the domain you want to describe.
:type suggester_names: list
:param suggester_names: The suggesters you want to describe.
:type deployed: boolean
:param deployed: Whether to display the deployed configuration (
`True`) or include any pending changes ( `False`). Defaults to
`False`.
"""
params = {'DomainName': domain_name, }
if suggester_names is not None:
self.build_list_params(params,
suggester_names,
'SuggesterNames.member')
if deployed is not None:
params['Deployed'] = str(
deployed).lower()
return self._make_request(
action='DescribeSuggesters',
verb='POST',
path='/', params=params)
def index_documents(self, domain_name):
"""
Tells the search domain to start indexing its documents using
the latest indexing options. This operation must be invoked to
activate options whose OptionStatus is
`RequiresIndexDocuments`.
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
"""
params = {'DomainName': domain_name, }
return self._make_request(
action='IndexDocuments',
verb='POST',
path='/', params=params)
def list_domain_names(self):
"""
Lists all search domains owned by an account.
"""
params = {}
return self._make_request(
action='ListDomainNames',
verb='POST',
path='/', params=params)
def update_availability_options(self, domain_name, multi_az):
"""
Configures the availability options for a domain. Enabling the
Multi-AZ option expands an Amazon CloudSearch domain to an
additional Availability Zone in the same Region to increase
fault tolerance in the event of a service disruption. Changes
to the Multi-AZ option can take about half an hour to become
active. For more information, see `Configuring Availability
Options`_ in the Amazon CloudSearch Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type multi_az: boolean
:param multi_az: You expand an existing search domain to a second
Availability Zone by setting the Multi-AZ option to true.
Similarly, you can turn off the Multi-AZ option to downgrade the
domain to a single Availability Zone by setting the Multi-AZ option
to `False`.
"""
params = {'DomainName': domain_name, 'MultiAZ': multi_az, }
return self._make_request(
action='UpdateAvailabilityOptions',
verb='POST',
path='/', params=params)
def update_scaling_parameters(self, domain_name, scaling_parameters):
"""
Configures scaling parameters for a domain. A domain's scaling
parameters specify the desired search instance type and
replication count. Amazon CloudSearch will still automatically
scale your domain based on the volume of data and traffic, but
not below the desired instance type and replication count. If
the Multi-AZ option is enabled, these values control the
resources used per Availability Zone. For more information,
see `Configuring Scaling Options`_ in the Amazon CloudSearch
Developer Guide .
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type scaling_parameters: dict
:param scaling_parameters: The desired instance type and desired number
of replicas of each index partition.
"""
params = {'DomainName': domain_name, }
self.build_complex_param(params, 'ScalingParameters',
scaling_parameters)
return self._make_request(
action='UpdateScalingParameters',
verb='POST',
path='/', params=params)
def update_service_access_policies(self, domain_name, access_policies):
"""
Configures the access rules that control access to the
domain's document and search endpoints. For more information,
        see `Configuring Access for an Amazon CloudSearch Domain`_.
:type domain_name: string
:param domain_name: A string that represents the name of a domain.
Domain names are unique across the domains owned by an account
within an AWS region. Domain names start with a letter or number
and can contain the following characters: a-z (lowercase), 0-9, and
- (hyphen).
:type access_policies: string
:param access_policies: The access rules you want to configure. These
rules replace any existing rules.
"""
params = {
'DomainName': domain_name,
'AccessPolicies': access_policies,
}
return self._make_request(
action='UpdateServiceAccessPolicies',
verb='POST',
path='/', params=params)
def build_complex_param(self, params, label, value):
"""Serialize a structure.
For example::
param_type = 'structure'
label = 'IndexField'
value = {'IndexFieldName': 'a', 'IntOptions': {'DefaultValue': 5}}
would result in the params dict being updated with these params::
IndexField.IndexFieldName = a
IndexField.IntOptions.DefaultValue = 5
:type params: dict
:param params: The params dict. The complex list params
will be added to this dict.
:type label: str
:param label: String label for param key
:type value: any
:param value: The value to serialize
"""
        for k, v in value.items():
            if isinstance(v, dict):
                # Recurse once per nested dict; its keys end up flattened
                # under a dotted prefix, e.g. IndexField.IntOptions.DefaultValue.
                self.build_complex_param(params, label + '.' + k, v)
elif isinstance(v, bool):
params['%s.%s' % (label, k)] = v and 'true' or 'false'
else:
params['%s.%s' % (label, k)] = v
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read().decode('utf-8')
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
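# A minimal usage sketch for the connection above; the credentials and the
# domain name are placeholders, and the calls require a real AWS account:
#
#     conn = CloudSearchConnection(aws_access_key_id='<key-id>',
#                                  aws_secret_access_key='<secret>')
#     conn.create_domain('my-search-domain')
#     conn.define_index_field('my-search-domain',
#                             {'IndexFieldName': 'title',
#                              'IndexFieldType': 'text'})
#     conn.index_documents('my-search-domain')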
| {
"content_hash": "a56c42dfc8fb4bf59f566e29f6eb48fd",
"timestamp": "",
"source": "github",
"line_count": 757,
"max_line_length": 79,
"avg_line_length": 41.093791281373846,
"alnum_prop": 0.611996913977112,
"repo_name": "kyleknap/boto",
"id": "fdc9d4c625026531e6e2f682971895817f6e3c0b",
"size": "32232",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "boto/cloudsearch2/layer1.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
from enum import Enum
from typing import Iterable, List, Set, Tuple, Type, Union
def enum_to_choices(enumeration: Type[Enum]) -> Iterable[Tuple]:
return tuple((e.value, e.value) for e in enumeration)
def enum_to_set(enumeration: Type[Enum]) -> Set:
return set(e.value for e in enumeration)
def values_to_choices(enumeration: Union[List, Set]) -> Iterable[Tuple]:
return tuple((e, e) for e in sorted(enumeration))
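# Illustrative usage; the Color enum is a hypothetical example, not something
# defined in this module:
#
#     class Color(Enum):
#         RED = "red"
#         BLUE = "blue"
#
#     enum_to_choices(Color)         # (("red", "red"), ("blue", "blue"))
#     enum_to_set(Color)             # {"red", "blue"}
#     values_to_choices({"b", "a"})  # (("a", "a"), ("b", "b"))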
| {
"content_hash": "d2d46694ddfd16574c0739a795cc2d91",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 30.785714285714285,
"alnum_prop": 0.7122969837587007,
"repo_name": "polyaxon/polyaxon",
"id": "eadf572bb2424b92d487d54a9e9bb0f58a12ac40",
"size": "1036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/polyaxon/utils/enums_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
} |
'''
@author Angel -Ote- Cortes
@version 0.1
Anonymizer is a class based on the requests package to simplify the use of proxies and Tor.
The class automatically selects a proxy from a list, changes headers randomly and keeps track of proxies that are not working.
Copyright (C) 2012 Angel Cortés
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of copyright holders nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS
BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
import requests
from random import choice
from time import sleep
import socket
import datetime
TOR_CONF = {"MaxCircuitDirtiness":"60","NewCircuitPeriod":"10","CircuitBuildTimeout":"20"}
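# The options above are standard Tor controller settings: MaxCircuitDirtiness
# caps how long (in seconds) an existing circuit may be reused,
# NewCircuitPeriod controls how often Tor considers building new circuits, and
# CircuitBuildTimeout bounds how long a single circuit construction may take.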
class AnonymizerException(Exception):
'''
Simple exception for the class
'''
def __init__(self,errorCode,content):
self.errorCode = errorCode
self.content = content
def __str__(self):
Lista = [self.errorCode,self.content]
return repr(Lista)
class Anonymizer(object):
'''
    Anonymize any HTTP GET request through a proxy list or Tor.
    If the proxy is Tor you can manage how many connections you want to make per minute, and if you
    configure a Tor control port the circuit is changed automatically.
Params:
        -proxy: Required. Dict with the HTTP proxies list. Accepts standard HTTP proxies:
            {'http':["127.0.0.1:3128","127.0.0.1:3129"]}
        Or Tor format with/without TORCTL port:
            {'tor':"127.0.0.1:8118",'torctl':"127.0.0.1:9051"}
        -petitions: (default 15) Number of requests per minute with Tor
        -user: (default None) Reserved for future uses
        -passwd: (default None) Passphrase for the Tor control port AUTHENTICATE command
        -timeout: (default 15) Timeout for HTTP requests
'''
def __init__(self,proxy,petitions=15,user=None,passwd=None, timeout=15):
self.MAX_PETITIONS=petitions
self.CURR_PETITIONS=0
self.LAST_TIMESTAMP = datetime.datetime.now()
self.timeout = timeout
self.proxy_to_use = {'http':None}
self.isTor = False
self.torCTL = None
##TorCtl user/pass
self.proxy_user = user
self.proxy_passwd = passwd
##Set the Headers
self.request_headers = {}
##Temporal objects
self.url = None
##Result object
self.http_response = None
#Validate the proxy list provided
self.__check_proxy_list(proxy)
def __check_proxy_list(self,proxyDict):
        if not proxyDict or not isinstance(proxyDict, dict):
raise AnonymizerException(501,"No good proxy dict/list provided for Anonymizer")
if "tor" in proxyDict.keys():
self.isTor = True
self.proxy = {'http':[proxyDict['tor']]}
if "torctl" in proxyDict.keys():
self.torCTL = proxyDict['torctl']
self.__prepare_tor()
return True
if "http" in proxyDict.keys():
if isinstance(proxyDict['http'],list):
self.proxy = proxyDict
return True
else:
raise AnonymizerException(502,"No good HTTP proxy list provided for Anonymizer")
def __check_timestamps(self):
now=datetime.datetime.now()
delta=now-self.LAST_TIMESTAMP
#print("Delta Seconds:%s"%str(delta.seconds))
if delta.seconds > int(TOR_CONF['MaxCircuitDirtiness']):
self.LAST_TIMESTAMP = now
return True
return False
def __set_RandomHeaders(self):
'''
        Select random headers from a list and assign them to the connection
'''
##User Agent
user_agents_list = []
user_agents_list.append('Mozilla/5.0 (iPhone; U; CPU iOS 2_0 like Mac OS X; en-us)')
user_agents_list.append('Mozilla/5.0 (Linux; U; Android 0.5; en-us)')
user_agents_list.append('Mozilla/5.0 (iPad; U; CPU OS 3_2_1 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko)')
user_agents_list.append('Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)')
user_agents_list.append('Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')
user_agents_list.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.13 (KHTML, like Gecko) Chrome/0.2.149.29 Safari/525.13')
user_agents_list.append('Opera/9.25 (Windows NT 6.0; U; en)')
user_agents_list.append('Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00')
user_agents_list.append('Opera/9.80 (Windows NT 6.0; U; en) Presto/2.7.39 Version/11.00')
user_agents_list.append('Mozilla/5.0 (Windows NT 6.0; U; ja; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.00')
user_agents_list.append('Mozilla/4.0 (compatible; MSIE 8.0; X11; Linux x86_64; pl) Opera 11.00')
user_agents_list.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; fr) Opera 11.00')
user_agents_list.append('Opera/9.80 (Windows NT 6.1 x64; U; en) Presto/2.7.62 Version/11.00')
user_agents_list.append('Mozilla/5.0 (Windows NT 5.1; U; de; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 Opera 11.00')
user_agents_list.append('Mozilla/4.0 (compatible; MSIE 8.0; X11; Linux x86_64; pl) Opera 11.00')
user_agent = choice(user_agents_list).strip()
##Language
accept_language_list = []
accept_language_list.append('de-de,es-es;q=0.8,en-us;q=0.5,en;q=0.3')
accept_language_list.append('en-us;q=0.8,en;q=0.3')
accept_language_list.append('es;q=0.8,en-us;q=0.5,en;q=0.3')
accept_language_list.append('es-es;q=0.8,en;q=0.3')
accept_language_list.append('de-de;q=0.8,en;q=0.3')
        accept_language_list.append('de-de;q=0.8,en-us;q=0.5')
language = choice(accept_language_list).strip()
self.request_headers = {'User-Agent': user_agent, 'Accept-Language':language, 'Referer': ''}
def __prepare_request(self,url):
"""
Prepare the random objects for the request.
"""
self.url = url
self.__set_RandomHeaders()
requests.defaults.defaults['keep_alive']=False
if self.isTor:
if self.torCTL != None:
if not self.__check_timestamps():
if (self.CURR_PETITIONS == self.MAX_PETITIONS):
self.CURR_PETITIONS = 0
raise AnonymizerException(111,"Max number of petitions(%s) in %sseconds reached"%(self.MAX_PETITIONS,TOR_CONF['MaxCircuitDirtiness']))
self.CURR_PETITIONS = self.CURR_PETITIONS + 1
else:
self.CURR_PETITIONS = 1
self.__reroute_tor()
self.proxy_to_use['http'] = choice(self.proxy['http'])
def __prepare_tor(self):
host, port = self.torCTL.split(':')
#print("Servidor de control TOR: %s"%host)
#print("Puerto de control TOR: %s"%port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host,int(port)))
if self.proxy_passwd:
s.send(str.encode('AUTHENTICATE "%s"\r\n'%self.proxy_passwd))
data = s.recv(100)
if not str(data.decode()).startswith("250"):
                raise AnonymizerException(211, "Error in the AUTHENTICATE command to the TOR control port.")
#Short circuit time
s.send(str.encode('SETCONF NewCircuitPeriod=%s\r\n'%TOR_CONF['NewCircuitPeriod']))
data = s.recv(100)
#Short circuit build time
s.send(str.encode('SETCONF CircuitBuildTimeout=%s\r\n'%TOR_CONF['CircuitBuildTimeout']))
data = s.recv(100)
#Short circuit Valid time
s.send(str.encode('SETCONF MaxCircuitDirtiness="%s"\r\n'%TOR_CONF['MaxCircuitDirtiness']))
data = s.recv(100)
sleep(5)
s.close()
#print("Tor ReConfigured")
def __reroute_tor(self):
host, port = self.torCTL.split(':')
#print("Servidor de control TOR: %s"%host)
#print("Puerto de control TOR: %s"%port)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
data = None
s.connect((host,int(port)))
#print("Conectado al servidor de control")
if self.proxy_passwd:
s.send(str.encode("AUTHENTICATE \"%s\"\r\n"%self.proxy_passwd))
data = s.recv(100)
if not str(data.decode()).startswith("250"):
                raise AnonymizerException(211, "Error in the AUTHENTICATE command to the TOR control port.")
s.send(str.encode('SIGNAL NEWNYM\r\n'))
s.recv(100)
sleep(5)
s.close()
#print("Tor rerouted")
def get(self,url,pureAnon=False,DEBUG=False):
'''
        get returns the requested url, fetched through a randomly chosen proxy from the list, as a requests Response object.
PARAMS:
-url: The url to retrieve
            -pureAnon: (default False) If set to True, no cookies are stored for this request.
-DEBUG: (default False) If True, return a dict like:
{'response':http_response,'proxy':"proxy used",'headers':"Fake headers used"}
'''
self.__prepare_request(url)
if pureAnon:
requests.defaults.defaults['store_cookies'] = False
try:
self.http_response = requests.get(self.url,proxies=self.proxy_to_use,headers=self.request_headers, timeout=self.timeout)
except Exception as e:
raise AnonymizerException(101,"Requests unable to get %s using the proxy %s"%(url,self.proxy_to_use))
if not DEBUG:
return self.http_response
else:
output = {'response':self.http_response,'proxy':self.proxy_to_use,'headers':self.request_headers}
return output
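# A minimal usage sketch; the proxy addresses, control port and passphrase
# below are placeholders for a locally running Tor instance:
#
#     proxies = {'tor': "127.0.0.1:8118", 'torctl': "127.0.0.1:9051"}
#     anon = Anonymizer(proxies, petitions=10, passwd="my-passphrase")
#     response = anon.get("http://example.com", pureAnon=True)
#     print(response.status_code)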
| {
"content_hash": "c9c8e4efc094de6449c7fafb5b31813d",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 158,
"avg_line_length": 43.45703125,
"alnum_prop": 0.6314606741573033,
"repo_name": "OteCortes/Anopymizer",
"id": "362998b22aa4236ae7c6fd7aebe46c56727579c8",
"size": "11151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anonymizer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9656"
}
],
"symlink_target": ""
} |
"""Tests for record_input_op."""
import os
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors_impl import NotFoundError
from tensorflow.python.lib.io import tf_record
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RecordInputOpTest(test.TestCase):
def generateTestData(self,
prefix,
n,
m,
compression_type=tf_record.TFRecordCompressionType.NONE):
options = tf_record.TFRecordOptions(compression_type)
for i in range(n):
f = os.path.join(self.get_temp_dir(), prefix + "." + str(i))
w = tf_record.TFRecordWriter(f, options=options)
for j in range(m):
w.write("{0:0{width}}".format(i * m + j, width=10).encode("utf-8"))
w.close()
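  # Each record written above is its global index zero-padded to ten digits
  # (so the very first record is b"0000000000"), which is what the assertions
  # in the tests below compare against.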
def testRecordInputSimple(self):
with self.cached_session() as sess:
self.generateTestData("basic", 1, 1)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input").get_yield_op()
self.assertEqual(self.evaluate(yield_op), b"0000000000")
def testRecordInputSimpleGzip(self):
with self.cached_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.GZIP)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.GZIP).get_yield_op(
)
self.assertEqual(self.evaluate(yield_op), b"0000000000")
def testRecordInputSimpleZlib(self):
with self.cached_session() as sess:
self.generateTestData(
"basic",
1,
1,
compression_type=tf_record.TFRecordCompressionType.ZLIB)
yield_op = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=1,
batch_size=1,
name="record_input",
compression_type=tf_record.TFRecordCompressionType.ZLIB).get_yield_op(
)
self.assertEqual(self.evaluate(yield_op), b"0000000000")
@test_util.run_deprecated_v1
def testRecordInputEpochs(self):
files = 100
records_per_file = 100
batches = 2
with self.cached_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = self.evaluate(yield_op)
          self.assertEqual(len(op_list), batches)
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
@test_util.run_deprecated_v1
def testDoesNotDeadlock(self):
# Iterate multiple times to cause deadlock if there is a chance it can occur
for _ in range(30):
with self.cached_session() as sess:
self.generateTestData("basic", 1, 1)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=1,
buffer_size=100,
batch_size=1,
name="record_input")
yield_op = records.get_yield_op()
for _ in range(50):
self.evaluate(yield_op)
@test_util.run_deprecated_v1
def testEmptyGlob(self):
with self.cached_session() as sess:
record_input = data_flow_ops.RecordInput(file_pattern="foo")
yield_op = record_input.get_yield_op()
self.evaluate(variables.global_variables_initializer())
with self.assertRaises(NotFoundError):
self.evaluate(yield_op)
@test_util.run_deprecated_v1
def testBufferTooSmall(self):
files = 10
records_per_file = 10
batches = 2
with self.cached_session() as sess:
self.generateTestData("basic", files, records_per_file)
records = data_flow_ops.RecordInput(
file_pattern=os.path.join(self.get_temp_dir(), "basic.*"),
parallelism=2,
buffer_size=2000,
batch_size=1,
shift_ratio=0.33,
seed=10,
name="record_input",
batches=batches)
yield_op = records.get_yield_op()
# cycle over 3 epochs and make sure we never duplicate
for _ in range(3):
epoch_set = set()
for _ in range(int(files * records_per_file / batches)):
op_list = self.evaluate(yield_op)
          self.assertEqual(len(op_list), batches)
for r in op_list:
self.assertTrue(r[0] not in epoch_set)
epoch_set.add(r[0])
if __name__ == "__main__":
test.main()
| {
"content_hash": "66183f1f7518593d9a4db19cffce6647",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 80,
"avg_line_length": 32.06547619047619,
"alnum_prop": 0.6047893075923519,
"repo_name": "gautam1858/tensorflow",
"id": "8ab4e45bf09bcfa968e00d41be256d69b98cca5a",
"size": "6076",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/io_ops/record_input_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "47492"
},
{
"name": "C",
"bytes": "1129549"
},
{
"name": "C#",
"bytes": "13496"
},
{
"name": "C++",
"bytes": "116904214"
},
{
"name": "CMake",
"bytes": "165809"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "341994"
},
{
"name": "Go",
"bytes": "2052513"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1053827"
},
{
"name": "JavaScript",
"bytes": "5772"
},
{
"name": "Jupyter Notebook",
"bytes": "787371"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "9549263"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "180638"
},
{
"name": "Objective-C++",
"bytes": "295149"
},
{
"name": "Pawn",
"bytes": "5336"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "43775271"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "7854"
},
{
"name": "Shell",
"bytes": "566970"
},
{
"name": "Smarty",
"bytes": "89664"
},
{
"name": "SourcePawn",
"bytes": "8509"
},
{
"name": "Starlark",
"bytes": "6897556"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
"""Ops and optimizations for using BLAS calls
BLAS = Basic Linear Algebra Subroutines
Learn more about BLAS here:
http://www.netlib.org/blas/blast-forum/
The standard BLAS libraries implement what is called "legacy BLAS" in that
document.
This documentation describes Theano's BLAS optimization pipeline.
Where there is a discrepancy between how things do work and how they *should*
work, both aspects should be documented.
There are four kinds of BLAS Ops in Theano:
- Python implementations (this file)
- SciPy-based (blas_scipy)
- C-based (blas_c)
- CUDA-based (theano.sandbox.cuda.blas)
Notes
-----
Unfortunately (because it's confusing) this file currently contains Ops
that contain both Python and C versions. I think it would be better to
move the C implementations to blas_c so that this file is pure Python.
-JB
Ops
===
GEMM: Dot22, Dot22Scalar, GemmRelated, Gemm
-------------------------------------------
The BLAS GEMM operation implements Z <- a X Y + b Z,
where Z, X and Y are matrices, and a and b are scalars.
Dot22 is a GEMM where a=1, b=0, and Z is allocated every time.
Dot22Scalar is a GEMM where b=0 and Z is allocated every time.
Gemm is a GEMM in all its generality.
In the future we can refactor the GemmRelated, Gemm, Dot22 and
Dot22Scalar Ops into a single Op. That new Op (Gemm2) is basically a
normal Gemm, but with an additional configuration variable that says
to ignore the input Z. Setting that configuration variable to True
would make Gemm2 equivalent to the current Dot22 and Dot22Scalar.
This would make the file a lot easier to read, and save a few hundred
lines of library, to say nothing of testing and documentation.
GEMV: Gemv
----------
The BLAS GEMV operation implements Z <- a X Y + b Z,
where X is a matrix, Y and Z are vectors, and a and b are scalars.
GER: Ger
--------
The BLAS GER operation implements Z <- a X' Y + Z,
where X and Y are vectors, and matrix Z gets a rank-1 update.
Other Notable BLAS-related Ops
------------------------------
SYRK is another useful special case of GEMM. Particularly SYRK preserves
symmetry in the matrix that it updates. See how the linear-algebra module uses
symmetry hints before implementing this Op, so that this Op is compatible with
that system.
Optimizations
=============
The optimization pipeline works something like this:
1. identify dot22 from dot
2. identify gemm from dot22
3. identify dot22scalar from dot22 that are not gemm
4. specialize gemm to gemv where applicable
5. specialize gemm to ger where applicable
6. specialize dot22 -> gemv or ger where applicable
:note: GEMM is the most canonical BLAS signature that we deal with so far, it
would be good to turn most things into GEMM (dot, inner, outer, dot22,
dot22scalar), and then to specialize from gemm to the various other L2 and
L3 operations.
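To check which BLAS Ops the pipeline actually produced for a compiled
function ``f``, the optimized graph can be printed (a debugging aid only; the
exact output depends on the compilation mode)::
    theano.printing.debugprint(f)  # look for Gemm, Gemv, Ger, Dot22, Dot22Scalar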
Identify Dot22
--------------
Numpy's dot supports arguments that are of any rank, and we should support that
too (just for compatibility). The BLAS optimizations work with Dot Ops whose
inputs are each either vector or matrix. So the first part of the optimization
pipeline is to transform qualifying Dot Ops to Dot22 Ops. Dot22 Ops may be
transformed further, but they will get implemented by a BLAS call.
More precisely, Dot nodes whose inputs are each a vector or a matrix, whose
inputs share the same dtype, and whose dtype is float or complex, become
Dot22. This is implemented in `local_dot_to_dot22`.
Identify Gemm from Dot22
------------------------
This is complicated, done in GemmOptimizer.
Identify Dot22Scalar from Dot22
-------------------------------
Dot22 Ops that remain after the GemmOptimizer is done have not
qualified as GEMM Ops. Still they might be scaled by a factor, in
which case we use Dot22Scalar which is like Gemm, but without the b
and the Z. In the future it would be good to merge this into the
GemmOptimizer.
Specialize Gemm to Gemv
-----------------------
If arguments to GEMM are dimshuffled vectors, then we can use GEMV
instead. This optimization is `local_gemm_to_gemv`.
"""
from __future__ import print_function
import copy
import logging
import os
import time
import numpy
import numpy.distutils
try:
import numpy.distutils.__config__
except ImportError:
pass
from six import iteritems
from six.moves import reduce, xrange
from theano import config
from theano.gof import (utils, Op, view_roots,
local_optimizer, Optimizer,
InconsistencyError, toolbox, SequenceDB,
EquilibriumOptimizer, Apply,
ReplacementDidntRemovedError)
from theano.printing import pprint, FunctionPrinter, debugprint
from theano.compile.mode import optdb
import theano.scalar
from theano.tensor import basic as T
from theano.tensor.blas_headers import blas_header_text
from theano.tensor.blas_headers import blas_header_version
from theano.tensor.opt import in2out, local_dimshuffle_lift
_logger = logging.getLogger('theano.tensor.blas')
try:
import scipy.linalg.blas
have_fblas = True
try:
fblas = scipy.linalg.blas.fblas
except AttributeError:
# A change merged in Scipy development version on 2012-12-02 replaced
# `scipy.linalg.blas.fblas` with `scipy.linalg.blas`.
# See http://github.com/scipy/scipy/pull/358
fblas = scipy.linalg.blas
_blas_gemv_fns = {numpy.dtype('float32'): fblas.sgemv,
numpy.dtype('float64'): fblas.dgemv,
numpy.dtype('complex64'): fblas.cgemv,
numpy.dtype('complex128'): fblas.zgemv}
except ImportError as e:
have_fblas = False
# This is used in Gemv and ScipyGer. We use CGemv and CGer
# when theano.config.blas.ldflags is defined. So we don't need a
# warning in that case.
if not config.blas.ldflags:
_logger.warning('Failed to import scipy.linalg.blas, and '
'Theano flag blas.ldflags is empty. '
'Falling back on slower implementations for '
'dot(matrix, vector), dot(vector, matrix) and '
'dot(vector, vector) (%s)',
str(e))
# If check_init_y() == True we need to initialize y when beta == 0.
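# The probe below runs gemv with beta == 0 on a NaN-filled y: if NaN leaks
# into the result, the BLAS implementation reads y even when beta == 0, so y
# must be zero-filled before calling it.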
def check_init_y():
if check_init_y._result is None:
if not have_fblas:
check_init_y._result = False
y = float('NaN') * numpy.ones((2,))
x = numpy.ones((2,))
A = numpy.ones((2, 2))
gemv = _blas_gemv_fns[y.dtype]
gemv(1.0, A.T, x, 0.0, y, overwrite_y=True, trans=True)
check_init_y._result = numpy.isnan(y).any()
return check_init_y._result
check_init_y._result = None
class Gemv(Op):
"""
expression is beta * y + alpha * A x
A is matrix
x, y are vectors
alpha, beta are scalars
output is a vector that can be inplace on y
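    A NumPy sketch of the computation (this mirrors the Python ``perform``
    fallback, not the BLAS code path)::
        out = beta * y + alpha * numpy.dot(A, x)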
"""
__props__ = ("inplace",)
def __init__(self, inplace):
self.inplace = inplace
if inplace:
self.destroy_map = {0: [0]}
def __str__(self):
if self.inplace:
return '%s{inplace}' % self.__class__.__name__
else:
return '%s{no_inplace}' % self.__class__.__name__
def make_node(self, y, alpha, A, x, beta):
y = T.as_tensor_variable(y)
x = T.as_tensor_variable(x)
A = T.as_tensor_variable(A)
alpha = T.as_tensor_variable(alpha)
beta = T.as_tensor_variable(beta)
if y.dtype != A.dtype or y.dtype != x.dtype:
raise TypeError('Gemv requires matching dtypes',
(y.dtype, A.dtype, x.dtype))
if A.ndim != 2:
raise TypeError('gemv requires matrix for A', A.type)
if x.ndim != 1:
raise TypeError('gemv requires vector for x', x.type)
if y.ndim != 1:
raise TypeError('gemv requires vector for y', y.type)
return Apply(self, [y, alpha, A, x, beta], [y.type()])
def perform(self, node, inputs, out_storage):
y, alpha, A, x, beta = inputs
if (have_fblas and y.shape[0] != 0 and x.shape[0] != 0 and
y.dtype in _blas_gemv_fns):
gemv = _blas_gemv_fns[y.dtype]
if (A.shape[0] != y.shape[0] or A.shape[1] != x.shape[0]):
raise ValueError(
'Incompatible shapes for gemv '
'(beta * y + alpha * dot(A, x)). y: %s, A: %s, x: %s '
% (y.shape, A.shape, x.shape))
if beta == 0 and check_init_y():
y.fill(0)
            # Here I assume that A is in C order. If we don't make it
            # explicitly Fortran-ordered, scipy 0.7.2 seems to create
            # a copy in Fortran order instead of just reshaping it
            # and using the trans flag.
            # If A is already in Fortran order, treating it as C order and
            # using the trans flag doesn't seem to cause a slowdown.
# out_storage[0][0] = gemv(alpha, A, x, beta, y,
# overwrite_y=self.inplace)
out_storage[0][0] = gemv(alpha, A.T, x, beta, y,
overwrite_y=self.inplace, trans=True)
else:
out = numpy.dot(A, x)
if alpha != 1:
out *= alpha
if beta != 0:
if beta != 1:
out += beta * y
else:
out += y
out_storage[0][0] = numpy.asarray(out, dtype=y.dtype)
def infer_shape(self, node, input_shapes):
return [input_shapes[0]]
gemv_no_inplace = Gemv(inplace=False)
gemv_inplace = Gemv(inplace=True)
# For the user interface. Opt will make them inplace later
gemv = gemv_no_inplace
class Ger(Op):
"""
BLAS defines general rank-1 update GER as A <- A + alpha x y'
for matrix A, scalar alpha, vectors x and y.
This interface to GER allows non-destructive operation on A via the
`destructive` argument to the constructor.
:TODO: Create better classes ScipyGer and CGer that inherit from this class
and override the make_thunk() method to use Scipy and C respectively.
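    A NumPy sketch of the update (mirroring the Python ``perform`` fallback)::
        A_out = A + alpha * numpy.outer(x, y)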
"""
__props__ = ("destructive",)
def __init__(self, destructive):
self.destructive = destructive
if destructive:
self.destroy_map = {0: [0]}
def __str__(self):
if self.destructive:
return '%s{destructive}' % self.__class__.__name__
else:
return '%s{non-destructive}' % self.__class__.__name__
def make_node(self, A, alpha, x, y):
A = T.as_tensor_variable(A)
y = T.as_tensor_variable(y)
x = T.as_tensor_variable(x)
alpha = T.as_tensor_variable(alpha)
if len(set([A.dtype, alpha.dtype, x.dtype, y.dtype])) != 1:
raise TypeError('ger requires matching dtypes',
(A.dtype, alpha.dtype, x.dtype, y.dtype))
if alpha.ndim != 0:
raise TypeError('ger requires scalar alpha', alpha.type)
if A.ndim != 2:
raise TypeError('ger requires matrix for A', A.type)
if x.ndim != 1:
raise TypeError('ger requires vector for x', x.type)
if y.ndim != 1:
raise TypeError('ger requires vector for y', y.type)
if x.dtype not in ('float32', 'float64', 'complex64', 'complex128'):
raise TypeError('only float and complex types supported', x.dtype)
return Apply(self, [A, alpha, x, y], [A.type()])
def perform(self, node, inp, out):
cA, calpha, cx, cy = inp
cZ, = out
if self.destructive:
A = cA
else:
A = cA.copy()
if calpha != 1:
A += calpha * numpy.outer(cx, cy)
else:
A += numpy.outer(cx, cy)
cZ[0] = A
def infer_shape(self, node, input_shapes):
return [input_shapes[0]]
ger = Ger(destructive=False)
ger_destructive = Ger(destructive=True)
def ldflags(libs=True, flags=False, libs_dir=False, include_dir=False):
"""Extract a list of compilation flags from config.blas.ldflags.
Depending on the options, different type of flags will be kept.
It returns a list of libraries against which an Op's object file
should be linked to benefit from a BLAS implementation.
Parameters
----------
libs : bool, optional
Extract flags starting with "-l" (the default is True).
libs_dir : bool, optional
Extract flags starting with "-L" (the default is False).
include_dir : bool, optional
Extract flags starting with "-I" (the default is False).
flags: bool, optional
Extract all the other flags (the default is False).
Returns
-------
list of strings
Extracted flags.
"""
ldflags_str = theano.config.blas.ldflags
return _ldflags(ldflags_str=ldflags_str,
libs=libs,
flags=flags,
libs_dir=libs_dir,
include_dir=include_dir)
@utils.memoize
def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir):
"""Extract list of compilation flags from a string.
Depending on the options, different type of flags will be kept.
Parameters
----------
ldflags_str : string
The string to process. Typically, this will be the content of
`theano.config.blas.ldflags`.
libs : bool
Extract flags starting with "-l".
flags: bool
Extract all the other flags.
libs_dir: bool
Extract flags starting with "-L".
include_dir: bool
Extract flags starting with "-I".
Returns
-------
list of strings
Extracted flags.
"""
rval = []
if libs_dir:
found_dyn = False
dirs = [x[2:] for x in ldflags_str.split()
if x.startswith('-L')]
l = _ldflags(ldflags_str=ldflags_str, libs=True,
flags=False, libs_dir=False, include_dir=False)
for d in dirs:
for f in os.listdir(d):
if (f.endswith('.so') or f.endswith('.dylib') or
f.endswith('.dll')):
if any([f.find(ll) >= 0 for ll in l]):
found_dyn = True
if not found_dyn and dirs:
_logger.warning(
"We did not found a dynamic library into the "
"library_dir of the library we use for blas. If you use "
"ATLAS, make sure to compile it with dynamics library.")
for t in ldflags_str.split():
# Remove extra quote.
if t.startswith("'") or t.startswith('"'):
t = t[1:]
if t.endswith("'") or t.endswith('"'):
t = t[:-1]
try:
t0, t1, t2 = t[0:3]
assert t0 == '-'
except Exception:
raise ValueError('invalid token "%s" in ldflags_str: "%s"'
% (t, ldflags_str))
if libs_dir and t1 == 'L':
rval.append(t[2:])
elif include_dir and t1 == 'I':
raise ValueError('Include dirs are not used for blas. We disable'
' this as this can hide other headers and this'
' is not wanted.', t)
rval.append(t[2:])
elif libs and t1 == 'l': # example -lmkl
rval.append(t[2:])
elif flags and t1 not in ['L', 'I', 'l']: # example -openmp
rval.append(t)
elif flags and t1 == 'L':
            # to find it when we load the compiled op if the env of the
            # user is not well configured.
rval.append('-Wl,-rpath,' + t[2:])
return rval
class GemmRelated(Op):
"""Base class for Gemm and Dot22.
This class provides a kind of templated gemm Op.
"""
__props__ = ()
def c_support_code(self):
# return cblas_header_text()
mod_str = """
#ifndef MOD
#define MOD %
#endif
static double time_time() // a time function like time.time()
{
struct timeval tv;
gettimeofday(&tv, 0);
return (double) tv.tv_sec + (double) tv.tv_usec / 1000000.0;
}
"""
return blas_header_text() + mod_str
def c_headers(self):
# std.cout doesn't require the '%' symbol to print stuff...
# so it works much better with python's string-substitution stuff.
return ['<iostream>', '<time.h>', '<sys/time.h>']
def c_libraries(self):
return ldflags()
# code_cache_version is built by subclasses from
# build_gemm_version
def c_compile_args(self):
return ldflags(libs=False, flags=True)
def c_lib_dirs(self):
return ldflags(libs=False, libs_dir=True)
def c_header_dirs(self):
return ldflags(libs=False, include_dir=True)
declare_NS = """
int unit = 0;
int type_num = PyArray_DESCR(%(_x)s)->type_num;
int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes
npy_intp* Nx = PyArray_DIMS(%(_x)s);
npy_intp* Ny = PyArray_DIMS(%(_y)s);
npy_intp* Nz = 0; //PyArray_DIMS(%(_zout)s);
npy_intp* Sx = PyArray_STRIDES(%(_x)s);
npy_intp* Sy = PyArray_STRIDES(%(_y)s);
npy_intp* Sz = 0; //PyArray_STRIDES(%(_zout)s);
//strides for x, y, z in dimensions 0, 1
int sx_0, sx_1, sy_0, sy_1, sz_0, sz_1;
"""
# setup_z_Nz_Sz = None
check_xyz_rank2 = """
if (PyArray_NDIM(%(_x)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(x) != 2. rank(x) is %%d.",
PyArray_NDIM(%(_x)s));
%(fail)s;
}
if (PyArray_NDIM(%(_y)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(y) != 2. rank(y) is %%d.", PyArray_NDIM(%(_y)s));
%(fail)s;
}
if (%(_zout)s && PyArray_NDIM(%(_zout)s) != 2) {
PyErr_Format(PyExc_NotImplementedError,
"rank(z) != 2. rank(z) is %%d.", PyArray_NDIM(%(_zout)s));
%(fail)s;
}
"""
check_xyz_double_or_float = """
if ((PyArray_DESCR(%(_x)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_x)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(x) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_y)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_y)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(y) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_zout)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_zout)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(z) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_y)s)->type_num)
||(PyArray_DESCR(%(_x)s)->type_num != PyArray_DESCR(%(_zout)s)->type_num))
{ PyErr_SetString(PyExc_NotImplementedError, "type(x), type(y), type(z) are not all the same"); %(fail)s; }
"""
# it is not necessary that a or b have the same type as x,y,z
check_ab_double_or_float = """
if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(a) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(%(_b)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_b)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(b) is not double or float"); %(fail)s;}
"""
check_dims = """
if (Nx[0] != Nz[0])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: x has %%ld rows but z has %%ld rows",
(long int)Nx[0], (long int)Nz[0]);
%(fail)s;
}
if (Nx[1] != Ny[0])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: x has %%ld cols (and %%ld rows) but y has %%ld rows (and %%ld cols)",
(long int)Nx[1], (long int)Nx[0], (long int)Ny[0], (long int)Ny[1]);
%(fail)s;
}
if (Ny[1] != Nz[1])
{
PyErr_Format(PyExc_ValueError,
"Shape mismatch: y has %%ld cols but z has %%ld cols",
(long int)Ny[1], (long int)Nz[1]);
%(fail)s;
}
// We must not raise an error when Nx[1] == 0. This would disable cases
        // that numpy.dot accepts.
"""
check_strides = """
/*
If some matrices are not contiguous on either dimensions,
or have invalid strides, copy their content into a contiguous one
*/
if ((Sx[0] < 1) || (Sx[1] < 1) || (Sx[0] MOD type_size) || (Sx[1] MOD type_size)
|| ((Sx[0] != type_size) && (Sx[1] != type_size)))
{
PyArrayObject * _x_copy = (PyArrayObject *) PyArray_Copy(%(_x)s);
if (!_x_copy)
%(fail)s
Py_XDECREF(%(_x)s);
%(_x)s = _x_copy;
Sx = PyArray_STRIDES(%(_x)s);
}
if ((Sy[0] < 1) || (Sy[1] < 1) || (Sy[0] MOD type_size) || (Sy[1] MOD type_size)
|| ((Sy[0] != type_size) && (Sy[1] != type_size)))
{
PyArrayObject * _y_copy = (PyArrayObject *) PyArray_Copy(%(_y)s);
if (!_y_copy)
%(fail)s
Py_XDECREF(%(_y)s);
%(_y)s = _y_copy;
Sy = PyArray_STRIDES(%(_y)s);
}
if ((Sz[0] < 1) || (Sz[1] < 1) || (Sz[0] MOD type_size) || (Sz[1] MOD type_size)
|| ((Sz[0] != type_size) && (Sz[1] != type_size)))
{
PyArrayObject * _z_copy = (PyArrayObject *) PyArray_Copy(%(_zout)s);
if (!_z_copy)
%(fail)s
Py_XDECREF(%(_zout)s);
%(_zout)s = _z_copy;
Sz = PyArray_STRIDES(%(_zout)s);
}
"""
encode_strides_in_unit = """
/*
encode the stride structure of _x,_y,_zout into a single integer
*/
unit |= ((Sx[1] == type_size || Nx[1]==1) ? 0x0 : (Sx[0] == type_size || Nx[0]==1) ? 0x1 : 0x2) << 8;
unit |= ((Sy[1] == type_size || Ny[1]==1) ? 0x0 : (Sy[0] == type_size || Ny[0]==1) ? 0x1 : 0x2) << 4;
unit |= ((Sz[1] == type_size || Nz[1]==1) ? 0x0 : (Sz[0] == type_size || Nz[0]==1) ? 0x1 : 0x2) << 0;
"""
compute_strides = """
/* create appropriate strides for malformed matrices that are row or column
* vectors, or empty matrices.
* In that case, the value of the stride does not really matter, but
* some versions of BLAS insist that:
* - they are not smaller than the number of elements in the array,
* - they are not 0.
*/
sx_0 = (Nx[0] > 1) ? Sx[0]/type_size : (Nx[1] + 1);
sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[0] + 1);
sy_0 = (Ny[0] > 1) ? Sy[0]/type_size : (Ny[1] + 1);
sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[0] + 1);
sz_0 = (Nz[0] > 1) ? Sz[0]/type_size : (Nz[1] + 1);
sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[0] + 1);
"""
begin_switch_typenum = """
switch (type_num)
{
"""
case_float = """
case NPY_FLOAT:
{
"""
# case_float_ab_constants = None
case_float_gemm = """
float* x = (float*)PyArray_DATA(%(_x)s);
float* y = (float*)PyArray_DATA(%(_y)s);
float* z = (float*)PyArray_DATA(%(_zout)s);
char N = 'N';
char T = 'T';
int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];
//std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< '\\n';
//double t0 = time_time();
switch(unit)
{
case 0x000: sgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_0, &b, z, &sz_0); break;
case 0x100: sgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_0, x, &sx_1, &b, z, &sz_0); break;
case 0x010: sgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_0, &b, z, &sz_0); break;
case 0x110: sgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y, &sy_1, x, &sx_1, &b, z, &sz_0); break;
case 0x001: sgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_0, &b, z, &sz_1); break;
case 0x101: sgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_0, &b, z, &sz_1); break;
case 0x011: sgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_0, y, &sy_1, &b, z, &sz_1); break;
case 0x111: sgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x, &sx_1, y, &sy_1, &b, z, &sz_1); break;
default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); %(fail)s;
};
//fprintf(stderr, "Calling sgemm %%i %%i %%i %%i took %%f\\n", unit, Nz1, Nz0, Nx1, time_time() - t0);
"""
case_double = """
}
break;
case NPY_DOUBLE:
{
"""
# case_double_ab_constants = None
case_double_gemm = """
double* x = (double*)PyArray_DATA(%(_x)s);
double* y = (double*)PyArray_DATA(%(_y)s);
double* z = (double*)PyArray_DATA(%(_zout)s);
char N = 'N';
char T = 'T';
int Nz0 = Nz[0], Nz1 = Nz[1], Nx1 = Nx[1];
//std::cerr << (unit/256) MOD 16 << (unit / 16) MOD 16 << unit MOD 16<< '\\n';
//double t0 = time_time();
//fprintf(stderr, "unit=%%x N= %%i %%i %%i S = %%i %%i %%i %%i %%i %%i\\n", unit,
//Nz1, Nz0, Nx1,
//sy_0, sy_1,
//sx_0, sx_1,
//sz_0, sz_1
//);
switch(unit)
{
case 0x000: dgemm_(&N, &N, &Nz1, &Nz0, &Nx1, &a, y,
&sy_0, x, &sx_0, &b, z, &sz_0); break;
case 0x100: dgemm_(&N, &T, &Nz1, &Nz0, &Nx1, &a, y,
&sy_0, x, &sx_1, &b, z, &sz_0); break;
case 0x010: dgemm_(&T, &N, &Nz1, &Nz0, &Nx1, &a, y,
&sy_1, x, &sx_0, &b, z, &sz_0); break;
case 0x110: dgemm_(&T, &T, &Nz1, &Nz0, &Nx1, &a, y,
&sy_1, x, &sx_1, &b, z, &sz_0); break;
case 0x001: dgemm_(&T, &T, &Nz0, &Nz1, &Nx1, &a, x,
&sx_0, y, &sy_0, &b, z, &sz_1); break;
case 0x101: dgemm_(&N, &T, &Nz0, &Nz1, &Nx1, &a, x,
&sx_1, y, &sy_0, &b, z, &sz_1); break;
case 0x011: dgemm_(&T, &N, &Nz0, &Nz1, &Nx1, &a, x,
&sx_0, y, &sy_1, &b, z, &sz_1); break;
case 0x111: dgemm_(&N, &N, &Nz0, &Nz1, &Nx1, &a, x,
&sx_1, y, &sy_1, &b, z, &sz_1); break;
default: PyErr_SetString(PyExc_ValueError,
"some matrix has no unit stride");
%(fail)s;
};
//fprintf(stderr, "Calling dgemm %%i %%i %%i %%i took %%f\\n",
// unit, Nz1, Nz0, Nx1, time_time()- t0);
"""
end_switch_typenum = """
}
break;
}
"""
def build_gemm_call(self):
return reduce(str.__add__, (
self.declare_NS,
self.check_xyz_rank2,
self.setup_z_Nz_Sz,
self.check_xyz_double_or_float,
self.check_ab_double_or_float,
self.check_dims,
self.check_strides,
self.encode_strides_in_unit,
self.compute_strides,
self.begin_switch_typenum,
self.case_float,
self.case_float_ab_constants,
self.case_float_gemm,
self.case_double,
self.case_double_ab_constants,
self.case_double_gemm,
self.end_switch_typenum), '')
def build_gemm_version(self):
return (13, blas_header_version())
class Gemm(GemmRelated):
"""In-place version of matrix-matrix multiplication (with accumulation).
When a and b are scalars and x, y, and z are matrices, then
gemm(z,a,x,y,b)
is similar to
b*z + a*dot(x,y)
The difference between the two is that the top form is destructive
on z, whereas the bottom form is not. Gemm works in-place on the
storage associated with z, and the L{Variable} returned by Gemm
has a storage that will be aliased to the storage of the z
argument. Because of this in-place computation, an L{Apply} of
this op will destroy the L{Variable} z on which it operates. (See
L{DestructiveOps} for an explanation of what destroying means in
the context of theano graphs. See L{BlasLapackSupport} for more
optimized linear algebra operations.)
"""
E_rank = 'gemm only works for rank 2'
E_scalar = 'gemm requires scalar argument'
E_z_uniq = 'argument z aliased to x or y' # TODO: justify / delete this
E_mixed = 'gemm requires matching dtypes'
E_float = 'gemm requires floating-point dtypes'
__props__ = ('inplace',)
def __init__(self, inplace):
self.inplace = inplace
if self.inplace:
self.destroy_map = {0: [0]}
self.setup_z_Nz_Sz = self.setup_z_Nz_Sz_inplace
else:
self.setup_z_Nz_Sz = self.setup_z_Nz_Sz_outplace
def __str__(self):
if self.inplace:
inplace_str = 'inplace'
else:
inplace_str = 'no_inplace'
return '%s{%s}' % (self.__class__.__name__, inplace_str)
def __setstate__(self, dct):
self.__dict__.update(dct)
if self.inplace:
self.setup_z_Nz_Sz = self.setup_z_Nz_Sz_inplace
else:
self.setup_z_Nz_Sz = self.setup_z_Nz_Sz_outplace
# Correctly reload older pickles where _op_use_c_code and
# destroy_map were not saved
if '_op_use_c_code' not in self.__dict__:
self._op_use_c_code = theano.config.cxx
if 'destroy_map' not in self.__dict__ and self.inplace:
self.destroy_map = {0: [0]}
def __getstate__(self):
rval = self.__dict__.copy()
# Do not serialize the setup code, it will be restored in __setstate__
# depending on the value of 'inplace'
rval.pop('setup_z_Nz_Sz')
return rval
def make_node(self, *inputs):
inputs = list(map(T.as_tensor_variable, inputs))
if len(inputs) != 5:
raise TypeError(
"Wrong number of inputs for %s (expected 5, got %s)" %
(self, len(inputs)))
z, a, x, y, b = inputs
# For the consistency check we don't want z to be a cached constant.
if getattr(z, 'cached', False):
z = copy.copy(z)
zr, xr, yr = [set(view_roots(i)) for i in (z, x, y)]
        # We want the gemm to be inplace. When this op is inplace, it
        # declares itself inplace only on z. So to make it safe, we
        # raise an error if z can be a view on x or y.
        # I don't know if Theano can currently support that case. As
        # this case doesn't happen in our code, I won't spend time
        # investigating it, so the check is there for safety. There may
        # also be another mechanism that would prevent this, but I don't
        # want to modify old code and risk breaking something.
if zr.intersection(xr):
raise InconsistencyError(Gemm.E_z_uniq, (z, x))
if zr.intersection(yr):
raise InconsistencyError(Gemm.E_z_uniq, (z, y))
if z.ndim != 2:
raise TypeError(Gemm.E_rank, z)
if a.ndim != 0:
raise TypeError(Gemm.E_scalar, a)
if x.ndim != 2:
raise TypeError(Gemm.E_rank, x)
if y.ndim != 2:
raise TypeError(Gemm.E_rank, y)
if b.ndim != 0:
raise TypeError(Gemm.E_scalar, b)
if not (z.dtype == a.dtype == x.dtype == y.dtype == b.dtype):
raise TypeError(Gemm.E_mixed,
(z.dtype, a.dtype, x.dtype, y.dtype, b.dtype))
if (not z.dtype.startswith('float') and
not z.dtype.startswith('complex')):
raise TypeError(Gemm.E_float, (z.dtype))
output = z.type()
return Apply(self, inputs, [output])
def perform(self, node, inp, out):
z, a, x, y, b = inp
zout, = out
assert a.shape == ()
assert b.shape == ()
if not self.inplace:
z = z.copy() # the original z will not be changed
if z.shape == ():
z.itemset(z * a + b * numpy.dot(x, y))
zout[0] = z
else:
if b == 0.0:
if a == 1.0:
z[:] = numpy.dot(x, y)
elif a == -1.0:
z[:] = -numpy.dot(x, y)
else:
z[:] = a * numpy.dot(x, y)
elif b == 1.0:
if a == 1.0:
z += numpy.dot(x, y)
elif a == -1.0:
z -= numpy.dot(x, y)
else:
z += a * numpy.dot(x, y)
else:
z *= b
z += a * numpy.dot(x, y)
zout[0] = z
def infer_shape(self, node, input_shapes):
return [input_shapes[0]]
setup_z_Nz_Sz_inplace = """
if (%(_zout)s != %(_z)s)
{
if (%(_zout)s)
{
Py_DECREF(%(_zout)s);
}
%(_zout)s = %(_z)s;
Py_INCREF(%(_zout)s);
}
Nz = PyArray_DIMS(%(_z)s);
Sz = PyArray_STRIDES(%(_z)s);
"""
setup_z_Nz_Sz_outplace = """
if ((NULL == %(_zout)s)
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_z)s)[0])
|| (PyArray_DIMS(%(_zout)s)[1] != PyArray_DIMS(%(_z)s)[1])
|| (PyArray_STRIDES(%(_zout)s)[0] <= 0)
|| (PyArray_STRIDES(%(_zout)s)[1] <= 0)
|| (PyArray_STRIDES(%(_zout)s)[0] MOD type_size)
|| (PyArray_STRIDES(%(_zout)s)[1] MOD type_size)
|| ((PyArray_STRIDES(%(_zout)s)[0] != type_size)
&& (PyArray_STRIDES(%(_zout)s)[1] != type_size)))
{
Py_XDECREF(%(_zout)s);
npy_intp dims[2];
dims[0] = PyArray_DIMS(%(_z)s)[0];
dims[1] = PyArray_DIMS(%(_z)s)[1];
%(_zout)s = (PyArrayObject*)PyArray_SimpleNew(2, dims,
PyArray_TYPE(%(_z)s));
//fprintf(stderr, "Gemm Allocating %%i %%i\\n", dims[0], dims[1]);
if(!%(_zout)s) {
PyErr_SetString(PyExc_MemoryError,
"failed to alloc gemm_no_inplace output");
%(fail)s
}
}
Nz = PyArray_DIMS(%(_zout)s);
Sz = PyArray_STRIDES(%(_zout)s);
if (PyArray_DESCR(%(_zout)s)->type_num == NPY_FLOAT)
{
float * zoutdata = (float*)PyArray_DATA(%(_zout)s);
int zoi = Sz[0] / sizeof(float);
int zoj = Sz[1] / sizeof(float);
const float * zdata = (float*)PyArray_DATA(%(_z)s);
int zi = PyArray_STRIDES(%(_z)s)[0]/sizeof(float);
int zj = PyArray_STRIDES(%(_z)s)[1]/sizeof(float);
for (int i = 0; i < Nz[0]; ++i)
{
for (int j = 0; j < Nz[1]; ++j)
{
zoutdata[zoi*i + zoj*j] = zdata[zi*i + zj*j];
}
}
}
else if (PyArray_DESCR(%(_zout)s)->type_num == NPY_DOUBLE)
{
double * zoutdata = (double*) PyArray_DATA(%(_zout)s);
int zoi = Sz[0] / sizeof(double);
int zoj = Sz[1] / sizeof(double);
const double * zdata = (double*)PyArray_DATA(%(_z)s);
int zi = PyArray_STRIDES(%(_z)s)[0]/sizeof(double);
int zj = PyArray_STRIDES(%(_z)s)[1]/sizeof(double);
for (int i = 0; i < Nz[0]; ++i)
{
for (int j = 0; j < Nz[1]; ++j)
{
zoutdata[zoi*i + zoj*j] = zdata[zi*i + zj*j];
}
}
}
else
{
PyErr_SetString(PyExc_AssertionError,
"neither float nor double dtype");
%(fail)s
}
"""
case_float_ab_constants = """
#define REAL float
float a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
float b = (PyArray_DESCR(%(_b)s)->type_num == NPY_FLOAT) ?
(REAL)(((float*)PyArray_DATA(%(_b)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_b)s))[0]);
#undef REAL
"""
case_double_ab_constants = """
#define REAL double
double a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
double b = (PyArray_DESCR(%(_b)s)->type_num == NPY_FLOAT) ?
(REAL)(((float*)PyArray_DATA(%(_b)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_b)s))[0]);
#undef REAL
"""
def c_code(self, node, name, inp, out, sub):
_z, _a, _x, _y, _b = inp
_zout, = out
if node.inputs[0].type.dtype.startswith('complex'):
raise utils.MethodNotDefined('%s.c_code'
% self.__class__.__name__)
if not config.blas.ldflags:
return super(Gemm, self).c_code(node, name,
(_z, _a, _x, _y, _b), (_zout, ),
sub)
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
def c_code_cache_version(self):
gv = self.build_gemm_version()
if gv:
return (5,) + gv
else:
return gv
gemm_inplace = Gemm(inplace=True)
gemm_no_inplace = Gemm(inplace=False)
# For the user interface. Theano optimization will make them inplace
gemm = gemm_no_inplace
pprint.assign(gemm_inplace, FunctionPrinter('gemm_inplace'))
pprint.assign(gemm_no_inplace, FunctionPrinter('gemm_no_inplace'))
def res_is_a(node, op, maxclients=None):
if maxclients is not None:
retval = (len(node.clients) <= maxclients)
else:
retval = True
return (node.owner and
node.owner.op == op and
retval)
def _as_scalar(res, dtype=None):
"""Return None or a TensorVariable whose type is in T.float_scalar_types"""
if dtype is None:
dtype = config.floatX
if numpy.all(res.type.broadcastable):
while res.owner and isinstance(res.owner.op, T.DimShuffle):
res = res.owner.inputs[0]
# may still have some number of True's
if res.type.broadcastable:
rval = res.dimshuffle()
else:
rval = res
if rval.type.dtype[:3] in ('int', 'uin'):
# We check that the upcast of res and dtype won't change dtype.
# If dtype is float64, we will cast int64 to float64.
# This is valid when res is a scalar used as input to a dot22
# as the cast of the scalar can be done before or after the dot22
# and this will give the same result.
if theano.scalar.upcast(res.dtype, dtype) == dtype:
return T.cast(rval, dtype)
else:
return None
return rval
def _is_real_matrix(res):
return (res.type.dtype in ('float32', 'float64') and
res.type.ndim == 2 and
res.type.broadcastable[0] is False and
res.type.broadcastable[1] is False) # cope with tuple vs. list
def _is_real_vector(res):
return (res.type.dtype in ('float32', 'float64') and
res.type.ndim == 1 and
res.type.broadcastable[0] is False)
def _beta_L_plus_alpha_M(beta, L, alpha, M, recurse_flip=True):
# print 'BETA L + ALPHA M', beta, L, alpha, M, recurse_flip
# EXPRESSION: (beta * L) + (alpha * M)
# we've already checked the client counts, now just make the type check.
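    # Sketch of the main rewrite below: when M is _dot22(Ml, Mr), the
    # expression beta * L + alpha * M becomes a single
    # gemm_no_inplace(L, alpha, Ml, Mr, beta).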
# if res_is_a(M, _dot22, 1):
if M.owner and M.owner.op == _dot22:
Ml, Mr = M.owner.inputs
rval = [gemm_no_inplace(L, alpha, Ml, Mr, beta)]
# print 'GEMM 0', rval, beta, L, alpha, M
return rval, M
# it also might be the case that there is a dimshuffle between the +
# and the dot22. local_dot_to_dot22 in particular will put in such things.
if (M.owner and isinstance(M.owner.op, T.DimShuffle) and
M.owner.inputs[0].owner and
isinstance(M.owner.inputs[0].owner.op, Dot22)):
MM = M.owner.inputs[0]
if M.owner.op.new_order == (0,):
# it is making a column MM into a vector
MMl, MMr = MM.owner.inputs
g = gemm_no_inplace(L.dimshuffle(0, 'x'),
alpha, MMl, MMr, beta)
rval = [g.dimshuffle(0)]
return rval, MM
if M.owner.op.new_order == (1,):
# it is making a row MM into a vector
MMl, MMr = MM.owner.inputs
g = gemm_no_inplace(L.dimshuffle('x', 0),
alpha, MMl, MMr, beta)
rval = [g.dimshuffle(1)]
return rval, MM
if len(M.owner.op.new_order) == 0:
            # it is making a 1x1 matrix MM into a scalar
MMl, MMr = MM.owner.inputs
g = gemm_no_inplace(L.dimshuffle('x', 'x'),
alpha, MMl, MMr, beta)
rval = [g.dimshuffle()]
return rval, MM
# this is False'd out because of inadequate testing.
# TODO see ticket #237
if False and res_is_a(M, gemm_no_inplace, 1):
# EXPRESSION: (beta * L) + (alpha * (gemm_no_inplace(G, a, u, v, b)))
# EXPRESSION: (beta * L) + alpha * (b * G) + alpha * a * dot(u, v)
G, a, u, v, b = M.owner.inputs
# print 'GEMM', G, L
if res_is_a(G, _dot22, 1):
# EXPRESSION: (beta * L) +
# (alpha * (gemm_no_inplace(dot(x,y), a, u, v, b)))
x, y = G.owner.inputs
# EXPRESSION: (beta * L) + (alpha * ((b*dot(x,y) +
# (a * dot(u, v)))))
# EXPRESSION: (beta * L) + (alpha*b*dot(x,y)) +
# (alpha * a * dot(u, v))
rval = [gemm_no_inplace(gemm_no_inplace(L, alpha * b, x, y, beta),
alpha * a, u, v, 1.0)]
return rval
if (G is L):
# EXPRESSION: (beta * L) + (alpha*b*L) + (alpha * a * dot(u, v))
rval = [gemm_no_inplace(L, alpha * a, u, v, alpha * b + beta)]
return rval
if (1.0 != alpha):
# at the very least, move the alpha inside the gemm_no_inplace
rval = [beta * L + gemm_no_inplace(G, alpha * a, u, v, alpha * b)]
return rval
if recurse_flip:
return _beta_L_plus_alpha_M(alpha, M, beta, L, recurse_flip=False)
else:
return False, False
def _gemm_canonicalize(r, scale, rval, maxclients):
# Tries to interpret node as a sum of scalars * (vectors or matrices)
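    # Illustrative sketch (a, b, c are hypothetical float matrices):
    # canonicalizing r = 2 * T.dot(a, b) - c with scale=1 is expected to
    # append the pairs (2, dot(a, b)) and (-1, c) to rval.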
def scaled(thing):
if scale == 1:
return thing
if scale == -1:
return -thing
else:
return scale * thing
try:
r.type.broadcastable
except Exception:
return None
if ((r.type.ndim not in (1, 2)) or
r.type.dtype not in ('float32', 'float64',
'complex64', 'complex128')):
rval.append(scaled(r))
return rval
if maxclients and len(getattr(r, 'clients', [])) > maxclients:
rval.append((scale, r))
return rval
if r.owner and r.owner.op == T.sub:
_gemm_canonicalize(r.owner.inputs[0], scale, rval, 1)
_gemm_canonicalize(r.owner.inputs[1], -scale, rval, 1)
elif r.owner and r.owner.op == T.add:
for i in r.owner.inputs:
_gemm_canonicalize(i, scale, rval, 1)
elif r.owner and r.owner.op == T.neg:
_gemm_canonicalize(r.owner.inputs[0], -scale, rval, 1)
elif r.owner and r.owner.op == T.mul:
scalars = []
vectors = []
matrices = []
for i in r.owner.inputs:
if numpy.all(i.type.broadcastable):
while i.owner and isinstance(i.owner.op, T.DimShuffle):
i = i.owner.inputs[0]
if i.type.broadcastable:
scalars.append(i.dimshuffle())
else:
scalars.append(i)
elif _is_real_vector(i):
vectors.append(i)
elif _is_real_matrix(i):
matrices.append(i)
else:
# just put the original arguments as in the base case
rval.append((scale, r))
return rval
if len(matrices) == 1:
assert len(vectors) == 0
m = matrices[0]
if len(scalars) == 0:
_gemm_canonicalize(m, scale, rval, 1)
elif len(scalars) == 1:
_gemm_canonicalize(m, scaled(scalars[0]), rval, 1)
else:
_gemm_canonicalize(m, T.mul(scaled(scalars[0]), *scalars[1:]),
rval, 1)
elif len(vectors) == 1:
assert len(matrices) == 0
v = vectors[0]
if len(scalars) == 0:
_gemm_canonicalize(v, scale, rval, 1)
elif len(scalars) == 1:
_gemm_canonicalize(v, scaled(scalars[0]), rval, 1)
else:
_gemm_canonicalize(v, T.mul(scaled(scalars[0]),
*scalars[1:]), rval, 1)
        else:  # let's not open this up
rval.append((scale, r))
else:
rval.append((scale, r))
return rval
def _factor_canonicalized(lst):
# remove duplicates from canonicalized list
    # we only delete out of the right end of the list;
    # once i has touched a list element, it is permanent
lst = list(lst)
# print 'FACTOR', lst
# for t in lst:
# if not isinstance(t, (list, tuple)):
# t = (t,)
# for e in t:
# try:
# theano.printing.debugprint(e)
# except TypeError:
# print e, type(e)
i = 0
while i < len(lst) - 1:
try:
s_i, M_i = lst[i]
except Exception:
i += 1
continue
j = i + 1
while j < len(lst):
try:
s_j, M_j = lst[j]
except Exception:
j += 1
continue
if M_i is M_j:
s_i = s_i + s_j
lst[i] = (s_i, M_i)
del lst[j]
else:
j += 1
i += 1
return lst
def _gemm_from_factored_list(lst):
"""
Returns None, or a list to replace node.outputs.
"""
lst2 = []
    # Remove the tuples that can't be cast correctly.
    # This can happen when we try to cast a complex to a real.
for sM in lst:
# Make every pair in list have matching dtypes
# sM can be a tuple of 2 elements or a theano variable.
if isinstance(sM, tuple):
sm0, sm1 = sM
sm0 = T.as_tensor_variable(sm0)
if theano.scalar.upcast(sm0.dtype, sm1.dtype) == sm1.dtype:
lst2.append((T.cast(sm0, sm1.dtype), sM[1]))
lst = lst2
def item_to_var(t):
try:
s, M = t
except Exception:
return t
if s == 1:
return M
if s == -1:
return -M
return s * M
# Try every pair in the sM_list, trying to turn it into a gemm operation
for i in xrange(len(lst) - 1):
s_i, M_i = lst[i]
for j in xrange(i + 1, len(lst)):
s_j, M_j = lst[j]
if M_i.type != M_j.type:
continue
# print 'TRYING', (s_i, M_i, s_j, M_j)
gemm_of_sM_list, old_dot22 = _beta_L_plus_alpha_M(s_i, M_i,
s_j, M_j)
# print 'GOT IT', gemm_of_sM_list
if gemm_of_sM_list:
assert len(gemm_of_sM_list) == 1
add_inputs = [item_to_var(input)
for k, input in enumerate(lst) if k not in (i, j)]
add_inputs.extend(gemm_of_sM_list)
if len(add_inputs) > 1:
rval = [T.add(*add_inputs)]
else:
rval = add_inputs
# print "RETURNING GEMM THIGN", rval
return rval, old_dot22
def _gemm_from_node2(node):
"""
:todo: In many expressions, there are many ways to turn it into a
gemm. For example dot(a,b) + c + d. This function should
return all of them, so that if one version of gemm causes a
cycle in the graph, then another application of gemm can be
tried.
"""
lst = []
t0 = time.time()
_gemm_canonicalize(node.outputs[0], 1.0, lst, 0)
t1 = time.time()
# print "GEMM CANON", lst
if len(lst) > 1:
lst = _factor_canonicalized(lst)
t2 = time.time()
rval = _gemm_from_factored_list(lst)
t3 = time.time()
# It can happen that _factor_canonicalized and
# _gemm_from_factored_list return a node with an incorrect
# type. This happens in particular when one of the scalar
# factors forces the upcast of the whole expression. In that
# case, we simply skip that candidate for Gemm. This was
# discussed in
# http://groups.google.com/group/theano-dev/browse_thread/thread/a3096c82856e3ad5,
# but never made it into a trac ticket.
if rval and (rval[0][0].type == node.outputs[0].type):
return rval, t1 - t0, t2 - t1, t3 - t2
return None, t1 - t0, 0, 0
class GemmOptimizer(Optimizer):
"""Graph optimizer for inserting Gemm operations."""
def __init__(self):
Optimizer.__init__(self)
self.warned = False
def add_requirements(self, fgraph):
fgraph.attach_feature(toolbox.ReplaceValidate())
def apply(self, fgraph):
did_something = True
nb_iter = 0
nb_replacement = 0
nb_replacement_didn_t_remove = 0
nb_inconsistency_make = 0
nb_inconsistency_replace = 0
time_canonicalize = 0
time_factor_can = 0
time_factor_list = 0
time_toposort = 0
if fgraph.profile:
validate_before = fgraph.profile.validate_time
callbacks_before = fgraph.execute_callbacks_times.copy()
callback_before = fgraph.execute_callbacks_time
def on_import(new_node):
if new_node is not node:
nodelist.append(new_node)
u = theano.gof.opt.Updater(on_import, None, None)
fgraph.attach_feature(u)
while did_something:
nb_iter += 1
t0 = time.time()
nodelist = theano.gof.graph.io_toposort(fgraph.inputs, fgraph.outputs)
time_toposort += time.time() - t0
did_something = False
nodelist.reverse()
for node in nodelist:
if not (isinstance(node.op, T.Elemwise) and
isinstance(node.op.scalar_op,
(theano.scalar.Add, theano.scalar.Sub,
theano.scalar.Neg, theano.scalar.Mul))):
continue
if node not in fgraph.apply_nodes:
                    # This means that we already removed this node from
                    # the graph
continue
try:
new_outputs, time1, time2, time3 = _gemm_from_node2(node)
time_canonicalize += time1
time_factor_can += time2
time_factor_list += time3
except InconsistencyError:
nb_inconsistency_make += 1
continue
if new_outputs:
new_outputs, old_dot22 = new_outputs
assert len(new_outputs) == len(node.outputs)
try:
fgraph.replace_all_validate_remove(
list(zip(node.outputs, new_outputs)),
[old_dot22],
reason='GemmOptimizer',
                            # For now we disable the warning as we know of
                            # cases that we need to fix.
warn=False, # warn=not self.warned
)
did_something = True
nb_replacement += 1
except InconsistencyError:
# TODO: retry other applications of gemm (see comment
# in _gemm_from_node)
nb_inconsistency_replace += 1
except ReplacementDidntRemovedError:
nb_replacement_didn_t_remove += 1
self.warned = True
fgraph.remove_feature(u)
if fgraph.profile:
validate_time = fgraph.profile.validate_time - validate_before
callback_time = fgraph.execute_callbacks_time - callback_before
callbacks_time = {}
for k, v in iteritems(fgraph.execute_callbacks_times):
if k in callbacks_before:
callbacks_time[k] = v - callbacks_before[k]
else:
callbacks_time[k] = v
else:
validate_time = None
callback_time = None
callbacks_time = {}
return (self, nb_iter, nb_replacement, nb_replacement_didn_t_remove,
nb_inconsistency_make, nb_inconsistency_replace,
time_canonicalize, time_factor_can,
time_factor_list, time_toposort,
validate_time, callback_time, callbacks_time,)
@staticmethod
def print_profile(stream, prof, level=0):
blanc = (' ' * level)
print(blanc, "GemmOptimizer", file=stream)
print(blanc, " nb_iter", prof[1], file=stream)
print(blanc, " nb_replacement", prof[2], file=stream)
print(blanc, " nb_replacement_didn_t_remove", prof[3], file=stream)
print(blanc, " nb_inconsistency_make", prof[4], file=stream)
print(blanc, " nb_inconsistency_replace", prof[5], file=stream)
print(blanc, " time_canonicalize", prof[6], file=stream)
print(blanc, " time_factor_can", prof[7], file=stream)
print(blanc, " time_factor_list", prof[8], file=stream)
print(blanc, " time_toposort", prof[9], file=stream)
print(blanc, " validate_time", prof[10], file=stream)
print(blanc, " callback_time", prof[11], file=stream)
if prof[11] > 1:
print(blanc, " callbacks_time", file=stream)
for i in sorted(iteritems(prof[12]), key=lambda a: a[1]):
if i[1] > 0:
print(i)
class Dot22(GemmRelated):
"""Compute a matrix-matrix product.
This is a specialization of the more general Dot().
"""
def make_node(self, x, y):
dtypes = ('float32', 'float64', 'complex64', 'complex128')
if x.type.ndim != 2 or x.type.dtype not in dtypes:
raise TypeError(x)
if y.type.ndim != 2 or y.type.dtype not in dtypes:
raise TypeError(y)
if y.type.dtype != x.type.dtype:
raise TypeError('dtype mismatch to Dot22')
bz = (x.type.broadcastable[0], y.type.broadcastable[1])
outputs = [T.tensor(x.type.dtype, bz)]
return Apply(self, [x, y], outputs)
def perform(self, node, inp, out):
x, y = inp
z, = out
try:
z[0] = numpy.asarray(numpy.dot(x, y))
except ValueError as e:
            # The error raised by numpy has no shape information, so we
            # add it here.
e.args = e.args + (x.shape, y.shape)
raise
def infer_shape(self, node, input_shapes):
return [[input_shapes[0][0], input_shapes[1][1]]]
setup_z_Nz_Sz = """
if ((NULL == %(_zout)s)
|| (PyArray_DIMS(%(_zout)s)[0] != PyArray_DIMS(%(_x)s)[0])
|| (PyArray_DIMS(%(_zout)s)[1] != PyArray_DIMS(%(_y)s)[1]))
{
if (NULL != %(_zout)s) Py_XDECREF(%(_zout)s);
npy_intp dims[2];
dims[0] = PyArray_DIMS(%(_x)s)[0];
dims[1] = PyArray_DIMS(%(_y)s)[1];
%(_zout)s = (PyArrayObject*)PyArray_SimpleNew(2, dims,
PyArray_TYPE(%(_x)s));
//fprintf(stderr, "Dot Allocating %%i %%i\\n", dims[0], dims[1]);
if(!%(_zout)s) {
PyErr_SetString(PyExc_MemoryError,
"failed to alloc dot22 output");
%(fail)s
}
}
Nz = PyArray_DIMS(%(_zout)s);
Sz = PyArray_STRIDES(%(_zout)s);
"""
check_ab_double_or_float = ""
case_float_ab_constants = """
float a = 1.0;
float b = 0.0;
"""
case_double_ab_constants = """
double a = 1.0;
double b = 0.0;
"""
def c_code(self, node, name, inp, out, sub): # DEBUG
_x, _y = inp
_zout, = out
if node.inputs[0].type.dtype.startswith('complex'):
raise utils.MethodNotDefined('%s.c_code'
% self.__class__.__name__)
if len(self.c_libraries()) <= 0:
return super(Dot22, self).c_code(node, name, (_x, _y),
(_zout, ), sub)
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
def c_code_cache_version(self):
gv = self.build_gemm_version()
if gv:
return (2,) + gv
else:
return gv
_dot22 = Dot22()
@local_optimizer([T.Dot])
def local_dot_to_dot22(node):
# This works for tensor.outer too because basic.outer is a macro that
# produces a dot(dimshuffle,dimshuffle) of form 4 below
if not isinstance(node.op, T.Dot):
return
x, y = node.inputs
if y.type.dtype != x.type.dtype:
# TODO: upcast one so the types match
_logger.info('Not optimizing dot with inputs %s %s %s %s',
x, y, x.type, y.type)
return
if y.type.dtype in ['float32', 'float64', 'complex64', 'complex128']:
if x.ndim == 2 and y.ndim == 2:
# print "local_dot_to_dot22: MM"
return [_dot22(*node.inputs)]
if x.ndim == 2 and y.ndim == 1:
# print "local_dot_to_dot22: MV"
return [_dot22(x, y.dimshuffle(0, 'x')).dimshuffle(0)]
if x.ndim == 1 and y.ndim == 2:
# print "local_dot_to_dot22: VM"
return [_dot22(x.dimshuffle('x', 0), y).dimshuffle(1)]
if x.ndim == 1 and y.ndim == 1:
# print "local_dot_to_dot22: VV"
return [_dot22(x.dimshuffle('x', 0),
y.dimshuffle(0, 'x')).dimshuffle()]
_logger.info('Not optimizing dot with inputs %s %s %s %s',
x, y, x.type, y.type)
@local_optimizer([gemm_no_inplace], inplace=True)
def local_inplace_gemm(node):
if node.op == gemm_no_inplace:
return [gemm_inplace(*node.inputs)]
@local_optimizer([gemv_no_inplace], inplace=True)
def local_inplace_gemv(node):
if node.op == gemv_no_inplace:
return [gemv_inplace(*node.inputs)]
@local_optimizer([ger], inplace=True)
def local_inplace_ger(node):
if node.op == ger:
return [ger_destructive(*node.inputs)]
@local_optimizer([gemm_no_inplace])
def local_gemm_to_gemv(node):
"""GEMM acting on row or column matrices -> GEMV."""
if node.op == gemm_no_inplace:
z, a, x, y, b = node.inputs
if z.broadcastable == x.broadcastable == (True, False):
r = gemv_no_inplace(z.dimshuffle(1), a, y.T, x.dimshuffle(1), b)
return [r.dimshuffle('x', 0)]
if z.broadcastable == y.broadcastable == (False, True):
r = gemv_no_inplace(z.dimshuffle(0), a, x, y.dimshuffle(0), b)
return [r.dimshuffle(0, 'x')]
@local_optimizer([gemm_no_inplace])
def local_gemm_to_ger(node):
"""GEMM computing an outer-product -> GER."""
if node.op == gemm_no_inplace:
z, a, x, y, b = node.inputs
if x.broadcastable[1] and y.broadcastable[0]:
            # x and y are both vectors so this might qualify as a GER
xv = x.dimshuffle(0)
yv = y.dimshuffle(1)
try:
bval = T.get_scalar_constant_value(b)
except T.NotScalarConstantError:
# b isn't a constant, GEMM is doing useful pre-scaling
return
if bval == 1: # best case a natural GER
rval = ger(z, a, xv, yv)
return [rval]
elif bval == 0: # GER on zeros_like should be faster than GEMM
zeros = T.zeros([x.shape[0], y.shape[1]], x.dtype)
rval = ger(zeros, a, xv, yv)
return [rval]
else:
# if bval is another constant, then z is being usefully
# pre-scaled and GER isn't really the right tool for the job.
return
# TODO: delete this optimization when we have the proper dot->gemm->ger pipeline
# working
@local_optimizer([_dot22])
def local_dot22_to_ger_or_gemv(node):
"""dot22 computing an outer-product -> GER."""
if node.op == _dot22:
x, y = node.inputs
xb = x.broadcastable
yb = y.broadcastable
one = T.as_tensor_variable(numpy.asarray(1, dtype=x.dtype))
zero = T.as_tensor_variable(numpy.asarray(0, dtype=x.dtype))
if xb[1] and yb[0]:
            # x and y are both vectors so this might qualify as a GER
xv = x.dimshuffle(0)
yv = y.dimshuffle(1)
zeros = T.zeros([x.shape[0], y.shape[1]], dtype=x.dtype)
rval = ger(zeros, one, xv, yv)
return [rval]
if xb[0] and yb[1]:
# x and y are both vectors so this qualifies for a sdot / ddot
# TODO: Theano doesn't have a sdot, but gemv is better than _dot22
xv = x.dimshuffle(1)
zeros = T.AllocEmpty(x.dtype)(1)
rval = gemv_no_inplace(zeros, one, y.T, xv, zero)
return [rval.dimshuffle('x', 0)]
if xb[0] and not yb[0] and not yb[1]:
# x is vector, y is matrix so try gemv
xv = x.dimshuffle(1)
zeros = T.AllocEmpty(x.dtype)(y.shape[1])
rval = gemv_no_inplace(zeros, one, y.T, xv, zero)
return [rval.dimshuffle('x', 0)]
if not xb[0] and not xb[1] and yb[1]:
# x is matrix, y is vector, try gemv
yv = y.dimshuffle(0)
zeros = T.AllocEmpty(x.dtype)(x.shape[0])
rval = gemv_no_inplace(zeros, one, x, yv, zero)
return [rval.dimshuffle(0, 'x')]
#################################
#
# Set up the BlasOpt optimizer
#
#################################
blas_optdb = SequenceDB()
# run after numerical stability optimizations (1.5)
optdb.register('BlasOpt', blas_optdb, 1.7, 'fast_run', 'fast_compile')
# run before specialize (2.0) because specialize is basically a
# free-for-all that makes the graph crazy.
# fast_compile is needed to have GpuDot22 created.
blas_optdb.register('local_dot_to_dot22',
in2out(local_dot_to_dot22),
0, 'fast_run', 'fast_compile')
blas_optdb.register('gemm_optimizer',
GemmOptimizer(),
10, 'fast_run')
blas_optdb.register('local_gemm_to_gemv',
EquilibriumOptimizer([local_gemm_to_gemv,
local_gemm_to_ger,
local_dot22_to_ger_or_gemv,
local_dimshuffle_lift],
max_use_ratio=5,
ignore_newtrees=False),
15, 'fast_run')
# After destroyhandler(49.5) but before we try to make elemwise things
# inplace (75)
blas_opt_inplace = in2out(local_inplace_gemm,
local_inplace_gemv,
local_inplace_ger,
name="blas_opt_inplace")
optdb.register('InplaceBlasOpt',
blas_opt_inplace,
70.0, 'fast_run', 'inplace', 'blas_opt_inplace')
class Dot22Scalar(GemmRelated):
"""Compute a matrix-matrix product.
    This is a specialization of the more general Dot().
    It is used to call an optimized gemm implementation and can also be
    turned into a Gemm later.
    It computes scalar * dot(x, y).
"""
def make_node(self, x, y, a):
if a.ndim != 0:
raise TypeError(Gemm.E_scalar, a)
if x.ndim != 2:
raise TypeError(Gemm.E_rank, x)
if y.ndim != 2:
raise TypeError(Gemm.E_rank, y)
if not (a.dtype == x.dtype == y.dtype):
raise TypeError('Dot22Scalar requires matching dtypes',
(a.dtype, x.dtype, y.dtype))
if (not a.dtype.startswith('float') and
not a.dtype.startswith('complex')):
raise TypeError('Dot22Scalar requires float or complex args',
a.dtype)
bz = [x.type.broadcastable[0], y.type.broadcastable[1]]
outputs = [T.tensor(x.type.dtype, bz)]
return Apply(self, [x, y, a], outputs)
def perform(self, node, inp, out):
x, y, scalar = inp
z, = out
try:
z[0] = numpy.asarray(scalar * numpy.dot(x, y))
except ValueError as e:
            # The error raised by numpy has no shape information, so we
            # add it here.
e.args = e.args + (x.shape, y.shape)
raise
def infer_shape(self, node, input_shapes):
return [[input_shapes[0][0], input_shapes[1][1]]]
setup_z_Nz_Sz = Dot22.setup_z_Nz_Sz
check_ab_double_or_float = """
if ((PyArray_DESCR(%(_a)s)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(%(_a)s)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError,
"type(a) is not double or float"); %(fail)s;}
"""
case_float_ab_constants = """
#define REAL float
float a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
#undef REAL
float b = 0.0;
"""
case_double_ab_constants = """
#define REAL double
double a = (PyArray_DESCR(%(_a)s)->type_num == NPY_FLOAT)
? (REAL)(((float*)PyArray_DATA(%(_a)s))[0])
: (REAL)(((double*)PyArray_DATA(%(_a)s))[0]);
#undef REAL
double b = 0.0;
"""
def c_code(self, node, name, inp, out, sub):
_x, _y, _a = inp
_zout, = out
if node.inputs[0].type.dtype.startswith('complex'):
raise utils.MethodNotDefined('%s.c_code'
% self.__class__.__name__)
if len(self.c_libraries()) <= 0:
return super(Dot22Scalar, self).c_code(node, name, (_x, _y),
(_zout, ), sub)
full_code = self.build_gemm_call() % dict(locals(), **sub)
return full_code
def c_code_cache_version(self):
gv = self.build_gemm_version()
if gv:
return (2,) + gv
else:
return gv
_dot22scalar = Dot22Scalar()
@local_optimizer([T.mul])
def local_dot22_to_dot22scalar(node):
"""
Notes
-----
Previous attempts to alter this optimization to replace dot22 with
gemm instead of dot22scalar resulted in some Scan nodes being
duplicated and the ScanSaveMem optimization never running on them,
resulting in highly increased memory usage. Until this issue is
resolved, this optimization should keep using dot22scalar instead of
gemm.
    We upcast the scalar if, after the multiplication with the dot, this
    gives the same type.
    We execute this optimizer after the gemm optimizer. This gives more
    priority to gemm, which provides a bigger speed-up than this optimizer,
    while still allowing the gemm optimizer to ignore this op.
    TODO: support the case where we can reorder the mul to generate a
    dot22scalar, or fix the canonizer to merge them (one mul with multiple
    inputs).
"""
if node.op != T.mul:
return False
i_dot22 = [x.owner and x.owner.op == _dot22 for x in node.inputs]
if not any(i_dot22):
return False # no dot22
if i_dot22.count(True) > 1:
# TODO: try each of them.
pass
# return False #TODO fix
dot22_idx = i_dot22.index(True)
d = node.inputs[dot22_idx]
i_scalar = [_as_scalar(x, dtype=d.dtype) for x in node.inputs]
if not any(i_scalar):
        # Check if we can reorder the graph, as this mul has a mul in its
        # inputs. We support only 1 additional level of mul.
        # The canonizer should have merged those muls together.
i_mul = [x.owner and x.owner.op == T.mul and
any([_as_scalar(x_i, dtype=d.dtype)
for x_i in x.owner.inputs])
for x in node.inputs]
if not any(i_mul):
            # no scalar in the inputs and no multiplication
            # if there was a multiplication we could reorder the graph
            # by associativity.
return False
mul_idx = i_mul.index(True) # The first one should always work
m = node.inputs[mul_idx]
scalar_idx = -1
for i, x in enumerate(m.owner.inputs):
if _as_scalar(x, dtype=d.dtype) and (theano.scalar.upcast(
x.type.dtype, d.type.dtype) == d.type.dtype):
scalar_idx = i
break
if scalar_idx < 0:
_logger.info('Not optimizing dot22 with inputs %s %s, as the'
' type of the scalar cannot be upcasted to the'
' matrix type',
node.inputs, [x.type for x in node.inputs])
return False
a = T.cast(_as_scalar(m.owner.inputs[scalar_idx],
dtype=d.dtype), d.type.dtype)
assert not a.type.ndim
dot = _dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a)
        # The other inputs to the original node that were
        # neither part of the dot22 nor this mul should be
        # factors in the returned "mul" node.
assert dot22_idx != mul_idx
other_factors = [inpt
for i, inpt in enumerate(node.inputs)
if i not in (dot22_idx, mul_idx)]
other_m_inputs = [inpt
for i, inpt in enumerate(m.owner.inputs)
if i != scalar_idx]
return [T.mul(dot, *(other_factors + other_m_inputs))]
scalar_idx = -1
for i, x in enumerate(node.inputs):
if (i != dot22_idx and i_scalar[i] is not None and
(theano.scalar.upcast(x.type.dtype, d.type.dtype) ==
d.type.dtype)):
scalar_idx = i
break
if scalar_idx < 0:
_logger.info('Not optimizing dot22 with inputs %s %s, as the type '
'of the scalar cannot be upcasted to the matrix type',
node.inputs, [x.type for x in node.inputs])
return False
assert scalar_idx < len(node.inputs)
s = node.inputs[scalar_idx]
o = copy.copy(node.inputs)
o.remove(d)
o.remove(s)
a = T.cast(i_scalar[scalar_idx], d.type.dtype)
assert not a.type.ndim
if len(o) == 0:
return [_dot22scalar(d.owner.inputs[0], d.owner.inputs[1], a)]
else:
return [T.mul(_dot22scalar(d.owner.inputs[0],
d.owner.inputs[1], a), *o)]
# must happen after gemm as the gemm optimizer doesn't understand
# dot22scalar, and gemm gives more speed-up than dot22scalar
blas_optdb.register('local_dot22_to_dot22scalar',
in2out(local_dot22_to_dot22scalar),
11, 'fast_run')
class BatchedDot(Op):
"""
Computes the batched dot product of two variables:
batched_dot(a, b)[i] = dot(a[i], b[i])
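    For 3-D inputs, a NumPy sketch of the result (mirroring the Python
    ``perform`` fallback) is::
        z = numpy.array([numpy.dot(a[i], b[i]) for i in range(a.shape[0])])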
"""
__props__ = ()
def make_node(self, *inputs):
inputs = list(map(T.as_tensor_variable, inputs))
if len(inputs) != 2:
raise TypeError("theano.tensor.blas.BatchedDot: 2 arguments"
" required, %d given " % len(inputs))
if inputs[0].ndim not in (2, 3):
raise TypeError("theano.tensor.blas.BatchedDot: input 0 (0-indexed)"
" must have ndim of 2 or 3, %d given. Consider"
" calling theano.tensor.batched_dot instead."
% inputs[0].ndim)
if inputs[1].ndim not in (2, 3):
raise TypeError("theano.tensor.blas.BatchedDot: input 1 (0-indexed)"
" must have ndim of 2 or 3, %d given. Consider"
" calling theano.tensor.batched_dot instead."
% inputs[1].ndim)
dtype = theano.scalar.upcast(*[input.type.dtype for input in inputs])
# upcast inputs to common dtype if needed
upcasted_inputs = [T.cast(input, dtype) for input in inputs]
broadcastable = ((inputs[0].type.broadcastable[0] or
inputs[1].type.broadcastable[0],) +
inputs[0].type.broadcastable[1:-1] +
inputs[1].type.broadcastable[2:])
return Apply(self, upcasted_inputs, [T.tensor(dtype, broadcastable)])
def perform(self, node, inp, out):
x, y = inp
z, = out
if x.shape[0] != y.shape[0]:
raise TypeError(
"theano.tensor.blas.BatchedDot: inputs [%s] must have the"
" same size in axis 0, but have sizes [%s]." %
(", ".join(map(str, inp)),
", ".join([str(i.shape[0]) for i in inp])))
shape = self.infer_shape(node, [i.shape for i in inp])[0]
dtype = node.outputs[0].dtype
z0 = z[0] = numpy.empty(shape, dtype=dtype)
for i in xrange(z0.shape[0]):
z0[i] = numpy.dot(x[i], y[i])
def c_support_code(self):
batch_gemm_defn = """
template<typename dtype, typename function>
bool batch_gemm(function gemm, int type_size,
PyArrayObject* xs, PyArrayObject* ys, PyArrayObject* zs) {
npy_intp *Nx = PyArray_DIMS(xs), *Sx = PyArray_STRIDES(xs);
npy_intp *Ny = PyArray_DIMS(ys), *Sy = PyArray_STRIDES(ys);
npy_intp *Nz = PyArray_DIMS(zs), *Sz = PyArray_STRIDES(zs);
if (Nx[0] != Ny[0]) {
PyErr_Format(PyExc_ValueError,
"Shape mismatch: batch sizes unequal."
" x.shape is (%d, %d, %d),"
" y.shape is (%d, %d, %d).",
Nx[0], Nx[1], Nx[2],
Ny[0], Ny[1], Ny[2]);
return 1;
}
if (Nx[2] != Ny[1]) {
PyErr_Format(PyExc_ValueError,
"Shape mismatch: summation axis sizes unequal."
" x.shape is (%d, %d, %d),"
" y.shape is (%d, %d, %d).",
Nx[0], Nx[1], Nx[2],
Ny[0], Ny[1], Ny[2]);
return 1;
}
/* encode the stride structure of _x,_y,_z into a single integer. */
int unit = 0;
unit |= ((Sx[2] == type_size || Nx[2] == 1) ? 0x0 : (Sx[1] == type_size || Nx[1]==1) ? 0x1 : 0x2) << 8;
unit |= ((Sy[2] == type_size || Ny[2] == 1) ? 0x0 : (Sy[1] == type_size || Ny[1]==1) ? 0x1 : 0x2) << 4;
unit |= ((Sz[2] == type_size || Nz[2] == 1) ? 0x0 : (Sz[1] == type_size || Nz[1]==1) ? 0x1 : 0x2) << 0;
/* create appropriate strides for malformed matrices that are row or column
* vectors, or empty matrices.
* In that case, the value of the stride does not really matter, but
* some versions of BLAS insist that:
* - they are not smaller than the number of elements in the array,
* - they are not 0.
*/
int sx_1 = (Nx[1] > 1) ? Sx[1]/type_size : (Nx[2] + 1);
int sx_2 = (Nx[2] > 1) ? Sx[2]/type_size : (Nx[1] + 1);
int sy_1 = (Ny[1] > 1) ? Sy[1]/type_size : (Ny[2] + 1);
int sy_2 = (Ny[2] > 1) ? Sy[2]/type_size : (Ny[1] + 1);
int sz_1 = (Nz[1] > 1) ? Sz[1]/type_size : (Nz[2] + 1);
int sz_2 = (Nz[2] > 1) ? Sz[2]/type_size : (Nz[1] + 1);
dtype* x = (dtype*)PyArray_DATA(xs);
dtype* y = (dtype*)PyArray_DATA(ys);
dtype* z = (dtype*)PyArray_DATA(zs);
dtype a = 1.0;
dtype b = 0.0;
char N = 'N';
char T = 'T';
int Nz1 = Nz[1], Nz2 = Nz[2], Nx2 = Nx[2];
// loop over batch axis
for (int i = 0; i < Nz[0]; i++) {
switch(unit)
{
case 0x000: gemm(&N, &N, &Nz2, &Nz1, &Nx2, &a, y, &sy_1, x, &sx_1, &b, z, &sz_1); break;
case 0x100: gemm(&N, &T, &Nz2, &Nz1, &Nx2, &a, y, &sy_1, x, &sx_2, &b, z, &sz_1); break;
case 0x010: gemm(&T, &N, &Nz2, &Nz1, &Nx2, &a, y, &sy_2, x, &sx_1, &b, z, &sz_1); break;
case 0x110: gemm(&T, &T, &Nz2, &Nz1, &Nx2, &a, y, &sy_2, x, &sx_2, &b, z, &sz_1); break;
case 0x001: gemm(&T, &T, &Nz1, &Nz2, &Nx2, &a, x, &sx_1, y, &sy_1, &b, z, &sz_2); break;
case 0x101: gemm(&N, &T, &Nz1, &Nz2, &Nx2, &a, x, &sx_2, y, &sy_1, &b, z, &sz_2); break;
case 0x011: gemm(&T, &N, &Nz1, &Nz2, &Nx2, &a, x, &sx_1, y, &sy_2, &b, z, &sz_2); break;
case 0x111: gemm(&N, &N, &Nz1, &Nz2, &Nx2, &a, x, &sx_2, y, &sy_2, &b, z, &sz_2); break;
default: PyErr_SetString(PyExc_ValueError, "some matrix has no unit stride"); return 1;
};
x += Sx[0] / type_size;
y += Sy[0] / type_size;
z += Sz[0] / type_size;
}
return 0;
}
"""
return blas_header_text() + batch_gemm_defn
def c_libraries(self):
return ldflags()
def c_compile_args(self):
return ldflags(libs=False, flags=True)
def c_lib_dirs(self):
return ldflags(libs=False, libs_dir=True)
def c_header_dirs(self):
return ldflags(libs=False, include_dir=True)
def c_code_cleanup(self, node, name, inputs, outputs, sub):
return """
// clean up views
Py_XDECREF(xs); xs = 0;
Py_XDECREF(ys); ys = 0;
Py_XDECREF(zs); zs = 0;
"""
def c_code(self, node, name, inp, out, sub):
_x, _y = inp
_z, = out
fail = sub["fail"]
if not config.blas.ldflags:
return super(BatchedDot, self).c_code(node, name,
inp, out, sub)
# generate contiguity condition
def contiguous(var, ndim):
strides = "PyArray_STRIDES(%s)" % var
return " && ".join([
" && ".join("{strides}[{i}] > 0 && {strides}[{i}] % type_size == 0"
.format(strides=strides, i=i) for i in range(ndim)),
"(%s)" % " || ".join("{strides}[{i}] == type_size"
.format(strides=strides, i=i) for i in range(ndim)),
])
x_ndim, y_ndim, z_ndim = node.inputs[0].ndim, node.inputs[1].ndim, node.outputs[0].ndim
# generate code to allocate output based on runtime input shapes
z_dims = ["PyArray_DIMS(%s)[0]" % _x]
if x_ndim == 3:
z_dims.append("PyArray_DIMS(%s)[1]" % _x)
if y_ndim == 3:
z_dims.append("PyArray_DIMS(%s)[2]" % _y)
assert len(z_dims) == z_ndim
z_shape_correct = " && ".join("PyArray_DIMS(%s)[%i] == %s"
% (_z, i, dim) for i, dim in enumerate(z_dims))
z_shape = ", ".join(z_dims)
z_contiguous = contiguous(_z, z_ndim)
allocate = """
if (NULL == %(_z)s || !(%(z_shape_correct)s) || !(%(z_contiguous)s))
{
npy_intp dims[%(z_ndim)s] = {%(z_shape)s};
Py_XDECREF(%(_z)s);
%(_z)s = (PyArrayObject*)PyArray_SimpleNew(
%(z_ndim)s, dims, PyArray_TYPE(%(_x)s));
if(!%(_z)s) {
PyErr_SetString(PyExc_MemoryError,
"failed to alloc BatchedDot output");
%(fail)s
}
}
""" % locals()
# code to reallocate inputs contiguously if necessary
contiguate = []
for var, ndim in [(_x, x_ndim), (_y, y_ndim)]:
_contiguous = contiguous(var, ndim)
contiguate.append("""
if (!(%(_contiguous)s)) {
PyArrayObject * _copy = (PyArrayObject *) PyArray_Copy(%(var)s);
if (!_copy)
%(fail)s
Py_XDECREF(%(var)s);
%(var)s = _copy;
}
""" % locals())
contiguate = "\n".join(contiguate)
def c_dimshuffle(newname, oldname, shape):
_fail = fail
_shape = ", ".join("1" if axis is None else "PyArray_DIMS(%s)[%i]" % (oldname, axis)
for axis in shape)
return """{
npy_intp dims[3] = {%(_shape)s};
PyArray_Dims newshape = {dims, 3};
%(newname)s = (PyArrayObject*)PyArray_Newshape(%(oldname)s, &newshape, NPY_ANYORDER);
if (!%(newname)s)
%(_fail)s
// make sure we didn't accidentally copy
assert(PyArray_DATA(%(oldname)s) == PyArray_DATA(%(newname)s));
}""" % locals()
# create tensor3 views for any of x, y, z that are not tensor3, so that
# we only need to implement the tensor3-tensor3 batched dot product.
# xs, ys and zs will point to these views, or to the original array if
# it was already tensor3.
# in the latter case, we artificially increase the reference count of
# the original array so that the c_code_cleanup method can decref them
# all indiscriminately.
upcast = []
if x_ndim == 3:
upcast.append("xs = %(_x)s; Py_XINCREF(xs);")
elif x_ndim == 2:
upcast.append(c_dimshuffle("xs", _x, (0, None, 1)))
if y_ndim == 3:
upcast.append("ys = %(_y)s; Py_XINCREF(ys);")
elif y_ndim == 2:
upcast.append(c_dimshuffle("ys", _y, (0, 1, None)))
if z_ndim == 3:
upcast.append("zs = %(_z)s; Py_XINCREF(zs);")
else:
upcast.append(c_dimshuffle(
"zs", _z, (0,
None if x_ndim == 2 else 1,
None if y_ndim == 2 else 1)))
upcast = "\n".join(upcast) % locals()
return """
int type_num = PyArray_DESCR(%(_x)s)->type_num;
int type_size = PyArray_DESCR(%(_x)s)->elsize; // in bytes
// xs, ys, zs will point to views onto %(_x)s, %(_y)s, %(_z)s
PyArrayObject *xs = 0, *ys = 0, *zs = 0;
if (PyArray_NDIM(%(_x)s) != %(x_ndim)s) {
PyErr_Format(PyExc_NotImplementedError,
"rank(x) != %(x_ndim)s. rank(x) is %%d.",
PyArray_NDIM(%(_x)s));
%(fail)s;
}
if (PyArray_NDIM(%(_y)s) != %(y_ndim)s) {
PyErr_Format(PyExc_NotImplementedError,
"rank(y) != %(y_ndim)s. rank(y) is %%d.",
PyArray_NDIM(%(_y)s));
%(fail)s;
}
if (%(_z)s && PyArray_NDIM(%(_z)s) != %(z_ndim)s) {
PyErr_Format(PyExc_NotImplementedError,
"rank(z) != %(z_ndim)s. rank(z) is %%d.",
PyArray_NDIM(%(_z)s));
%(fail)s;
}
// allocate output
%(allocate)s
// reallocate any noncontiguous arrays or arrays with invalid strides
%(contiguate)s
// add dims to make sure everything is tensor3
%(upcast)s
// from here on, use xs, ys and zs as they are tensor3 and share memory
// with the original %(_x)s, %(_y)s and %(_z)s arrays.
if ((PyArray_DESCR(xs)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(xs)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(x) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(ys)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(ys)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(y) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(zs)->type_num != NPY_DOUBLE)
&& (PyArray_DESCR(zs)->type_num != NPY_FLOAT))
{PyErr_SetString(PyExc_NotImplementedError, "type(z) is not double or float"); %(fail)s;}
if ((PyArray_DESCR(xs)->type_num != PyArray_DESCR(ys)->type_num)
||(PyArray_DESCR(xs)->type_num != PyArray_DESCR(zs)->type_num))
{ PyErr_SetString(PyExc_NotImplementedError, "type(x), type(y), type(z) are not all the same"); %(fail)s; }
switch (type_num)
{
case NPY_FLOAT:
if (batch_gemm<float>(sgemm_, type_size, xs, ys, zs)) {
%(fail)s;
}
break;
case NPY_DOUBLE:
if (batch_gemm<double>(dgemm_, type_size, xs, ys, zs)) {
%(fail)s;
}
break;
}
""" % locals()
def c_code_cache_version(self):
from theano.tensor.blas_headers import blas_header_version
return (1, blas_header_version())
def grad(self, inp, grads):
x, y = inp
gz, = grads
xdim, ydim, gdim = x.type.ndim, y.type.ndim, gz.type.ndim
# grad is a vector, so x is a matrix and y is a matrix
if gdim == 1:
xgrad = gz.dimshuffle(0, 'x') * y
ygrad = gz.dimshuffle(0, 'x') * x
# x is a matrix, y is a tensor3, grad is a matrix
elif xdim == 2 and ydim == 3:
xgrad = T.batched_dot(gz, y.dimshuffle(0, 2, 1))
ygrad = x.dimshuffle(0, 1, 'x') * gz.dimshuffle(0, 'x', 1)
# x is a tensor3, y is a matrix, grad is a matrix
elif xdim == 3 and ydim == 2:
xgrad = gz.dimshuffle(0, 1, 'x') * y.dimshuffle(0, 'x', 1)
ygrad = T.batched_dot(x.dimshuffle(0, 2, 1), gz)
# x is a tensor3, y is a tensor3, grad is a tensor3
elif xdim == ydim == 3:
xgrad = T.batched_dot(gz, y.dimshuffle(0, 2, 1))
ygrad = T.batched_dot(x.dimshuffle(0, 2, 1), gz)
        # If x or y contain broadcastable dimensions but only one of
        # them knows that a matching dimension is broadcastable, the
        # above code doesn't always return the right broadcast pattern.
        # This causes problems down the road. See gh-1461.
if xgrad.broadcastable != x.broadcastable:
xgrad = T.patternbroadcast(xgrad, x.broadcastable)
if ygrad.broadcastable != y.broadcastable:
ygrad = T.patternbroadcast(ygrad, y.broadcastable)
return xgrad, ygrad
def R_op(self, inputs, eval_points):
        # R_op for batched_dot(a, b) evaluated at c for a and d for b is
# simply batched_dot(c, b) + batched_dot(a, d)
assert len(inputs) == 2
assert len(eval_points) == 2
if eval_points[0] is None and eval_points[1] is None:
return [None]
debugger_available = config.compute_test_value != 'off'
if debugger_available:
try:
iv0 = theano.gof.op.get_test_value(inputs[0])
except AttributeError:
theano.gof.op.missing_test_message(
'first input passed to BatchedDot.R_op has no test value')
debugger_available = False
try:
iv1 = theano.gof.op.get_test_value(inputs[1])
except AttributeError:
theano.gof.op.missing_test_message(
'second input passed to BatchedDot.R_op has no test value')
debugger_available = False
if eval_points[0]:
try:
ev0 = theano.gof.op.get_test_value(eval_points[0])
except AttributeError:
theano.gof.op.missing_test_message(
'first eval point passed to BatchedDot.R_op '
'has no test value')
debugger_available = False
if eval_points[1]:
try:
ev1 = theano.gof.op.get_test_value(eval_points[1])
except AttributeError:
theano.gof.op.missing_test_message(
'second eval point passed to BatchedDot.R_op '
'has no test value')
debugger_available = False
if debugger_available:
input_values = [iv0, iv1]
eval_point_values = [ev0, ev1]
for i in xrange(2):
if eval_point_values[i] is not None and \
input_values[i].shape != eval_point_values[i].shape:
raise ValueError(
'input ' + str(i) + ' and eval_point ' + str(i) +
' to BatchedDot.R_op should have the same shape, but '
'their shapes are %s and %s, respectively' % (
str(input_values[i].shape),
str(eval_point_values[i].shape)))
if eval_points[0]:
t1 = self(eval_points[0], inputs[1])
if eval_points[1]:
t2 = self(inputs[0], eval_points[1])
if eval_points[0] and eval_points[1]:
return [t1 + t2]
elif eval_points[0]:
return [t1]
else:
return [t2]
def infer_shape(self, node, shapes):
for shape_ in shapes:
if len(shape_) not in (2, 3):
raise NotImplementedError()
xshp, yshp = shapes
return [xshp[:-1] + yshp[2:]]
# from opt import register_specialize, register_canonicalize
# @register_specialize
@local_optimizer([T.sub, T.add])
def local_print_as_we_go_along(node):
if node.op in (T.sub, T.add):
debugprint(node)
| {
"content_hash": "6e57160e5f3a110fe682cbef2e44d17b",
"timestamp": "",
"source": "github",
"line_count": 2443,
"max_line_length": 118,
"avg_line_length": 37.79410560785919,
"alnum_prop": 0.5099587354193066,
"repo_name": "surgebiswas/poker",
"id": "c63bf6dd68e35c05b4b8bd12de2b1fb88c6410e2",
"size": "92331",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PokerBots_2017/Johnny/theano/tensor/blas.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "20"
},
{
"name": "C",
"bytes": "569372"
},
{
"name": "C++",
"bytes": "3604944"
},
{
"name": "CSS",
"bytes": "1750"
},
{
"name": "Cuda",
"bytes": "232079"
},
{
"name": "Fortran",
"bytes": "13029"
},
{
"name": "HTML",
"bytes": "127417"
},
{
"name": "Jupyter Notebook",
"bytes": "97929"
},
{
"name": "Makefile",
"bytes": "76699"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "25163"
},
{
"name": "Python",
"bytes": "26314770"
},
{
"name": "Shell",
"bytes": "1082"
}
],
"symlink_target": ""
} |
"""
Testing for pipeline_grid_search module.
"""
from __future__ import print_function
from __future__ import division
import time
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin, ClassifierMixin
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVC
from nose.tools import assert_equal
from pipeline_grid_search import PipelineGridSearchCV
# Globals for counting estimator calls
n_transform_calls = 0
n_fit_calls = 0
# http://stackoverflow.com/a/27005560/4963543
def make_init_body(classname,parameters):
# Calling super does not work for some reason,
# but it does not matter in this case, since
# BaseEstimator, TransformerMixin and ClassifierMixin have an empty __init__ function.
#body = " super({}, self).__init__()".format(classname)
body = " pass"
body += ''.join('\n self.{}={}'.format(key,key) for key,_ in parameters)
func_str = " def __init__(self{}):\n{}".format(''.join(', {}={}'.format(key,val) for key,val in parameters), body)
return func_str
def create_mock_estimator(classname,parameters,is_classifier=False):
# parameters is a list of (key,val) pairs.
init_body = make_init_body(classname,parameters)
main_body = """
def fit(self, X, y=None):
global n_fit_calls
n_fit_calls += 1
return self
"""
if is_classifier:
bases = "(BaseEstimator, TransformerMixin, ClassifierMixin)"
main_body += """
def predict(self, X):
return np.arange(X.shape[0])
"""
else:
bases = "(BaseEstimator, TransformerMixin)"
main_body += """
def transform(self, X):
global n_transform_calls
n_transform_calls += 1
odd = False
for k,v in self.get_params().items():
if odd:
X = X*v
else:
X = X-v
odd = not odd
return X
"""
body = "class {}{}:\n{}\n{}".format(classname,bases,init_body,main_body)
print(body)
exec(body)
newclassobj = locals()[classname]()
return newclassobj
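# Illustrative sketch (not part of the original tests): the call
#   f1 = create_mock_estimator("f1", [("p1", 0), ("p2", 2)])
# builds a class f1(BaseEstimator, TransformerMixin) whose __init__ accepts
# p1=0 and p2=2, whose fit() increments the global n_fit_calls counter, and
# whose transform() increments n_transform_calls while alternately subtracting
# and multiplying its parameter values into X.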
def create_mock_classifier(classname,parameters):
return create_mock_estimator(classname,parameters,is_classifier=True)
def nfits(nparams):
    # Calculates the optimal number of calls to fit when following DFS order
# (the number of nodes in the pipeline tree minus one)
s = 1
for c in reversed(nparams):
s = 1+c*s if c>1 else s+1
return s-1
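# Worked example (added for clarity): for per-step parameter counts [2, 3, 4]
# the reversed loop gives s = 1 + 4*1 = 5, then s = 1 + 3*5 = 16, then
# s = 1 + 2*16 = 33, so nfits([2, 3, 4]) == 32 -- far fewer than the
# 2*3*4*3 = 72 fits per fold that a naive grid search over a 3-step pipeline performs.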
def calc_n_ideal_fit_calls(parts, cv_params, n_folds):
pipe_length = len(parts)
nparams = []
for p in parts:
param_count = 1
for (name,vals) in cv_params:
est_name,_ = name.split("__",1)
if est_name == p.__class__.__name__:
param_count *= len(vals)
nparams.append(param_count)
print(nparams)
n_ideal_calls = nfits(nparams)
n_ideal_calls *= n_folds # We repeat the above number of fit calls for each fold
n_ideal_calls += pipe_length # plus the fits for fitting on the whole X last
return n_ideal_calls
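# Worked example (added for clarity): continuing the [2, 3, 4] counts above
# with n_folds=2, the ideal number of fit calls is nfits([2, 3, 4]) * 2 + 3
# = 32 * 2 + 3 = 67.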
def calc_n_ideal_transform_calls(parts, cv_params, n_folds):
pipe_length = len(parts)
nparams = []
for p in parts[:-1]: # Do not include the last part of the pipeline; it is a classifier (without transform)
param_count = 1
for (name,vals) in cv_params:
est_name,_ = name.split("__",1)
if est_name == p.__class__.__name__:
param_count *= len(vals)
nparams.append(param_count)
n_ideal_calls = nfits(nparams)
n_ideal_calls *= n_folds*2 # We repeat the above number of fit calls for each fold (and for both the train and development set)
n_ideal_calls += pipe_length-1 # plus the fits for fitting on the whole X last (minus the classifier at the end)
return n_ideal_calls
def test_pipeline_grid_search1():
    # Test that the number of estimator calls is less than that of regular GridSearchCV
parts = [
create_mock_estimator("f0",[]),
create_mock_estimator("f1", [("p1",0),("p2",2)]),
create_mock_estimator("f2",[]),
create_mock_estimator("f3",[("c",0),("d",0)]),
create_mock_estimator("f4",[]),
create_mock_estimator("f5",[]),
create_mock_classifier("f6",[("c",0)]),
]
cv_params = [
('f1__p1', [10,20]),
('f3__c', [10,20,30]),
('f3__d', [10,20,30,40]),
('f6__c', [10,20,30,40]),
]
perform_pipeline_case(parts, cv_params)
def test_pipeline_grid_search2():
    # Test that the number of estimator calls is less than that of regular GridSearchCV
parts = [
create_mock_estimator("f0",[]),
create_mock_estimator("f1", [("p1",0),("p2",2)]),
create_mock_estimator("f2",[]),
create_mock_estimator("f3",[("c",0),("d",0)]),
create_mock_estimator("f4",[]),
create_mock_estimator("f5",[]),
create_mock_estimator("f40",[]),
create_mock_estimator("f50",[]),
create_mock_estimator("f41",[]),
create_mock_estimator("f51",[]),
create_mock_estimator("f42",[]),
create_mock_estimator("f52",[]),
create_mock_classifier("f6",[("c",0)]),
]
cv_params = [
('f1__p1', [10,20]),
('f3__c', [10,20,30]),
('f3__d', [10,20,30,40]),
('f6__c', [10,20,30,40]),
]
perform_pipeline_case(parts, cv_params)
def test_pipeline_grid_search3():
    # Test that the number of estimator calls is less than that of regular GridSearchCV
parts = [
create_mock_classifier("f1", [("p1",0)]),
]
cv_params = [
('f1__p1', [10,20]),
]
perform_pipeline_case(parts, cv_params)
def test_pipeline_grid_search4():
    # Test that the number of estimator calls is less than that of regular GridSearchCV
parts = [
create_mock_classifier("f1", []),
]
cv_params = [
]
perform_pipeline_case(parts, cv_params)
def test_pipeline_grid_search5():
    # Test that the number of estimator calls is less than that of regular GridSearchCV
parts = [
create_mock_estimator("f0",[]),
create_mock_estimator("f1", [("p1",0),("p2",2)]),
create_mock_estimator("f2",[]),
create_mock_estimator("f3",[("c",0),("d",0)]),
create_mock_estimator("f4",[]),
create_mock_estimator("f5",[]),
create_mock_estimator("f6",[]),
create_mock_estimator("f7",[]),
create_mock_estimator("f8",[]),
create_mock_estimator("f9",[]),
create_mock_estimator("f10",[]),
create_mock_classifier("f11",[]),
]
cv_params = [
('f1__p1', [10,20]),
('f3__c', [10,20,30]),
('f3__d', [10,20,30,40]),
]
perform_pipeline_case(parts, cv_params)
def test_pipeline_grid_search6():
# Test that the number of estimator calls is less than the ones for regular GridSearchCV
parts = [
create_mock_estimator("f0",[]),
create_mock_estimator("f1", [("p1",0),("p2",2)]),
create_mock_estimator("f2",[]),
create_mock_estimator("f3",[("c",0),("d",0)]),
create_mock_estimator("f4",[]),
create_mock_estimator("f5",[]),
SVC()
]
cv_params = [
('f1__p1', [10,20]),
('f3__c', [10,20,30]),
('f3__d', [10,20,30,40]),
('SVC__C', [1.,10.,100.,1000.]),
('SVC__kernel', ['linear']),
]
    # Set assert_n_calls_equal to False, since custom counting of function calls would need to be implemented in order to verify the call counts.
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False)
def test_pipeline_grid_search7():
# Test that _DFSGridSearchCVPipeline gives the same selected parameters as the normal GridSearchCV
parts = [
PCA(),
Normalizer(),
SVC()
]
cv_params = [
('PCA__n_components', [3,5,7]),
('Normalizer__norm', ['l2']),
('SVC__C', [1.,10.,100.,1000.]),
('SVC__kernel', ['linear']),
]
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False)
def test_pipeline_grid_search8():
# Test using a FeatureUnion with embedded Pipelines.
parts = [
create_mock_estimator("f0",[]),
FeatureUnion([
('feat1', Pipeline([
('f11', create_mock_estimator("f11", [("p1",0),("p2",2)])),
])),
('feat2', Pipeline([
('f12', create_mock_estimator("f12", [("a",0)])),
])),
]),
create_mock_estimator("f1", [("p1",0),("p2",2)]),
create_mock_estimator("f2",[]),
create_mock_estimator("f3",[("c",0),("d",0)]),
create_mock_estimator("f4",[]),
create_mock_estimator("f5",[]),
create_mock_classifier("f11",[]),
]
cv_params = [
('FeatureUnion__feat1__f11__p1', [10,20]),
('FeatureUnion__feat2__f12__a', [10,20,30]),
('f1__p1', [10,20]),
('f3__c', [10,20,30]),
('f3__d', [10,20,30,40]),
]
    # Set assert_n_calls_equal to False, since custom counting of function calls would need to be implemented in order to verify the call counts.
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False)
# TODO: Update assert_n_calls_equal logic to work correctly with pipelines embedded in FeatureUnions.
def test_pipeline_grid_search9():
# Test using a FeatureUnion with embedded Pipelines.
parts = [
create_mock_estimator("f0",[]),
FeatureUnion([
('feat1', Pipeline([
('f11', create_mock_estimator("f11", [("p1",0),("p2",2)])),
('f111', create_mock_estimator("f111", [("p1",0),("p2",2)])),
('f112', create_mock_estimator("f112", [("p1",0),("p2",2)])),
])),
('feat2', Pipeline([
('f12', create_mock_estimator("f12", [("a",0)])),
('f121', create_mock_estimator("f121", [("a",0)])),
('f122', create_mock_estimator("f122", [("a",0)])),
])),
]),
create_mock_estimator("f1", [("p1",0),("p2",2)]),
create_mock_estimator("f2",[]),
create_mock_estimator("f3",[("c",0),("d",0)]),
create_mock_estimator("f4",[]),
create_mock_estimator("f5",[]),
create_mock_classifier("f11",[]),
]
cv_params = [
('FeatureUnion__feat1__f11__p1', [10,20]),
#('FeatureUnion__feat1__f111__p1', [10,20]),
('FeatureUnion__feat1__f112__p1', [10,20]),
#('FeatureUnion__feat2__f12__a', [10,20,30]),
#('FeatureUnion__feat2__f121__a', [10,20,30]),
('FeatureUnion__feat2__f122__a', [10,20,30]),
('f1__p1', [10,20]),
('f3__c', [10,20,30]),
('f3__d', [10,20,30,40]),
]
    # Set assert_n_calls_equal to False, since custom counting of function calls would need to be implemented in order to verify the call counts.
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')
def test_pipeline_grid_search10():
# Test if _DFSGridSearchCVPipeline works with submerged pipelines.
parts = [
create_mock_estimator("f0",[]),
FeatureUnion([
('feat1', Pipeline([
('f11', create_mock_estimator("f11", [("p1",0),("p2",2)])),
('f111', create_mock_estimator("f111", [("p1",0),("p2",2)])),
('f112', create_mock_estimator("f112", [("p1",0),("p2",2)])),
])),
('feat2', Pipeline([
('f12', create_mock_estimator("f12", [("a",0)])),
('f121', create_mock_estimator("f121", [("a",0)])),
('f122', create_mock_estimator("f122", [("a",0)])),
])),
]),
PCA(),
Normalizer(),
SVC(),
]
cv_params = [
('FeatureUnion__feat1__f11__p1', [10,20]),
#('FeatureUnion__feat1__f111__p1', [10,20]),
('FeatureUnion__feat1__f112__p1', [10,20]),
#('FeatureUnion__feat2__f12__a', [10,20,30]),
#('FeatureUnion__feat2__f121__a', [10,20,30]),
('FeatureUnion__feat2__f122__a', [10,20,30]),
('PCA__n_components', [3,5,7]),
('Normalizer__norm', ['l2']),
('SVC__C', [1.,10.,100.,1000.]),
('SVC__kernel', ['linear']),
]
    # Set assert_n_calls_equal to False, since custom counting of function calls would need to be implemented in order to verify the call counts.
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='dfs', cachedir='file_cache', datasetname='make_class')
def test_pipeline_grid_search11():
# Test if _CacheGridSearchCVPipeline works with submerged pipelines.
parts = [
create_mock_estimator("f0",[]),
FeatureUnion([
('feat1', Pipeline([
('f11', create_mock_estimator("f11", [("p1",0),("p2",2)])),
('f111', create_mock_estimator("f111", [("p1",0),("p2",2)])),
('f112', create_mock_estimator("f112", [("p1",0),("p2",2)])),
])),
('feat2', Pipeline([
('f12', create_mock_estimator("f12", [("a",0)])),
('f121', create_mock_estimator("f121", [("a",0)])),
('f122', create_mock_estimator("f122", [("a",0)])),
])),
]),
PCA(),
Normalizer(),
SVC(),
]
cv_params = [
('FeatureUnion__feat1__f11__p1', [10,20]),
#('FeatureUnion__feat1__f111__p1', [10,20]),
('FeatureUnion__feat1__f112__p1', [10,20]),
#('FeatureUnion__feat2__f12__a', [10,20,30]),
#('FeatureUnion__feat2__f121__a', [10,20,30]),
('FeatureUnion__feat2__f122__a', [10,20,30]),
('PCA__n_components', [3,5,7]),
('Normalizer__norm', ['l2']),
('SVC__C', [1.,10.,100.,1000.]),
('SVC__kernel', ['linear']),
]
    # Set assert_n_calls_equal to False, since custom counting of function calls would need to be implemented in order to verify the call counts.
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')
def test_pipeline_grid_search12():
# Test that _DFSGridSearchCVPipeline gives the same selected parameters as the normal GridSearchCV
parts = [
PCA(),
Normalizer(),
SVC()
]
cv_params = [
('PCA__n_components', [3,5,7]),
('Normalizer__norm', ['l1','l2']),
('SVC__C', [1.,10.,100.,1000.]),
('SVC__kernel', ['linear']),
]
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')
def test_pipeline_grid_search13():
# Test that _DFSGridSearchCVPipeline gives the same selected parameters as the normal GridSearchCV
parts = [
SVC()
]
cv_params = [
('SVC__C', [1.,10.,100.,1000.]),
('SVC__kernel', ['linear']),
]
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')
def test_pipeline_grid_search14():
# Test that _DFSGridSearchCVPipeline gives the same selected parameters as the normal GridSearchCV
parts = [
PCA(),
Normalizer(),
SVC()
]
cv_params = [
('PCA__n_components', [3,5]),
('Normalizer__norm', ['l2']),
('SVC__C', [1.,10.]),
('SVC__kernel', ['linear']),
]
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')
def test_pipeline_grid_search15():
# Test if _CacheGridSearchCVPipeline works with submerged pipelines.
parts = [
create_mock_estimator("f0",[("p1",0)]),
FeatureUnion([
('feat1', Pipeline([
('f11', create_mock_estimator("f11", [("p1",0)])),
('f12', create_mock_estimator("f12", [("p1",0)])),
])),
('feat2', Pipeline([
('f21', create_mock_estimator("f21", [("p1",0)])),
('f22', create_mock_estimator("f22", [("p1",0)])),
])),
]),
PCA(),
Normalizer(),
SVC(),
]
cv_params = [
('f0__p1', [10,20]),
('FeatureUnion__feat1__f11__p1', [30,40]),
('FeatureUnion__feat1__f12__p1', [50,60]),
('FeatureUnion__feat2__f21__p1', [100,200,300]),
('FeatureUnion__feat2__f22__p1', [400,500,600]),
('PCA__n_components', [3,5]),
('Normalizer__norm', ['l2']),
('SVC__C', [1.,10.]),
('SVC__kernel', ['linear']),
]
    # Set assert_n_calls_equal to False, since custom counting of function calls would need to be implemented in order to verify the call counts.
perform_pipeline_case(parts, cv_params, assert_n_calls_equal=False, mode='file', cachedir='file_cache', datasetname='make_class')
def perform_pipeline_case(parts, cv_params, assert_n_calls_equal=True, **pipelinegridsearchcv_kwargs):
# tests a particular pipe and cv_params combination
pipe = Pipeline([ (p.__class__.__name__, p) for p in parts ])
print(pipe)
X, y = make_classification(n_samples=100, n_features=20)
n_folds = 5
n_jobs = 1
verbose = 1
random_seed = 0
# mock.MagicMock cannot be used since GridSearchCV resets each estimator using
# clone() before each call to fit.
# So, let's use global variables instead that we increment in our mock
# estimators.
global n_transform_calls, n_fit_calls
# Start PipelineGridSearchCV test here
n_transform_calls = 0
n_fit_calls = 0
ideal_cv_time = time.time()
model = PipelineGridSearchCV(pipe, dict(cv_params), cv=StratifiedKFold(y, n_folds, random_state=random_seed), verbose=verbose, n_jobs=n_jobs, **pipelinegridsearchcv_kwargs)
model.fit(X,y)
ideal_cv_time = time.time() - ideal_cv_time
print("model.best_estimator_: {}".format(model.best_estimator_))
print("Counts (PipelineGridSearchCV)")
print("n_fit_calls:",n_fit_calls)
print("n_transform_calls:",n_transform_calls)
print("time to do grid search:",ideal_cv_time)
n_ideal_fit_calls = calc_n_ideal_fit_calls(parts,cv_params,n_folds)
n_ideal_transform_calls = calc_n_ideal_transform_calls(parts,cv_params,n_folds)
if assert_n_calls_equal:
# Make sure that PipelineGridSearchCV only called fit the optimal number of times.
assert_equal(n_fit_calls, n_ideal_fit_calls)
assert_equal(n_transform_calls, n_ideal_transform_calls)
# Start GridSearchCV test here
n_transform_calls = 0
n_fit_calls = 0
naive_cv_time = time.time()
model_naive = GridSearchCV(pipe, dict(cv_params), cv=StratifiedKFold(y, n_folds, random_state=random_seed), verbose=verbose, n_jobs=n_jobs)
model_naive.fit(X,y)
naive_cv_time = time.time() - naive_cv_time
print("Counts (GridSearchCV)")
print("n_fit_calls:",n_fit_calls)
print("n_transform_calls:",n_transform_calls)
print("time to do grid search:",naive_cv_time)
n_param_combs = np.prod(map(lambda x: len(x[1]), cv_params))
n_naive_fit_calls = n_param_combs * len(parts) * n_folds + len(parts)
n_naive_transform_calls = n_param_combs * (len(parts)-1) * n_folds * 2 + (len(parts)-1) # The 2 is for running on both the train and dev. set
if assert_n_calls_equal:
assert_equal(n_fit_calls, n_naive_fit_calls)
assert_equal(n_transform_calls, n_naive_transform_calls)
# Make sure that PipelineGridSearchCV and GridSearchCV return the same result.
print("[pipeline_grid_search] best_params_:",model.best_params_)
print("[pipeline_grid_search] best_score_:",model.best_score_)
print("[naive_grid_search] best_params_:",model_naive.best_params_)
print("[naive_grid_search] best_score_:",model_naive.best_score_)
assert_equal(model_naive.best_score_, model.best_score_)
    # Note that for equal mean_validation_score, the best params of GridSearchCV will depend
    # on the order in which they are presented to the classifier, so this test sometimes fails
    # even though PipelineGridSearchCV behaves correctly.
assert_equal(model_naive.best_params_, model.best_params_)
| {
"content_hash": "f506edad0c8435d84382c80f8a6b272c",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 176,
"avg_line_length": 37.17625899280576,
"alnum_prop": 0.5707789066279633,
"repo_name": "tkerola/pipeline_grid_search",
"id": "08f99bf099b509bb037360e36725eb363dadd17f",
"size": "20670",
"binary": false,
"copies": "1",
"ref": "refs/heads/dfs_based",
"path": "tests/test_pipeline_grid_search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "66513"
}
],
"symlink_target": ""
} |
"""
async requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
"""
import logging
__title__ = 'requests-futures'
__version__ = '0.9.7'
__build__ = 0x000000
__author__ = 'Ross McFarland'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2013 Ross McFarland'
# Set default logging handler to avoid "No handler found" warnings.
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
| {
"content_hash": "1fd0f36f4dec862f543a97fe32f537b5",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 21.8,
"alnum_prop": 0.6422018348623854,
"repo_name": "eenchev/idea-note-taking-app",
"id": "9ac9cd31585cf5bded7298b92a726cbed1572432",
"size": "590",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/requests_futures/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "460"
},
{
"name": "Python",
"bytes": "1214"
}
],
"symlink_target": ""
} |
import time, parallel, threading, Tkinter
from optparse import OptionParser
class parallelBuffer(threading.Thread):
def __init__(self, pwmHertz = 60.0):
"""
        Initialization.
@param pwmHertz Optional parameter to set PWM frequency.
"""
threading.Thread.__init__(self, name='ParallelBufferThread')
self.pwmTotalCycleTime = 1.0 / pwmHertz
self.daemon = True
self.running = False
self.onTime = self.pwmTotalCycleTime
self.offTime = 0
def run(self):
"""
Starts the buffer.
"""
self.running = True
while(self.running):
p.setData(self.dataOn)
time.sleep(self.onTime)
p.setData(self.dataOff)
time.sleep(self.offTime)
def setDataOn(self, data):
"""
        Sets the data to be set when PWM is cycled on.
"""
self.dataOn = data
def setDataOff(self, data):
"""
Sets the data to be set when PMW is cycled off.
"""
self.dataOff = data
def setPWMLevel(self, data):
"""
        Sets the PWM duty cycle as a percentage.
"""
self.onTime = self.pwmTotalCycleTime * float(data)/100.0
self.offTime = self.pwmTotalCycleTime - self.onTime
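    # Example (illustrative): with the default pwmHertz of 60.0 the total cycle
    # is ~16.7 ms, so setPWMLevel(25) yields roughly 4.2 ms on and 12.5 ms off
    # per cycle.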
def stop(self):
"""
Stops the buffer.
"""
self.running = False
class newLightTimer(threading.Thread):
def __init__(self, Slider, portCode=1):
threading.Thread.__init__(self, name='LightingThread')
self.portCode=portCode
self.slider=Slider
self.daemon = True
def internalRunLights(self):
level=self.slider.getLevel()
totalCycleTime=1.0/80.0
onTime=totalCycleTime*(float(level)/100.0)
offTime=totalCycleTime-onTime
while(self.running):
if(level!=self.slider.getLevel()):
level=self.slider.getLevel()
onTime=totalCycleTime*(float(level)/100.0)
offTime=totalCycleTime-onTime
p.setData(self.portCode)
time.sleep(onTime)
p.setData(0)
time.sleep(offTime)
def run(self):
self.running=True
self.internalRunLights()
def stop(self):
self.running=False
class newLightSlider(object):
def __init__(self, TkWindow, callback, startValue=0, title=None):
self.levelVar=Tkinter.IntVar(value=startValue)
scale=Tkinter.Scale(TkWindow, command = callback, variable = self.levelVar, label=title, from_=100, to=0)
scale.pack(side=Tkinter.RIGHT)
def getLevel(self):
try:
return self.levelVar.get()
except:
return 0
def GUItest():
# Init.
p.setData(000)
pB = parallelBuffer(80.0)
# Start with relay off.
pB.setDataOn(002)
pB.setDataOff(000)
# GUI Init.
window = Tkinter.Tk()
window.title("LED")
relayStatus = Tkinter.IntVar()
def checkRelayStatus():
if not relayStatus.get():
pB.setDataOn(002)
pB.setDataOff(000)
elif relayStatus.get():
pB.setDataOn(003)
pB.setDataOff(001)
radio = Tkinter.Checkbutton(window, variable = relayStatus, command = checkRelayStatus, text = "Relay")
radio.pack(side = Tkinter.TOP)
slide1 = Tkinter.Scale(window, command = pB.setPWMLevel, label = "Lights", from_ = 100, to = 0)
slide1.pack(side = Tkinter.TOP)
headColor = '#3C3B37'
window.configure(background=headColor)
radio.configure(background=headColor, highlightbackground=headColor)
slide1.configure(background=headColor, highlightbackground=headColor)
#timer1 = newLightTimer(slide1, 2)
#slide2 = newLightSlider(window, 100, 'Light 2')
#timer2 = newLightTimer(slide1, 2)
#timer1.start()
#timer2.start()
# Start buffer then GUI.
pB.start()
window.mainloop()
window.quit()
return
def verbtoseTest():
print('10%')
runLights(5, 10, 001)
print('20%')
runLights(5, 20, 001)
print('30%')
runLights(5, 30, 001)
print('40%')
runLights(5, 40, 001)
print('50%')
runLights(5, 50, 001)
print('60%')
runLights(5, 60, 001)
print('70%')
runLights(5, 70, 001)
print('80%')
runLights(5, 80, 001)
print('90%')
runLights(5, 90, 001)
print('100%')
runLights(5, 100, 001)
print('Finished')
return
def quickTest(var):
print('Started')
l=1
while(l<=var):
runLights(0.1, l, 001)
l+=1
    print('Finished')
return
def runLights(runningTime, powerPercent, portCode):
"""
Runs lights at specified power (%) for about specified time (seconds). Requires parallel module and time module.
"""
totalCycleTime = 1.0 / 80.0
onTime = totalCycleTime * (float(powerPercent) / 100.0)
offTime = totalCycleTime - onTime
iterations = int(runningTime * 80)
i=0
while(i < iterations):
p.setData(portCode)
time.sleep(onTime)
i += 1
p.setData(0)
time.sleep(offTime)
return
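# Example call (illustrative): runLights(2, 50, 001) drives port pattern 001 at
# ~50% duty cycle for roughly two seconds (160 on/off cycles at 80 Hz).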
def userInterface():
    choice = raw_input("[V]erbose test, [Q]uick test, G[U]I, or [E]xit: ")
if choice == 'V' or choice == 'v':
verbtoseTest()
return 'continue'
elif choice == 'Q' or choice == 'q':
quickTest(100)
return 'continue'
elif choice == 'U' or choice == 'u':
GUItest()
return 'continue'
else:
return 'exit'
print('Welcome to parallel control tester!')
p = parallel.Parallel()
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-u', '--gui', help='Open GUI',
action='store_const', dest='gui',
const=True, default=False)
options, args = optp.parse_args()
if (options.gui):
GUItest();
else:
while(True):
if(userInterface() == 'exit'):
p.setData(000)
break
exit()
| {
"content_hash": "dd663cbcb67ffc2d3864613ece6aed15",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 114,
"avg_line_length": 27.77948717948718,
"alnum_prop": 0.6479601255307366,
"repo_name": "JoshuaJB/pyParallel-PWM",
"id": "697e21924c67c522c725b3dc52d7dd7a67274786",
"size": "5435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parallelcrl.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5435"
}
],
"symlink_target": ""
} |
def is_preprocessed_formdata(valuelist):
if len(valuelist) != 1:
return False
value = valuelist[0]
return isinstance(value, (dict, list))
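# Examples (added for clarity, not part of the original module):
#   is_preprocessed_formdata([{'key': 'value'}]) -> True
#   is_preprocessed_formdata(['raw string']) -> False (value is not a dict/list)
#   is_preprocessed_formdata(['a', 'b']) -> False (more than one value)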
| {
"content_hash": "58e4a826b9f77fecab5b8ee614b8d759",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 42,
"avg_line_length": 31.6,
"alnum_prop": 0.6582278481012658,
"repo_name": "ThiefMaster/indico",
"id": "487ac47216c8a71c020282c58ffdaf187dcf5909",
"size": "372",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "indico/web/forms/fields/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34704"
},
{
"name": "HTML",
"bytes": "1411006"
},
{
"name": "JavaScript",
"bytes": "2083786"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5133951"
},
{
"name": "SCSS",
"bytes": "476568"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23327"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
"""This example gets all custom targeting keys and the values. To create custom
targeting keys and values, run create_custom_targeting_keys_and_values.py."""
__author__ = '[email protected] (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201208')
filter_statement = {'query': 'LIMIT 500'}
# Get custom targeting keys by statement.
response = custom_targeting_service.GetCustomTargetingKeysByStatement(
filter_statement)[0]
keys = []
if 'results' in response:
keys = response['results']
# Display results.
if keys:
key_ids = [key['id'] for key in keys]
filter_statement = {'query': ('WHERE customTargetingKeyId IN (%s)'
% ', '.join(key_ids))}
# Get custom targeting values by statement.
response = custom_targeting_service.GetCustomTargetingValuesByStatement(
filter_statement)[0]
values = []
if 'results' in response:
values = response['results']
# Create map of custom targeting key id to custom targeting values.
key_value_map = {}
for key in keys:
for value in values:
if key['id'] == value['customTargetingKeyId']:
if key['id'] not in key_value_map.keys():
key_value_map[key['id']] = []
key_value_map[key['id']].append(value)
break
# Display results.
for key in keys:
print ('Custom targeting key with id \'%s\', name \'%s\', display name '
'\'%s\', and type \'%s\' was found.'
%(key['id'], key['name'], key['displayName'], key['type']))
if key['id'] in key_value_map.keys():
for value in key_value_map[key['id']]:
print ('\tCustom targeting value with id \'%s\', name \'%s\', and '
'display name \'%s\' was found.'
% (value['id'], value['name'], value['displayName']))
else:
print 'No keys were found.'
| {
"content_hash": "41d65748b71e4d7223c8be7db40986ee",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 34.53030303030303,
"alnum_prop": 0.6375603334795963,
"repo_name": "caioserra/apiAdwords",
"id": "dccaed6457e2771b5545ddbc81c6e60b6258adf4",
"size": "2897",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201208/get_all_custom_targeting_keys_and_values.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "47375"
},
{
"name": "Python",
"bytes": "3481410"
},
{
"name": "Shell",
"bytes": "14782"
}
],
"symlink_target": ""
} |
import unittest
import tempfile
import os
import shutil
from arena.local_arena import LocalIOArena
from arena.hand_log import HandLog
from pokeher.actions import GameAction
class PyArenaTest(unittest.TestCase):
def test_arena_methods(self):
"""Make sure the arena can do everything it needs to"""
arena = LocalIOArena()
self.assertTrue(arena.run)
self.assertTrue(arena.load_bot)
self.assertTrue(arena.bot_count)
self.assertTrue(arena.play_match)
def test_load_bots(self):
"""See if we can load a bot"""
with LocalIOArena() as arena:
arena.load_bot("pokeher/theaigame_bot.py")
self.assertEqual(arena.bot_count(), 1)
stacks = arena.bot_stacks()
self.assertEqual(stacks['bot_0'], 1000)
self.assertEqual(stacks.keys(), ['bot_0'])
def test_load_bad_filename(self):
"""Don't want load_bot exploding on us"""
arena = LocalIOArena()
arena.load_bot("asdlfj23u90dj")
self.assertTrue(arena)
self.assertEqual(arena.bot_count(), 0)
def test_pot_splitting(self):
arena = LocalIOArena()
winnings = arena.split_pot(pot=16, num_winners=2)
self.assertEqual(len(winnings), 2)
for prize in winnings:
self.assertEqual(prize, 8)
def test_uneven_pot_splitting(self):
arena = LocalIOArena()
winnings = arena.split_pot(pot=15, num_winners=2)
self.assertEqual(len(winnings), 2)
self.assertIn(7, winnings)
self.assertIn(8, winnings)
def test_hand_log_writing(self):
arena = LocalIOArena()
arena.key = "fake-uuid-woo-boom"
temp = tempfile.mkdtemp()
arena.output_directory = temp
arena.current_round = 0
log = HandLog({})
log.unix_epoch_s = lambda: 10
log.action("bot_1", GameAction(GameAction.FOLD))
arena.write_hand_log(log)
written_file = os.path.join(temp, arena.key, "hand_0.json")
written_handle = open(written_file, 'r')
contents = written_handle.read()
self.assertEquals(contents, '{"initial_stacks": {}, "actions": [{"player": "bot_1", "data": 0, "event": "Fold", "ts": 10}]}')
shutil.rmtree(temp)
| {
"content_hash": "0099edcae5a3a19ecb5cb843f38863b1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 133,
"avg_line_length": 33.92537313432836,
"alnum_prop": 0.6159260888693356,
"repo_name": "gnmerritt/poker",
"id": "e9c6308bc1ce08ad03912fb1895c18c098ed2594",
"size": "2273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_arena.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "668"
},
{
"name": "Python",
"bytes": "174467"
},
{
"name": "Shell",
"bytes": "113"
}
],
"symlink_target": ""
} |
import BaseHTTPServer
import bisect
import cgi
import cmd
import codecs
import ctypes
import datetime
import disasm
import mmap
import optparse
import os
import re
import sys
import types
import urllib
import urlparse
import v8heapconst
import webbrowser
PORT_NUMBER = 8081
USAGE="""usage: %prog [OPTIONS] [DUMP-FILE]
Minidump analyzer.
Shows the processor state at the point of exception including the
stack of the active thread and the referenced objects in the V8
heap. Code objects are disassembled and the addresses linked from the
stack (e.g. pushed return addresses) are marked with "=>".
Examples:
$ %prog 12345678-1234-1234-1234-123456789abcd-full.dmp"""
DEBUG=False
def DebugPrint(s):
if not DEBUG: return
print s
class Descriptor(object):
"""Descriptor of a structure in a memory."""
def __init__(self, fields):
self.fields = fields
self.is_flexible = False
for _, type_or_func in fields:
if isinstance(type_or_func, types.FunctionType):
self.is_flexible = True
break
if not self.is_flexible:
self.ctype = Descriptor._GetCtype(fields)
self.size = ctypes.sizeof(self.ctype)
def Read(self, memory, offset):
if self.is_flexible:
fields_copy = self.fields[:]
last = 0
for name, type_or_func in fields_copy:
if isinstance(type_or_func, types.FunctionType):
partial_ctype = Descriptor._GetCtype(fields_copy[:last])
partial_object = partial_ctype.from_buffer(memory, offset)
type = type_or_func(partial_object)
if type is not None:
fields_copy[last] = (name, type)
last += 1
else:
last += 1
complete_ctype = Descriptor._GetCtype(fields_copy[:last])
else:
complete_ctype = self.ctype
return complete_ctype.from_buffer(memory, offset)
@staticmethod
def _GetCtype(fields):
class Raw(ctypes.Structure):
_fields_ = fields
_pack_ = 1
def __str__(self):
return "{" + ", ".join("%s: %s" % (field, self.__getattribute__(field))
for field, _ in Raw._fields_) + "}"
return Raw
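# Note (added for clarity): fields whose type is given as a function make the
# descriptor "flexible" -- the function receives the partially parsed struct and
# returns the concrete ctype, as in MINIDUMP_STRING below, where the buffer
# length depends on the already-read "length" field.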
def FullDump(reader, heap):
"""Dump all available memory regions."""
def dump_region(reader, start, size, location):
print
while start & 3 != 0:
start += 1
size -= 1
location += 1
is_executable = reader.IsProbableExecutableRegion(location, size)
is_ascii = reader.IsProbableASCIIRegion(location, size)
if is_executable is not False:
lines = reader.GetDisasmLines(start, size)
for line in lines:
print FormatDisasmLine(start, heap, line)
print
if is_ascii is not False:
# Output in the same format as the Unix hd command
addr = start
for slot in xrange(location, location + size, 16):
hex_line = ""
asc_line = ""
for i in xrange(0, 16):
if slot + i < location + size:
byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value
if byte >= 0x20 and byte < 0x7f:
asc_line += chr(byte)
else:
asc_line += "."
hex_line += " %02x" % (byte)
else:
hex_line += " "
if i == 7:
hex_line += " "
print "%s %s |%s|" % (reader.FormatIntPtr(addr),
hex_line,
asc_line)
addr += 16
if is_executable is not True and is_ascii is not True:
print "%s - %s" % (reader.FormatIntPtr(start),
reader.FormatIntPtr(start + size))
for slot in xrange(start,
start + size,
reader.PointerSize()):
maybe_address = reader.ReadUIntPtr(slot)
heap_object = heap.FindObject(maybe_address)
print "%s: %s" % (reader.FormatIntPtr(slot),
reader.FormatIntPtr(maybe_address))
if heap_object:
heap_object.Print(Printer())
print
reader.ForEachMemoryRegion(dump_region)
# Heap constants generated by 'make grokdump' in v8heapconst module.
INSTANCE_TYPES = v8heapconst.INSTANCE_TYPES
KNOWN_MAPS = v8heapconst.KNOWN_MAPS
KNOWN_OBJECTS = v8heapconst.KNOWN_OBJECTS
# Set of structures and constants that describe the layout of minidump
# files. Based on MSDN and Google Breakpad.
MINIDUMP_HEADER = Descriptor([
("signature", ctypes.c_uint32),
("version", ctypes.c_uint32),
("stream_count", ctypes.c_uint32),
("stream_directories_rva", ctypes.c_uint32),
("checksum", ctypes.c_uint32),
("time_date_stampt", ctypes.c_uint32),
("flags", ctypes.c_uint64)
])
MINIDUMP_LOCATION_DESCRIPTOR = Descriptor([
("data_size", ctypes.c_uint32),
("rva", ctypes.c_uint32)
])
MINIDUMP_STRING = Descriptor([
("length", ctypes.c_uint32),
("buffer", lambda t: ctypes.c_uint8 * (t.length + 2))
])
MINIDUMP_DIRECTORY = Descriptor([
("stream_type", ctypes.c_uint32),
("location", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
])
MD_EXCEPTION_MAXIMUM_PARAMETERS = 15
MINIDUMP_EXCEPTION = Descriptor([
("code", ctypes.c_uint32),
("flags", ctypes.c_uint32),
("record", ctypes.c_uint64),
("address", ctypes.c_uint64),
("parameter_count", ctypes.c_uint32),
("unused_alignment", ctypes.c_uint32),
("information", ctypes.c_uint64 * MD_EXCEPTION_MAXIMUM_PARAMETERS)
])
MINIDUMP_EXCEPTION_STREAM = Descriptor([
("thread_id", ctypes.c_uint32),
("unused_alignment", ctypes.c_uint32),
("exception", MINIDUMP_EXCEPTION.ctype),
("thread_context", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
])
# Stream types.
MD_UNUSED_STREAM = 0
MD_RESERVED_STREAM_0 = 1
MD_RESERVED_STREAM_1 = 2
MD_THREAD_LIST_STREAM = 3
MD_MODULE_LIST_STREAM = 4
MD_MEMORY_LIST_STREAM = 5
MD_EXCEPTION_STREAM = 6
MD_SYSTEM_INFO_STREAM = 7
MD_THREAD_EX_LIST_STREAM = 8
MD_MEMORY_64_LIST_STREAM = 9
MD_COMMENT_STREAM_A = 10
MD_COMMENT_STREAM_W = 11
MD_HANDLE_DATA_STREAM = 12
MD_FUNCTION_TABLE_STREAM = 13
MD_UNLOADED_MODULE_LIST_STREAM = 14
MD_MISC_INFO_STREAM = 15
MD_MEMORY_INFO_LIST_STREAM = 16
MD_THREAD_INFO_LIST_STREAM = 17
MD_HANDLE_OPERATION_LIST_STREAM = 18
MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE = 80
MINIDUMP_FLOATING_SAVE_AREA_X86 = Descriptor([
("control_word", ctypes.c_uint32),
("status_word", ctypes.c_uint32),
("tag_word", ctypes.c_uint32),
("error_offset", ctypes.c_uint32),
("error_selector", ctypes.c_uint32),
("data_offset", ctypes.c_uint32),
("data_selector", ctypes.c_uint32),
("register_area", ctypes.c_uint8 * MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE),
("cr0_npx_state", ctypes.c_uint32)
])
MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE = 512
# Context flags.
MD_CONTEXT_X86 = 0x00010000
MD_CONTEXT_X86_CONTROL = (MD_CONTEXT_X86 | 0x00000001)
MD_CONTEXT_X86_INTEGER = (MD_CONTEXT_X86 | 0x00000002)
MD_CONTEXT_X86_SEGMENTS = (MD_CONTEXT_X86 | 0x00000004)
MD_CONTEXT_X86_FLOATING_POINT = (MD_CONTEXT_X86 | 0x00000008)
MD_CONTEXT_X86_DEBUG_REGISTERS = (MD_CONTEXT_X86 | 0x00000010)
MD_CONTEXT_X86_EXTENDED_REGISTERS = (MD_CONTEXT_X86 | 0x00000020)
def EnableOnFlag(type, flag):
return lambda o: [None, type][int((o.context_flags & flag) != 0)]
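# Note (added for clarity): EnableOnFlag wraps a field type so that it is only
# materialized when the corresponding bit is set in context_flags; otherwise the
# lambda returns None and Descriptor.Read leaves that field out of the parsed
# struct, which is how optional register blocks are skipped.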
MINIDUMP_CONTEXT_X86 = Descriptor([
("context_flags", ctypes.c_uint32),
# MD_CONTEXT_X86_DEBUG_REGISTERS.
("dr0", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
("dr1", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
("dr2", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
("dr3", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
("dr6", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
("dr7", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
# MD_CONTEXT_X86_FLOATING_POINT.
("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_X86.ctype,
MD_CONTEXT_X86_FLOATING_POINT)),
# MD_CONTEXT_X86_SEGMENTS.
("gs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
("fs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
("es", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
("ds", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
# MD_CONTEXT_X86_INTEGER.
("edi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
("esi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
("ebx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
("edx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
("ecx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
("eax", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
# MD_CONTEXT_X86_CONTROL.
("ebp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
("eip", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
("cs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
("eflags", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
("esp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
("ss", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
# MD_CONTEXT_X86_EXTENDED_REGISTERS.
("extended_registers",
EnableOnFlag(ctypes.c_uint8 * MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE,
MD_CONTEXT_X86_EXTENDED_REGISTERS))
])
MD_CONTEXT_ARM = 0x40000000
MD_CONTEXT_ARM_INTEGER = (MD_CONTEXT_ARM | 0x00000002)
MD_CONTEXT_ARM_FLOATING_POINT = (MD_CONTEXT_ARM | 0x00000004)
MD_FLOATINGSAVEAREA_ARM_FPR_COUNT = 32
MD_FLOATINGSAVEAREA_ARM_FPEXTRA_COUNT = 8
MINIDUMP_FLOATING_SAVE_AREA_ARM = Descriptor([
("fpscr", ctypes.c_uint64),
("regs", ctypes.c_uint64 * MD_FLOATINGSAVEAREA_ARM_FPR_COUNT),
("extra", ctypes.c_uint64 * MD_FLOATINGSAVEAREA_ARM_FPEXTRA_COUNT)
])
MINIDUMP_CONTEXT_ARM = Descriptor([
("context_flags", ctypes.c_uint32),
# MD_CONTEXT_ARM_INTEGER.
("r0", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r1", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r2", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r3", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r4", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r5", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r6", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r7", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r8", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r9", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r10", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r11", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("r12", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("sp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("lr", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("pc", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
("cpsr", ctypes.c_uint32),
("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_ARM.ctype,
MD_CONTEXT_ARM_FLOATING_POINT))
])
MD_CONTEXT_AMD64 = 0x00100000
MD_CONTEXT_AMD64_CONTROL = (MD_CONTEXT_AMD64 | 0x00000001)
MD_CONTEXT_AMD64_INTEGER = (MD_CONTEXT_AMD64 | 0x00000002)
MD_CONTEXT_AMD64_SEGMENTS = (MD_CONTEXT_AMD64 | 0x00000004)
MD_CONTEXT_AMD64_FLOATING_POINT = (MD_CONTEXT_AMD64 | 0x00000008)
MD_CONTEXT_AMD64_DEBUG_REGISTERS = (MD_CONTEXT_AMD64 | 0x00000010)
MINIDUMP_CONTEXT_AMD64 = Descriptor([
("p1_home", ctypes.c_uint64),
("p2_home", ctypes.c_uint64),
("p3_home", ctypes.c_uint64),
("p4_home", ctypes.c_uint64),
("p5_home", ctypes.c_uint64),
("p6_home", ctypes.c_uint64),
("context_flags", ctypes.c_uint32),
("mx_csr", ctypes.c_uint32),
# MD_CONTEXT_AMD64_CONTROL.
("cs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_CONTROL)),
# MD_CONTEXT_AMD64_SEGMENTS
("ds", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
("es", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
("fs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
("gs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
# MD_CONTEXT_AMD64_CONTROL.
("ss", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_CONTROL)),
("eflags", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_AMD64_CONTROL)),
# MD_CONTEXT_AMD64_DEBUG_REGISTERS.
("dr0", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("dr1", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("dr2", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("dr3", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("dr6", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("dr7", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
# MD_CONTEXT_AMD64_INTEGER.
("rax", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("rcx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("rdx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("rbx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
# MD_CONTEXT_AMD64_CONTROL.
("rsp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_CONTROL)),
# MD_CONTEXT_AMD64_INTEGER.
("rbp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("rsi", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("rdi", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("r8", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("r9", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("r10", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("r11", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("r12", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("r13", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("r14", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
("r15", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
# MD_CONTEXT_AMD64_CONTROL.
("rip", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_CONTROL)),
# MD_CONTEXT_AMD64_FLOATING_POINT
("sse_registers", EnableOnFlag(ctypes.c_uint8 * (16 * 26),
MD_CONTEXT_AMD64_FLOATING_POINT)),
("vector_registers", EnableOnFlag(ctypes.c_uint8 * (16 * 26),
MD_CONTEXT_AMD64_FLOATING_POINT)),
("vector_control", EnableOnFlag(ctypes.c_uint64,
MD_CONTEXT_AMD64_FLOATING_POINT)),
# MD_CONTEXT_AMD64_DEBUG_REGISTERS.
("debug_control", EnableOnFlag(ctypes.c_uint64,
MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("last_branch_to_rip", EnableOnFlag(ctypes.c_uint64,
MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("last_branch_from_rip", EnableOnFlag(ctypes.c_uint64,
MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("last_exception_to_rip", EnableOnFlag(ctypes.c_uint64,
MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
("last_exception_from_rip", EnableOnFlag(ctypes.c_uint64,
MD_CONTEXT_AMD64_DEBUG_REGISTERS))
])
MINIDUMP_MEMORY_DESCRIPTOR = Descriptor([
("start", ctypes.c_uint64),
("memory", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
])
MINIDUMP_MEMORY_DESCRIPTOR64 = Descriptor([
("start", ctypes.c_uint64),
("size", ctypes.c_uint64)
])
MINIDUMP_MEMORY_LIST = Descriptor([
("range_count", ctypes.c_uint32),
("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
])
MINIDUMP_MEMORY_LIST_Mac = Descriptor([
("range_count", ctypes.c_uint32),
("junk", ctypes.c_uint32),
("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
])
MINIDUMP_MEMORY_LIST64 = Descriptor([
("range_count", ctypes.c_uint64),
("base_rva", ctypes.c_uint64),
("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR64.ctype * m.range_count)
])
MINIDUMP_THREAD = Descriptor([
("id", ctypes.c_uint32),
("suspend_count", ctypes.c_uint32),
("priority_class", ctypes.c_uint32),
("priority", ctypes.c_uint32),
("ted", ctypes.c_uint64),
("stack", MINIDUMP_MEMORY_DESCRIPTOR.ctype),
("context", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
])
MINIDUMP_THREAD_LIST = Descriptor([
("thread_count", ctypes.c_uint32),
("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
])
MINIDUMP_THREAD_LIST_Mac = Descriptor([
("thread_count", ctypes.c_uint32),
("junk", ctypes.c_uint32),
("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
])
MINIDUMP_VS_FIXEDFILEINFO = Descriptor([
("dwSignature", ctypes.c_uint32),
("dwStrucVersion", ctypes.c_uint32),
("dwFileVersionMS", ctypes.c_uint32),
("dwFileVersionLS", ctypes.c_uint32),
("dwProductVersionMS", ctypes.c_uint32),
("dwProductVersionLS", ctypes.c_uint32),
("dwFileFlagsMask", ctypes.c_uint32),
("dwFileFlags", ctypes.c_uint32),
("dwFileOS", ctypes.c_uint32),
("dwFileType", ctypes.c_uint32),
("dwFileSubtype", ctypes.c_uint32),
("dwFileDateMS", ctypes.c_uint32),
("dwFileDateLS", ctypes.c_uint32)
])
MINIDUMP_RAW_MODULE = Descriptor([
("base_of_image", ctypes.c_uint64),
("size_of_image", ctypes.c_uint32),
("checksum", ctypes.c_uint32),
("time_date_stamp", ctypes.c_uint32),
("module_name_rva", ctypes.c_uint32),
("version_info", MINIDUMP_VS_FIXEDFILEINFO.ctype),
("cv_record", MINIDUMP_LOCATION_DESCRIPTOR.ctype),
("misc_record", MINIDUMP_LOCATION_DESCRIPTOR.ctype),
("reserved0", ctypes.c_uint32 * 2),
("reserved1", ctypes.c_uint32 * 2)
])
MINIDUMP_MODULE_LIST = Descriptor([
("number_of_modules", ctypes.c_uint32),
("modules", lambda t: MINIDUMP_RAW_MODULE.ctype * t.number_of_modules)
])
MINIDUMP_MODULE_LIST_Mac = Descriptor([
("number_of_modules", ctypes.c_uint32),
("junk", ctypes.c_uint32),
("modules", lambda t: MINIDUMP_RAW_MODULE.ctype * t.number_of_modules)
])
MINIDUMP_RAW_SYSTEM_INFO = Descriptor([
("processor_architecture", ctypes.c_uint16)
])
MD_CPU_ARCHITECTURE_X86 = 0
MD_CPU_ARCHITECTURE_ARM = 5
MD_CPU_ARCHITECTURE_AMD64 = 9
class FuncSymbol:
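  """A FUNC record loaded from a Breakpad symbol file, covering [start, start + size)."""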
def __init__(self, start, size, name):
self.start = start
self.end = self.start + size
self.name = name
def __cmp__(self, other):
if isinstance(other, FuncSymbol):
return self.start - other.start
return self.start - other
def Covers(self, addr):
return (self.start <= addr) and (addr < self.end)
class MinidumpReader(object):
"""Minidump (.dmp) reader."""
_HEADER_MAGIC = 0x504d444d
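  # _HEADER_MAGIC is "MDMP" interpreted as a little-endian 32-bit integer.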
def __init__(self, options, minidump_name):
self.minidump_name = minidump_name
self.minidump_file = open(minidump_name, "r")
self.minidump = mmap.mmap(self.minidump_file.fileno(), 0, mmap.MAP_PRIVATE)
self.header = MINIDUMP_HEADER.Read(self.minidump, 0)
if self.header.signature != MinidumpReader._HEADER_MAGIC:
print >>sys.stderr, "Warning: Unsupported minidump header magic!"
DebugPrint(self.header)
directories = []
offset = self.header.stream_directories_rva
for _ in xrange(self.header.stream_count):
directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset))
offset += MINIDUMP_DIRECTORY.size
self.arch = None
self.exception = None
self.exception_context = None
self.memory_list = None
self.memory_list64 = None
self.module_list = None
self.thread_map = {}
self.symdir = options.symdir
self.modules_with_symbols = []
self.symbols = []
# Find MDRawSystemInfo stream and determine arch.
for d in directories:
if d.stream_type == MD_SYSTEM_INFO_STREAM:
system_info = MINIDUMP_RAW_SYSTEM_INFO.Read(
self.minidump, d.location.rva)
self.arch = system_info.processor_architecture
assert self.arch in [MD_CPU_ARCHITECTURE_AMD64,
MD_CPU_ARCHITECTURE_ARM,
MD_CPU_ARCHITECTURE_X86]
    assert self.arch is not None
for d in directories:
DebugPrint(d)
if d.stream_type == MD_EXCEPTION_STREAM:
self.exception = MINIDUMP_EXCEPTION_STREAM.Read(
self.minidump, d.location.rva)
DebugPrint(self.exception)
if self.arch == MD_CPU_ARCHITECTURE_X86:
self.exception_context = MINIDUMP_CONTEXT_X86.Read(
self.minidump, self.exception.thread_context.rva)
elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
self.exception_context = MINIDUMP_CONTEXT_AMD64.Read(
self.minidump, self.exception.thread_context.rva)
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
self.exception_context = MINIDUMP_CONTEXT_ARM.Read(
self.minidump, self.exception.thread_context.rva)
DebugPrint(self.exception_context)
elif d.stream_type == MD_THREAD_LIST_STREAM:
thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva)
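        # Minidumps written on Mac OS X appear to have 4 extra bytes of
        # padding after the thread count; detect that layout by stream size.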
if ctypes.sizeof(thread_list) + 4 == d.location.data_size:
thread_list = MINIDUMP_THREAD_LIST_Mac.Read(
self.minidump, d.location.rva)
assert ctypes.sizeof(thread_list) == d.location.data_size
DebugPrint(thread_list)
for thread in thread_list.threads:
DebugPrint(thread)
self.thread_map[thread.id] = thread
elif d.stream_type == MD_MODULE_LIST_STREAM:
assert self.module_list is None
self.module_list = MINIDUMP_MODULE_LIST.Read(
self.minidump, d.location.rva)
if ctypes.sizeof(self.module_list) + 4 == d.location.data_size:
self.module_list = MINIDUMP_MODULE_LIST_Mac.Read(
self.minidump, d.location.rva)
assert ctypes.sizeof(self.module_list) == d.location.data_size
DebugPrint(self.module_list)
elif d.stream_type == MD_MEMORY_LIST_STREAM:
print >>sys.stderr, "Warning: This is not a full minidump!"
assert self.memory_list is None
self.memory_list = MINIDUMP_MEMORY_LIST.Read(
self.minidump, d.location.rva)
if ctypes.sizeof(self.memory_list) + 4 == d.location.data_size:
self.memory_list = MINIDUMP_MEMORY_LIST_Mac.Read(
self.minidump, d.location.rva)
assert ctypes.sizeof(self.memory_list) == d.location.data_size
DebugPrint(self.memory_list)
elif d.stream_type == MD_MEMORY_64_LIST_STREAM:
assert self.memory_list64 is None
self.memory_list64 = MINIDUMP_MEMORY_LIST64.Read(
self.minidump, d.location.rva)
assert ctypes.sizeof(self.memory_list64) == d.location.data_size
DebugPrint(self.memory_list64)
def IsValidAddress(self, address):
return self.FindLocation(address) is not None
def ReadU8(self, address):
location = self.FindLocation(address)
return ctypes.c_uint8.from_buffer(self.minidump, location).value
def ReadU32(self, address):
location = self.FindLocation(address)
return ctypes.c_uint32.from_buffer(self.minidump, location).value
def ReadU64(self, address):
location = self.FindLocation(address)
return ctypes.c_uint64.from_buffer(self.minidump, location).value
def ReadUIntPtr(self, address):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return self.ReadU64(address)
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return self.ReadU32(address)
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.ReadU32(address)
def ReadBytes(self, address, size):
location = self.FindLocation(address)
return self.minidump[location:location + size]
def _ReadWord(self, location):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return ctypes.c_uint64.from_buffer(self.minidump, location).value
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return ctypes.c_uint32.from_buffer(self.minidump, location).value
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return ctypes.c_uint32.from_buffer(self.minidump, location).value
def IsProbableASCIIRegion(self, location, length):
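    # Heuristic: classify the region by the ratio of printable ASCII bytes
    # to other bytes. Returns True (probably text), None (maybe) or False.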
ascii_bytes = 0
non_ascii_bytes = 0
for loc in xrange(location, location + length):
byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
if byte >= 0x7f:
non_ascii_bytes += 1
if byte < 0x20 and byte != 0:
non_ascii_bytes += 1
if byte < 0x7f and byte >= 0x20:
ascii_bytes += 1
if byte == 0xa: # newline
ascii_bytes += 1
if ascii_bytes * 10 <= length:
return False
if length > 0 and ascii_bytes > non_ascii_bytes * 7:
return True
if ascii_bytes > non_ascii_bytes * 3:
return None # Maybe
return False
def IsProbableExecutableRegion(self, location, length):
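    # Heuristic: count bytes that look like common ia32/x64 opcode bytes and
    # return True (probably code), None (maybe) or False based on their share.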
opcode_bytes = 0
sixty_four = self.arch == MD_CPU_ARCHITECTURE_AMD64
for loc in xrange(location, location + length):
byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
if (byte == 0x8b or # mov
byte == 0x89 or # mov reg-reg
(byte & 0xf0) == 0x50 or # push/pop
(sixty_four and (byte & 0xf0) == 0x40) or # rex prefix
byte == 0xc3 or # return
byte == 0x74 or # jeq
byte == 0x84 or # jeq far
byte == 0x75 or # jne
byte == 0x85 or # jne far
byte == 0xe8 or # call
byte == 0xe9 or # jmp far
byte == 0xeb): # jmp near
opcode_bytes += 1
opcode_percent = (opcode_bytes * 100) / length
threshold = 20
if opcode_percent > threshold + 2:
return True
if opcode_percent > threshold - 2:
return None # Maybe
return False
def FindRegion(self, addr):
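    # Returns [start, size] of the dumped memory region containing addr,
    # or None if the address is not covered by any region.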
answer = [-1, -1]
def is_in(reader, start, size, location):
if addr >= start and addr < start + size:
answer[0] = start
answer[1] = size
self.ForEachMemoryRegion(is_in)
if answer[0] == -1:
return None
return answer
def ForEachMemoryRegion(self, cb):
    if self.memory_list64 is not None:
      offset = 0
      for r in self.memory_list64.ranges:
location = self.memory_list64.base_rva + offset
cb(self, r.start, r.size, location)
offset += r.size
if self.memory_list is not None:
for r in self.memory_list.ranges:
cb(self, r.start, r.memory.data_size, r.memory.rva)
def FindWord(self, word, alignment=0):
def search_inside_region(reader, start, size, location):
location = (location + alignment) & ~alignment
for loc in xrange(location, location + size - self.PointerSize()):
if reader._ReadWord(loc) == word:
slot = start + (loc - location)
print "%s: %s" % (reader.FormatIntPtr(slot),
reader.FormatIntPtr(word))
self.ForEachMemoryRegion(search_inside_region)
def FindWordList(self, word):
aligned_res = []
unaligned_res = []
def search_inside_region(reader, start, size, location):
for loc in xrange(location, location + size - self.PointerSize()):
if reader._ReadWord(loc) == word:
slot = start + (loc - location)
if slot % self.PointerSize() == 0:
aligned_res.append(slot)
else:
unaligned_res.append(slot)
self.ForEachMemoryRegion(search_inside_region)
return (aligned_res, unaligned_res)
def FindLocation(self, address):
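    # Maps a virtual address in the crashed process to an offset within the
    # minidump file, or returns None if the address was not captured.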
offset = 0
if self.memory_list64 is not None:
for r in self.memory_list64.ranges:
if r.start <= address < r.start + r.size:
return self.memory_list64.base_rva + offset + address - r.start
offset += r.size
if self.memory_list is not None:
for r in self.memory_list.ranges:
if r.start <= address < r.start + r.memory.data_size:
return r.memory.rva + address - r.start
return None
def GetDisasmLines(self, address, size):
def CountUndefinedInstructions(lines):
pattern = "<UNDEFINED>"
return sum([line.count(pattern) for (ignore, line) in lines])
location = self.FindLocation(address)
if location is None: return []
arch = None
possible_objdump_flags = [""]
if self.arch == MD_CPU_ARCHITECTURE_X86:
arch = "ia32"
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
arch = "arm"
possible_objdump_flags = ["", "--disassembler-options=force-thumb"]
elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
arch = "x64"
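    # ARM code may be ARM or Thumb; disassemble with each objdump flag set and
    # keep the result with the fewest undefined instructions.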
results = [ disasm.GetDisasmLines(self.minidump_name,
location,
size,
arch,
False,
objdump_flags)
for objdump_flags in possible_objdump_flags ]
return min(results, key=CountUndefinedInstructions)
def Dispose(self):
self.minidump.close()
self.minidump_file.close()
def ExceptionIP(self):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return self.exception_context.rip
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return self.exception_context.pc
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.eip
def ExceptionSP(self):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return self.exception_context.rsp
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return self.exception_context.sp
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.esp
def ExceptionFP(self):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return self.exception_context.rbp
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return None
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return self.exception_context.ebp
def FormatIntPtr(self, value):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return "%016x" % value
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return "%08x" % value
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return "%08x" % value
def PointerSize(self):
if self.arch == MD_CPU_ARCHITECTURE_AMD64:
return 8
elif self.arch == MD_CPU_ARCHITECTURE_ARM:
return 4
elif self.arch == MD_CPU_ARCHITECTURE_X86:
return 4
def Register(self, name):
return self.exception_context.__getattribute__(name)
def ReadMinidumpString(self, rva):
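    # MINIDUMP_STRING holds a UTF-16 buffer; decode it and drop the trailing
    # terminator character.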
string = bytearray(MINIDUMP_STRING.Read(self.minidump, rva).buffer)
string = string.decode("utf16")
return string[0:len(string) - 1]
# Load FUNC records from a BreakPad symbol file
#
# http://code.google.com/p/google-breakpad/wiki/SymbolFiles
#
def _LoadSymbolsFrom(self, symfile, baseaddr):
print "Loading symbols from %s" % (symfile)
funcs = []
with open(symfile) as f:
for line in f:
result = re.match(
r"^FUNC ([a-f0-9]+) ([a-f0-9]+) ([a-f0-9]+) (.*)$", line)
if result is not None:
start = int(result.group(1), 16)
size = int(result.group(2), 16)
name = result.group(4).rstrip()
bisect.insort_left(self.symbols,
FuncSymbol(baseaddr + start, size, name))
print " ... done"
def TryLoadSymbolsFor(self, modulename, module):
try:
symfile = os.path.join(self.symdir,
modulename.replace('.', '_') + ".pdb.sym")
if os.path.isfile(symfile):
self._LoadSymbolsFrom(symfile, module.base_of_image)
self.modules_with_symbols.append(module)
except Exception as e:
print " ... failure (%s)" % (e)
# Returns true if address is covered by some module that has loaded symbols.
def _IsInModuleWithSymbols(self, addr):
for module in self.modules_with_symbols:
start = module.base_of_image
end = start + module.size_of_image
if (start <= addr) and (addr < end):
return True
return False
# Find symbol covering the given address and return its name in format
# <symbol name>+<offset from the start>
def FindSymbol(self, addr):
if not self._IsInModuleWithSymbols(addr):
return None
i = bisect.bisect_left(self.symbols, addr)
symbol = None
if (0 < i) and self.symbols[i - 1].Covers(addr):
symbol = self.symbols[i - 1]
elif (i < len(self.symbols)) and self.symbols[i].Covers(addr):
symbol = self.symbols[i]
else:
return None
diff = addr - symbol.start
return "%s+0x%x" % (symbol.name, diff)
class Printer(object):
"""Printer with indentation support."""
def __init__(self):
self.indent = 0
def Indent(self):
self.indent += 2
def Dedent(self):
self.indent -= 2
def Print(self, string):
print "%s%s" % (self._IndentString(), string)
def PrintLines(self, lines):
indent = self._IndentString()
print "\n".join("%s%s" % (indent, line) for line in lines)
def _IndentString(self):
return self.indent * " "
ADDRESS_RE = re.compile(r"0x[0-9a-fA-F]+")
def FormatDisasmLine(start, heap, line):
line_address = start + line[0]
stack_slot = heap.stack_map.get(line_address)
marker = " "
if stack_slot:
marker = "=>"
code = AnnotateAddresses(heap, line[1])
# Compute the actual call target which the disassembler is too stupid
# to figure out (it adds the call offset to the disassembly offset rather
# than the absolute instruction address).
if heap.reader.arch == MD_CPU_ARCHITECTURE_X86:
if code.startswith("e8"):
words = code.split()
if len(words) > 6 and words[5] == "call":
offset = int(words[4] + words[3] + words[2] + words[1], 16)
target = (line_address + offset + 5) & 0xFFFFFFFF
code = code.replace(words[6], "0x%08x" % target)
# TODO(jkummerow): port this hack to ARM and x64.
return "%s%08x %08x: %s" % (marker, line_address, line[0], code)
def AnnotateAddresses(heap, line):
extra = []
for m in ADDRESS_RE.finditer(line):
maybe_address = int(m.group(0), 16)
object = heap.FindObject(maybe_address)
if not object: continue
extra.append(str(object))
if len(extra) == 0: return line
return "%s ;; %s" % (line, ", ".join(extra))
class HeapObject(object):
def __init__(self, heap, map, address):
self.heap = heap
self.map = map
self.address = address
def Is(self, cls):
return isinstance(self, cls)
def Print(self, p):
p.Print(str(self))
def __str__(self):
instance_type = "???"
if self.map is not None:
instance_type = INSTANCE_TYPES[self.map.instance_type]
return "HeapObject(%s, %s)" % (self.heap.reader.FormatIntPtr(self.address),
instance_type)
def ObjectField(self, offset):
field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
return self.heap.FindObjectOrSmi(field_value)
def SmiField(self, offset):
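    # Returns the field value untagged if it is a Smi (low bit clear),
    # otherwise None.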
field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
if (field_value & 1) == 0:
return field_value / 2
return None
class Map(HeapObject):
def Decode(self, offset, size, value):
return (value >> offset) & ((1 << size) - 1)
# Instance Sizes
def InstanceSizesOffset(self):
return self.heap.PointerSize()
def InstanceSizeOffset(self):
return self.InstanceSizesOffset()
def InObjectProperties(self):
return self.InstanceSizeOffset() + 1
def PreAllocatedPropertyFields(self):
return self.InObjectProperties() + 1
def VisitorId(self):
return self.PreAllocatedPropertyFields() + 1
# Instance Attributes
def InstanceAttributesOffset(self):
return self.InstanceSizesOffset() + self.heap.IntSize()
def InstanceTypeOffset(self):
return self.InstanceAttributesOffset()
def UnusedPropertyFieldsOffset(self):
return self.InstanceTypeOffset() + 1
def BitFieldOffset(self):
return self.UnusedPropertyFieldsOffset() + 1
def BitField2Offset(self):
return self.BitFieldOffset() + 1
# Other fields
def PrototypeOffset(self):
return self.InstanceAttributesOffset() + self.heap.IntSize()
def ConstructorOffset(self):
return self.PrototypeOffset() + self.heap.PointerSize()
def TransitionsOrBackPointerOffset(self):
return self.ConstructorOffset() + self.heap.PointerSize()
def DescriptorsOffset(self):
return self.TransitionsOrBackPointerOffset() + self.heap.PointerSize()
def CodeCacheOffset(self):
return self.DescriptorsOffset() + self.heap.PointerSize()
def DependentCodeOffset(self):
return self.CodeCacheOffset() + self.heap.PointerSize()
def BitField3Offset(self):
return self.DependentCodeOffset() + self.heap.PointerSize()
def ReadByte(self, offset):
return self.heap.reader.ReadU8(self.address + offset)
def Print(self, p):
p.Print("Map(%08x)" % (self.address))
p.Print("- size: %d, inobject: %d, preallocated: %d, visitor: %d" % (
self.ReadByte(self.InstanceSizeOffset()),
self.ReadByte(self.InObjectProperties()),
self.ReadByte(self.PreAllocatedPropertyFields()),
        self.ReadByte(self.VisitorId())))
bitfield = self.ReadByte(self.BitFieldOffset())
bitfield2 = self.ReadByte(self.BitField2Offset())
p.Print("- %s, unused: %d, bf: %d, bf2: %d" % (
INSTANCE_TYPES[self.ReadByte(self.InstanceTypeOffset())],
self.ReadByte(self.UnusedPropertyFieldsOffset()),
bitfield, bitfield2))
p.Print("- kind: %s" % (self.Decode(3, 5, bitfield2)))
bitfield3 = self.ObjectField(self.BitField3Offset())
p.Print(
"- EnumLength: %d NumberOfOwnDescriptors: %d OwnsDescriptors: %s" % (
self.Decode(0, 11, bitfield3),
self.Decode(11, 11, bitfield3),
self.Decode(25, 1, bitfield3)))
p.Print("- IsShared: %s" % (self.Decode(22, 1, bitfield3)))
p.Print("- FunctionWithPrototype: %s" % (self.Decode(23, 1, bitfield3)))
p.Print("- DictionaryMap: %s" % (self.Decode(24, 1, bitfield3)))
descriptors = self.ObjectField(self.DescriptorsOffset())
if descriptors.__class__ == FixedArray:
DescriptorArray(descriptors).Print(p)
else:
p.Print("Descriptors: %s" % (descriptors))
transitions = self.ObjectField(self.TransitionsOrBackPointerOffset())
if transitions.__class__ == FixedArray:
TransitionArray(transitions).Print(p)
else:
p.Print("TransitionsOrBackPointer: %s" % (transitions))
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.instance_type = \
heap.reader.ReadU8(self.address + self.InstanceTypeOffset())
class String(HeapObject):
def LengthOffset(self):
# First word after the map is the hash, the second is the length.
return self.heap.PointerSize() * 2
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.length = self.SmiField(self.LengthOffset())
def GetChars(self):
return "?string?"
def Print(self, p):
p.Print(str(self))
def __str__(self):
return "\"%s\"" % self.GetChars()
class SeqString(String):
def CharsOffset(self):
return self.heap.PointerSize() * 3
def __init__(self, heap, map, address):
String.__init__(self, heap, map, address)
self.chars = heap.reader.ReadBytes(self.address + self.CharsOffset(),
self.length)
def GetChars(self):
return self.chars
class ExternalString(String):
# TODO(vegorov) fix ExternalString for X64 architecture
RESOURCE_OFFSET = 12
  WEBKIT_RESOURCE_STRING_IMPL_OFFSET = 4
WEBKIT_STRING_IMPL_CHARS_OFFSET = 8
def __init__(self, heap, map, address):
String.__init__(self, heap, map, address)
reader = heap.reader
self.resource = \
reader.ReadU32(self.address + ExternalString.RESOURCE_OFFSET)
self.chars = "?external string?"
if not reader.IsValidAddress(self.resource): return
    string_impl_address = self.resource + \
        ExternalString.WEBKIT_RESOURCE_STRING_IMPL_OFFSET
if not reader.IsValidAddress(string_impl_address): return
string_impl = reader.ReadU32(string_impl_address)
chars_ptr_address = string_impl + \
ExternalString.WEBKIT_STRING_IMPL_CHARS_OFFSET
if not reader.IsValidAddress(chars_ptr_address): return
chars_ptr = reader.ReadU32(chars_ptr_address)
if not reader.IsValidAddress(chars_ptr): return
raw_chars = reader.ReadBytes(chars_ptr, 2 * self.length)
self.chars = codecs.getdecoder("utf16")(raw_chars)[0]
def GetChars(self):
return self.chars
class ConsString(String):
def LeftOffset(self):
return self.heap.PointerSize() * 3
def RightOffset(self):
return self.heap.PointerSize() * 4
def __init__(self, heap, map, address):
String.__init__(self, heap, map, address)
self.left = self.ObjectField(self.LeftOffset())
self.right = self.ObjectField(self.RightOffset())
def GetChars(self):
try:
return self.left.GetChars() + self.right.GetChars()
except:
return "***CAUGHT EXCEPTION IN GROKDUMP***"
class Oddball(HeapObject):
# Should match declarations in objects.h
KINDS = [
"False",
"True",
"TheHole",
"Null",
"ArgumentMarker",
"Undefined",
"Other"
]
def ToStringOffset(self):
return self.heap.PointerSize()
def ToNumberOffset(self):
return self.ToStringOffset() + self.heap.PointerSize()
def KindOffset(self):
return self.ToNumberOffset() + self.heap.PointerSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.to_string = self.ObjectField(self.ToStringOffset())
self.kind = self.SmiField(self.KindOffset())
def Print(self, p):
p.Print(str(self))
def __str__(self):
if self.to_string:
return "Oddball(%08x, <%s>)" % (self.address, str(self.to_string))
else:
kind = "???"
if 0 <= self.kind < len(Oddball.KINDS):
kind = Oddball.KINDS[self.kind]
return "Oddball(%08x, kind=%s)" % (self.address, kind)
class FixedArray(HeapObject):
def LengthOffset(self):
return self.heap.PointerSize()
def ElementsOffset(self):
return self.heap.PointerSize() * 2
def MemberOffset(self, i):
return self.ElementsOffset() + self.heap.PointerSize() * i
def Get(self, i):
return self.ObjectField(self.MemberOffset(i))
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.length = self.SmiField(self.LengthOffset())
def Print(self, p):
p.Print("FixedArray(%s) {" % self.heap.reader.FormatIntPtr(self.address))
p.Indent()
p.Print("length: %d" % self.length)
base_offset = self.ElementsOffset()
for i in xrange(self.length):
offset = base_offset + 4 * i
try:
p.Print("[%08d] = %s" % (i, self.ObjectField(offset)))
except TypeError:
p.Dedent()
p.Print("...")
p.Print("}")
return
p.Dedent()
p.Print("}")
def __str__(self):
return "FixedArray(%08x, length=%d)" % (self.address, self.length)
class DescriptorArray(object):
def __init__(self, array):
self.array = array
def Length(self):
return self.array.Get(0)
def Decode(self, offset, size, value):
return (value >> offset) & ((1 << size) - 1)
TYPES = [
"normal",
"field",
"function",
"callbacks"
]
def Type(self, value):
return DescriptorArray.TYPES[self.Decode(0, 3, value)]
def Attributes(self, value):
attributes = self.Decode(3, 3, value)
result = []
    # PropertyAttributes bits: READ_ONLY = 1, DONT_ENUM = 2, DONT_DELETE = 4.
    if (attributes & 1): result += ["ReadOnly"]
    if (attributes & 2): result += ["DontEnum"]
    if (attributes & 4): result += ["DontDelete"]
return "[" + (",".join(result)) + "]"
def Deleted(self, value):
return self.Decode(6, 1, value) == 1
def FieldIndex(self, value):
return self.Decode(20, 11, value)
def Pointer(self, value):
return self.Decode(6, 11, value)
def Details(self, di, value):
return (
di,
self.Type(value),
self.Attributes(value),
self.FieldIndex(value),
self.Pointer(value)
)
def Print(self, p):
length = self.Length()
array = self.array
p.Print("Descriptors(%08x, length=%d)" % (array.address, length))
p.Print("[et] %s" % (array.Get(1)))
for di in xrange(length):
i = 2 + di * 3
p.Print("0x%x" % (array.address + array.MemberOffset(i)))
p.Print("[%i] name: %s" % (di, array.Get(i + 0)))
p.Print("[%i] details: %s %s field-index %i pointer %i" % \
self.Details(di, array.Get(i + 1)))
p.Print("[%i] value: %s" % (di, array.Get(i + 2)))
end = self.array.length // 3
if length != end:
p.Print("[%i-%i] slack descriptors" % (length, end))
class TransitionArray(object):
def __init__(self, array):
self.array = array
def IsSimpleTransition(self):
return self.array.length <= 2
def Length(self):
# SimpleTransition cases
if self.IsSimpleTransition():
return self.array.length - 1
return (self.array.length - 3) // 2
def Print(self, p):
length = self.Length()
array = self.array
p.Print("Transitions(%08x, length=%d)" % (array.address, length))
p.Print("[backpointer] %s" % (array.Get(0)))
if self.IsSimpleTransition():
if length == 1:
p.Print("[simple target] %s" % (array.Get(1)))
return
elements = array.Get(1)
if elements is not None:
p.Print("[elements ] %s" % (elements))
prototype = array.Get(2)
if prototype is not None:
p.Print("[prototype ] %s" % (prototype))
for di in xrange(length):
i = 3 + di * 2
p.Print("[%i] symbol: %s" % (di, array.Get(i + 0)))
p.Print("[%i] target: %s" % (di, array.Get(i + 1)))
class JSFunction(HeapObject):
def CodeEntryOffset(self):
return 3 * self.heap.PointerSize()
def SharedOffset(self):
return 5 * self.heap.PointerSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
code_entry = \
heap.reader.ReadU32(self.address + self.CodeEntryOffset())
self.code = heap.FindObject(code_entry - Code.HeaderSize(heap) + 1)
self.shared = self.ObjectField(self.SharedOffset())
def Print(self, p):
source = "\n".join(" %s" % line for line in self._GetSource().split("\n"))
p.Print("JSFunction(%s) {" % self.heap.reader.FormatIntPtr(self.address))
p.Indent()
p.Print("inferred name: %s" % self.shared.inferred_name)
if self.shared.script.Is(Script) and self.shared.script.name.Is(String):
p.Print("script name: %s" % self.shared.script.name)
p.Print("source:")
p.PrintLines(self._GetSource().split("\n"))
p.Print("code:")
self.code.Print(p)
if self.code != self.shared.code:
p.Print("unoptimized code:")
self.shared.code.Print(p)
p.Dedent()
p.Print("}")
def __str__(self):
inferred_name = ""
if self.shared is not None and self.shared.Is(SharedFunctionInfo):
inferred_name = self.shared.inferred_name
return "JSFunction(%s, %s) " % \
(self.heap.reader.FormatIntPtr(self.address), inferred_name)
def _GetSource(self):
source = "?source?"
start = self.shared.start_position
end = self.shared.end_position
if not self.shared.script.Is(Script): return source
script_source = self.shared.script.source
if not script_source.Is(String): return source
if start and end:
source = script_source.GetChars()[start:end]
return source
class SharedFunctionInfo(HeapObject):
def CodeOffset(self):
return 2 * self.heap.PointerSize()
def ScriptOffset(self):
return 7 * self.heap.PointerSize()
def InferredNameOffset(self):
return 9 * self.heap.PointerSize()
def EndPositionOffset(self):
return 12 * self.heap.PointerSize() + 4 * self.heap.IntSize()
def StartPositionAndTypeOffset(self):
return 12 * self.heap.PointerSize() + 5 * self.heap.IntSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.code = self.ObjectField(self.CodeOffset())
self.script = self.ObjectField(self.ScriptOffset())
self.inferred_name = self.ObjectField(self.InferredNameOffset())
if heap.PointerSize() == 8:
start_position_and_type = \
heap.reader.ReadU32(self.StartPositionAndTypeOffset())
self.start_position = start_position_and_type >> 2
pseudo_smi_end_position = \
heap.reader.ReadU32(self.EndPositionOffset())
self.end_position = pseudo_smi_end_position >> 2
else:
start_position_and_type = \
self.SmiField(self.StartPositionAndTypeOffset())
if start_position_and_type:
self.start_position = start_position_and_type >> 2
else:
self.start_position = None
self.end_position = \
self.SmiField(self.EndPositionOffset())
class Script(HeapObject):
def SourceOffset(self):
return self.heap.PointerSize()
def NameOffset(self):
return self.SourceOffset() + self.heap.PointerSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.source = self.ObjectField(self.SourceOffset())
self.name = self.ObjectField(self.NameOffset())
class CodeCache(HeapObject):
def DefaultCacheOffset(self):
return self.heap.PointerSize()
def NormalTypeCacheOffset(self):
return self.DefaultCacheOffset() + self.heap.PointerSize()
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.default_cache = self.ObjectField(self.DefaultCacheOffset())
self.normal_type_cache = self.ObjectField(self.NormalTypeCacheOffset())
def Print(self, p):
p.Print("CodeCache(%s) {" % self.heap.reader.FormatIntPtr(self.address))
p.Indent()
p.Print("default cache: %s" % self.default_cache)
p.Print("normal type cache: %s" % self.normal_type_cache)
p.Dedent()
p.Print("}")
class Code(HeapObject):
CODE_ALIGNMENT_MASK = (1 << 5) - 1
def InstructionSizeOffset(self):
return self.heap.PointerSize()
@staticmethod
def HeaderSize(heap):
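    # Size of the Code object header, rounded up to the 32-byte code
    # alignment boundary.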
return (heap.PointerSize() + heap.IntSize() + \
4 * heap.PointerSize() + 3 * heap.IntSize() + \
Code.CODE_ALIGNMENT_MASK) & ~Code.CODE_ALIGNMENT_MASK
def __init__(self, heap, map, address):
HeapObject.__init__(self, heap, map, address)
self.entry = self.address + Code.HeaderSize(heap)
self.instruction_size = \
heap.reader.ReadU32(self.address + self.InstructionSizeOffset())
def Print(self, p):
lines = self.heap.reader.GetDisasmLines(self.entry, self.instruction_size)
p.Print("Code(%s) {" % self.heap.reader.FormatIntPtr(self.address))
p.Indent()
p.Print("instruction_size: %d" % self.instruction_size)
p.PrintLines(self._FormatLine(line) for line in lines)
p.Dedent()
p.Print("}")
def _FormatLine(self, line):
return FormatDisasmLine(self.entry, self.heap, line)
class V8Heap(object):
CLASS_MAP = {
"SYMBOL_TYPE": SeqString,
"ONE_BYTE_SYMBOL_TYPE": SeqString,
"CONS_SYMBOL_TYPE": ConsString,
"CONS_ONE_BYTE_SYMBOL_TYPE": ConsString,
"EXTERNAL_SYMBOL_TYPE": ExternalString,
"EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
"EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
"SHORT_EXTERNAL_SYMBOL_TYPE": ExternalString,
"SHORT_EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
"SHORT_EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
"STRING_TYPE": SeqString,
"ONE_BYTE_STRING_TYPE": SeqString,
"CONS_STRING_TYPE": ConsString,
"CONS_ONE_BYTE_STRING_TYPE": ConsString,
"EXTERNAL_STRING_TYPE": ExternalString,
"EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
"EXTERNAL_ONE_BYTE_STRING_TYPE": ExternalString,
"MAP_TYPE": Map,
"ODDBALL_TYPE": Oddball,
"FIXED_ARRAY_TYPE": FixedArray,
"JS_FUNCTION_TYPE": JSFunction,
"SHARED_FUNCTION_INFO_TYPE": SharedFunctionInfo,
"SCRIPT_TYPE": Script,
"CODE_CACHE_TYPE": CodeCache,
"CODE_TYPE": Code,
}
def __init__(self, reader, stack_map):
self.reader = reader
self.stack_map = stack_map
self.objects = {}
def FindObjectOrSmi(self, tagged_address):
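    # V8 tagging: Smis have the low bit clear and carry the integer in the
    # upper bits (31-bit Smis assumed here), heap object pointers have the
    # low bit set.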
if (tagged_address & 1) == 0: return tagged_address / 2
return self.FindObject(tagged_address)
def FindObject(self, tagged_address):
if tagged_address in self.objects:
return self.objects[tagged_address]
if (tagged_address & self.ObjectAlignmentMask()) != 1: return None
address = tagged_address - 1
if not self.reader.IsValidAddress(address): return None
map_tagged_address = self.reader.ReadUIntPtr(address)
if tagged_address == map_tagged_address:
# Meta map?
meta_map = Map(self, None, address)
instance_type_name = INSTANCE_TYPES.get(meta_map.instance_type)
if instance_type_name != "MAP_TYPE": return None
meta_map.map = meta_map
object = meta_map
else:
map = self.FindMap(map_tagged_address)
if map is None: return None
instance_type_name = INSTANCE_TYPES.get(map.instance_type)
if instance_type_name is None: return None
cls = V8Heap.CLASS_MAP.get(instance_type_name, HeapObject)
object = cls(self, map, address)
self.objects[tagged_address] = object
return object
def FindMap(self, tagged_address):
if (tagged_address & self.MapAlignmentMask()) != 1: return None
address = tagged_address - 1
if not self.reader.IsValidAddress(address): return None
object = Map(self, None, address)
return object
def IntSize(self):
return 4
def PointerSize(self):
return self.reader.PointerSize()
def ObjectAlignmentMask(self):
return self.PointerSize() - 1
def MapAlignmentMask(self):
if self.reader.arch == MD_CPU_ARCHITECTURE_AMD64:
return (1 << 4) - 1
elif self.reader.arch == MD_CPU_ARCHITECTURE_ARM:
return (1 << 4) - 1
elif self.reader.arch == MD_CPU_ARCHITECTURE_X86:
return (1 << 5) - 1
def PageAlignmentMask(self):
return (1 << 20) - 1
class KnownObject(HeapObject):
def __init__(self, heap, known_name):
HeapObject.__init__(self, heap, None, None)
self.known_name = known_name
def __str__(self):
return "<%s>" % self.known_name
class KnownMap(HeapObject):
def __init__(self, heap, known_name, instance_type):
HeapObject.__init__(self, heap, None, None)
self.instance_type = instance_type
self.known_name = known_name
def __str__(self):
return "<%s>" % self.known_name
COMMENT_RE = re.compile(r"^C (0x[0-9a-fA-F]+) (.*)$")
PAGEADDRESS_RE = re.compile(
r"^P (mappage|pointerpage|datapage) (0x[0-9a-fA-F]+)$")
class InspectionInfo(object):
def __init__(self, minidump_name, reader):
self.comment_file = minidump_name + ".comments"
self.address_comments = {}
self.page_address = {}
if os.path.exists(self.comment_file):
with open(self.comment_file, "r") as f:
lines = f.readlines()
f.close()
for l in lines:
m = COMMENT_RE.match(l)
if m:
self.address_comments[int(m.group(1), 0)] = m.group(2)
m = PAGEADDRESS_RE.match(l)
if m:
self.page_address[m.group(1)] = int(m.group(2), 0)
self.reader = reader
self.styles = {}
self.color_addresses()
return
def get_page_address(self, page_kind):
return self.page_address.get(page_kind, 0)
def save_page_address(self, page_kind, address):
with open(self.comment_file, "a") as f:
f.write("P %s 0x%x\n" % (page_kind, address))
f.close()
def color_addresses(self):
# Color all stack addresses.
exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
stack_top = self.reader.ExceptionSP()
stack_bottom = exception_thread.stack.start + \
exception_thread.stack.memory.data_size
frame_pointer = self.reader.ExceptionFP()
self.styles[frame_pointer] = "frame"
for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
self.styles[slot] = "stackaddress"
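    # Mark stack contents and follow the saved frame pointer chain to tag
    # frame boundaries.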
for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
maybe_address = self.reader.ReadUIntPtr(slot)
self.styles[maybe_address] = "stackval"
if slot == frame_pointer:
self.styles[slot] = "frame"
frame_pointer = maybe_address
self.styles[self.reader.ExceptionIP()] = "pc"
def get_style_class(self, address):
return self.styles.get(address, None)
def get_style_class_string(self, address):
style = self.get_style_class(address)
    if style is not None:
return " class=\"%s\" " % style
else:
return ""
def set_comment(self, address, comment):
self.address_comments[address] = comment
with open(self.comment_file, "a") as f:
f.write("C 0x%x %s\n" % (address, comment))
f.close()
def get_comment(self, address):
return self.address_comments.get(address, "")
class InspectionPadawan(object):
"""The padawan can improve annotations by sensing well-known objects."""
def __init__(self, reader, heap):
self.reader = reader
self.heap = heap
self.known_first_map_page = 0
self.known_first_data_page = 0
self.known_first_pointer_page = 0
  def __getattr__(self, name):
    """An InspectionPadawan can be used in place of V8Heap even though it
    does not inherit from V8Heap (it acts as a mixin)."""
return getattr(self.heap, name)
def GetPageOffset(self, tagged_address):
return tagged_address & self.heap.PageAlignmentMask()
def IsInKnownMapSpace(self, tagged_address):
page_address = tagged_address & ~self.heap.PageAlignmentMask()
return page_address == self.known_first_map_page
def IsInKnownOldSpace(self, tagged_address):
page_address = tagged_address & ~self.heap.PageAlignmentMask()
return page_address in [self.known_first_data_page,
self.known_first_pointer_page]
def ContainingKnownOldSpaceName(self, tagged_address):
page_address = tagged_address & ~self.heap.PageAlignmentMask()
if page_address == self.known_first_data_page: return "OLD_DATA_SPACE"
if page_address == self.known_first_pointer_page: return "OLD_POINTER_SPACE"
return None
def SenseObject(self, tagged_address):
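    # Try to identify the object at tagged_address, first via the known
    # object/map tables baked into this script, then via the dump contents.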
if self.IsInKnownOldSpace(tagged_address):
offset = self.GetPageOffset(tagged_address)
lookup_key = (self.ContainingKnownOldSpaceName(tagged_address), offset)
known_obj_name = KNOWN_OBJECTS.get(lookup_key)
if known_obj_name:
return KnownObject(self, known_obj_name)
if self.IsInKnownMapSpace(tagged_address):
known_map = self.SenseMap(tagged_address)
if known_map:
return known_map
found_obj = self.heap.FindObject(tagged_address)
if found_obj: return found_obj
address = tagged_address - 1
if self.reader.IsValidAddress(address):
map_tagged_address = self.reader.ReadUIntPtr(address)
map = self.SenseMap(map_tagged_address)
if map is None: return None
instance_type_name = INSTANCE_TYPES.get(map.instance_type)
if instance_type_name is None: return None
cls = V8Heap.CLASS_MAP.get(instance_type_name, HeapObject)
return cls(self, map, address)
return None
def SenseMap(self, tagged_address):
if self.IsInKnownMapSpace(tagged_address):
offset = self.GetPageOffset(tagged_address)
known_map_info = KNOWN_MAPS.get(offset)
if known_map_info:
known_map_type, known_map_name = known_map_info
return KnownMap(self, known_map_name, known_map_type)
found_map = self.heap.FindMap(tagged_address)
if found_map: return found_map
return None
def FindObjectOrSmi(self, tagged_address):
"""When used as a mixin in place of V8Heap."""
found_obj = self.SenseObject(tagged_address)
if found_obj: return found_obj
if (tagged_address & 1) == 0:
return "Smi(%d)" % (tagged_address / 2)
else:
return "Unknown(%s)" % self.reader.FormatIntPtr(tagged_address)
def FindObject(self, tagged_address):
"""When used as a mixin in place of V8Heap."""
raise NotImplementedError
def FindMap(self, tagged_address):
"""When used as a mixin in place of V8Heap."""
raise NotImplementedError
def PrintKnowledge(self):
print " known_first_map_page = %s\n"\
" known_first_data_page = %s\n"\
" known_first_pointer_page = %s" % (
self.reader.FormatIntPtr(self.known_first_map_page),
self.reader.FormatIntPtr(self.known_first_data_page),
self.reader.FormatIntPtr(self.known_first_pointer_page))
WEB_HEADER = """
<!DOCTYPE html>
<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="content-type">
<style media="screen" type="text/css">
.code {
font-family: monospace;
}
.dmptable {
border-collapse : collapse;
border-spacing : 0px;
}
.codedump {
border-collapse : collapse;
border-spacing : 0px;
}
.addrcomments {
border : 0px;
}
.register {
padding-right : 1em;
}
.header {
clear : both;
}
.header .navigation {
float : left;
}
.header .dumpname {
float : right;
}
tr.highlight-line {
background-color : yellow;
}
.highlight {
background-color : magenta;
}
tr.inexact-highlight-line {
background-color : pink;
}
input {
background-color: inherit;
border: 1px solid LightGray;
}
.dumpcomments {
border : 1px solid LightGray;
width : 32em;
}
.regions td {
padding:0 15px 0 15px;
}
.stackframe td {
background-color : cyan;
}
.stackaddress {
background-color : LightGray;
}
.stackval {
background-color : LightCyan;
}
.frame {
background-color : cyan;
}
.commentinput {
width : 20em;
}
a.nodump:visited {
color : black;
text-decoration : none;
}
a.nodump:link {
color : black;
text-decoration : none;
}
a:visited {
color : blueviolet;
}
a:link {
color : blue;
}
.disasmcomment {
color : DarkGreen;
}
</style>
<script type="application/javascript">
var address_str = "address-";
var address_len = address_str.length;
function comment() {
var s = event.srcElement.id;
var index = s.indexOf(address_str);
if (index >= 0) {
send_comment(s.substring(index + address_len), event.srcElement.value);
}
}
function send_comment(address, comment) {
xmlhttp = new XMLHttpRequest();
address = encodeURIComponent(address)
comment = encodeURIComponent(comment)
xmlhttp.open("GET",
"setcomment?%(query_dump)s&address=" + address +
"&comment=" + comment, true);
xmlhttp.send();
}
var dump_str = "dump-";
var dump_len = dump_str.length;
function dump_comment() {
var s = event.srcElement.id;
var index = s.indexOf(dump_str);
if (index >= 0) {
send_dump_desc(s.substring(index + dump_len), event.srcElement.value);
}
}
function send_dump_desc(name, desc) {
xmlhttp = new XMLHttpRequest();
name = encodeURIComponent(name)
desc = encodeURIComponent(desc)
xmlhttp.open("GET",
"setdumpdesc?dump=" + name +
"&description=" + desc, true);
xmlhttp.send();
}
function onpage(kind, address) {
xmlhttp = new XMLHttpRequest();
kind = encodeURIComponent(kind)
address = encodeURIComponent(address)
xmlhttp.onreadystatechange = function() {
if (xmlhttp.readyState==4 && xmlhttp.status==200) {
location.reload(true)
}
};
xmlhttp.open("GET",
"setpageaddress?%(query_dump)s&kind=" + kind +
"&address=" + address);
xmlhttp.send();
}
</script>
<title>Dump %(dump_name)s</title>
</head>
<body>
<div class="header">
<form class="navigation" action="search.html">
<a href="summary.html?%(query_dump)s">Context info</a>
<a href="info.html?%(query_dump)s">Dump info</a>
<a href="modules.html?%(query_dump)s">Modules</a>
<input type="search" name="val">
<input type="submit" name="search" value="Search">
<input type="hidden" name="dump" value="%(dump_name)s">
</form>
<form class="navigation" action="disasm.html#highlight">
<input type="search" name="val">
<input type="submit" name="disasm" value="Disasm">
<a href="dumps.html">Dumps...</a>
</form>
</div>
<br>
<hr>
"""
WEB_FOOTER = """
</body>
</html>
"""
class WebParameterError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def formatter(self, query_components):
name = query_components.get("dump", [None])[0]
return self.server.get_dump_formatter(name)
def send_success_html_headers(self):
self.send_response(200)
self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
self.send_header("Pragma", "no-cache")
self.send_header("Expires", "0")
self.send_header('Content-type','text/html')
self.end_headers()
return
def do_GET(self):
try:
parsedurl = urlparse.urlparse(self.path)
query_components = urlparse.parse_qs(parsedurl.query)
if parsedurl.path == "/dumps.html":
self.send_success_html_headers()
self.server.output_dumps(self.wfile)
elif parsedurl.path == "/summary.html":
self.send_success_html_headers()
self.formatter(query_components).output_summary(self.wfile)
elif parsedurl.path == "/info.html":
self.send_success_html_headers()
self.formatter(query_components).output_info(self.wfile)
elif parsedurl.path == "/modules.html":
self.send_success_html_headers()
self.formatter(query_components).output_modules(self.wfile)
elif parsedurl.path == "/search.html":
address = query_components.get("val", [])
if len(address) != 1:
self.send_error(404, "Invalid params")
return
self.send_success_html_headers()
self.formatter(query_components).output_search_res(
self.wfile, address[0])
elif parsedurl.path == "/disasm.html":
address = query_components.get("val", [])
exact = query_components.get("exact", ["on"])
if len(address) != 1:
self.send_error(404, "Invalid params")
return
self.send_success_html_headers()
self.formatter(query_components).output_disasm(
self.wfile, address[0], exact[0])
elif parsedurl.path == "/data.html":
address = query_components.get("val", [])
datakind = query_components.get("type", ["address"])
if len(address) == 1 and len(datakind) == 1:
self.send_success_html_headers()
self.formatter(query_components).output_data(
self.wfile, address[0], datakind[0])
else:
self.send_error(404,'Invalid params')
elif parsedurl.path == "/setdumpdesc":
name = query_components.get("dump", [""])
description = query_components.get("description", [""])
if len(name) == 1 and len(description) == 1:
name = name[0]
description = description[0]
if self.server.set_dump_desc(name, description):
self.send_success_html_headers()
self.wfile.write("OK")
return
self.send_error(404,'Invalid params')
elif parsedurl.path == "/setcomment":
address = query_components.get("address", [])
comment = query_components.get("comment", [""])
if len(address) == 1 and len(comment) == 1:
address = address[0]
comment = comment[0]
self.formatter(query_components).set_comment(address, comment)
self.send_success_html_headers()
self.wfile.write("OK")
else:
self.send_error(404,'Invalid params')
elif parsedurl.path == "/setpageaddress":
kind = query_components.get("kind", [])
address = query_components.get("address", [""])
if len(kind) == 1 and len(address) == 1:
kind = kind[0]
address = address[0]
self.formatter(query_components).set_page_address(kind, address)
self.send_success_html_headers()
self.wfile.write("OK")
else:
self.send_error(404,'Invalid params')
else:
self.send_error(404,'File Not Found: %s' % self.path)
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
except WebParameterError as e:
self.send_error(404, 'Web parameter error: %s' % e.message)
HTML_REG_FORMAT = "<span class=\"register\"><b>%s</b>: %s</span><br/>\n"
class InspectionWebFormatter(object):
CONTEXT_FULL = 0
CONTEXT_SHORT = 1
def __init__(self, switches, minidump_name, http_server):
self.dumpfilename = os.path.split(minidump_name)[1]
self.encfilename = urllib.urlencode({ 'dump' : self.dumpfilename })
self.reader = MinidumpReader(switches, minidump_name)
self.server = http_server
# Set up the heap
exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
stack_top = self.reader.ExceptionSP()
stack_bottom = exception_thread.stack.start + \
exception_thread.stack.memory.data_size
stack_map = {self.reader.ExceptionIP(): -1}
for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
maybe_address = self.reader.ReadUIntPtr(slot)
      if maybe_address not in stack_map:
stack_map[maybe_address] = slot
self.heap = V8Heap(self.reader, stack_map)
self.padawan = InspectionPadawan(self.reader, self.heap)
self.comments = InspectionInfo(minidump_name, self.reader)
self.padawan.known_first_data_page = (
self.comments.get_page_address("datapage"))
self.padawan.known_first_map_page = (
self.comments.get_page_address("mappage"))
self.padawan.known_first_pointer_page = (
self.comments.get_page_address("pointerpage"))
def set_comment(self, straddress, comment):
try:
address = int(straddress, 0)
self.comments.set_comment(address, comment)
except ValueError:
print "Invalid address"
def set_page_address(self, kind, straddress):
try:
address = int(straddress, 0)
if kind == "datapage":
self.padawan.known_first_data_page = address
elif kind == "mappage":
self.padawan.known_first_map_page = address
elif kind == "pointerpage":
self.padawan.known_first_pointer_page = address
self.comments.save_page_address(kind, address)
except ValueError:
print "Invalid address"
def td_from_address(self, f, address):
f.write("<td %s>" % self.comments.get_style_class_string(address))
def format_address(self, maybeaddress, straddress = None):
if maybeaddress is None:
return "not in dump"
else:
if straddress is None:
straddress = "0x" + self.reader.FormatIntPtr(maybeaddress)
style_class = ""
if not self.reader.IsValidAddress(maybeaddress):
style_class = " class=\"nodump\""
return ("<a %s href=\"search.html?%s&val=%s\">%s</a>" %
(style_class, self.encfilename, straddress, straddress))
def output_header(self, f):
f.write(WEB_HEADER %
{ "query_dump" : self.encfilename,
"dump_name" : cgi.escape(self.dumpfilename) })
def output_footer(self, f):
f.write(WEB_FOOTER)
MAX_CONTEXT_STACK = 4096
def output_summary(self, f):
self.output_header(f)
f.write('<div class="code">')
self.output_context(f, InspectionWebFormatter.CONTEXT_SHORT)
self.output_disasm_pc(f)
# Output stack
exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
stack_bottom = exception_thread.stack.start + \
min(exception_thread.stack.memory.data_size, self.MAX_CONTEXT_STACK)
stack_top = self.reader.ExceptionSP()
self.output_words(f, stack_top - 16, stack_bottom, stack_top, "Stack")
f.write('</div>')
self.output_footer(f)
return
def output_info(self, f):
self.output_header(f)
f.write("<h3>Dump info</h3>\n")
f.write("Description: ")
self.server.output_dump_desc_field(f, self.dumpfilename)
f.write("<br>\n")
f.write("Filename: ")
f.write("<span class=\"code\">%s</span><br>\n" % (self.dumpfilename))
dt = datetime.datetime.fromtimestamp(self.reader.header.time_date_stampt)
f.write("Timestamp: %s<br>\n" % dt.strftime('%Y-%m-%d %H:%M:%S'))
self.output_context(f, InspectionWebFormatter.CONTEXT_FULL)
self.output_address_ranges(f)
self.output_footer(f)
return
def output_address_ranges(self, f):
regions = {}
def print_region(_reader, start, size, _location):
regions[start] = size
self.reader.ForEachMemoryRegion(print_region)
f.write("<h3>Available memory regions</h3>\n")
f.write('<div class="code">')
f.write("<table class=\"regions\">\n")
f.write("<thead><tr>")
f.write("<th>Start address</th>")
f.write("<th>End address</th>")
f.write("<th>Number of bytes</th>")
f.write("</tr></thead>\n")
for start in sorted(regions):
size = regions[start]
f.write("<tr>")
f.write("<td>%s</td>" % self.format_address(start))
f.write("<td> %s</td>" % self.format_address(start + size))
f.write("<td> %d</td>" % size)
f.write("</tr>\n")
f.write("</table>\n")
f.write('</div>')
return
def output_module_details(self, f, module):
f.write("<b>%s</b>" % GetModuleName(self.reader, module))
file_version = GetVersionString(module.version_info.dwFileVersionMS,
module.version_info.dwFileVersionLS)
product_version = GetVersionString(module.version_info.dwProductVersionMS,
module.version_info.dwProductVersionLS)
f.write("<br> \n")
f.write("base: %s" % self.reader.FormatIntPtr(module.base_of_image))
f.write("<br> \n")
f.write(" end: %s" % self.reader.FormatIntPtr(module.base_of_image +
module.size_of_image))
f.write("<br> \n")
f.write(" file version: %s" % file_version)
f.write("<br> \n")
f.write(" product version: %s" % product_version)
f.write("<br> \n")
time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
f.write(" timestamp: %s" % time_date_stamp)
    f.write("<br>\n")
def output_modules(self, f):
self.output_header(f)
f.write('<div class="code">')
for module in self.reader.module_list.modules:
self.output_module_details(f, module)
f.write("</div>")
self.output_footer(f)
return
def output_context(self, f, details):
exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
f.write("<h3>Exception context</h3>")
f.write('<div class="code">\n')
f.write("Thread id: %d" % exception_thread.id)
f.write(" Exception code: %08X<br/>\n" %
self.reader.exception.exception.code)
if details == InspectionWebFormatter.CONTEXT_FULL:
if self.reader.exception.exception.parameter_count > 0:
f.write(" Exception parameters: \n")
for i in xrange(0, self.reader.exception.exception.parameter_count):
f.write("%08x" % self.reader.exception.exception.information[i])
f.write("<br><br>\n")
for r in CONTEXT_FOR_ARCH[self.reader.arch]:
f.write(HTML_REG_FORMAT %
(r, self.format_address(self.reader.Register(r))))
# TODO(vitalyr): decode eflags.
if self.reader.arch == MD_CPU_ARCHITECTURE_ARM:
f.write("<b>cpsr</b>: %s" % bin(self.reader.exception_context.cpsr)[2:])
else:
f.write("<b>eflags</b>: %s" %
bin(self.reader.exception_context.eflags)[2:])
f.write('</div>\n')
return
def align_down(self, a, size):
alignment_correction = a % size
return a - alignment_correction
def align_up(self, a, size):
alignment_correction = (size - 1) - ((a + size - 1) % size)
return a + alignment_correction
def format_object(self, address):
heap_object = self.padawan.SenseObject(address)
return cgi.escape(str(heap_object or ""))
def output_data(self, f, straddress, datakind):
try:
self.output_header(f)
address = int(straddress, 0)
if not self.reader.IsValidAddress(address):
f.write("<h3>Address 0x%x not found in the dump.</h3>" % address)
return
region = self.reader.FindRegion(address)
if datakind == "address":
self.output_words(f, region[0], region[0] + region[1], address, "Dump")
elif datakind == "ascii":
self.output_ascii(f, region[0], region[0] + region[1], address)
self.output_footer(f)
except ValueError:
f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
return
def output_words(self, f, start_address, end_address,
highlight_address, desc):
region = self.reader.FindRegion(highlight_address)
if region is None:
f.write("<h3>Address 0x%x not found in the dump.</h3>\n" %
(highlight_address))
return
size = self.heap.PointerSize()
start_address = self.align_down(start_address, size)
low = self.align_down(region[0], size)
high = self.align_up(region[0] + region[1], size)
if start_address < low:
start_address = low
end_address = self.align_up(end_address, size)
if end_address > high:
end_address = high
expand = ""
if start_address != low or end_address != high:
expand = ("(<a href=\"data.html?%s&val=0x%x#highlight\">"
" more..."
" </a>)" %
(self.encfilename, highlight_address))
f.write("<h3>%s 0x%x - 0x%x, "
"highlighting <a href=\"#highlight\">0x%x</a> %s</h3>\n" %
(desc, start_address, end_address, highlight_address, expand))
f.write('<div class="code">')
f.write("<table class=\"codedump\">\n")
for slot in xrange(start_address, end_address, size):
heap_object = ""
maybe_address = None
end_region = region[0] + region[1]
if slot < region[0] or slot + size > end_region:
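        # The slot is only partially covered by the dumped region: print the
        # bytes we do have and show "??" for the missing ones.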
straddress = "0x"
for i in xrange(end_region, slot + size):
straddress += "??"
for i in reversed(
xrange(max(slot, region[0]), min(slot + size, end_region))):
straddress += "%02x" % self.reader.ReadU8(i)
for i in xrange(slot, region[0]):
straddress += "??"
else:
maybe_address = self.reader.ReadUIntPtr(slot)
straddress = self.format_address(maybe_address)
if maybe_address:
heap_object = self.format_object(maybe_address)
address_fmt = "%s </td>\n"
if slot == highlight_address:
f.write("<tr class=\"highlight-line\">\n")
address_fmt = "<a id=\"highlight\"></a>%s </td>\n"
elif slot < highlight_address and highlight_address < slot + size:
f.write("<tr class=\"inexact-highlight-line\">\n")
address_fmt = "<a id=\"highlight\"></a>%s </td>\n"
else:
f.write("<tr>\n")
f.write(" <td>")
self.output_comment_box(f, "da-", slot)
f.write("</td>\n")
f.write(" ")
self.td_from_address(f, slot)
f.write(address_fmt % self.format_address(slot))
f.write(" ")
self.td_from_address(f, maybe_address)
f.write(": %s </td>\n" % straddress)
f.write(" <td>")
      if maybe_address is not None:
self.output_comment_box(
f, "sv-" + self.reader.FormatIntPtr(slot), maybe_address)
f.write(" </td>\n")
f.write(" <td>%s</td>\n" % (heap_object or ''))
f.write("</tr>\n")
f.write("</table>\n")
f.write("</div>")
return
def output_ascii(self, f, start_address, end_address, highlight_address):
region = self.reader.FindRegion(highlight_address)
if region is None:
f.write("<h3>Address %x not found in the dump.</h3>" %
highlight_address)
return
if start_address < region[0]:
start_address = region[0]
if end_address > region[0] + region[1]:
end_address = region[0] + region[1]
expand = ""
if start_address != region[0] or end_address != region[0] + region[1]:
link = ("data.html?%s&val=0x%x&type=ascii#highlight" %
(self.encfilename, highlight_address))
expand = "(<a href=\"%s\">more...</a>)" % link
f.write("<h3>ASCII dump 0x%x - 0x%x, highlighting 0x%x %s</h3>" %
(start_address, end_address, highlight_address, expand))
line_width = 64
f.write('<div class="code">')
start = self.align_down(start_address, line_width)
for address in xrange(start, end_address):
if address % 64 == 0:
if address != start:
f.write("<br>")
f.write("0x%08x: " % address)
if address < start_address:
f.write(" ")
else:
if address == highlight_address:
f.write("<span class=\"highlight\">")
code = self.reader.ReadU8(address)
if code < 127 and code >= 32:
f.write("&#")
f.write(str(code))
f.write(";")
else:
f.write("·")
if address == highlight_address:
f.write("</span>")
f.write("</div>")
return
def output_disasm(self, f, straddress, strexact):
try:
self.output_header(f)
address = int(straddress, 0)
if not self.reader.IsValidAddress(address):
f.write("<h3>Address 0x%x not found in the dump.</h3>" % address)
return
region = self.reader.FindRegion(address)
self.output_disasm_range(
f, region[0], region[0] + region[1], address, strexact == "on")
self.output_footer(f)
except ValueError:
f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
return
def output_disasm_range(
self, f, start_address, end_address, highlight_address, exact):
region = self.reader.FindRegion(highlight_address)
if start_address < region[0]:
start_address = region[0]
if end_address > region[0] + region[1]:
end_address = region[0] + region[1]
count = end_address - start_address
lines = self.reader.GetDisasmLines(start_address, count)
found = False
if exact:
for line in lines:
if line[0] + start_address == highlight_address:
found = True
break
if not found:
start_address = highlight_address
count = end_address - start_address
lines = self.reader.GetDisasmLines(highlight_address, count)
expand = ""
if start_address != region[0] or end_address != region[0] + region[1]:
exactness = ""
if exact and not found and end_address == region[0] + region[1]:
exactness = "&exact=off"
expand = ("(<a href=\"disasm.html?%s%s"
"&val=0x%x#highlight\">more...</a>)" %
(self.encfilename, exactness, highlight_address))
f.write("<h3>Disassembling 0x%x - 0x%x, highlighting 0x%x %s</h3>" %
(start_address, end_address, highlight_address, expand))
f.write('<div class="code">')
f.write("<table class=\"codedump\">\n");
for i in xrange(0, len(lines)):
line = lines[i]
next_address = count
if i + 1 < len(lines):
next_line = lines[i + 1]
next_address = next_line[0]
self.format_disasm_line(
f, start_address, line, next_address, highlight_address)
f.write("</table>\n")
f.write("</div>")
return
def annotate_disasm_addresses(self, line):
extra = []
for m in ADDRESS_RE.finditer(line):
maybe_address = int(m.group(0), 16)
formatted_address = self.format_address(maybe_address, m.group(0))
line = line.replace(m.group(0), formatted_address)
object_info = self.padawan.SenseObject(maybe_address)
if not object_info:
continue
extra.append(cgi.escape(str(object_info)))
if len(extra) == 0:
return line
return ("%s <span class=\"disasmcomment\">;; %s</span>" %
(line, ", ".join(extra)))
def format_disasm_line(
self, f, start, line, next_address, highlight_address):
line_address = start + line[0]
address_fmt = " <td>%s</td>\n"
if line_address == highlight_address:
f.write("<tr class=\"highlight-line\">\n")
address_fmt = " <td><a id=\"highlight\">%s</a></td>\n"
elif (line_address < highlight_address and
highlight_address < next_address + start):
f.write("<tr class=\"inexact-highlight-line\">\n")
address_fmt = " <td><a id=\"highlight\">%s</a></td>\n"
else:
f.write("<tr>\n")
num_bytes = next_address - line[0]
stack_slot = self.heap.stack_map.get(line_address)
marker = ""
if stack_slot:
marker = "=>"
op_offset = 3 * num_bytes - 1
code = line[1]
# Compute the actual call target which the disassembler is too stupid
# to figure out (it adds the call offset to the disassembly offset rather
# than the absolute instruction address).
if self.heap.reader.arch == MD_CPU_ARCHITECTURE_X86:
if code.startswith("e8"):
words = code.split()
if len(words) > 6 and words[5] == "call":
offset = int(words[4] + words[3] + words[2] + words[1], 16)
target = (line_address + offset + 5) & 0xFFFFFFFF
code = code.replace(words[6], "0x%08x" % target)
# TODO(jkummerow): port this hack to ARM and x64.
opcodes = code[:op_offset]
code = self.annotate_disasm_addresses(code[op_offset:])
f.write(" <td>")
self.output_comment_box(f, "codel-", line_address)
f.write("</td>\n")
f.write(address_fmt % marker)
f.write(" ")
self.td_from_address(f, line_address)
f.write("%s (+0x%x)</td>\n" %
(self.format_address(line_address), line[0]))
f.write(" <td>: %s </td>\n" % opcodes)
f.write(" <td>%s</td>\n" % code)
f.write("</tr>\n")
def output_comment_box(self, f, prefix, address):
f.write("<input type=\"text\" class=\"commentinput\" "
"id=\"%s-address-0x%s\" onchange=\"comment()\" value=\"%s\">" %
(prefix,
self.reader.FormatIntPtr(address),
cgi.escape(self.comments.get_comment(address)) or ""))
MAX_FOUND_RESULTS = 100
def output_find_results(self, f, results):
f.write("Addresses")
toomany = len(results) > self.MAX_FOUND_RESULTS
if toomany:
f.write("(found %i results, displaying only first %i)" %
(len(results), self.MAX_FOUND_RESULTS))
f.write(": \n")
results = sorted(results)
results = results[:min(len(results), self.MAX_FOUND_RESULTS)]
for address in results:
f.write("<span %s>%s</span>\n" %
(self.comments.get_style_class_string(address),
self.format_address(address)))
if toomany:
f.write("...\n")
def output_page_info(self, f, page_kind, page_address, my_page_address):
if my_page_address == page_address and page_address != 0:
f.write("Marked first %s page.\n" % page_kind)
else:
f.write("<span id=\"%spage\" style=\"display:none\">" % page_kind)
f.write("Marked first %s page." % page_kind)
f.write("</span>\n")
f.write("<button onclick=\"onpage('%spage', '0x%x')\">" %
(page_kind, my_page_address))
f.write("Mark as first %s page</button>\n" % page_kind)
return
def output_search_res(self, f, straddress):
try:
self.output_header(f)
f.write("<h3>Search results for %s</h3>" % straddress)
address = int(straddress, 0)
f.write("Comment: ")
self.output_comment_box(f, "search-", address)
f.write("<br>\n")
page_address = address & ~self.heap.PageAlignmentMask()
f.write("Page info: \n")
self.output_page_info(f, "data", self.padawan.known_first_data_page, \
page_address)
self.output_page_info(f, "map", self.padawan.known_first_map_page, \
page_address)
self.output_page_info(f, "pointer", \
self.padawan.known_first_pointer_page, \
page_address)
if not self.reader.IsValidAddress(address):
f.write("<h3>The contents at address %s not found in the dump.</h3>" % \
straddress)
else:
# Print as words
self.output_words(f, address - 8, address + 32, address, "Dump")
# Print as ASCII
f.write("<hr>\n")
self.output_ascii(f, address, address + 256, address)
# Print as code
f.write("<hr>\n")
self.output_disasm_range(f, address - 16, address + 16, address, True)
aligned_res, unaligned_res = self.reader.FindWordList(address)
if len(aligned_res) > 0:
f.write("<h3>Occurrences of 0x%x at aligned addresses</h3>\n" %
address)
self.output_find_results(f, aligned_res)
if len(unaligned_res) > 0:
f.write("<h3>Occurrences of 0x%x at unaligned addresses</h3>\n" % \
address)
self.output_find_results(f, unaligned_res)
if len(aligned_res) + len(unaligned_res) == 0:
f.write("<h3>No occurences of 0x%x found in the dump</h3>\n" % address)
self.output_footer(f)
except ValueError:
f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
return
def output_disasm_pc(self, f):
address = self.reader.ExceptionIP()
if not self.reader.IsValidAddress(address):
return
self.output_disasm_range(f, address - 16, address + 16, address, True)
WEB_DUMPS_HEADER = """
<!DOCTYPE html>
<html>
<head>
<meta content="text/html; charset=utf-8" http-equiv="content-type">
<style media="screen" type="text/css">
.dumplist {
border-collapse : collapse;
border-spacing : 0px;
font-family: monospace;
}
.dumpcomments {
border : 1px solid LightGray;
width : 32em;
}
</style>
<script type="application/javascript">
var dump_str = "dump-";
var dump_len = dump_str.length;
function dump_comment() {
var s = event.srcElement.id;
var index = s.indexOf(dump_str);
if (index >= 0) {
send_dump_desc(s.substring(index + dump_len), event.srcElement.value);
}
}
function send_dump_desc(name, desc) {
xmlhttp = new XMLHttpRequest();
name = encodeURIComponent(name)
desc = encodeURIComponent(desc)
xmlhttp.open("GET",
"setdumpdesc?dump=" + name +
"&description=" + desc, true);
xmlhttp.send();
}
</script>
<title>Dump list</title>
</head>
<body>
"""
WEB_DUMPS_FOOTER = """
</body>
</html>
"""
DUMP_FILE_RE = re.compile(r"[-_0-9a-zA-Z][-\._0-9a-zA-Z]*\.dmp$")
class InspectionWebServer(BaseHTTPServer.HTTPServer):
def __init__(self, port_number, switches, minidump_name):
BaseHTTPServer.HTTPServer.__init__(
self, ('', port_number), InspectionWebHandler)
splitpath = os.path.split(minidump_name)
self.dumppath = splitpath[0]
self.dumpfilename = splitpath[1]
self.default_formatter = InspectionWebFormatter(
switches, minidump_name, self)
self.formatters = { self.dumpfilename : self.default_formatter }
self.switches = switches
def output_dump_desc_field(self, f, name):
try:
descfile = open(os.path.join(self.dumppath, name + ".desc"), "r")
desc = descfile.readline()
descfile.close()
except IOError:
desc = ""
f.write("<input type=\"text\" class=\"dumpcomments\" "
"id=\"dump-%s\" onchange=\"dump_comment()\" value=\"%s\">\n" %
(cgi.escape(name), desc))
def set_dump_desc(self, name, description):
if not DUMP_FILE_RE.match(name):
return False
fname = os.path.join(self.dumppath, name)
if not os.path.isfile(fname):
return False
fname = fname + ".desc"
descfile = open(fname, "w")
descfile.write(description)
descfile.close()
return True
def get_dump_formatter(self, name):
if name is None:
return self.default_formatter
else:
if not DUMP_FILE_RE.match(name):
raise WebParameterError("Invalid name '%s'" % name)
formatter = self.formatters.get(name, None)
if formatter is None:
try:
formatter = InspectionWebFormatter(
self.switches, os.path.join(self.dumppath, name), self)
self.formatters[name] = formatter
except IOError:
raise WebParameterError("Could not open dump '%s'" % name)
return formatter
def output_dumps(self, f):
f.write(WEB_DUMPS_HEADER)
f.write("<h3>List of available dumps</h3>")
f.write("<table class=\"dumplist\">\n")
f.write("<thead><tr>")
f.write("<th>Name</th>")
f.write("<th>File time</th>")
f.write("<th>Comment</th>")
f.write("</tr></thead>")
dumps_by_time = {}
for fname in os.listdir(self.dumppath):
if DUMP_FILE_RE.match(fname):
mtime = os.stat(os.path.join(self.dumppath, fname)).st_mtime
fnames = dumps_by_time.get(mtime, [])
fnames.append(fname)
dumps_by_time[mtime] = fnames
for mtime in sorted(dumps_by_time, reverse=True):
fnames = dumps_by_time[mtime]
for fname in fnames:
f.write("<tr>\n")
f.write("<td><a href=\"summary.html?%s\">%s</a></td>\n" % (
(urllib.urlencode({ 'dump' : fname }), fname)))
f.write("<td> ")
        f.write(str(datetime.datetime.fromtimestamp(mtime)))
f.write("</td>")
f.write("<td> ")
self.output_dump_desc_field(f, fname)
f.write("</td>")
f.write("</tr>\n")
f.write("</table>\n")
f.write(WEB_DUMPS_FOOTER)
return
class InspectionShell(cmd.Cmd):
def __init__(self, reader, heap):
cmd.Cmd.__init__(self)
self.reader = reader
self.heap = heap
self.padawan = InspectionPadawan(reader, heap)
self.prompt = "(grok) "
def do_da(self, address):
"""
Print ASCII string starting at specified address.
"""
address = int(address, 16)
string = ""
while self.reader.IsValidAddress(address):
code = self.reader.ReadU8(address)
if code < 128:
string += chr(code)
else:
break
address += 1
if string == "":
print "Not an ASCII string at %s" % self.reader.FormatIntPtr(address)
else:
print "%s\n" % string
def do_dd(self, args):
"""
Interpret memory in the given region [address, address + num * word_size)
(if available) as a sequence of words. Automatic alignment is not performed.
If the num is not specified, a default value of 16 words is used.
Synopsis: dd 0x<address> 0x<num>
"""
args = args.split(' ')
start = int(args[0], 16)
num = int(args[1], 16) if len(args) > 1 else 0x10
if (start & self.heap.ObjectAlignmentMask()) != 0:
print "Warning: Dumping un-aligned memory, is this what you had in mind?"
for slot in xrange(start,
start + self.reader.PointerSize() * num,
self.reader.PointerSize()):
if not self.reader.IsValidAddress(slot):
print "Address is not contained within the minidump!"
return
maybe_address = self.reader.ReadUIntPtr(slot)
heap_object = self.padawan.SenseObject(maybe_address)
print "%s: %s %s" % (self.reader.FormatIntPtr(slot),
self.reader.FormatIntPtr(maybe_address),
heap_object or '')
def do_do(self, address):
"""
Interpret memory at the given address as a V8 object. Automatic
alignment makes sure that you can pass tagged as well as un-tagged
addresses.
"""
address = int(address, 16)
if (address & self.heap.ObjectAlignmentMask()) == 0:
address = address + 1
elif (address & self.heap.ObjectAlignmentMask()) != 1:
print "Address doesn't look like a valid pointer!"
return
heap_object = self.padawan.SenseObject(address)
if heap_object:
heap_object.Print(Printer())
else:
print "Address cannot be interpreted as object!"
def do_do_desc(self, address):
"""
Print a descriptor array in a readable format.
"""
start = int(address, 16)
if ((start & 1) == 1): start = start - 1
DescriptorArray(FixedArray(self.heap, None, start)).Print(Printer())
def do_do_map(self, address):
"""
    Print a Map in a readable format.
"""
start = int(address, 16)
if ((start & 1) == 1): start = start - 1
Map(self.heap, None, start).Print(Printer())
def do_do_trans(self, address):
"""
Print a transition array in a readable format.
"""
start = int(address, 16)
if ((start & 1) == 1): start = start - 1
TransitionArray(FixedArray(self.heap, None, start)).Print(Printer())
def do_dp(self, address):
"""
Interpret memory at the given address as being on a V8 heap page
and print information about the page header (if available).
"""
address = int(address, 16)
page_address = address & ~self.heap.PageAlignmentMask()
if self.reader.IsValidAddress(page_address):
raise NotImplementedError
else:
print "Page header is not available!"
def do_k(self, arguments):
"""
Teach V8 heap layout information to the inspector. This increases
the amount of annotations the inspector can produce while dumping
data. The first page of each heap space is of particular interest
because it contains known objects that do not move.
"""
self.padawan.PrintKnowledge()
def do_kd(self, address):
"""
Teach V8 heap layout information to the inspector. Set the first
data-space page by passing any pointer into that page.
"""
address = int(address, 16)
page_address = address & ~self.heap.PageAlignmentMask()
self.padawan.known_first_data_page = page_address
def do_km(self, address):
"""
Teach V8 heap layout information to the inspector. Set the first
map-space page by passing any pointer into that page.
"""
address = int(address, 16)
page_address = address & ~self.heap.PageAlignmentMask()
self.padawan.known_first_map_page = page_address
def do_kp(self, address):
"""
Teach V8 heap layout information to the inspector. Set the first
pointer-space page by passing any pointer into that page.
"""
address = int(address, 16)
page_address = address & ~self.heap.PageAlignmentMask()
self.padawan.known_first_pointer_page = page_address
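  # Illustrative session sketch (hypothetical addresses): teaching the
  # inspector the first page of each space makes later dumps better
  # annotated, e.g. from the shell prompt:
  #   (grok) kd 0x2f501000
  #   (grok) km 0x2f581000
  #   (grok) kp 0x2f601000
  #   (grok) k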
def do_list(self, smth):
"""
List all available memory regions.
"""
def print_region(reader, start, size, location):
print " %s - %s (%d bytes)" % (reader.FormatIntPtr(start),
reader.FormatIntPtr(start + size),
size)
print "Available memory regions:"
self.reader.ForEachMemoryRegion(print_region)
def do_lm(self, arg):
"""
List details for all loaded modules in the minidump. An argument can
be passed to limit the output to only those modules that contain the
argument as a substring (case insensitive match).
"""
for module in self.reader.module_list.modules:
if arg:
name = GetModuleName(self.reader, module).lower()
if name.find(arg.lower()) >= 0:
PrintModuleDetails(self.reader, module)
else:
PrintModuleDetails(self.reader, module)
print
def do_s(self, word):
"""
Search for a given word in available memory regions. The given word
is expanded to full pointer size and searched at aligned as well as
un-aligned memory locations. Use 'sa' to search aligned locations
only.
"""
try:
word = int(word, 0)
except ValueError:
print "Malformed word, prefix with '0x' to use hexadecimal format."
return
print "Searching for word %d/0x%s:" % (word, self.reader.FormatIntPtr(word))
self.reader.FindWord(word)
def do_sh(self, none):
"""
Search for the V8 Heap object in all available memory regions. You
might get lucky and find this rare treasure full of invaluable
information.
"""
raise NotImplementedError
def do_u(self, args):
"""
Unassemble memory in the region [address, address + size). If the
size is not specified, a default value of 32 bytes is used.
Synopsis: u 0x<address> 0x<size>
"""
args = args.split(' ')
start = int(args[0], 16)
size = int(args[1], 16) if len(args) > 1 else 0x20
if not self.reader.IsValidAddress(start):
print "Address is not contained within the minidump!"
return
lines = self.reader.GetDisasmLines(start, size)
for line in lines:
print FormatDisasmLine(start, self.heap, line)
print
def do_EOF(self, none):
raise KeyboardInterrupt
EIP_PROXIMITY = 64
CONTEXT_FOR_ARCH = {
MD_CPU_ARCHITECTURE_AMD64:
['rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi', 'rbp', 'rsp', 'rip',
'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15'],
MD_CPU_ARCHITECTURE_ARM:
['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
'r10', 'r11', 'r12', 'sp', 'lr', 'pc'],
MD_CPU_ARCHITECTURE_X86:
['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
}
KNOWN_MODULES = {'chrome.exe', 'chrome.dll'}
def GetVersionString(ms, ls):
return "%d.%d.%d.%d" % (ms >> 16, ms & 0xffff, ls >> 16, ls & 0xffff)
def GetModuleName(reader, module):
name = reader.ReadMinidumpString(module.module_name_rva)
# simplify for path manipulation
name = name.encode('utf-8')
return str(os.path.basename(str(name).replace("\\", "/")))
def PrintModuleDetails(reader, module):
print "%s" % GetModuleName(reader, module)
file_version = GetVersionString(module.version_info.dwFileVersionMS,
module.version_info.dwFileVersionLS)
product_version = GetVersionString(module.version_info.dwProductVersionMS,
module.version_info.dwProductVersionLS)
print " base: %s" % reader.FormatIntPtr(module.base_of_image)
print " end: %s" % reader.FormatIntPtr(module.base_of_image +
module.size_of_image)
print " file version: %s" % file_version
print " product version: %s" % product_version
time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
print " timestamp: %s" % time_date_stamp
def AnalyzeMinidump(options, minidump_name):
reader = MinidumpReader(options, minidump_name)
heap = None
DebugPrint("========================================")
if reader.exception is None:
print "Minidump has no exception info"
else:
print "Exception info:"
exception_thread = reader.thread_map[reader.exception.thread_id]
print " thread id: %d" % exception_thread.id
print " code: %08X" % reader.exception.exception.code
print " context:"
for r in CONTEXT_FOR_ARCH[reader.arch]:
print " %s: %s" % (r, reader.FormatIntPtr(reader.Register(r)))
# TODO(vitalyr): decode eflags.
if reader.arch == MD_CPU_ARCHITECTURE_ARM:
print " cpsr: %s" % bin(reader.exception_context.cpsr)[2:]
else:
print " eflags: %s" % bin(reader.exception_context.eflags)[2:]
print
print " modules:"
for module in reader.module_list.modules:
name = GetModuleName(reader, module)
if name in KNOWN_MODULES:
print " %s at %08X" % (name, module.base_of_image)
reader.TryLoadSymbolsFor(name, module)
print
stack_top = reader.ExceptionSP()
stack_bottom = exception_thread.stack.start + \
exception_thread.stack.memory.data_size
stack_map = {reader.ExceptionIP(): -1}
for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
maybe_address = reader.ReadUIntPtr(slot)
if not maybe_address in stack_map:
stack_map[maybe_address] = slot
heap = V8Heap(reader, stack_map)
print "Disassembly around exception.eip:"
eip_symbol = reader.FindSymbol(reader.ExceptionIP())
if eip_symbol is not None:
print eip_symbol
disasm_start = reader.ExceptionIP() - EIP_PROXIMITY
disasm_bytes = 2 * EIP_PROXIMITY
if (options.full):
full_range = reader.FindRegion(reader.ExceptionIP())
if full_range is not None:
disasm_start = full_range[0]
disasm_bytes = full_range[1]
lines = reader.GetDisasmLines(disasm_start, disasm_bytes)
for line in lines:
print FormatDisasmLine(disasm_start, heap, line)
print
if heap is None:
heap = V8Heap(reader, None)
if options.full:
FullDump(reader, heap)
if options.command:
InspectionShell(reader, heap).onecmd(options.command)
if options.shell:
try:
InspectionShell(reader, heap).cmdloop("type help to get help")
except KeyboardInterrupt:
print "Kthxbye."
elif not options.command:
if reader.exception is not None:
frame_pointer = reader.ExceptionFP()
print "Annotated stack (from exception.esp to bottom):"
for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
ascii_content = [c if c >= '\x20' and c < '\x7f' else '.'
for c in reader.ReadBytes(slot, reader.PointerSize())]
maybe_address = reader.ReadUIntPtr(slot)
heap_object = heap.FindObject(maybe_address)
maybe_symbol = reader.FindSymbol(maybe_address)
if slot == frame_pointer:
maybe_symbol = "<---- frame pointer"
frame_pointer = maybe_address
print "%s: %s %s %s" % (reader.FormatIntPtr(slot),
reader.FormatIntPtr(maybe_address),
"".join(ascii_content),
maybe_symbol or "")
if heap_object:
heap_object.Print(Printer())
print
reader.Dispose()
if __name__ == "__main__":
parser = optparse.OptionParser(USAGE)
parser.add_option("-s", "--shell", dest="shell", action="store_true",
help="start an interactive inspector shell")
parser.add_option("-w", "--web", dest="web", action="store_true",
help="start a web server on localhost:%i" % PORT_NUMBER)
parser.add_option("-c", "--command", dest="command", default="",
help="run an interactive inspector shell command and exit")
parser.add_option("-f", "--full", dest="full", action="store_true",
help="dump all information contained in the minidump")
parser.add_option("--symdir", dest="symdir", default=".",
help="directory containing *.pdb.sym file with symbols")
parser.add_option("--objdump",
default="/usr/bin/objdump",
help="objdump tool to use [default: %default]")
options, args = parser.parse_args()
if os.path.exists(options.objdump):
disasm.OBJDUMP_BIN = options.objdump
OBJDUMP_BIN = options.objdump
else:
print "Cannot find %s, falling back to default objdump" % options.objdump
if len(args) != 1:
parser.print_help()
sys.exit(1)
if options.web:
try:
server = InspectionWebServer(PORT_NUMBER, options, args[0])
      print 'Started httpserver on port', PORT_NUMBER
webbrowser.open('http://localhost:%i/summary.html' % PORT_NUMBER)
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
else:
AnalyzeMinidump(options, args[0])
| {
"content_hash": "c6b855b279f96d17118862df20287f44",
"timestamp": "",
"source": "github",
"line_count": 3165,
"max_line_length": 81,
"avg_line_length": 33.95608214849921,
"alnum_prop": 0.6333522531659703,
"repo_name": "nekulin/arangodb",
"id": "8986a91b5c318302c0f8ee8601d8919df675d9d6",
"size": "109067",
"binary": false,
"copies": "13",
"ref": "refs/heads/devel",
"path": "3rdParty/V8-4.3.61/tools/grokdump.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "AppleScript",
"bytes": "1429"
},
{
"name": "Assembly",
"bytes": "142084"
},
{
"name": "Batchfile",
"bytes": "9073"
},
{
"name": "C",
"bytes": "1938354"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "79307771"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "109682"
},
{
"name": "CSS",
"bytes": "1683781"
},
{
"name": "CoffeeScript",
"bytes": "94"
},
{
"name": "DIGITAL Command Language",
"bytes": "27303"
},
{
"name": "Emacs Lisp",
"bytes": "15477"
},
{
"name": "Go",
"bytes": "1018005"
},
{
"name": "Groff",
"bytes": "263567"
},
{
"name": "HTML",
"bytes": "458914"
},
{
"name": "JavaScript",
"bytes": "57970034"
},
{
"name": "LLVM",
"bytes": "39361"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "177932"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "26909"
},
{
"name": "Objective-C",
"bytes": "4430"
},
{
"name": "Objective-C++",
"bytes": "1857"
},
{
"name": "Pascal",
"bytes": "145262"
},
{
"name": "Perl",
"bytes": "227308"
},
{
"name": "Protocol Buffer",
"bytes": "5837"
},
{
"name": "Python",
"bytes": "3563935"
},
{
"name": "Ruby",
"bytes": "1000569"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "19885"
},
{
"name": "Shell",
"bytes": "488744"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "Yacc",
"bytes": "36950"
}
],
"symlink_target": ""
} |
import sys
import config_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Skia(config_util.Config):
"""Basic Config class for the Skia repository."""
@staticmethod
def fetch_spec(_props):
solution = {
'name' : 'skia',
'url' : 'https://skia.googlesource.com/skia.git',
'deps_file': 'DEPS',
'managed' : False,
}
spec = {
'solutions': [solution]
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
@staticmethod
def expected_root(_props):
return 'skia'
def main(argv=None):
return Skia().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| {
"content_hash": "91cd64ea3449c40cb4f58bd2413c9a88",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 20.405405405405407,
"alnum_prop": 0.5894039735099338,
"repo_name": "junhuac/MQUIC",
"id": "930173a0c22e07732143fc3abcc0ff44d69486fb",
"size": "918",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "depot_tools/fetch_configs/skia.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "Assembly",
"bytes": "5386"
},
{
"name": "Batchfile",
"bytes": "42909"
},
{
"name": "C",
"bytes": "1168925"
},
{
"name": "C#",
"bytes": "81308"
},
{
"name": "C++",
"bytes": "43919800"
},
{
"name": "CMake",
"bytes": "46379"
},
{
"name": "CSS",
"bytes": "19668"
},
{
"name": "Emacs Lisp",
"bytes": "32613"
},
{
"name": "Go",
"bytes": "7247"
},
{
"name": "Groff",
"bytes": "127224"
},
{
"name": "HTML",
"bytes": "2548385"
},
{
"name": "Java",
"bytes": "1332462"
},
{
"name": "JavaScript",
"bytes": "851006"
},
{
"name": "M4",
"bytes": "29823"
},
{
"name": "Makefile",
"bytes": "459525"
},
{
"name": "Objective-C",
"bytes": "120158"
},
{
"name": "Objective-C++",
"bytes": "330017"
},
{
"name": "PHP",
"bytes": "11283"
},
{
"name": "Protocol Buffer",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "16872234"
},
{
"name": "R",
"bytes": "1842"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "764509"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "VimL",
"bytes": "12288"
},
{
"name": "nesC",
"bytes": "14779"
}
],
"symlink_target": ""
} |
from unittest import mock
from oslotest import base
from monasca_api.common.rest import exceptions
from monasca_api.common.rest import utils
class TestRestUtils(base.BaseTestCase):
def setUp(self):
super(TestRestUtils, self).setUp()
self.mock_json_patcher = mock.patch('monasca_api.common.rest.utils.json')
self.mock_json = self.mock_json_patcher.start()
def tearDown(self):
super(TestRestUtils, self).tearDown()
self.mock_json_patcher.stop()
def test_read_body_with_success(self):
self.mock_json.loads.return_value = ""
payload = mock.Mock()
utils.read_body(payload)
self.mock_json.loads.assert_called_once_with(payload.read.return_value)
def test_read_body_empty_content_in_payload(self):
self.mock_json.loads.return_value = ""
payload = mock.Mock()
payload.read.return_value = None
self.assertIsNone(utils.read_body(payload))
def test_read_body_json_loads_exception(self):
self.mock_json.loads.side_effect = Exception
payload = mock.Mock()
self.assertRaises(exceptions.DataConversionException,
utils.read_body, payload)
def test_read_body_unsupported_content_type(self):
unsupported_content_type = mock.Mock()
self.assertRaises(
exceptions.UnsupportedContentTypeException, utils.read_body, None,
unsupported_content_type)
def test_read_body_unreadable_content_error(self):
unreadable_content = mock.Mock()
unreadable_content.read.side_effect = Exception
self.assertRaises(
exceptions.UnreadableContentError,
utils.read_body, unreadable_content)
def test_as_json_success(self):
data = mock.Mock()
dumped_json = utils.as_json(data)
self.assertEqual(dumped_json, self.mock_json.dumps.return_value)
def test_as_json_with_exception(self):
data = mock.Mock()
self.mock_json.dumps.side_effect = Exception
self.assertRaises(exceptions.DataConversionException,
utils.as_json, data)
| {
"content_hash": "35be15991a7f5ef91ea6bba151f2520a",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 81,
"avg_line_length": 31.159420289855074,
"alnum_prop": 0.6576744186046511,
"repo_name": "openstack/monasca-api",
"id": "33e60066c6c2117e0895af01a1004f9b04497e18",
"size": "2696",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monasca_api/tests/test_rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2638"
},
{
"name": "Java",
"bytes": "883947"
},
{
"name": "Jinja",
"bytes": "32747"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "936668"
},
{
"name": "Shell",
"bytes": "129514"
}
],
"symlink_target": ""
} |
import argparse
import re
# Taken from http://genomewiki.ucsc.edu/index.php/Hg19_Genome_size_statistics
HG19_NON_N_GENOME_SIZE = 2897310462
def scrape_bamstat(statsfile, threshold=7):
proper_exp = re.compile(r'Proper pairs\s*(\d+)')
insert_exp = re.compile(r'Actual FR median insert size:\s*(\d+)')
dev_exp = re.compile(r'Actual FR median absolute deviation:\s*(\d+)')
for line in statsfile:
if proper_exp.match(line):
proper = int(proper_exp.match(line).group(1))
if insert_exp.match(line):
insert = int(insert_exp.match(line).group(1))
if dev_exp.match(line):
dev = int(dev_exp.match(line).group(1))
coverage = proper * insert / HG19_NON_N_GENOME_SIZE
del_size = insert + threshold * dev
return coverage, proper, insert, dev, del_size
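# Worked example (hypothetical numbers, for illustration only): with
# proper = 450,000,000 proper pairs and a median insert of 320 bp,
# coverage = 450e6 * 320 / 2,897,310,462 is roughly 49x over the non-N hg19
# genome, and with dev = 40 and the default threshold of 7 the reported
# deletion size cutoff is del_size = 320 + 7 * 40 = 600 bp.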
def main():
parser = argparse.ArgumentParser(
description="Script that scrapes sample directories in Samples/ndd "
"for bamstat stats files and writes a file of average library "
"coverage, number of proper pairs in the library, median insert of "
"the library, and the median absolute deviation from this median.")
parser.add_argument('samples', type=argparse.FileType('r'),
help="Tab separated file containing sample names "
"and their subdirectories under "
"/data/talkowski/Samples/ndd")
parser.add_argument('outfile', type=argparse.FileType('w'),
help="Output file to write to. File will be tab "
"separated and be in the format: Sample Coverage "
"Proper_Pair_Count Median_Insert Deviation")
args = parser.parse_args()
args.outfile.write('sample\tcoverage\tproper\tinsert\tdev\tdel_size\n')
for line in args.samples:
sample, bstat_dir = line.rstrip().split()[0:2]
statsfile = open('%s/stats.file' % bstat_dir)
coverage, proper, insert, dev, del_size = scrape_bamstat(statsfile)
args.outfile.write("%s\t%d\t%d\t%d\t%d\t%d\n" %
(sample, coverage, proper, insert, dev, del_size))
if __name__ == '__main__':
main()
| {
"content_hash": "1ba2426dc979bc6a8b5e3349965a31c2",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 77,
"avg_line_length": 40.32727272727273,
"alnum_prop": 0.6172227231740307,
"repo_name": "talkowski-lab/Holmes",
"id": "66fc230f5ca571dc561970ea569b6b9185cbec8d",
"size": "2242",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pycluster/scrape_bamstat.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "14905"
},
{
"name": "Makefile",
"bytes": "627"
},
{
"name": "Python",
"bytes": "184975"
},
{
"name": "R",
"bytes": "49110"
},
{
"name": "Shell",
"bytes": "349854"
}
],
"symlink_target": ""
} |
"""
pyinotify
@author: Sebastien Martini
@license: MIT License
@contact: [email protected]
"""
class PyinotifyError(Exception):
"""Indicates exceptions raised by a Pyinotify class."""
pass
class UnsupportedPythonVersionError(PyinotifyError):
"""
Raised on unsupported Python versions.
"""
def __init__(self, version):
"""
@param version: Current Python version
@type version: string
"""
PyinotifyError.__init__(self,
('Python %s is unsupported, requires '
'at least Python 3.0') % version)
class UnsupportedLibcVersionError(PyinotifyError):
"""
    Raised when libc couldn't be loaded or when inotify functions weren't
    provided.
"""
def __init__(self):
err = 'libc does not provide required inotify support'
PyinotifyError.__init__(self, err)
# Check Python version
import sys
if sys.version < '3.0':
raise UnsupportedPythonVersionError(sys.version)
# Import directives
import threading
import os
import select
import struct
import fcntl
import errno
import termios
import array
import logging
import atexit
from collections import deque
from datetime import datetime, timedelta
import time
import fnmatch
import re
import ctypes
import ctypes.util
import asyncore
import glob
try:
from functools import reduce
except ImportError:
pass # Will fail on Python 2.4 which has reduce() builtin anyway.
__author__ = "[email protected] (Sebastien Martini)"
__version__ = "0.9.0"
# Compatibility mode: set to True to improve compatibility with
# Pyinotify 0.7.1. Do not set this variable yourself, call the
# function compatibility_mode() instead.
COMPATIBILITY_MODE = False
# Load libc
LIBC = None
def strerrno():
code = ctypes.get_errno()
return '%s (%s)' % (os.strerror(code), errno.errorcode[code])
def load_libc():
global LIBC
libc = None
try:
libc = ctypes.util.find_library('c')
except OSError as err:
        pass # Will attempt to load it with None anyway.
except IOError as err:
pass
LIBC = ctypes.CDLL(libc, use_errno=True)
# Check that libc has needed functions inside.
if (not hasattr(LIBC, 'inotify_init') or
not hasattr(LIBC, 'inotify_add_watch') or
not hasattr(LIBC, 'inotify_rm_watch')):
raise UnsupportedLibcVersionError()
load_libc()
class PyinotifyLogger(logging.Logger):
"""
Pyinotify logger used for logging unicode strings.
"""
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None,
extra=None):
rv = UnicodeLogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
# Logging
def logger_init():
"""Initialize logger instance."""
log = logging.getLogger("pyinotify")
console_handler = logging.StreamHandler()
console_handler.setFormatter(
logging.Formatter("[%(asctime)s %(name)s %(levelname)s] %(message)s"))
log.addHandler(console_handler)
log.setLevel(20)
return log
log = logger_init()
# inotify's variables
class SysCtlINotify:
"""
Access (read, write) inotify's variables through sysctl. Usually it
requires administrator rights to update them.
Examples:
- Read max_queued_events attribute: myvar = max_queued_events.value
- Update max_queued_events attribute: max_queued_events.value = 42
"""
inotify_attrs = {'max_user_instances': 1,
'max_user_watches': 2,
'max_queued_events': 3}
def __init__(self, attrname):
sino = ctypes.c_int * 3
self._attrname = attrname
self._attr = sino(5, 20, SysCtlINotify.inotify_attrs[attrname])
def get_val(self):
"""
Gets attribute's value.
@return: stored value.
@rtype: int
"""
oldv = ctypes.c_int(0)
size = ctypes.c_int(ctypes.sizeof(oldv))
LIBC.sysctl(self._attr, 3,
ctypes.c_voidp(ctypes.addressof(oldv)),
ctypes.addressof(size),
None, 0)
return oldv.value
def set_val(self, nval):
"""
Sets new attribute's value.
@param nval: replaces current value by nval.
@type nval: int
"""
oldv = ctypes.c_int(0)
sizeo = ctypes.c_int(ctypes.sizeof(oldv))
newv = ctypes.c_int(nval)
sizen = ctypes.c_int(ctypes.sizeof(newv))
LIBC.sysctl(self._attr, 3,
ctypes.c_voidp(ctypes.addressof(oldv)),
ctypes.addressof(sizeo),
ctypes.c_voidp(ctypes.addressof(newv)),
ctypes.addressof(sizen))
value = property(get_val, set_val)
def __repr__(self):
return '<%s=%d>' % (self._attrname, self.get_val())
# Singleton instances
#
# read: myvar = max_queued_events.value
# update: max_queued_events.value = 42
#
for attrname in ('max_queued_events', 'max_user_instances', 'max_user_watches'):
globals()[attrname] = SysCtlINotify(attrname)
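# Illustrative usage sketch (documentation only): each singleton above wraps
# one inotify sysctl limit (also exposed under /proc/sys/fs/inotify/) through
# its 'value' property, e.g.:
#   current = max_user_watches.value   # read the current limit
#   max_user_watches.value = 16384     # update it (usually requires root)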
class EventsCodes:
"""
    Set of codes corresponding to each kind of event.
    Some of these flags are used to communicate with inotify, whereas
    the others are sent to userspace by inotify to notify about events.
@cvar IN_ACCESS: File was accessed.
@type IN_ACCESS: int
@cvar IN_MODIFY: File was modified.
@type IN_MODIFY: int
@cvar IN_ATTRIB: Metadata changed.
@type IN_ATTRIB: int
@cvar IN_CLOSE_WRITE: Writtable file was closed.
@type IN_CLOSE_WRITE: int
@cvar IN_CLOSE_NOWRITE: Unwrittable file closed.
@type IN_CLOSE_NOWRITE: int
@cvar IN_OPEN: File was opened.
@type IN_OPEN: int
@cvar IN_MOVED_FROM: File was moved from X.
@type IN_MOVED_FROM: int
@cvar IN_MOVED_TO: File was moved to Y.
@type IN_MOVED_TO: int
@cvar IN_CREATE: Subfile was created.
@type IN_CREATE: int
@cvar IN_DELETE: Subfile was deleted.
@type IN_DELETE: int
@cvar IN_DELETE_SELF: Self (watched item itself) was deleted.
@type IN_DELETE_SELF: int
@cvar IN_MOVE_SELF: Self (watched item itself) was moved.
@type IN_MOVE_SELF: int
@cvar IN_UNMOUNT: Backing fs was unmounted.
@type IN_UNMOUNT: int
@cvar IN_Q_OVERFLOW: Event queued overflowed.
@type IN_Q_OVERFLOW: int
@cvar IN_IGNORED: File was ignored.
@type IN_IGNORED: int
@cvar IN_ONLYDIR: only watch the path if it is a directory (new
in kernel 2.6.15).
@type IN_ONLYDIR: int
@cvar IN_DONT_FOLLOW: don't follow a symlink (new in kernel 2.6.15).
                          With this flag we can make sure that we don't
                          watch the target of symlinks.
@type IN_DONT_FOLLOW: int
@cvar IN_MASK_ADD: add to the mask of an already existing watch (new
in kernel 2.6.14).
@type IN_MASK_ADD: int
@cvar IN_ISDIR: Event occurred against dir.
@type IN_ISDIR: int
@cvar IN_ONESHOT: Only send event once.
@type IN_ONESHOT: int
@cvar ALL_EVENTS: Alias for considering all of the events.
@type ALL_EVENTS: int
"""
# The idea here is 'configuration-as-code' - this way, we get our nice class
# constants, but we also get nice human-friendly text mappings to do lookups
# against as well, for free:
FLAG_COLLECTIONS = {'OP_FLAGS': {
'IN_ACCESS' : 0x00000001, # File was accessed
'IN_MODIFY' : 0x00000002, # File was modified
'IN_ATTRIB' : 0x00000004, # Metadata changed
'IN_CLOSE_WRITE' : 0x00000008, # Writable file was closed
'IN_CLOSE_NOWRITE' : 0x00000010, # Unwritable file closed
'IN_OPEN' : 0x00000020, # File was opened
'IN_MOVED_FROM' : 0x00000040, # File was moved from X
'IN_MOVED_TO' : 0x00000080, # File was moved to Y
'IN_CREATE' : 0x00000100, # Subfile was created
'IN_DELETE' : 0x00000200, # Subfile was deleted
'IN_DELETE_SELF' : 0x00000400, # Self (watched item itself)
# was deleted
'IN_MOVE_SELF' : 0x00000800, # Self (watched item itself) was moved
},
'EVENT_FLAGS': {
'IN_UNMOUNT' : 0x00002000, # Backing fs was unmounted
'IN_Q_OVERFLOW' : 0x00004000, # Event queued overflowed
'IN_IGNORED' : 0x00008000, # File was ignored
},
'SPECIAL_FLAGS': {
'IN_ONLYDIR' : 0x01000000, # only watch the path if it is a
# directory
'IN_DONT_FOLLOW' : 0x02000000, # don't follow a symlink
'IN_MASK_ADD' : 0x20000000, # add to the mask of an already
# existing watch
'IN_ISDIR' : 0x40000000, # event occurred against dir
'IN_ONESHOT' : 0x80000000, # only send event once
},
}
def maskname(mask):
"""
Returns the event name associated to mask. IN_ISDIR is appended to
the result when appropriate. Note: only one event is returned, because
only one event can be raised at a given time.
@param mask: mask.
@type mask: int
@return: event name.
@rtype: str
"""
ms = mask
name = '%s'
if mask & IN_ISDIR:
ms = mask - IN_ISDIR
name = '%s|IN_ISDIR'
return name % EventsCodes.ALL_VALUES[ms]
maskname = staticmethod(maskname)
# So let's now turn the configuration into code
EventsCodes.ALL_FLAGS = {}
EventsCodes.ALL_VALUES = {}
for flagc, valc in EventsCodes.FLAG_COLLECTIONS.items():
# Make the collections' members directly accessible through the
# class dictionary
setattr(EventsCodes, flagc, valc)
# Collect all the flags under a common umbrella
EventsCodes.ALL_FLAGS.update(valc)
# Make the individual masks accessible as 'constants' at globals() scope
# and masknames accessible by values.
for name, val in valc.items():
globals()[name] = val
EventsCodes.ALL_VALUES[val] = name
# all 'normal' events
ALL_EVENTS = reduce(lambda x, y: x | y, EventsCodes.OP_FLAGS.values())
EventsCodes.ALL_FLAGS['ALL_EVENTS'] = ALL_EVENTS
EventsCodes.ALL_VALUES[ALL_EVENTS] = 'ALL_EVENTS'
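# Illustrative usage sketch (documentation only; the values follow directly
# from the flag definitions above):
#   EventsCodes.maskname(IN_CREATE)            returns 'IN_CREATE'
#   EventsCodes.maskname(IN_CREATE | IN_ISDIR) returns 'IN_CREATE|IN_ISDIR'
# ALL_EVENTS is the OR of every flag in EventsCodes.OP_FLAGS and can be used
# as a watch mask when every 'normal' event is of interest.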
class _Event:
"""
Event structure, represent events raised by the system. This
is the base class and should be subclassed.
"""
def __init__(self, dict_):
"""
Attach attributes (contained in dict_) to self.
@param dict_: Set of attributes.
@type dict_: dictionary
"""
for tpl in dict_.items():
setattr(self, *tpl)
def __repr__(self):
"""
@return: Generic event string representation.
@rtype: str
"""
s = ''
for attr, value in sorted(self.__dict__.items(), key=lambda x: x[0]):
if attr.startswith('_'):
continue
if attr == 'mask':
value = hex(getattr(self, attr))
elif isinstance(value, str) and not value:
value = "''"
s += ' %s%s%s' % (output_format.field_name(attr),
output_format.punctuation('='),
output_format.field_value(value))
s = '%s%s%s %s' % (output_format.punctuation('<'),
output_format.class_name(self.__class__.__name__),
s,
output_format.punctuation('>'))
return s
def __str__(self):
return repr(self)
class _RawEvent(_Event):
"""
    Raw event, it contains only the information provided by the system.
It doesn't infer anything.
"""
def __init__(self, wd, mask, cookie, name):
"""
@param wd: Watch Descriptor.
@type wd: int
@param mask: Bitmask of events.
@type mask: int
@param cookie: Cookie.
@type cookie: int
@param name: Basename of the file or directory against which the
event was raised in case where the watched directory
is the parent directory. None if the event was raised
on the watched item itself.
@type name: string or None
"""
# Use this variable to cache the result of str(self), this object
# is immutable.
self._str = None
# name: remove trailing '\0'
d = {'wd': wd,
'mask': mask,
'cookie': cookie,
'name': name.rstrip('\0')}
_Event.__init__(self, d)
log.debug(str(self))
def __str__(self):
if self._str is None:
self._str = _Event.__str__(self)
return self._str
class Event(_Event):
"""
    This class contains all the useful information about the observed
    event. However, the presence of each field is not guaranteed and
    depends on the type of event. In effect, some fields are irrelevant
    for some kinds of events (for example 'cookie' is meaningless for
    IN_CREATE whereas it is mandatory for IN_MOVED_TO).
The possible fields are:
- wd (int): Watch Descriptor.
- mask (int): Mask.
- maskname (str): Readable event name.
- path (str): path of the file or directory being watched.
- name (str): Basename of the file or directory against which the
event was raised in case where the watched directory
is the parent directory. None if the event was raised
on the watched item itself. This field is always provided
even if the string is ''.
- pathname (str): Concatenation of 'path' and 'name'.
- src_pathname (str): Only present for IN_MOVED_TO events and only in
the case where IN_MOVED_FROM events are watched too. Holds the
        source pathname from which pathname was moved.
- cookie (int): Cookie.
- dir (bool): True if the event was raised against a directory.
"""
def __init__(self, raw):
"""
Concretely, this is the raw event plus inferred infos.
"""
_Event.__init__(self, raw)
self.maskname = EventsCodes.maskname(self.mask)
if COMPATIBILITY_MODE:
self.event_name = self.maskname
try:
if self.name:
self.pathname = os.path.abspath(os.path.join(self.path,
self.name))
else:
self.pathname = os.path.abspath(self.path)
except AttributeError as err:
            # Usually it is not an error: some events are perfectly valid
# despite the lack of these attributes.
log.debug(err)
class ProcessEventError(PyinotifyError):
"""
ProcessEventError Exception. Raised on ProcessEvent error.
"""
def __init__(self, err):
"""
@param err: Exception error description.
@type err: string
"""
PyinotifyError.__init__(self, err)
class _ProcessEvent:
"""
Abstract processing event class.
"""
def __call__(self, event):
"""
To behave like a functor the object must be callable.
This method is a dispatch method. Its lookup order is:
1. process_MASKNAME method
2. process_FAMILY_NAME method
3. otherwise calls process_default
@param event: Event to be processed.
@type event: Event object
@return: By convention when used from the ProcessEvent class:
- Returning False or None (default value) means keep on
executing next chained functors (see chain.py example).
- Returning True instead means do not execute next
processing functions.
@rtype: bool
@raise ProcessEventError: Event object undispatchable,
unknown event.
"""
stripped_mask = event.mask - (event.mask & IN_ISDIR)
maskname = EventsCodes.ALL_VALUES.get(stripped_mask)
if maskname is None:
raise ProcessEventError("Unknown mask 0x%08x" % stripped_mask)
# 1- look for process_MASKNAME
meth = getattr(self, 'process_' + maskname, None)
if meth is not None:
return meth(event)
# 2- look for process_FAMILY_NAME
meth = getattr(self, 'process_IN_' + maskname.split('_')[1], None)
if meth is not None:
return meth(event)
# 3- default call method process_default
return self.process_default(event)
def __repr__(self):
return '<%s>' % self.__class__.__name__
class _SysProcessEvent(_ProcessEvent):
"""
    There are three kinds of processing according to each event:
      1. special handling (deletion from internal container, bug, ...).
      2. default treatment: which is applied to the majority of events.
      3. IN_ISDIR is never sent alone, it is piggybacked with a standard
         event; it is not processed like the other events, instead its
         value is captured and appropriately aggregated to the dst event.
"""
def __init__(self, wm, notifier):
"""
@param wm: Watch Manager.
@type wm: WatchManager instance
@param notifier: Notifier.
@type notifier: Notifier instance
"""
self._watch_manager = wm # watch manager
self._notifier = notifier # notifier
self._mv_cookie = {} # {cookie(int): (src_path(str), date), ...}
self._mv = {} # {src_path(str): (dst_path(str), date), ...}
def cleanup(self):
"""
        Cleanup (delete) old (> 1 minute) records contained in self._mv_cookie
and self._mv.
"""
date_cur_ = datetime.now()
for seq in (self._mv_cookie, self._mv):
for k in list(seq.keys()):
if (date_cur_ - seq[k][1]) > timedelta(minutes=1):
log.debug('Cleanup: deleting entry %s', seq[k][0])
del seq[k]
def process_IN_CREATE(self, raw_event):
"""
If the event affects a directory and the auto_add flag of the
        targeted watch is set to True, a new watch is added on this
        new directory, with the same attribute values as those of
        this watch.
"""
if raw_event.mask & IN_ISDIR:
watch_ = self._watch_manager.get_watch(raw_event.wd)
created_dir = os.path.join(watch_.path, raw_event.name)
if watch_.auto_add and not watch_.exclude_filter(created_dir):
addw = self._watch_manager.add_watch
# The newly monitored directory inherits attributes from its
# parent directory.
addw_ret = addw(created_dir, watch_.mask,
proc_fun=watch_.proc_fun,
rec=False, auto_add=watch_.auto_add,
exclude_filter=watch_.exclude_filter)
# Trick to handle mkdir -p /t1/t2/t3 where t1 is watched and
# t2 and t3 are created.
# Since the directory is new, then everything inside it
# must also be new.
created_dir_wd = addw_ret.get(created_dir)
if (created_dir_wd is not None) and created_dir_wd > 0:
for name in os.listdir(created_dir):
inner = os.path.join(created_dir, name)
if (os.path.isdir(inner) and
self._watch_manager.get_wd(inner) is None):
# Generate (simulate) creation event for sub
# directories.
rawevent = _RawEvent(created_dir_wd,
IN_CREATE | IN_ISDIR,
0, name)
self._notifier.append_event(rawevent)
return self.process_default(raw_event)
def process_IN_MOVED_FROM(self, raw_event):
"""
Map the cookie with the source path (+ date for cleaning).
"""
watch_ = self._watch_manager.get_watch(raw_event.wd)
path_ = watch_.path
src_path = os.path.normpath(os.path.join(path_, raw_event.name))
self._mv_cookie[raw_event.cookie] = (src_path, datetime.now())
return self.process_default(raw_event, {'cookie': raw_event.cookie})
def process_IN_MOVED_TO(self, raw_event):
"""
Map the source path with the destination path (+ date for
cleaning).
"""
watch_ = self._watch_manager.get_watch(raw_event.wd)
path_ = watch_.path
dst_path = os.path.normpath(os.path.join(path_, raw_event.name))
mv_ = self._mv_cookie.get(raw_event.cookie)
to_append = {'cookie': raw_event.cookie}
if mv_ is not None:
self._mv[mv_[0]] = (dst_path, datetime.now())
# Let's assume that IN_MOVED_FROM event is always queued before
# that its associated (they share a common cookie) IN_MOVED_TO
# event is queued itself. It is then possible in that scenario
# to provide as additional information to the IN_MOVED_TO event
# the original pathname of the moved file/directory.
to_append['src_pathname'] = mv_[0]
elif (raw_event.mask & IN_ISDIR and watch_.auto_add and
not watch_.exclude_filter(dst_path)):
            # We got a directory that's "moved in" from an unknown source and
# auto_add is enabled. Manually add watches to the inner subtrees.
# The newly monitored directory inherits attributes from its
# parent directory.
self._watch_manager.add_watch(dst_path, watch_.mask,
proc_fun=watch_.proc_fun,
rec=True, auto_add=True,
exclude_filter=watch_.exclude_filter)
return self.process_default(raw_event, to_append)
def process_IN_MOVE_SELF(self, raw_event):
"""
STATUS: the following bug has been fixed in recent kernels (FIXME:
which version ?). Now it raises IN_DELETE_SELF instead.
        Old kernels were bugged: this event was raised when the watched item
        was moved, so we had to update its path, but under some circumstances
        it was impossible: if its parent directory and its destination
        directory weren't watched. The kernel (see include/linux/fsnotify.h)
        doesn't give us enough information, such as the destination path of
        moved items.
"""
watch_ = self._watch_manager.get_watch(raw_event.wd)
src_path = watch_.path
mv_ = self._mv.get(src_path)
if mv_:
dest_path = mv_[0]
watch_.path = dest_path
# add the separator to the source path to avoid overlapping
# path issue when testing with startswith()
src_path += os.path.sep
src_path_len = len(src_path)
# The next loop renames all watches with src_path as base path.
# It seems that IN_MOVE_SELF does not provide IN_ISDIR information
# therefore the next loop is iterated even if raw_event is a file.
for w in self._watch_manager.watches.values():
if w.path.startswith(src_path):
# Note that dest_path is a normalized path.
w.path = os.path.join(dest_path, w.path[src_path_len:])
else:
log.error("The pathname '%s' of this watch %s has probably changed "
"and couldn't be updated, so it cannot be trusted "
"anymore. To fix this error move directories/files only "
"between watched parents directories, in this case e.g. "
"put a watch on '%s'.",
watch_.path, watch_,
os.path.normpath(os.path.join(watch_.path,
os.path.pardir)))
if not watch_.path.endswith('-unknown-path'):
watch_.path += '-unknown-path'
return self.process_default(raw_event)
def process_IN_Q_OVERFLOW(self, raw_event):
"""
Only signal an overflow, most of the common flags are irrelevant
for this event (path, wd, name).
"""
return Event({'mask': raw_event.mask})
def process_IN_IGNORED(self, raw_event):
"""
The watch descriptor raised by this event is now ignored (forever),
it can be safely deleted from the watch manager dictionary.
After this event we can be sure that neither the event queue nor
the system will raise an event associated to this wd again.
"""
event_ = self.process_default(raw_event)
self._watch_manager.del_watch(raw_event.wd)
return event_
def process_default(self, raw_event, to_append=None):
"""
Commons handling for the followings events:
IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_WRITE, IN_CLOSE_NOWRITE,
IN_OPEN, IN_DELETE, IN_DELETE_SELF, IN_UNMOUNT.
"""
watch_ = self._watch_manager.get_watch(raw_event.wd)
if raw_event.mask & (IN_DELETE_SELF | IN_MOVE_SELF):
            # Unfortunately this information is not provided by the kernel
dir_ = watch_.dir
else:
dir_ = bool(raw_event.mask & IN_ISDIR)
dict_ = {'wd': raw_event.wd,
'mask': raw_event.mask,
'path': watch_.path,
'name': raw_event.name,
'dir': dir_}
if COMPATIBILITY_MODE:
dict_['is_dir'] = dir_
if to_append is not None:
dict_.update(to_append)
return Event(dict_)
class ProcessEvent(_ProcessEvent):
"""
    Process event objects; this class can be specialized via subclassing,
    thus its behavior can be overridden.
    Note: you should not override __init__ in your subclass; instead define
    a my_init() method, this method will be called automatically from the
    constructor of this class with its optional parameters.
1. Provide specialized individual methods, e.g. process_IN_DELETE for
processing a precise type of event (e.g. IN_DELETE in this case).
2. Or/and provide methods for processing events by 'family', e.g.
process_IN_CLOSE method will process both IN_CLOSE_WRITE and
IN_CLOSE_NOWRITE events (if process_IN_CLOSE_WRITE and
process_IN_CLOSE_NOWRITE aren't defined though).
3. Or/and override process_default for catching and processing all
the remaining types of events.
"""
pevent = None
def __init__(self, pevent=None, **kargs):
"""
Enable chaining of ProcessEvent instances.
@param pevent: Optional callable object, will be called on event
processing (before self).
@type pevent: callable
@param kargs: This constructor is implemented as a template method
                      delegating its optional keyword arguments to the
method my_init().
@type kargs: dict
"""
self.pevent = pevent
self.my_init(**kargs)
def my_init(self, **kargs):
"""
This method is called from ProcessEvent.__init__(). This method is
empty here and must be redefined to be useful. In effect, if you
need to specifically initialize your subclass' instance then you
just have to override this method in your subclass. Then all the
        keyword arguments passed to ProcessEvent.__init__() will be
transmitted as parameters to this method. Beware you MUST pass
keyword arguments though.
@param kargs: optional delegated arguments from __init__().
@type kargs: dict
"""
pass
def __call__(self, event):
stop_chaining = False
if self.pevent is not None:
# By default methods return None so we set as guideline
            # that methods asking to stop chaining must explicitly
            # return non-None or non-False values, otherwise the default
            # behavior will be to accept chained calls to the corresponding
# local method.
stop_chaining = self.pevent(event)
if not stop_chaining:
return _ProcessEvent.__call__(self, event)
def nested_pevent(self):
return self.pevent
def process_IN_Q_OVERFLOW(self, event):
"""
        By default this method only reports warning messages, you can override
        it by subclassing ProcessEvent and implementing your own
        process_IN_Q_OVERFLOW method. The actions you can take on receiving
        this event are either to update the variable max_queued_events in
        order to handle more simultaneous events, or to modify your code in
        order to achieve better filtering and diminish the number of raised
        events.
Because this method is defined, IN_Q_OVERFLOW will never get
transmitted as arguments to process_default calls.
@param event: IN_Q_OVERFLOW event.
@type event: dict
"""
log.warning('Event queue overflowed.')
def process_default(self, event):
"""
Default processing event method. By default does nothing. Subclass
ProcessEvent and redefine this method in order to modify its behavior.
@param event: Event to be processed. Can be of any type of events but
IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW).
@type event: Event instance
"""
pass
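# Illustrative sketch (not part of the original pyinotify API; the class name
# and messages are made up): a minimal ProcessEvent subclass relying on the
# dispatch order described above -- process_IN_CREATE handles IN_CREATE,
# process_IN_CLOSE handles both IN_CLOSE_WRITE and IN_CLOSE_NOWRITE, and
# process_default catches the remaining event types.
class _ExampleHandler(ProcessEvent):
    def my_init(self, out=None):
        # my_init() receives the keyword arguments passed to __init__().
        self._out = out if out is not None else sys.stdout
    def process_IN_CREATE(self, event):
        self._out.write('created: %s\n' % event.pathname)
    def process_IN_CLOSE(self, event):
        # Family method: called for IN_CLOSE_WRITE and IN_CLOSE_NOWRITE
        # unless more specific process_IN_CLOSE_* methods are defined.
        self._out.write('closed: %s\n' % event.pathname)
    def process_default(self, event):
        self._out.write('other event: %s\n' % event.maskname)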
class PrintAllEvents(ProcessEvent):
"""
    Dummy class used to print events' string representations. For instance this
    class is used from the command line to print all received events to stdout.
"""
def my_init(self, out=None):
"""
@param out: Where events will be written.
@type out: Object providing a valid file object interface.
"""
if out is None:
out = sys.stdout
self._out = out
def process_default(self, event):
"""
Writes event string representation to file object provided to
my_init().
@param event: Event to be processed. Can be of any type of events but
IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW).
@type event: Event instance
"""
self._out.write(str(event))
self._out.write('\n')
self._out.flush()
class ChainIfTrue(ProcessEvent):
"""
Makes conditional chaining depending on the result of the nested
processing instance.
"""
def my_init(self, func):
"""
Method automatically called from base class constructor.
"""
self._func = func
def process_default(self, event):
return not self._func(event)
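# Illustrative chaining sketch (documentation only; the predicate is
# hypothetical): ChainIfTrue can be nested into another ProcessEvent instance
# through its 'pevent' parameter so that the outer handler only runs when the
# predicate holds, e.g.:
#   only_dirs = ChainIfTrue(func=lambda event: event.dir)
#   handler = PrintAllEvents(pevent=only_dirs)
# Here events raised against files make ChainIfTrue return True (stop
# chaining), so only directory events reach PrintAllEvents.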
class Stats(ProcessEvent):
"""
Compute and display trivial statistics about processed events.
"""
def my_init(self):
"""
Method automatically called from base class constructor.
"""
self._start_time = time.time()
self._stats = {}
self._stats_lock = threading.Lock()
def process_default(self, event):
"""
Processes |event|.
"""
self._stats_lock.acquire()
try:
events = event.maskname.split('|')
for event_name in events:
count = self._stats.get(event_name, 0)
self._stats[event_name] = count + 1
finally:
self._stats_lock.release()
def _stats_copy(self):
self._stats_lock.acquire()
try:
return self._stats.copy()
finally:
self._stats_lock.release()
def __repr__(self):
stats = self._stats_copy()
elapsed = int(time.time() - self._start_time)
elapsed_str = ''
if elapsed < 60:
elapsed_str = str(elapsed) + 'sec'
elif 60 <= elapsed < 3600:
elapsed_str = '%dmn%dsec' % (elapsed / 60, elapsed % 60)
elif 3600 <= elapsed < 86400:
elapsed_str = '%dh%dmn' % (elapsed / 3600, (elapsed % 3600) / 60)
elif elapsed >= 86400:
elapsed_str = '%dd%dh' % (elapsed / 86400, (elapsed % 86400) / 3600)
stats['ElapsedTime'] = elapsed_str
l = []
for ev, value in sorted(stats.items(), key=lambda x: x[0]):
l.append(' %s=%s' % (output_format.field_name(ev),
output_format.field_value(value)))
s = '<%s%s >' % (output_format.class_name(self.__class__.__name__),
''.join(l))
return s
def dump(self, filename):
"""
Dumps statistics to file |filename|.
@param filename: pathname.
@type filename: string
"""
with open(filename, 'w') as file_obj:
file_obj.write(str(self))
def __str__(self, scale=45):
stats = self._stats_copy()
if not stats:
return ''
m = max(stats.values())
unity = scale / m
fmt = '%%-26s%%-%ds%%s' % (len(output_format.field_value('@' * scale))
+ 1)
def func(x):
return fmt % (output_format.field_name(x[0]),
output_format.field_value('@' * int(x[1] * unity)),
output_format.simple('%d' % x[1], 'yellow'))
s = '\n'.join(map(func, sorted(stats.items(), key=lambda x: x[0])))
return s
class NotifierError(PyinotifyError):
"""
Notifier Exception. Raised on Notifier error.
"""
def __init__(self, err):
"""
@param err: Exception string's description.
@type err: string
"""
PyinotifyError.__init__(self, err)
class Notifier:
"""
Read notifications, process events.
"""
def __init__(self, watch_manager, default_proc_fun=None, read_freq=0,
threshold=0, timeout=None):
"""
Initialization. read_freq, threshold and timeout parameters are used
when looping.
@param watch_manager: Watch Manager.
@type watch_manager: WatchManager instance
@param default_proc_fun: Default processing method. If None, a new
instance of PrintAllEvents will be assigned.
@type default_proc_fun: instance of ProcessEvent
@param read_freq: if read_freq == 0, events are read asap,
if read_freq is > 0, this thread sleeps
max(0, read_freq - timeout) seconds. But if
timeout is None it may be different because
poll is blocking waiting for something to read.
@type read_freq: int
@param threshold: File descriptor will be read only if the accumulated
size to read becomes >= threshold. If != 0, you likely
want to use it in combination with an appropriate
value for read_freq because without that you would
keep looping without really reading anything and that
until the amount of events to read is >= threshold.
At least with read_freq set you might sleep.
@type threshold: int
@param timeout:
http://docs.python.org/lib/poll-objects.html#poll-objects
@type timeout: int
"""
# Watch Manager instance
self._watch_manager = watch_manager
# File descriptor
self._fd = self._watch_manager.get_fd()
# Poll object and registration
self._pollobj = select.poll()
self._pollobj.register(self._fd, select.POLLIN)
        # This pipe is correctly initialized and used by ThreadedNotifier
self._pipe = (-1, -1)
# Event queue
self._eventq = deque()
# System processing functor, common to all events
self._sys_proc_fun = _SysProcessEvent(self._watch_manager, self)
# Default processing method
self._default_proc_fun = default_proc_fun
if default_proc_fun is None:
self._default_proc_fun = PrintAllEvents()
# Loop parameters
self._read_freq = read_freq
self._threshold = threshold
self._timeout = timeout
# Coalesce events option
self._coalesce = False
# set of str(raw_event), only used when coalesce option is True
self._eventset = set()
def append_event(self, event):
"""
Append a raw event to the event queue.
@param event: An event.
@type event: _RawEvent instance.
"""
self._eventq.append(event)
def proc_fun(self):
return self._default_proc_fun
def coalesce_events(self, coalesce=True):
"""
        Coalescing events. Events are usually processed in batches, whose size
        depends on various factors. Thus, before processing them, events
        received from inotify are aggregated in a FIFO queue. If this
        coalescing option is enabled, events are filtered based on their
        uniqueness: only unique events are enqueued, duplicates are discarded.
        An event is unique when the combination of its fields (wd, mask,
        cookie, name) is unique among events of the same batch. After a batch
        of events is processed any event is accepted again. By default this
        option is disabled, you have to explicitly call this function to turn
        it on.
@param coalesce: Optional new coalescing value. True by default.
@type coalesce: Bool
"""
self._coalesce = coalesce
if not coalesce:
self._eventset.clear()
def check_events(self, timeout=None):
"""
Check for new events available to read, blocks up to timeout
milliseconds.
@param timeout: If specified it overrides the corresponding instance
attribute _timeout.
@type timeout: int
@return: New events to read.
@rtype: bool
"""
while True:
try:
# blocks up to 'timeout' milliseconds
if timeout is None:
timeout = self._timeout
ret = self._pollobj.poll(timeout)
except select.error as err:
if err.errno == errno.EINTR:
continue # interrupted, retry
else:
raise
else:
break
if not ret or (self._pipe[0] == ret[0][0]):
return False
# only one fd is polled
return ret[0][1] & select.POLLIN
def read_events(self):
"""
Read events from device, build _RawEvents, and enqueue them.
"""
buf_ = array.array('i', [0])
# get event queue size
if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1:
return
queue_size = buf_[0]
if queue_size < self._threshold:
log.debug('(fd: %d) %d bytes available to read but threshold is '
'fixed to %d bytes', self._fd, queue_size,
self._threshold)
return
try:
# Read content from file
r = os.read(self._fd, queue_size)
except Exception as msg:
raise NotifierError(msg)
log.debug('Event queue size: %d', queue_size)
rsum = 0 # counter
while rsum < queue_size:
s_size = 16
# Retrieve wd, mask, cookie and fname_len
wd, mask, cookie, fname_len = struct.unpack('iIII',
r[rsum:rsum+s_size])
# Retrieve name
bname, = struct.unpack('%ds' % fname_len,
r[rsum + s_size:rsum + s_size + fname_len])
# FIXME: should we explictly call sys.getdefaultencoding() here ??
uname = bname.decode()
rawevent = _RawEvent(wd, mask, cookie, uname)
if self._coalesce:
# Only enqueue new (unique) events.
raweventstr = str(rawevent)
if raweventstr not in self._eventset:
self._eventset.add(raweventstr)
self._eventq.append(rawevent)
else:
self._eventq.append(rawevent)
rsum += s_size + fname_len
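        # For reference, each record unpacked above follows the kernel's
        # struct inotify_event layout (see inotify(7)):
        #     int      wd       4 bytes   watch descriptor
        #     uint32_t mask     4 bytes   event mask
        #     uint32_t cookie   4 bytes   links IN_MOVED_FROM/IN_MOVED_TO pairs
        #     uint32_t len      4 bytes   length of the trailing name buffer
        #     char     name[]   len bytes, NUL-padded filename (may be empty)
        # hence the fixed 16-byte 'iIII' header followed by fname_len bytes.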
def process_events(self):
"""
        Routine for processing events from the queue by calling their
        associated processing method (an instance of ProcessEvent).
        It also does internal processing, to keep the system updated.
"""
while self._eventq:
raw_event = self._eventq.popleft() # pop next event
watch_ = self._watch_manager.get_watch(raw_event.wd)
if watch_ is None:
# Not really sure how we ended up here, nor how we should
# handle these types of events and if it is appropriate to
                # completely skip them (like we are doing here).
log.warning("Unable to retrieve Watch object associated to %s",
repr(raw_event))
continue
revent = self._sys_proc_fun(raw_event) # system processings
if watch_ and watch_.proc_fun:
watch_.proc_fun(revent) # user processings
else:
self._default_proc_fun(revent)
        self._sys_proc_fun.cleanup() # remove old MOVED_* events records
if self._coalesce:
self._eventset.clear()
def __daemonize(self, pid_file=None, force_kill=False, stdin=os.devnull,
stdout=os.devnull, stderr=os.devnull):
"""
pid_file: file to which the pid will be written.
force_kill: if True kill the process associated to pid_file.
stdin, stdout, stderr: files associated to common streams.
"""
if pid_file is None:
dirname = '/var/run/'
basename = os.path.basename(sys.argv[0]) or 'pyinotify'
pid_file = os.path.join(dirname, basename + '.pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as fo:
try:
pid = int(fo.read())
except ValueError:
pid = None
if pid is not None:
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
log.debug(err)
else:
log.error(err)
else:
if not force_kill:
s = 'There is already a pid file %s with pid %d'
raise NotifierError(s % (pid_file, pid))
else:
os.kill(pid, 9)
def fork_daemon():
# Adapted from Chad J. Schroeder's recipe
# @see http://code.activestate.com/recipes/278731/
pid = os.fork()
if (pid == 0):
                # child 1: become the session leader before forking again
os.setsid()
pid = os.fork()
if (pid == 0):
# child
os.chdir('/')
os.umask(0)
else:
# parent 2
os._exit(0)
else:
# parent 1
os._exit(0)
fd_inp = open(stdin, 'r')
os.dup2(fd_inp.fileno(), 0)
fd_out = open(stdout, 'w')
os.dup2(fd_out.fileno(), 1)
fd_err = open(stderr, 'w')
os.dup2(fd_err.fileno(), 2)
# Detach task
fork_daemon()
# Write pid
with open(pid_file, 'w') as file_obj:
file_obj.write(str(os.getpid()) + '\n')
atexit.register(lambda : os.unlink(pid_file))
def _sleep(self, ref_time):
# Only consider sleeping if read_freq is > 0
if self._read_freq > 0:
cur_time = time.time()
sleep_amount = self._read_freq - (cur_time - ref_time)
if sleep_amount > 0:
log.debug('Now sleeping %d seconds', sleep_amount)
time.sleep(sleep_amount)
def loop(self, callback=None, daemonize=False, **args):
"""
        Events are read only once every min(read_freq, timeout)
seconds at best and only if the size to read is >= threshold.
After this method returns it must not be called again for the same
instance.
@param callback: Functor called after each event processing iteration.
Expects to receive the notifier object (self) as first
parameter. If this function returns True the loop is
immediately terminated otherwise the loop method keeps
looping.
@type callback: callable object or function
@param daemonize: This thread is daemonized if set to True.
@type daemonize: boolean
        @param args: Optional and relevant only if daemonize is True. Remaining
                     keyword arguments are directly passed to daemonize(), see
                     the __daemonize() method.
@type args: various
"""
if daemonize:
self.__daemonize(**args)
# Read and process events forever
while 1:
try:
self.process_events()
if (callback is not None) and (callback(self) is True):
break
ref_time = time.time()
# check_events is blocking
if self.check_events():
self._sleep(ref_time)
self.read_events()
except KeyboardInterrupt:
# Stop monitoring if sigint is caught (Control-C).
log.debug('Pyinotify stops monitoring.')
break
# Close internals
self.stop()
def stop(self):
"""
Close inotify's instance (close its file descriptor).
It destroys all existing watches, pending events,...
This method is automatically called at the end of loop().
"""
self._pollobj.unregister(self._fd)
os.close(self._fd)
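# Illustrative sketch of the canonical blocking usage of Notifier, assuming
# the WatchManager defined further below in this module. The watched path,
# mask and handler are hypothetical placeholders.
def _example_notifier_loop_sketch():
    wm = WatchManager()
    notifier = Notifier(wm, default_proc_fun=PrintAllEvents())
    wm.add_watch('/tmp', IN_CREATE | IN_DELETE, rec=False)
    notifier.loop()  # blocks; stop() is called automatically on exit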
class ThreadedNotifier(threading.Thread, Notifier):
"""
    This notifier inherits from threading.Thread for instantiating a separate
    thread, and also inherits from Notifier, because it is a threaded notifier.
    Note that every functionality provided by this class is also provided
    through the Notifier class. Moreover Notifier should be considered first
    because it is not threaded and could be easily daemonized.
"""
def __init__(self, watch_manager, default_proc_fun=None, read_freq=0,
threshold=0, timeout=None):
"""
Initialization, initialize base classes. read_freq, threshold and
timeout parameters are used when looping.
@param watch_manager: Watch Manager.
@type watch_manager: WatchManager instance
@param default_proc_fun: Default processing method. See base class.
@type default_proc_fun: instance of ProcessEvent
@param read_freq: if read_freq == 0, events are read asap,
if read_freq is > 0, this thread sleeps
max(0, read_freq - timeout) seconds.
@type read_freq: int
@param threshold: File descriptor will be read only if the accumulated
size to read becomes >= threshold. If != 0, you likely
want to use it in combination with an appropriate
value set for read_freq because without that you would
keep looping without really reading anything and that
until the amount of events to read is >= threshold. At
least with read_freq you might sleep.
@type threshold: int
@param timeout:
see http://docs.python.org/lib/poll-objects.html#poll-objects
@type timeout: int
"""
# Init threading base class
threading.Thread.__init__(self)
# Stop condition
self._stop_event = threading.Event()
# Init Notifier base class
Notifier.__init__(self, watch_manager, default_proc_fun, read_freq,
threshold, timeout)
# Create a new pipe used for thread termination
self._pipe = os.pipe()
self._pollobj.register(self._pipe[0], select.POLLIN)
def stop(self):
"""
Stop notifier's loop. Stop notification. Join the thread.
"""
self._stop_event.set()
os.write(self._pipe[1], b'stop')
threading.Thread.join(self)
Notifier.stop(self)
self._pollobj.unregister(self._pipe[0])
os.close(self._pipe[0])
os.close(self._pipe[1])
def loop(self):
"""
        Thread's main loop. Not meant to be called by the user directly.
        Call the inherited start() method instead.
        Events are read only once every min(read_freq, timeout)
seconds at best and only if the size of events to read is >= threshold.
"""
        # When the loop must be terminated .stop() is called, 'stop'
        # is written to the pipe fd so poll() returns and .check_events()
        # returns False, which makes the while loop evaluate its stop condition
        # ._stop_event.isSet() and so puts an end to the thread's execution.
while not self._stop_event.isSet():
self.process_events()
ref_time = time.time()
if self.check_events():
self._sleep(ref_time)
self.read_events()
def run(self):
"""
Start thread's loop: read and process events until the method
stop() is called.
Never call this method directly, instead call the start() method
inherited from threading.Thread, which then will call run() in
its turn.
"""
self.loop()
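# Illustrative sketch of non-blocking usage through ThreadedNotifier:
# start() runs the loop in its own thread and stop() joins it. The watched
# path and mask are hypothetical placeholders.
def _example_threaded_notifier_sketch():
    wm = WatchManager()
    notifier = ThreadedNotifier(wm, PrintAllEvents())
    notifier.start()
    wm.add_watch('/tmp', IN_CLOSE_WRITE)
    # ... the main program keeps running here ...
    notifier.stop()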
class AsyncNotifier(asyncore.file_dispatcher, Notifier):
"""
This notifier inherits from asyncore.file_dispatcher in order to be able to
use pyinotify along with the asyncore framework.
"""
def __init__(self, watch_manager, default_proc_fun=None, read_freq=0,
threshold=0, timeout=None, channel_map=None):
"""
Initializes the async notifier. The only additional parameter is
'channel_map' which is the optional asyncore private map. See
        Notifier class for the meaning of the other parameters.
"""
Notifier.__init__(self, watch_manager, default_proc_fun, read_freq,
threshold, timeout)
asyncore.file_dispatcher.__init__(self, self._fd, channel_map)
def handle_read(self):
"""
When asyncore tells us we can read from the fd, we proceed processing
events. This method can be overridden for handling a notification
differently.
"""
self.read_events()
self.process_events()
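# Illustrative sketch of plugging AsyncNotifier into the asyncore reactor;
# asyncore.loop() then drives handle_read() whenever the inotify file
# descriptor becomes readable. The watched path and mask are hypothetical.
def _example_async_notifier_sketch():
    wm = WatchManager()
    notifier = AsyncNotifier(wm, PrintAllEvents())
    wm.add_watch('/tmp', IN_CREATE)
    asyncore.loop()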
class Watch:
"""
Represent a watch, i.e. a file or directory being watched.
"""
def __init__(self, wd, path, mask, proc_fun, auto_add, exclude_filter):
"""
Initializations.
@param wd: Watch descriptor.
@type wd: int
@param path: Path of the file or directory being watched.
@type path: str
@param mask: Mask.
@type mask: int
@param proc_fun: Processing callable object.
@type proc_fun:
@param auto_add: Automatically add watches on new directories.
@type auto_add: bool
@param exclude_filter: Boolean function, used to exclude new
directories from being automatically watched.
See WatchManager.__init__
@type exclude_filter: callable object
"""
self.wd = wd
self.path = path
self.mask = mask
self.proc_fun = proc_fun
self.auto_add = auto_add
self.exclude_filter = exclude_filter
self.dir = os.path.isdir(self.path)
def __repr__(self):
"""
@return: String representation.
@rtype: str
"""
s = ' '.join(['%s%s%s' % (output_format.field_name(attr),
output_format.punctuation('='),
output_format.field_value(getattr(self,
attr))) \
for attr in self.__dict__ if not attr.startswith('_')])
s = '%s%s %s %s' % (output_format.punctuation('<'),
output_format.class_name(self.__class__.__name__),
s,
output_format.punctuation('>'))
return s
class ExcludeFilter:
"""
ExcludeFilter is an exclusion filter.
"""
def __init__(self, arg_lst):
"""
Examples:
ef1 = ExcludeFilter(["^/etc/rc.*", "^/etc/hostname"])
ef2 = ExcludeFilter("/my/path/exclude.lst")
Where exclude.lst contains:
^/etc/rc.*
^/etc/hostname
@param arg_lst: is either a list of patterns or a filename from which
patterns will be loaded.
@type arg_lst: list of str or str
"""
if isinstance(arg_lst, str):
lst = self._load_patterns_from_file(arg_lst)
elif isinstance(arg_lst, list):
lst = arg_lst
else:
raise TypeError
self._lregex = []
for regex in lst:
self._lregex.append(re.compile(regex, re.UNICODE))
def _load_patterns_from_file(self, filename):
lst = []
with open(filename, 'r') as file_obj:
for line in file_obj.readlines():
                # Trim leading and trailing whitespace
pattern = line.strip()
if not pattern or pattern.startswith('#'):
continue
lst.append(pattern)
return lst
def _match(self, regex, path):
return regex.match(path) is not None
def __call__(self, path):
"""
@param path: Path to match against provided regexps.
@type path: str
@return: Return True if path has been matched and should
be excluded, False otherwise.
@rtype: bool
"""
for regex in self._lregex:
if self._match(regex, path):
return True
return False
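# Illustrative sketch of wiring an ExcludeFilter into recursive watches,
# either globally through the WatchManager constructor or per call to
# add_watch(). The patterns and watched path are hypothetical.
def _example_exclude_filter_sketch():
    excl = ExcludeFilter([r'^/var/log/.*\.gz$', r'^/var/log/old/.*'])
    wm = WatchManager(exclude_filter=excl)
    wm.add_watch('/var/log', IN_MODIFY, rec=True, auto_add=True)
    return wm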
class WatchManagerError(Exception):
"""
WatchManager Exception. Raised on error encountered on watches
operations.
"""
def __init__(self, msg, wmd):
"""
@param msg: Exception string's description.
@type msg: string
@param wmd: This dictionary contains the wd assigned to paths of the
same call for which watches were successfully added.
@type wmd: dict
"""
self.wmd = wmd
Exception.__init__(self, msg)
class WatchManager:
"""
Provide operations for watching files and directories. Its internal
dictionary is used to reference watched items. When used inside
    threaded code, one must instantiate as many WatchManager instances as
there are ThreadedNotifier instances.
"""
def __init__(self, exclude_filter=lambda path: False):
"""
Initialization: init inotify, init watch manager dictionary.
Raise OSError if initialization fails.
@param exclude_filter: boolean function, returns True if current
path must be excluded from being watched.
Convenient for providing a common exclusion
filter for every call to add_watch.
@type exclude_filter: callable object
"""
self._exclude_filter = exclude_filter
self._wmd = {} # watch dict key: watch descriptor, value: watch
self._fd = LIBC.inotify_init() # inotify's init, file descriptor
if self._fd < 0:
err = 'Cannot initialize new instance of inotify Errno=%s'
raise OSError(err % strerrno())
def get_fd(self):
"""
Return assigned inotify's file descriptor.
@return: File descriptor.
@rtype: int
"""
return self._fd
def get_watch(self, wd):
"""
Get watch from provided watch descriptor wd.
@param wd: Watch descriptor.
@type wd: int
"""
return self._wmd.get(wd)
def del_watch(self, wd):
"""
Remove watch entry associated to watch descriptor wd.
@param wd: Watch descriptor.
@type wd: int
"""
try:
del self._wmd[wd]
except KeyError as err:
log.error(str(err))
@property
def watches(self):
"""
Get a reference on the internal watch manager dictionary.
@return: Internal watch manager dictionary.
@rtype: dict
"""
return self._wmd
def __format_path(self, path):
"""
Format path to its internal (stored in watch manager) representation.
"""
# path must be a unicode string (str) and is just normalized.
return os.path.normpath(path)
def __add_watch(self, path, mask, proc_fun, auto_add, exclude_filter):
"""
Add a watch on path, build a Watch object and insert it in the
watch manager dictionary. Return the wd value.
"""
path = self.__format_path(path)
        # Convert path to a bytes string. This conversion seems to be required
        # because ctypes.create_string_buffer seems to manipulate bytes
        # string representations internally.
        # Moreover it seems that LIBC.inotify_add_watch does not work very
        # well when it receives a ctypes.create_unicode_buffer instance as
        # argument. However wds are _always_ indexed with their original
        # unicode paths in wmd.
byte_path = path.encode(sys.getfilesystemencoding())
wd_ = LIBC.inotify_add_watch(self._fd,
ctypes.create_string_buffer(byte_path),
mask)
if wd_ < 0:
return wd_
watch_ = Watch(wd=wd_, path=path, mask=mask, proc_fun=proc_fun,
auto_add=auto_add, exclude_filter=exclude_filter)
self._wmd[wd_] = watch_
log.debug('New %s', watch_)
return wd_
def __glob(self, path, do_glob):
if do_glob:
return glob.iglob(path)
else:
return [path]
def add_watch(self, path, mask, proc_fun=None, rec=False,
auto_add=False, do_glob=False, quiet=True,
exclude_filter=None):
"""
        Add watch(es) on the provided |path|(s) with the associated |mask| flag
        value and optionally with a processing |proc_fun| function and
        recursive flag |rec| set to True.
        All |path| components _must_ be str (i.e. unicode) objects.
        If |path| is already watched it is ignored, but if it is called with
        option rec=True a watch is put on each of its not-yet-watched
        subdirectories.
@param path: Path to watch, the path can either be a file or a
directory. Also accepts a sequence (list) of paths.
@type path: string or list of strings
@param mask: Bitmask of events.
@type mask: int
@param proc_fun: Processing object.
@type proc_fun: function or ProcessEvent instance or instance of
one of its subclasses or callable object.
@param rec: Recursively add watches from path on all its
subdirectories, set to False by default (doesn't
                    follow symlinks in any case).
@type rec: bool
@param auto_add: Automatically add watches on newly created
directories in watched parent |path| directory.
@type auto_add: bool
@param do_glob: Do globbing on pathname (see standard globbing
                        module for more information).
@type do_glob: bool
@param quiet: if False raises a WatchManagerError exception on
error. See example not_quiet.py.
@type quiet: bool
@param exclude_filter: predicate (boolean function), which returns
True if the current path must be excluded
from being watched. This argument has
precedence over exclude_filter passed to
the class' constructor.
@type exclude_filter: callable object
@return: dict of paths associated to watch descriptors. A wd value
                 is positive if the watch was added successfully, otherwise
                 the value is negative. If the path was invalid or was already
                 watched it is not included in the returned dictionary.
@rtype: dict of {str: int}
"""
ret_ = {} # return {path: wd, ...}
if exclude_filter is None:
exclude_filter = self._exclude_filter
# normalize args as list elements
for npath in self.__format_param(path):
# Require that path be a unicode string
if not isinstance(npath, str):
ret_[path] = -3
continue
# unix pathname pattern expansion
for apath in self.__glob(npath, do_glob):
# recursively list subdirs according to rec param
for rpath in self.__walk_rec(apath, rec):
if self.get_wd(rpath) is not None:
# We decide to ignore paths already inserted into
# the watch manager. Need to be removed with rm_watch()
# first. Or simply call update_watch() to update it.
continue
if not exclude_filter(rpath):
wd = ret_[rpath] = self.__add_watch(rpath, mask,
proc_fun,
auto_add,
exclude_filter)
if wd < 0:
err = 'add_watch: cannot watch %s WD=%d Errno=%s'
err = err % (rpath, wd, strerrno())
if quiet:
log.error(err)
else:
raise WatchManagerError(err, ret_)
else:
                        # Let's say -2 means 'explicitly excluded
# from watching'.
ret_[rpath] = -2
return ret_
def __get_sub_rec(self, lpath):
"""
Get every wd from self._wmd if its path is under the path of
one (at least) of those in lpath. Doesn't follow symlinks.
@param lpath: list of watch descriptor
@type lpath: list of int
@return: list of watch descriptor
@rtype: list of int
"""
for d in lpath:
root = self.get_path(d)
if root is not None:
# always keep root
yield d
else:
# if invalid
continue
# nothing else to expect
if not os.path.isdir(root):
continue
# normalization
root = os.path.normpath(root)
# recursion
lend = len(root)
for iwd in self._wmd.items():
cur = iwd[1].path
pref = os.path.commonprefix([root, cur])
if root == os.sep or (len(pref) == lend and \
len(cur) > lend and \
cur[lend] == os.sep):
yield iwd[1].wd
def update_watch(self, wd, mask=None, proc_fun=None, rec=False,
auto_add=False, quiet=True):
"""
Update existing watch descriptors |wd|. The |mask| value, the
processing object |proc_fun|, the recursive param |rec| and the
|auto_add| and |quiet| flags can all be updated.
@param wd: Watch Descriptor to update. Also accepts a list of
watch descriptors.
@type wd: int or list of int
@param mask: Optional new bitmask of events.
@type mask: int
@param proc_fun: Optional new processing function.
@type proc_fun: function or ProcessEvent instance or instance of
one of its subclasses or callable object.
@param rec: Optionally adds watches recursively on all
subdirectories contained into |wd| directory.
@type rec: bool
@param auto_add: Automatically adds watches on newly created
directories in the watch's path corresponding to
|wd|.
@type auto_add: bool
@param quiet: If False raises a WatchManagerError exception on
error. See example not_quiet.py
@type quiet: bool
@return: dict of watch descriptors associated to booleans values.
True if the corresponding wd has been successfully
updated, False otherwise.
@rtype: dict of {int: bool}
"""
lwd = self.__format_param(wd)
if rec:
lwd = self.__get_sub_rec(lwd)
ret_ = {} # return {wd: bool, ...}
for awd in lwd:
apath = self.get_path(awd)
if not apath or awd < 0:
err = 'update_watch: invalid WD=%d' % awd
if quiet:
log.error(err)
continue
raise WatchManagerError(err, ret_)
if mask:
addw = LIBC.inotify_add_watch
# apath is always stored as unicode string so encode it to
# bytes.
byte_path = apath.encode(sys.getfilesystemencoding())
wd_ = addw(self._fd, ctypes.create_string_buffer(byte_path),
mask)
if wd_ < 0:
ret_[awd] = False
err = 'update_watch: cannot update %s WD=%d Errno=%s'
err = err % (apath, wd_, strerrno())
if quiet:
log.error(err)
continue
raise WatchManagerError(err, ret_)
assert(awd == wd_)
if proc_fun or auto_add:
watch_ = self._wmd[awd]
if proc_fun:
watch_.proc_fun = proc_fun
if auto_add:
watch_.auto_add = auto_add
ret_[awd] = True
log.debug('Updated watch - %s', self._wmd[awd])
return ret_
def __format_param(self, param):
"""
@param param: Parameter.
@type param: string or int
@return: wrap param.
@rtype: list of type(param)
"""
if isinstance(param, list):
for p_ in param:
yield p_
else:
yield param
def get_wd(self, path):
"""
Returns the watch descriptor associated to path. This method
presents a prohibitive cost, always prefer to keep the WD
returned by add_watch(). If the path is unknown it returns None.
@param path: Path.
@type path: str
@return: WD or None.
@rtype: int or None
"""
path = self.__format_path(path)
for iwd in self._wmd.items():
if iwd[1].path == path:
return iwd[0]
def get_path(self, wd):
"""
Returns the path associated to WD, if WD is unknown it returns None.
@param wd: Watch descriptor.
@type wd: int
@return: Path or None.
@rtype: string or None
"""
watch_ = self._wmd.get(wd)
if watch_ is not None:
return watch_.path
def __walk_rec(self, top, rec):
"""
        Yields each subdirectory of top, doesn't follow symlinks.
If rec is false, only yield top.
@param top: root directory.
@type top: string
@param rec: recursive flag.
@type rec: bool
@return: path of one subdirectory.
@rtype: string
"""
if not rec or os.path.islink(top) or not os.path.isdir(top):
yield top
else:
for root, dirs, files in os.walk(top):
yield root
def rm_watch(self, wd, rec=False, quiet=True):
"""
        Removes watch(es).
@param wd: Watch Descriptor of the file or directory to unwatch.
Also accepts a list of WDs.
@type wd: int or list of int.
@param rec: Recursively removes watches on every already watched
                    subdirectory and subfile.
@type rec: bool
@param quiet: If False raises a WatchManagerError exception on
error. See example not_quiet.py
@type quiet: bool
@return: dict of watch descriptors associated to booleans values.
True if the corresponding wd has been successfully
removed, False otherwise.
@rtype: dict of {int: bool}
"""
lwd = self.__format_param(wd)
if rec:
lwd = self.__get_sub_rec(lwd)
ret_ = {} # return {wd: bool, ...}
for awd in lwd:
# remove watch
wd_ = LIBC.inotify_rm_watch(self._fd, awd)
if wd_ < 0:
ret_[awd] = False
err = 'rm_watch: cannot remove WD=%d Errno=%s' % (awd,
strerrno())
if quiet:
log.error(err)
continue
raise WatchManagerError(err, ret_)
ret_[awd] = True
log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd))
return ret_
def watch_transient_file(self, filename, mask, proc_class):
"""
Watch a transient file, which will be created and deleted frequently
over time (e.g. pid file).
        @attention: Currently, once this function has been called it is not
        possible to correctly watch the events triggered in the same
        base directory as the one containing the watched
        transient file. For instance it would be wrong to make these
two successive calls: wm.watch_transient_file('/var/run/foo.pid', ...)
and wm.add_watch('/var/run/', ...)
@param filename: Filename.
@type filename: string
@param mask: Bitmask of events, should contain IN_CREATE and IN_DELETE.
@type mask: int
@param proc_class: ProcessEvent (or of one of its subclass), beware of
accepting a ProcessEvent's instance as argument into
__init__, see transient_file.py example for more
details.
@type proc_class: ProcessEvent's instance or of one of its subclasses.
@return: Same as add_watch().
@rtype: Same as add_watch().
"""
dirname = os.path.dirname(filename)
if dirname == '':
return {} # Maintains coherence with add_watch()
basename = os.path.basename(filename)
# Assuming we are watching at least for IN_CREATE and IN_DELETE
mask |= IN_CREATE | IN_DELETE
def cmp_name(event):
if getattr(event, 'name') is None:
return False
return basename == event.name
return self.add_watch(dirname, mask,
proc_fun=proc_class(ChainIfTrue(func=cmp_name)),
rec=False,
auto_add=False, do_glob=False,
exclude_filter=lambda path: False)
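# Illustrative sketch of watching a transient pid file. The proc_class handed
# to watch_transient_file() is instantiated with a ChainIfTrue filter, so only
# events whose name matches the file reach its process_IN_* handlers. The
# class name and path below are hypothetical.
def _example_transient_file_sketch():
    class OnPidFile(ProcessEvent):
        def process_IN_CREATE(self, event):
            print('pid file created: %s' % event.pathname)
        def process_IN_DELETE(self, event):
            print('pid file removed: %s' % event.pathname)
    wm = WatchManager()
    wm.watch_transient_file('/var/run/myd.pid', IN_CREATE | IN_DELETE,
                            OnPidFile)
    return Notifier(wm)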
class RawOutputFormat:
"""
Format string representations.
"""
def __init__(self, format=None):
self.format = format or {}
def simple(self, s, attribute):
if not isinstance(s, str):
s = str(s)
return (self.format.get(attribute, '') + s +
self.format.get('normal', ''))
def punctuation(self, s):
"""Punctuation color."""
return self.simple(s, 'normal')
def field_value(self, s):
"""Field value color."""
return self.simple(s, 'purple')
def field_name(self, s):
"""Field name color."""
return self.simple(s, 'blue')
def class_name(self, s):
"""Class name color."""
return self.format.get('red', '') + self.simple(s, 'bold')
output_format = RawOutputFormat()
class ColoredOutputFormat(RawOutputFormat):
"""
Format colored string representations.
"""
def __init__(self):
f = {'normal': '\033[0m',
'black': '\033[30m',
'red': '\033[31m',
'green': '\033[32m',
'yellow': '\033[33m',
'blue': '\033[34m',
'purple': '\033[35m',
'cyan': '\033[36m',
'bold': '\033[1m',
'uline': '\033[4m',
'blink': '\033[5m',
'invert': '\033[7m'}
RawOutputFormat.__init__(self, f)
def compatibility_mode():
"""
Use this function to turn on the compatibility mode. The compatibility
mode is used to improve compatibility with Pyinotify 0.7.1 (or older)
programs. The compatibility mode provides additional variables 'is_dir',
'event_name', 'EventsCodes.IN_*' and 'EventsCodes.ALL_EVENTS' as
Pyinotify 0.7.1 provided. Do not call this function from new programs!!
    Especially if they are developed for Pyinotify >= 0.8.x.
"""
setattr(EventsCodes, 'ALL_EVENTS', ALL_EVENTS)
for evname in globals():
if evname.startswith('IN_'):
setattr(EventsCodes, evname, globals()[evname])
global COMPATIBILITY_MODE
COMPATIBILITY_MODE = True
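# Illustrative sketch of the legacy 0.7.1-style attribute access that
# compatibility_mode() enables; the EventsCodes lookups below only work after
# the call has been made.
def _example_compatibility_sketch():
    compatibility_mode()
    return EventsCodes.IN_CREATE | EventsCodes.IN_DELETE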
def command_line():
"""
By default the watched path is '/tmp' and all types of events are
    monitored. Event monitoring runs forever; type ^C to stop it.
"""
from optparse import OptionParser
usage = "usage: %prog [options] [path1] [path2] [pathn]"
parser = OptionParser(usage=usage)
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose", help="Verbose mode")
parser.add_option("-r", "--recursive", action="store_true",
dest="recursive",
help="Add watches recursively on paths")
parser.add_option("-a", "--auto_add", action="store_true",
dest="auto_add",
help="Automatically add watches on new directories")
parser.add_option("-e", "--events-list", metavar="EVENT[,...]",
dest="events_list",
help=("A comma-separated list of events to watch for - "
"see the documentation for valid options (defaults"
" to everything)"))
parser.add_option("-s", "--stats", action="store_true",
dest="stats",
help="Display dummy statistics")
parser.add_option("-V", "--version", action="store_true",
dest="version", help="Pyinotify version")
parser.add_option("-f", "--raw-format", action="store_true",
dest="raw_format",
help="Disable enhanced output format.")
(options, args) = parser.parse_args()
if options.verbose:
log.setLevel(10)
if options.version:
print(__version__)
if not options.raw_format:
global output_format
output_format = ColoredOutputFormat()
if len(args) < 1:
path = '/tmp' # default watched path
else:
path = args
# watch manager instance
wm = WatchManager()
# notifier instance and init
if options.stats:
notifier = Notifier(wm, default_proc_fun=Stats(), read_freq=5)
else:
notifier = Notifier(wm, default_proc_fun=PrintAllEvents())
# What mask to apply
mask = 0
if options.events_list:
events_list = options.events_list.split(',')
for ev in events_list:
evcode = EventsCodes.ALL_FLAGS.get(ev, 0)
if evcode:
mask |= evcode
else:
parser.error("The event '%s' specified with option -e"
" is not valid" % ev)
else:
mask = ALL_EVENTS
# stats
cb_fun = None
if options.stats:
def cb(s):
sys.stdout.write(repr(s.proc_fun()))
sys.stdout.write('\n')
sys.stdout.write(str(s.proc_fun()))
sys.stdout.write('\n')
sys.stdout.flush()
cb_fun = cb
    log.debug('Start monitoring %s (press ^C to halt pyinotify)' % path)
wm.add_watch(path, mask, rec=options.recursive, auto_add=options.auto_add)
# Loop forever (until sigint signal get caught)
notifier.loop(callback=cb_fun)
if __name__ == '__main__':
command_line()
| {
"content_hash": "7f75902de17ff3886c320b8903e4b434",
"timestamp": "",
"source": "github",
"line_count": 2108,
"max_line_length": 80,
"avg_line_length": 37.28795066413662,
"alnum_prop": 0.5553477602636032,
"repo_name": "dunkfordyce/pyinotify",
"id": "467a5217664794b280ea8b059c8243fe9295018e",
"size": "79785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python3/pyinotify.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176314"
},
{
"name": "Shell",
"bytes": "130"
}
],
"symlink_target": ""
} |
from __future__ import division
# Local RGZ modules
import rgz
import consensus
from load_contours import get_contours,make_pathdict
# Default packages
import json
import cStringIO
import urllib
import time
import random
import os
from ast import literal_eval
from collections import Counter
# Other packages
import pandas as pd
import numpy as np
from astropy.io import ascii,fits
from astropy import wcs
from PIL import Image
from matplotlib import pyplot as plt
from matplotlib.pyplot import cm
from matplotlib.path import Path
import matplotlib.patches as patches
from scipy.interpolate import griddata
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
from astroML.plotting import hist as histML
# Local paths and files
rgz_dir = '/Users/willettk/Astronomy/Research/GalaxyZoo/rgz-analysis'
rgz_consensus_file = '%s/csv/consensus_rgz_first.csv' % rgz_dir
# Various image parameters
IMG_HEIGHT_OLD = 424.0 # number of pixels in the original JPG image along the y axis
IMG_WIDTH_OLD = 424.0 # number of pixels in the original JPG image along the x axis
IMG_HEIGHT_NEW = 500.0 # number of pixels in the downloaded JPG image along the y axis
IMG_WIDTH_NEW = 500.0 # number of pixels in the downloaded JPG image along the x axis
FITS_HEIGHT = 301.0 # number of pixels in the FITS image (?) along the y axis
FITS_WIDTH = 301.0 # number of pixels in the FITS image (?) along the x axis
FIRST_FITS_HEIGHT = 132.0 # number of pixels in the FITS image along the y axis
FIRST_FITS_WIDTH = 132.0 # number of pixels in the FITS image along the y axis
first_ir_scale_x = FIRST_FITS_WIDTH / IMG_WIDTH_NEW
first_ir_scale_y = FIRST_FITS_HEIGHT / IMG_HEIGHT_NEW
PIXEL_SIZE = 0.00016667#/3600.0 # the number of arcseconds per pixel in the FITS image
xmin = 1.
xmax = IMG_HEIGHT_NEW
ymin = 1.
ymax = IMG_WIDTH_NEW
subjects,classifications = rgz.load_rgz_data()
def get_doubles(consensus_level=0.50):
# Find examples of RGZ subjects with exactly two radio components
rgzconsensus = ascii.read(rgz_consensus_file,format='csv')
dblidx = (rgzconsensus['n_radio'] == 2) & (rgzconsensus['consensus_level'] >= consensus_level)
doubles = rgzconsensus[dblidx]
return doubles
def get_triples(consensus_level=0.50):
# Find examples of RGZ subjects with exactly three radio components
rgzconsensus = ascii.read(rgz_consensus_file,format='csv')
trpidx = (rgzconsensus['n_radio'] == 3) & (rgzconsensus['consensus_level'] >= consensus_level)
triples = rgzconsensus[trpidx]
return triples
def all_doubles_pixradio(doubles,pathdict):
# Compute the coordinates of the optical ID and radio component centroids, bending angle, and position angle
# for all consensus RGZ subjects with exactly two radio components
with open('%s/bending_angles/angles_double_pixradio.csv' % rgz_dir,'w') as f:
print >> f,'zooniverse_id,bending_angle,position_angle'
for double in doubles:
#irx,iry,radio_components = pix_convert(double,pathdict)
xc,yc = literal_eval(double['ir_peak'])
if xc is not None:
subject = subjects.find_one({'zooniverse_id':double['zooniverse_id']})
contours = get_contours(subject,pathdict)
radio_components = contours['contours']
radio_centroids = pix_radio(radio_components)
alpha = bending_angle(xc,yc,radio_centroids[0][0],radio_centroids[0][1],radio_centroids[1][0],radio_centroids[1][1])
alpha_deg = alpha * 180./np.pi
phi = position_angle(xc,yc,radio_centroids[0][0],radio_centroids[0][1],radio_centroids[1][0],radio_centroids[1][1])
phi_deg = phi * 180./np.pi
if alpha is not None:
print >> f,'%s,%.3f,%.3f' % (double['zooniverse_id'],alpha_deg,phi_deg)
return None
def dblid(doubles,zooniverse_id):
# Retrieve subject for a single two-component radio source in the doubles list
dbl = doubles[doubles['zooniverse_id'] == zooniverse_id][0]
return dbl
def pix_convert(galaxy,pathdict,local=False):
# Convert IR coordinates from RA/dec into pixel
subject = subjects.find_one({'zooniverse_id':galaxy['zooniverse_id']})
contours = get_contours(subject,pathdict)
radio_components = contours['contours']
try:
assert len(radio_components) > 1, \
'Radio data only has %i component for %s' % (len(radio_components),galaxy['zooniverse_id'])
except AssertionError:
return None,None,None
# Keep everything in pixel coordinates. Reverse what's done in consensus.py;
# transform an RA/dec pair into the pixel x/y pair.
# Convert the pixel coordinates into RA,dec using the WCS object from the header
hdulist = fits.open(pathdict[galaxy['first_id']])
w = wcs.WCS(hdulist[0].header)
worldcrd = np.array([[galaxy['ra'],galaxy['dec']]],np.float_)
pix = w.wcs_world2pix(worldcrd,0)
irx,iry = pix[0]
irx_first,iry_first = np.round(pix[0][0] / first_ir_scale_x), np.round(IMG_HEIGHT_NEW - pix[0][1] / first_ir_scale_y)
return irx,iry,radio_components
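# Worked example of the scaling above, using the module constants
# FIRST_FITS_WIDTH = FIRST_FITS_HEIGHT = 132 and
# IMG_WIDTH_NEW = IMG_HEIGHT_NEW = 500: a source at FITS pixel (66, 66) maps to
#   irx_first = round(66 / (132/500.))       = 250
#   iry_first = round(500 - 66 / (132/500.)) = 250
# i.e. x is rescaled to the 500x500 IR frame and y is both rescaled and
# flipped, consistent with FITS counting rows from the bottom while the
# downloaded JPG counts them from the top.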
def bending_angle(xc,yc,x1,y1,x2,y2):
# Compute the bending angle (in radians) between three points in pixel space
'''
Points are:
- xc,yc: x,y center of IR counterpart
- x1,y1: x,y center of 1st radio lobe
- x2,y2: x,y center of 2nd radio lobe
'''
r1 = np.array([x1,y1])
r2 = np.array([x2,y2])
center = np.array([xc,yc])
r1diff = r1-center
r2diff = r2-center
r1len = np.hypot(r1diff[0],r1diff[1])
r2len = np.hypot(r2diff[0],r2diff[1])
alpha = np.arccos(np.dot(r1diff,r2diff) / (r1len*r2len))
return alpha
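# Worked example: with the IR host at the origin and lobes at (1, 0) and
# (0, 1), the two difference vectors are orthogonal, so
#   alpha = arccos(0 / (1*1)) = pi/2   (a 90 degree bending angle);
# collinear lobes on opposite sides, e.g. (1, 0) and (-1, 0), give
#   alpha = arccos(-1) = pi            (180 degrees, an unbent source).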
def bending_angle_sdss(zid,x1,y1,x2,y2):
# Compute the bending angle (in radians) between three points in pixel space
'''
- zid
- x1,y1: x,y center of 1st radio lobe
- x2,y2: x,y center of 2nd radio lobe
'''
# I'd love to do this purely in RA/dec, converting all positions in Astropy, but functionality doesn't seem to be there.
# Convert SDSS optical position into radio-frame pixel coordinates
hdulist = fits.open(pathdict[galaxy['first_id']])
w = wcs.WCS(hdulist[0].header)
worldcrd = np.array([[galaxy['ra'],galaxy['dec']]],np.float_)
pix = w.wcs_world2pix(worldcrd,0)
xc,yc = np.round(pix[0][0] / first_ir_scale_x), np.round(IMG_HEIGHT_NEW - pix[0][1] / first_ir_scale_y)
r1 = np.array([x1,y1])
r2 = np.array([x2,y2])
center = np.array([xc,yc])
r1diff = r1-center
r2diff = r2-center
r1len = np.hypot(r1diff[0],r1diff[1])
r2len = np.hypot(r2diff[0],r2diff[1])
alpha = np.arccos(np.dot(r1diff,r2diff) / (r1len*r2len))
return alpha
def position_angle(xc,yc,x1,y1,x2,y2):
# Compute the position angle (in radians, with respect to north) between three points in pixel space
'''
Points are:
- xc,yc: x,y center of bending angle
- x1,y1: x,y center of 1st component
- x2,y2: x,y center of 2nd component
'''
r1 = np.array([x1,y1])
r2 = np.array([x2,y2])
center = np.array([xc,yc])
r12sum = (r1-center) + (r2-center)
r12len = np.hypot(r12sum[0],r12sum[1])
north = np.array([0,1])
northlen = np.hypot(north[0],north[1])
alpha = np.arccos(np.dot(r12sum,north) / (r12len*northlen))
# Measure CCW from north
if r12sum[0] > 0.:
alpha = 2*np.pi - alpha
return alpha
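# Worked example: with the bend center at the origin and lobes at (1, 1) and
# (-1, 1), r12sum = (0, 2) points due north, so
#   phi = arccos(2 / (2*1)) = 0        (position angle 0 degrees);
# lobes at (1, 1) and (1, -1) give r12sum = (2, 0) and arccos(0) = pi/2, and
# since r12sum[0] > 0 the result is remapped to 2*pi - pi/2 = 3*pi/2
# (270 degrees, measured counter-clockwise from north).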
def pix_radio(radio_components):
# From list of bounding boxes in radio pixel coordinates,
# return the centroids of the boxes in IR pixel coordinates
radio_centroids = []
for comp in radio_components:
bbox = comp[0]['bbox']
cxu = np.median((bbox[0],bbox[2]))
cyu = np.median((bbox[1],bbox[3]))
cx,cy = cxu/first_ir_scale_x,cyu/first_ir_scale_y
radio_centroids.append((cx,cy))
return radio_centroids
def bbox_radio_to_ir(bbox):
# Convert the bbox in RGZ subject from radio to infrared pixel scale
bbox_ir = [bbox[0]/first_ir_scale_x,bbox[1]/first_ir_scale_x,bbox[2]/first_ir_scale_x,bbox[3]/first_ir_scale_x]
return bbox_ir
def load_angles(filename):
# Load the CSV file of the computed bending angles for multi-peaked or multi-lobed sources
with open('%s/bending_angles/%s.csv' % (rgz_dir,filename),'r') as f:
angstr = f.readlines()
ba = [float(x.split(',')[1]) for x in angstr[1:]]
#pa = [float(x.split(',')[2]) for x in angstr[1:]]
return ba#,pa
def plothist(savefig=False):
# Plot distribution of the bending angles for RGZ sources
'''
angles_double_pixradio = load_angles('angles_double_pixradio')
angles_triple_pixradio = load_angles('angles_triple_pixradio')
angles_double_mps = load_angles('angles_multipeaked_singles')
angles_triple_mps = load_angles('angles_multipeaked_singles_no_optical')
'''
data = ascii.read('{:}/csv/static_catalog3.csv'.format(rgz_dir),delimiter=' ')
angles_radiodouble = data[data['angle_type'] == 'double_pixradio']['bending_angle']
angles_mps = data[data['angle_type'] == 'multipeaked_singles']['bending_angle']
# Set up figure
fig = plt.figure(2,(15,8))
c1 = '#377eb8'
c2 = '#e41a1c'
c3 = '#4daf4a'
c4 = '#984ea3'
# Panel 1 - histogram
ax1 = fig.add_subplot(121)
histML(angles_radiodouble, bins=15, ax=ax1, histtype='step', lw=3, alpha=1.0, color=c1, range=(0,90),label='double lobed, multi-contour')
histML(angles_mps, bins=15, ax=ax1, histtype='step', lw=3, alpha=1.0, color=c2, range=(0,90),label='double-peaked, single-contour')
ax1.set_xlim(0,90)
ax1.vlines(x=np.median(angles_radiodouble),ymin=ax1.get_ylim()[0],ymax = ax1.get_ylim()[1],color=c1,linestyle='--')
ax1.vlines(x=np.median(angles_mps),ymin=ax1.get_ylim()[0],ymax = ax1.get_ylim()[1],color=c2,linestyle='--')
ax1.set_xlabel(r'bending angle [deg]',fontsize=24)
ax1.set_ylabel('count',fontsize=20)
plt.tick_params(axis='both', which='major', labelsize=20)
# Panel 2 - cumulative
ax2 = fig.add_subplot(122)
histML(angles_radiodouble, bins=15, ax=ax2, histtype='step', lw=3, alpha=1.0, color=c1, range=(0,90),label='double lobed, multi-contour',cumulative=True)
histML(angles_mps, bins=15, ax=ax2, histtype='step', lw=3, alpha=1.0, color=c2, range=(0,90),label='double-peaked, single-contour',cumulative=True)
ax2.set_xlim(0,90)
ax2.vlines(x=np.median(angles_radiodouble),ymin=ax2.get_ylim()[0],ymax = ax2.get_ylim()[1],color=c1,linestyle='--')
ax2.vlines(x=np.median(angles_mps),ymin=ax2.get_ylim()[0],ymax = ax2.get_ylim()[1],color=c2,linestyle='--')
ax2.set_xlabel(r'bending angle [deg]',fontsize=24)
ax2.set_ylabel('count',fontsize=20)
ax2.legend(loc='upper left')
plt.tick_params(axis='both', which='major', labelsize=20)
# Finish adjusting plot parameters
fig.tight_layout()
if savefig:
fig.savefig('%s/bending_angles/plots/bending_angles_hist.pdf' % rgz_dir)
else:
plt.show()
return None
def plot_one_double(zooniverse_id,pathdict,figno=1,savefig=False,anglepath='',dbltype='radio'):
# Make a four-panel plot of the consensus identification with marked bending angle and position angle for a double source
cons = consensus.checksum(zooniverse_id)
subject = subjects.find_one({'zooniverse_id':zooniverse_id})
contours = get_contours(subject,pathdict)
radio_components = contours['contours']
# Plot image
answer = cons['answer']
# Download contour data
sf_x = 500./contours['width']
sf_y = 500./contours['height']
verts_all = []
codes_all = []
components = contours['contours']
for comp in components:
# Order of bounding box components is (xmax,ymax,xmin,ymin)
comp_xmax,comp_ymax,comp_xmin,comp_ymin = comp[0]['bbox']
# Only plot radio components identified by the users as the consensus;
# check on the xmax value to make sure
for v in answer.itervalues():
if comp_xmax in v['xmax']:
for idx,level in enumerate(comp):
verts = [((p['x'])*sf_x,(p['y']-1)*sf_y) for p in level['arr']]
codes = np.ones(len(verts),int) * Path.LINETO
codes[0] = Path.MOVETO
verts_all.extend(verts)
codes_all.extend(codes)
try:
path = Path(verts_all, codes_all)
patch_black = patches.PathPatch(path, facecolor = 'none', edgecolor='black', lw=1)
except AssertionError:
print 'Users found no components for consensus match of %s' % zooniverse_id
# Plot the infrared results
fig = plt.figure(figno,(15,4))
fig.clf()
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)
colormaparr = [cm.hot_r,cm.Blues,cm.RdPu,cm.Greens,cm.PuBu,cm.YlGn,cm.Greys][::-1]
colorarr = ['r','b','m','g','c','y','k'][::-1]
if len(answer) > 0: # At least one galaxy was identified
for idx,ans in enumerate(answer.itervalues()):
if ans.has_key('peak_data'):
# Plot the KDE map
colormap = colormaparr.pop()
ax3.imshow(np.rot90(ans['peak_data']['Z']), cmap=colormap,extent=[xmin, xmax, ymin, ymax])
# Plot individual sources
color = colorarr.pop()
'''
x_plot = [xt * 500./424 for xt in ans['ir_x'] if xt != -99.0]
y_plot = [yt * 500./424 for yt in ans['ir_y'] if yt != -99.0]
'''
x_plot,y_plot = ans['ir_x'],ans['ir_y']
ax3.scatter(x_plot, y_plot, c=color, marker='o', s=10, alpha=1./len(x_plot))
ax4.plot([ans['ir_peak'][0]],[ans['ir_peak'][1]],color=color,marker='*',markersize=12)
elif ans.has_key('ir'):
color = colorarr.pop()
x_plot,y_plot = ans['ir']
ax3.plot([x_plot],[y_plot],color=color,marker='o',markersize=2)
ax4.plot([x_plot],[y_plot],color=color,marker='*',markersize=12)
else:
ax4.text(550,idx*25,'#%i - no IR host' % idx,fontsize=11)
ax3.set_xlim([0, 500])
ax3.set_ylim([500, 0])
ax3.set_title(zooniverse_id)
ax3.set_aspect('equal')
ax4.set_xlim([0, 500])
ax4.set_ylim([500, 0])
ax4.set_title('Consensus (%i/%i users)' % (cons['n_users'],cons['n_total']))
ax4.set_aspect('equal')
# Display IR and radio images
url_standard = subject['location']['standard']
im_standard = Image.open(cStringIO.StringIO(urllib.urlopen(url_standard).read()))
ax1 = fig.add_subplot(141)
ax1.imshow(im_standard,origin='upper')
ax1.set_title('WISE')
url_radio = subject['location']['radio']
im_radio = Image.open(cStringIO.StringIO(urllib.urlopen(url_radio).read()))
ax2 = fig.add_subplot(142)
ax2.imshow(im_radio,origin='upper')
ax2.set_title(subject['metadata']['source'])
ax2.get_yaxis().set_ticklabels([])
ax3.get_yaxis().set_ticklabels([])
# Plot contours identified as the consensus
if len(answer) > 0:
ax4.add_patch(patch_black)
radio_centers = []
for component in components:
bbox = component[0]['bbox']
# Draw centers of bounding boxes
xradiocen = np.median((bbox[0],bbox[2])) / first_ir_scale_x
yradiocen = np.median((bbox[1],bbox[3])) / first_ir_scale_y
radio_centers.append((xradiocen,yradiocen))
ax4.scatter(xradiocen,yradiocen, c='g', marker='s', s=15, alpha=1)
# Draw edge of bounding box
xradiomin = bbox[2] / first_ir_scale_x
xradiomax = bbox[0] / first_ir_scale_x
yradiomin = bbox[3] / first_ir_scale_y
yradiomax = bbox[1] / first_ir_scale_y
ax4.plot([xradiomin,xradiomin,xradiomax,xradiomax,xradiomin],[yradiomin,yradiomax,yradiomax,yradiomin,yradiomin],color='g')
for ans in answer:
if answer[ans].has_key('ir_peak'):
# Optical counterpart position
xc,yc = answer[ans]['ir_peak']
# Position of radio sources for multi-peaked, single-component subjects
if dbltype == "mps":
local_maxima = mps_cc(subject,pathdict,plot=False,verbose=False)
suffix = '' if len(local_maxima) == 1 else 's'
assert len(local_maxima) >= 2, \
"%i peak%s in first radio component of %s; must have exactly 2 peaks to plot bending angle using mps method." % (len(local_maxima),suffix,zooniverse_id)
x1 = local_maxima[0][1][0] / first_ir_scale_x
y1 = local_maxima[0][1][1] / first_ir_scale_y
x2 = local_maxima[1][1][0] / first_ir_scale_x
y2 = local_maxima[1][1][1] / first_ir_scale_y
ax4.scatter(x1,y1, color='darkorange', marker='s', s=15, alpha=1)
ax4.scatter(x2,y2, color='darkorange', marker='s', s=15, alpha=1)
# Position of radio sources for double-lobed, two-component subjects
elif len(radio_centers) == 2:
x1,y1 = radio_centers[0]
x2,y2 = radio_centers[1]
else:
raise ValueError("Centers of radio boxes not defined.")
m1 = (y1 - yc) / (x1 - xc)
b1 = yc - m1*xc
m2 = (y2 - yc) / (x2 - xc)
b2 = yc - m2*xc
xedge1 = 0 if x1 < xc else 500
yedge1 = y1 - (x1-xedge1)*(yc-y1)/(xc-x1)
xedge2 = 0 if x2 < xc else 500
yedge2 = y2 - (x2-xedge2)*(yc-y2)/(xc-x2)
                # Draw and annotate the bending angle
ax4.plot([xedge1,xc],[yedge1,yc],color='orange',linestyle='--')
ax4.plot([xedge2,xc],[yedge2,yc],color='orange',linestyle='--')
alpha_deg = bending_angle(xc,yc,x1,y1,x2,y2) * 180/np.pi
ax4.text(550,0,r'$\alpha$ = %.1f deg' % alpha_deg,fontsize=11)
# Draw vector pointing north
# Draw the bisector vector
'''
yd = y_bisect(xc,yc,xedge1,yedge1,xedge2,yedge2)
ax4.arrow(xc,yc,-xc,yd-yc,head_width=20, head_length=40, fc='blue', ec='blue')
'''
# Compute the position angle with respect to north
phi_deg = position_angle(xc,500-yc,x1,500-y1,x2,500-y2) * 180/np.pi
ax4.text(550,50,r'$\phi$ = %.1f deg' % phi_deg,fontsize=11)
ax4.arrow(xc,yc,0,-yc,head_width=20, head_length=40, fc='grey', ec='grey',ls='dotted')
else:
print "No peak for %s" % zooniverse_id
ax4.yaxis.tick_right()
ax1.get_xaxis().set_ticks([0,100,200,300,400])
ax2.get_xaxis().set_ticks([0,100,200,300,400])
ax3.get_xaxis().set_ticks([0,100,200,300,400])
ax4.get_xaxis().set_ticks([0,100,200,300,400,500])
plt.subplots_adjust(wspace=0.02)
# Save hard copy of the figure
if savefig:
fig.savefig('%s/bending_angles/plots/individual/%sba_%s.pdf' % (rgz_dir,anglepath,zooniverse_id))
plt.close()
else:
plt.show()
    # Close figure after it's done; otherwise mpl complains about having thousands of figures open
return None
def plot_one_triple(zooniverse_id,pathdict,figno=1,savefig=False,anglepath=''):
# Make a four-panel plot of the consensus identification with marked bending angle and position angle for a triple source
cons = consensus.checksum(zooniverse_id)
subject = subjects.find_one({'zooniverse_id':zooniverse_id})
contours = get_contours(subject,pathdict)
radio_components = contours['contours']
# Plot image
answer = cons['answer']
# Download contour data
sf_x = 500./contours['width']
sf_y = 500./contours['height']
verts_all = []
codes_all = []
components = contours['contours']
for comp in components:
# Order of bounding box components is (xmax,ymax,xmin,ymin)
comp_xmax,comp_ymax,comp_xmin,comp_ymin = comp[0]['bbox']
# Only plot radio components identified by the users as the consensus;
# check on the xmax value to make sure
for v in answer.itervalues():
if comp_xmax in v['xmax']:
for idx,level in enumerate(comp):
verts = [((p['x'])*sf_x,(p['y']-1)*sf_y) for p in level['arr']]
codes = np.ones(len(verts),int) * Path.LINETO
codes[0] = Path.MOVETO
verts_all.extend(verts)
codes_all.extend(codes)
try:
path = Path(verts_all, codes_all)
patch_black = patches.PathPatch(path, facecolor = 'none', edgecolor='black', lw=1)
except AssertionError:
print 'Users found no components for consensus match of %s' % zooniverse_id
# Plot the infrared results
fig = plt.figure(figno,(15,4))
fig.clf()
ax3 = fig.add_subplot(143)
ax4 = fig.add_subplot(144)
colormaparr = [cm.hot_r,cm.Blues,cm.RdPu,cm.Greens,cm.PuBu,cm.YlGn,cm.Greys][::-1]
colorarr = ['r','b','m','g','c','y','k'][::-1]
if len(answer) > 0: # At least one galaxy was identified
for idx,ans in enumerate(answer.itervalues()):
if ans.has_key('peak_data'):
# Plot the KDE map
colormap = colormaparr.pop()
ax3.imshow(np.rot90(ans['peak_data']['Z']), cmap=colormap,extent=[xmin, xmax, ymin, ymax])
# Plot individual sources
color = colorarr.pop()
x_plot,y_plot = ans['ir_x'],ans['ir_y']
ax3.scatter(x_plot, y_plot, c=color, marker='o', s=10, alpha=1./len(x_plot))
ax4.plot([ans['ir_peak'][0]],[ans['ir_peak'][1]],color=color,marker='*',markersize=12)
elif ans.has_key('ir'):
color = colorarr.pop()
x_plot,y_plot = ans['ir']
ax3.plot([x_plot],[y_plot],color=color,marker='o',markersize=2)
ax4.plot([x_plot],[y_plot],color=color,marker='*',markersize=12)
else:
ax4.text(550,idx*25,'#%i - no IR host' % idx,fontsize=11)
ax3.set_xlim([0, 500])
ax3.set_ylim([500, 0])
ax3.set_title(zooniverse_id)
ax3.set_aspect('equal')
ax4.set_xlim([0, 500])
ax4.set_ylim([500, 0])
ax4.set_title('Consensus (%i/%i users)' % (cons['n_users'],cons['n_total']))
ax4.set_aspect('equal')
# Display IR and radio images
url_standard = subject['location']['standard']
im_standard = Image.open(cStringIO.StringIO(urllib.urlopen(url_standard).read()))
ax1 = fig.add_subplot(141)
ax1.imshow(im_standard,origin='upper')
ax1.set_title('WISE')
url_radio = subject['location']['radio']
im_radio = Image.open(cStringIO.StringIO(urllib.urlopen(url_radio).read()))
ax2 = fig.add_subplot(142)
ax2.imshow(im_radio,origin='upper')
ax2.set_title(subject['metadata']['source'])
ax2.get_yaxis().set_ticklabels([])
ax3.get_yaxis().set_ticklabels([])
# Plot contours identified as the consensus
if len(answer) > 0:
ax4.add_patch(patch_black)
# Add centers of bounding boxes
for comp in components:
bbox_radio = comp[0]['bbox']
bbox_ir = bbox_radio_to_ir(bbox_radio)
xrad = np.median((bbox_ir[0],bbox_ir[2]))
yrad = np.median((bbox_ir[1],bbox_ir[3]))
ax4.scatter(xrad,yrad, c='g', marker='s', s=15, alpha=1)
dbx = [bbox_ir[i] for i in (2,2,0,0,2)]
dby = [bbox_ir[i] for i in (3,1,1,3,3)]
ax4.plot(dbx,dby,color='g')
radiobeamsize = 5. # arcsec
imagesize = 3. # arcmin
imagescale = IMG_HEIGHT_NEW/imagesize / 60. # pixel / arcsec
radio_tol = radiobeamsize * imagescale
for ans in answer:
if answer[ans].has_key('ir_peak'):
# Optical counterpart position
xc,yc = answer[ans]['ir_peak']
# Measure all positions in radio pixel coordinates
radio_centroids = pix_radio(components)
maxdist = 0
for centroid in radio_centroids:
d = pix_dist(xc,yc,centroid[0],centroid[1])
maxdist = d if d > maxdist else maxdist
if d <= radio_tol:
middle_radio = centroid
radio_centroids.remove(middle_radio)
x1 = radio_centroids[0][0]
y1 = radio_centroids[0][1]
x2 = radio_centroids[1][0]
y2 = radio_centroids[1][1]
if len(radio_centroids) == 2:
m1 = (y1 - yc) / (x1 - xc)
b1 = yc - m1*xc
m2 = (y2 - yc) / (x2 - xc)
b2 = yc - m2*xc
xedge1 = 0 if x1 < xc else 500
yedge1 = y1 - (x1-xedge1)*(yc-y1)/(xc-x1)
xedge2 = 0 if x2 < xc else 500
yedge2 = y2 - (x2-xedge2)*(yc-y2)/(xc-x2)
                    # Draw and annotate the bending angle
ax4.plot([xedge1,xc],[yedge1,yc],color='orange',linestyle='--')
ax4.plot([xedge2,xc],[yedge2,yc],color='orange',linestyle='--')
alpha_deg = bending_angle(xc,yc,x1,y1,x2,y2) * 180/np.pi
ax4.text(550,0,r'$\alpha$ = %.1f deg' % alpha_deg,fontsize=11)
else:
print "\tDidn't find match to optical ID for triple radio source %s" % zooniverse_id
else:
print "\tNo IR peak for %s" % zooniverse_id
ax4.yaxis.tick_right()
ax1.get_xaxis().set_ticks([0,100,200,300,400])
ax2.get_xaxis().set_ticks([0,100,200,300,400])
ax3.get_xaxis().set_ticks([0,100,200,300,400])
ax4.get_xaxis().set_ticks([0,100,200,300,400,500])
plt.subplots_adjust(wspace=0.02)
# Save hard copy of the figure
if savefig:
fig.savefig('{0:}/bending_angles/plots/individual/triples/{1:}ba_{2:}.pdf'.format(rgz_dir,anglepath,zooniverse_id))
plt.close()
else:
plt.show()
    # Close figure after it's done; otherwise mpl complains about having thousands of figures open
return None
def y_bisect(xc,yc,xt,yt,xb,yb):
# Finds the point yd such that the vector (xc,yc) -> (0,yd)
# bisects the angle formed by the vectors (xb,yb) -> (xc,yc)
# and (xt,yt) -> (xc,yc)
bc_length = np.hypot(xb - xc,yb - yc)
tc_length = np.hypot(xt - xc,yt - yc)
numerator = ((xb - xc)*xc + (yb - yc)*yc)/bc_length - (xc*(xt - xc) + yc*(yt - yc))/tc_length
denominator = (yb - yc)/bc_length - (yt - yc)/tc_length
return numerator/denominator
def plot_some(n,random_selection=False):
# Plot a random selection of double and triple sources
pathdict = make_pathdict()
# Doubles
doubles = get_doubles()
somedoubles = random.sample(doubles,n) if random_selection else doubles[:n]
for dbl in somedoubles:
plot_one_double(dbl['zooniverse_id'],pathdict,savefig=True)
# Triples
sometriples = get_triples()
for triple in sometriples:
plot_one_triple(triple['zooniverse_id'],pathdict,savefig=True)
return None
def pix_dist(x1,y1,x2,y2):
# Find the distance between two sets of Cartesian points via the Pythagorean theorem
dist = np.sqrt((x2-x1)**2 + (y2-y1)**2)
return dist
def all_triples_pixradio(triples,pathdict):
# Compute the bending angle for RGZ subjects with three radio components and an optical ID within 1 beam size of the center component
radiobeamsize = 5. # arcsec
imagesize = 3. # arcmin
imagescale = IMG_HEIGHT_NEW/imagesize / 60. # pixel / arcsec
radio_tol = radiobeamsize * imagescale
with open('%s/bending_angles/angles_triple_pixradio.csv' % rgz_dir,'w') as f:
print >> f,'zooniverse_id,bending_angle,position_angle'
for triple in triples:
irx,iry = literal_eval(triple['ir_peak'])
subject = subjects.find_one({'zooniverse_id':triple['zooniverse_id']})
contours = get_contours(subject,pathdict)
radio_components = contours['contours']
# Measure all positions in radio pixel coordinates
radio_centroids = pix_radio(radio_components)
maxdist = 0
for centroid in radio_centroids:
d = pix_dist(irx,iry,centroid[0],centroid[1])
maxdist = d if d > maxdist else maxdist
if d <= radio_tol:
middle_radio = centroid
radio_centroids.remove(middle_radio)
alpha = bending_angle(middle_radio[0],middle_radio[1],radio_centroids[0][0],radio_centroids[0][1],radio_centroids[1][0],radio_centroids[1][1])
alpha_deg = alpha * 180./np.pi
phi = position_angle(middle_radio[0],middle_radio[1],radio_centroids[0][0],radio_centroids[0][1],radio_centroids[1][0],radio_centroids[1][1])
phi_deg = phi * 180./np.pi
print >> f,'%s,%.3f,%.3f' % (triple['zooniverse_id'],alpha_deg,phi_deg)
break
else:
print "Couldn't match the optical ID within 1 beam size of center for %s" % triple['zooniverse_id']
return None
def find_multipeaked_singles(subject,plot=False,verbose=True):
# Deprecated in favor of mps_cc
# Find multi-peaked single component sources via binary kernels.
# Download contour data
contours = get_contours(subject,pathdict)
lobe = contours['contours'][0]
# Order of bounding box components is (xmax,ymax,xmin,ymin)
xmax,ymax,xmin,ymin = lobe[0]['bbox']
xsize,ysize = 0.1,0.1
X,Y = np.mgrid[xmin:xmax:xsize,ymin:ymax:ysize]
parr = []
valarr = []
for cl in lobe:
parr.extend([(p['x'],p['y']) for p in cl['arr']])
valarr.extend(np.ones(len(cl['arr']))+cl['level'])
points = np.array([(px,py) for px,py in parr])
values = np.array(valarr)
grid_z0 = griddata(points,values,(X,Y),method='nearest')
grid_z1 = griddata(points,values,(X,Y),method='linear')
grid_z2 = griddata(points,values,(X,Y),method='cubic')
# Find the number of peaks
# http://stackoverflow.com/questions/3684484/peak-detection-in-a-2d-array
#neighborhood = generate_binary_structure(2,2)
kernelsize = 50
neighborhood = np.ones((kernelsize,kernelsize))
'''
Z = np.copy(grid_z2)
Z[np.isnan(grid_z2)] = 1.
'''
Z = grid_z2
local_max = maximum_filter(Z, footprint=neighborhood)==Z
background = np.isnan(Z)
eroded_background = binary_erosion(background, structure=neighborhood, border_value=1)
all_peaks = local_max & ~eroded_background
# Keep only peaks that do not fall in the (NaN) background region
detected_peaks = np.isfinite(Z) & all_peaks
npeaks = detected_peaks.sum()
xdp = X[detected_peaks]
ydp = Y[detected_peaks]
if verbose:
print '%i peaks detected' % npeaks
print xdp,ydp
if plot:
plt.subplot(231,aspect='equal')
plt.plot(points[:,0], points[:,1], 'k.', ms=1)
plt.title(subject['zooniverse_id'])
plt.subplot(232)
plt.imshow(grid_z0.T, extent=(xmin,xmax,ymin,ymax), cmap = cm.cubehelix, origin='lower',interpolation='none')
plt.title('Nearest')
plt.subplot(233)
plt.imshow(grid_z1.T, extent=(xmin,xmax,ymin,ymax), cmap = cm.cubehelix, origin='lower',interpolation='none')
plt.title('Linear')
plt.subplot(234)
plt.imshow(grid_z2.T, extent=(xmin,xmax,ymin,ymax), cmap = cm.cubehelix, origin='lower',interpolation='none')
plt.title('Cubic')
plt.subplot(235)
plt.imshow(Z.T, extent=(xmin,xmax,ymin,ymax), cmap = cm.cubehelix, origin='lower')#,vmin=0.999,vmax=1.012)
plt.title('Z')
'''
plt.subplot(235)
plt.imshow(background.T, extent=(xmin,xmax,ymin,ymax), cmap = cm.cubehelix, origin='lower',interpolation='none')
plt.title('Background')
plt.subplot(235)
plt.imshow(eroded_background.T, extent=(xmin,xmax,ymin,ymax), cmap = cm.cubehelix, origin='lower',interpolation='none')
plt.title('Eroded background')
'''
plt.subplot(236,aspect='equal')
plt.plot(points[:,0], points[:,1], 'k.', ms=1)
plt.plot(xdp,ydp,'ro')
plt.title('Detected peaks')
plt.gcf().set_size_inches(18, 12)
plt.show()
return xdp,ydp
def centroid(arr):
# Find the centroid of a polygon defined by a list of (x,y) points
x = [l['x'] for l in arr]
y = [l['y'] for l in arr]
xmean = np.mean(x)
ymean = np.mean(y)
return xmean,ymean
def point_in_poly(x,y,poly):
# Determine whether a given point (x,y) is within a convex polygon defined by an array of points
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xints = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x,p1y = p2x,p2y
return inside
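# Illustrative sketch (not part of the original analysis): the ray-casting test above
# checked against a unit square; coordinates are hypothetical values for demonstration.
def _demo_point_in_poly():
    unit_square = [(0, 0), (1, 0), (1, 1), (0, 1)]
    assert point_in_poly(0.5, 0.5, unit_square)       # interior point
    assert not point_in_poly(1.5, 0.5, unit_square)   # exterior point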
def make_polygon(arr):
# Create a list of x,y pairs out of an array to draw a polygon
x = [l['x'] for l in arr]
y = [l['y'] for l in arr]
polygon = [(xx,yy) for xx,yy in zip(x,y)]
return polygon
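# Illustrative sketch (not part of the original analysis): centroid() and make_polygon()
# both consume the contour 'arr' format, a list of dicts with 'x' and 'y' keys. The
# square below is a hypothetical example of that format.
def _demo_contour_helpers():
    square_arr = [{'x': 0., 'y': 0.}, {'x': 2., 'y': 0.},
                  {'x': 2., 'y': 2.}, {'x': 0., 'y': 2.}]
    xmean, ymean = centroid(square_arr)
    assert (xmean, ymean) == (1., 1.)
    assert point_in_poly(xmean, ymean, make_polygon(square_arr))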
def mps_cc(subject,pathdict,plot=True,verbose=True):
# Find location of peaks within a single-component radio source via contour counting
contours = get_contours(subject,pathdict)
lobe = contours['contours'][0]
xmax,ymax,xmin,ymin = lobe[0]['bbox']
parr = []
valarr = []
for cl in lobe:
parr.extend([(p['x'],p['y']) for p in cl['arr']])
valarr.extend(np.ones(len(cl['arr']))+cl['level'])
points = np.array([(px,py) for px,py in parr])
values = np.array(valarr)
# Find levels with multiple contours
# For each of those levels, check if next level up has geometric center within that contour
# If no, then that level's geometric center is a local maximum
# If yes, then move up one level and repeat
k = [l['k'] for l in lobe]
ck = Counter(k)
mlarr = []
for x,y in ck.iteritems():
if y > 1:
mlarr.append(x)
if max(k) not in mlarr:
mlarr.append(max(k))
mlarr.sort()
local_maxima = []
for m in mlarr:
levels = [l for l in lobe if l['k'] == m]
# Is there a higher level?
if m < max(k):
upper_levels = [l for l in lobe if l['k'] == m+1]
for level in levels:
within = False
for ul in upper_levels:
gc = centroid(ul['arr'])
polygon = make_polygon(level['arr'])
result = point_in_poly(gc[0],gc[1],polygon)
within += result
if not within:
gc = centroid(level['arr'])
local_maxima.append((m,gc))
if verbose:
print 'Point in poly, m=%i, center=(%.1f,%.1f)' % (m,gc[0],gc[1])
# If no higher level, centroids = local max
else:
for level in levels:
gc = centroid(level['arr'])
local_maxima.append((m,gc))
if verbose:
print 'No higher levels, m=%i, center=(%.1f,%.1f)' % (m,gc[0],gc[1])
# Plot locations of peaks
npeaks = len(local_maxima)
if plot:
xc = [x[1][0] for x in local_maxima]
yc = [x[1][1] for x in local_maxima]
fig = plt.figure()
ax = fig.add_subplot(111)
verts_all = []
codes_all = []
components = contours['contours']
for comp in components:
# Order of bounding box components is (xmax,ymax,xmin,ymin)
comp_xmax,comp_ymax,comp_xmin,comp_ymin = comp[0]['bbox']
# Only plot radio components identified by the users as the consensus;
# check on the xmax value to make sure
for idx,level in enumerate(comp):
verts = [(p['x'],p['y']) for p in level['arr']]
codes = np.ones(len(verts),int) * Path.LINETO
codes[0] = Path.MOVETO
verts_all.extend(verts)
codes_all.extend(codes)
try:
path = Path(verts_all, codes_all)
patch_black = patches.PathPatch(path, facecolor = 'none', edgecolor='black', lw=1)
except AssertionError:
print 'Users found no components for consensus match of %s' % subject['zooniverse_id']
# Plot contours identified as the consensus
ax.add_patch(patch_black)
ax.plot(xc,yc,'r*',ms=10)
ax.set_xlim(0,FIRST_FITS_WIDTH)
ax.set_ylim(FIRST_FITS_HEIGHT,0)
ax.set_aspect('equal')
#ax.title(subject['zooniverse_id'])
plt.show()
return local_maxima
def batch_mps_cc():
# Find location of peaks within all single-component radio sources via contour counting
'''
Time estimate:
Contour data retrieved over network:
5013.01 seconds (~83 minutes) for 38,750 images
7.73 images per second
Contour data stored locally:
1559.67 seconds (~26 minutes) for 46,068 images
29.54 images per second
'''
# Note - only enabled for FIRST now.
tstart = time.time()
mps = subjects.find({'state':'complete','metadata.contour_count':1,'metadata.survey':'first'},timeout=False)
n = mps.count()
with open('%s/bending_angles/multipeaked_singles_cc.csv' % rgz_dir,'w') as f:
print >> f,"zooniverse_id,nlobe,ntotal,xc,yc"
idx_s = 0
for subject in mps:
try:
local_maxima = mps_cc(subject,pathdict,plot=False,verbose=False)
if len(local_maxima) > 1:
for idx,lm in enumerate(local_maxima):
print >> f,"{0:},{1:d},{2:d},{3:.4f},{4:.4f}".format(subject['zooniverse_id'],idx+1,len(local_maxima),lm[1][0],lm[1][1])
except ValueError:
print "Error retrieving JSON object for {0:}".format(subject['zooniverse_id'])
idx_s += 1
if idx_s % 100 == 0:
print "%i completed" % idx_s
mps.close()
tend = time.time()
print '%.2f minutes for %i images' % ((tend - tstart)/60.,n)
print '%.2f images per second' % (n/(tend - tstart))
return None
def hist_mps_cc():
# Plot the distribution of the number of peaks in single-component radio subjects
data = ascii.read('%s/bending_angles/multipeaked_singles_cc.csv' % rgz_dir,delimiter=',',data_start=1,header_start=0)
c = Counter(data['zooniverse_id'])
# Doesn't include npeaks = 1, so calculate that separately
ntotal = subjects.find({'state':'complete','metadata.contour_count':1,'metadata.survey':'first'}).count()
runningsum = 0
for v in c.itervalues():
runningsum += v
c[1] = ntotal - runningsum
fig = plt.figure(1,(12,6))
ax1 = fig.add_subplot(121)
histML(c.values(), bins=range(10), ax=ax1, histtype='step', lw=2, alpha=1.0, color='#377eb8',log=True)
ax1.set_xlim(1,10)
ax1.set_xlabel(r'$N_{peaks}$',fontsize=18)
ax1.set_ylabel('Count')
ax1.set_title('RGZ 1-contour sources')
ax2 = fig.add_subplot(122)
histML(c.values(), bins=range(10), ax=ax2, histtype='step', lw=2, alpha=1.0, color='#e41a1c',cumulative=True,normed=True)
ax2.set_xlabel(r'$N_{peaks}$',fontsize=18)
ax2.set_title('RGZ 1-contour sources')
ax2.set_ylabel('Cumulative fraction')
fig.savefig('%s/bending_angles/plots/mps_cc.pdf' % rgz_dir)
plt.show()
return None
def batch_mps_kernel():
# Deprecated in favor of batch_mps_cc
# Find location of peaks within all single-component radio sources via binary kernels
tstart = time.time()
mps = subjects.find({'state':'complete','metadata.contour_count':1})
n = mps.count()
with open('%s/bending_angles/multipeaked_singles.csv' % rgz_dir,'w') as f:
for subject in mps:
xdp,ydp = find_multipeaked_singles(subject,plot=False,verbose=False)
if len(xdp) > 0 and len(ydp) > 0:
for idx,(xsubpeak,ysubpeak) in enumerate(zip(xdp,ydp)):
print >> f,subject['zooniverse_id'],idx+1,len(xdp),xsubpeak,ysubpeak
tend = time.time()
print '%.2f seconds for %i images' % (tend - tstart,n)
print '%.2f images per second' % (n/(tend - tstart))
return None
def mps_bending_angle(consensus_level = 0.50):
# Compute the bending and position angles for double-peaked, single-contour sources with optical counterparts
tstart = time.time()
# Load data
df = pd.read_csv('%s/bending_angles/multipeaked_singles_cc.csv' % rgz_dir,delimiter=',')
df2 = df[(df['ntotal'] == 2)]
with open('%s/bending_angles/angles_multipeaked_singles.csv' % rgz_dir,'w') as f:
print >> f,'zooniverse_id,bending_angle,position_angle'
# get optical counterpart for zooniverse_id (loop over eventually)
df2_pair1 = df2[::2]
df2_pair2 = df2[1::2]
_zid = np.array(df2_pair1['zooniverse_id'])
_x1 = np.array(df2_pair1['xc'])
_y1 = np.array(df2_pair1['yc'])
_x2 = np.array(df2_pair2['xc'])
_y2 = np.array(df2_pair2['yc'])
for zooniverse_id,x1,y1,x2,y2 in zip(_zid,_x1,_y1,_x2,_y2):
c = consensus.checksum(zooniverse_id)
try:
if (len(c['answer']) == 1) and (c['n_users']/float(c['n_total']) >= consensus_level):
if c['answer'][c['answer'].keys()[0]].has_key('ir_peak'):
peak_x,peak_y = c['answer'][c['answer'].keys()[0]]['ir_peak']
ir_x = peak_x * first_ir_scale_x
ir_y = peak_y * first_ir_scale_y
alpha = bending_angle(ir_x,ir_y,x1,y1,x2,y2)
alpha_deg = alpha * 180./np.pi
phi = position_angle(ir_x,ir_y,x1,y1,x2,y2)
phi_deg = phi * 180./np.pi
print >> f,'{0:s},{1:.4f},{2:.4f}'.format(zooniverse_id,alpha_deg,phi_deg)
else:
print "Had more than 1 IR source and/or less than {0:.2f} consensus fraction for {1:s}".format(consensus_level,zooniverse_id)
except TypeError:
print "No 'answer' key for %s" % zooniverse_id
# Timing the process
tend = time.time()
n = len(df2)/2
print '%.2f minutes for %i subjects' % ((tend - tstart)/60.,n)
print '%.2f subjects per second' % (n/(tend - tstart))
return None
if __name__ == '__main__':
# If run from command line, computes bending angles for all double and multi-peaked single component RGZ sources
pathdict = make_pathdict()
doubles = get_doubles()
all_doubles_pixradio(doubles,pathdict)
batch_mps_cc()
mps_bending_angle()
'''
triples = get_triples()
all_triples_pixradio(triples,pathdict)
'''
| {
"content_hash": "09fca722b8fc4017b1d631bbef9edc06",
"timestamp": "",
"source": "github",
"line_count": 1288,
"max_line_length": 176,
"avg_line_length": 34.34549689440994,
"alnum_prop": 0.5834030336596062,
"repo_name": "willettk/rgz-analysis",
"id": "3ea925c2f64b73425edc6066e63356d8e728dd80",
"size": "44326",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/bending_angles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "147317"
},
{
"name": "Python",
"bytes": "691021"
},
{
"name": "Ruby",
"bytes": "3598"
},
{
"name": "Shell",
"bytes": "6723"
},
{
"name": "TeX",
"bytes": "40897"
}
],
"symlink_target": ""
} |
"""Tests for classes and methods relating to user rights."""
from core.domain import collection_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import rights_manager
from core.domain import user_services
from core.tests import test_utils
class ExplorationRightsTests(test_utils.GenericTestBase):
"""Test that rights for actions on explorations work as expected."""
EXP_ID = 'exp_id'
def setUp(self):
super(ExplorationRightsTests, self).setUp()
self.signup('[email protected]', 'A')
self.signup('[email protected]', 'B')
self.signup('[email protected]', 'C')
self.signup('[email protected]', 'D')
self.signup('[email protected]', 'E')
self.signup('[email protected]', 'F')
self.signup(self.ADMIN_EMAIL, username=self.ADMIN_USERNAME)
self.signup(self.MODERATOR_EMAIL, username=self.MODERATOR_USERNAME)
self.user_id_a = self.get_user_id_from_email('[email protected]')
self.user_id_b = self.get_user_id_from_email('[email protected]')
self.user_id_c = self.get_user_id_from_email('[email protected]')
self.user_id_d = self.get_user_id_from_email('[email protected]')
self.user_id_e = self.get_user_id_from_email('[email protected]')
self.user_id_f = self.get_user_id_from_email('[email protected]')
self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.user_id_moderator = self.get_user_id_from_email(
self.MODERATOR_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.set_moderators([self.MODERATOR_USERNAME])
self.user_a = user_services.UserActionsInfo(self.user_id_a)
self.user_b = user_services.UserActionsInfo(self.user_id_b)
self.user_c = user_services.UserActionsInfo(self.user_id_c)
self.user_d = user_services.UserActionsInfo(self.user_id_d)
self.user_e = user_services.UserActionsInfo(self.user_id_e)
self.user_f = user_services.UserActionsInfo(self.user_id_f)
self.user_admin = user_services.UserActionsInfo(self.user_id_admin)
self.user_moderator = user_services.UserActionsInfo(
self.user_id_moderator)
self.system_user = user_services.get_system_user()
def test_get_exploration_rights_for_nonexistent_exploration(self):
non_exp_id = 'this_exp_does_not_exist_id'
with self.assertRaisesRegexp(
Exception,
'Entity for class ExplorationRightsModel with id '
'this_exp_does_not_exist_id not found'
):
rights_manager.get_exploration_rights(non_exp_id)
self.assertIsNone(
rights_manager.get_exploration_rights(non_exp_id, strict=False))
def test_demo_exploration(self):
exp_services.load_demo('1')
rights_manager.release_ownership_of_exploration(
self.system_user, '1')
exp_rights = rights_manager.get_exploration_rights('1')
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_a, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_moderator, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_moderator, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_moderator, exp_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_moderator, exp_rights))
def test_non_splash_page_demo_exploration(self):
# Note: there is no difference between permissions for demo
# explorations, whether or not they are on the splash page.
exp_services.load_demo('3')
rights_manager.release_ownership_of_exploration(
self.system_user, '3')
exp_rights = rights_manager.get_exploration_rights('3')
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_a, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_moderator, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_moderator, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_moderator, exp_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_moderator, exp_rights))
def test_ownership_of_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_owner(self.user_id_a))
self.assertFalse(exp_rights.is_owner(self.user_id_b))
self.assertFalse(exp_rights.is_owner(self.user_id_admin))
def test_newly_created_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_admin, exp_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_moderator, exp_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_moderator, exp_rights))
self.assertFalse(rights_manager.check_can_translate_activity(
self.user_moderator, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_moderator, exp_rights))
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_translate_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, exp_rights))
def test_inviting_collaborator_to_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_translate_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, exp_rights))
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_b, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, exp_rights))
def test_inviting_translator_to_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_translate_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, exp_rights))
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_TRANSLATOR)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_rights))
self.assertTrue(rights_manager.check_can_translate_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, exp_rights))
def test_inviting_playtester_to_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_translate_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, exp_rights))
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_translate_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, exp_rights))
def test_setting_rights_of_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_exploration(
self.user_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_TRANSLATOR)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_exploration(
self.user_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_exploration(
self.user_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_OWNER)
rights_manager.assign_role_for_exploration(
self.user_b, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_OWNER)
rights_manager.assign_role_for_exploration(
self.user_b, self.EXP_ID, self.user_id_d,
rights_manager.ROLE_EDITOR)
rights_manager.assign_role_for_exploration(
self.user_b, self.EXP_ID, self.user_id_e,
rights_manager.ROLE_TRANSLATOR)
rights_manager.assign_role_for_exploration(
self.user_b, self.EXP_ID, self.user_id_f,
rights_manager.ROLE_VIEWER)
def test_publishing_and_unpublishing_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='A title', category='A category')
exp_services.save_new_exploration(self.user_id_a, exp)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
rights_manager.publish_exploration(self.user_a, self.EXP_ID)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
self.assertFalse(rights_manager.check_can_unpublish_activity(
self.user_a, exp_rights))
rights_manager.unpublish_exploration(self.user_admin, self.EXP_ID)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, exp_rights))
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
def test_can_only_delete_unpublished_explorations(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='A title', category='A category')
exp_services.save_new_exploration(self.user_id_a, exp)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
rights_manager.publish_exploration(self.user_a, self.EXP_ID)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
rights_manager.unpublish_exploration(self.user_admin, self.EXP_ID)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_a, exp_rights))
def test_changing_viewability_of_exploration(self):
exp = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='A title', category='A category')
exp_services.save_new_exploration(self.user_id_a, exp)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
with self.assertRaisesRegexp(Exception, 'already the current value'):
rights_manager.set_private_viewability_of_exploration(
self.user_a, self.EXP_ID, False)
with self.assertRaisesRegexp(Exception, 'cannot be changed'):
rights_manager.set_private_viewability_of_exploration(
self.user_b, self.EXP_ID, True)
rights_manager.set_private_viewability_of_exploration(
self.user_a, self.EXP_ID, True)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, exp_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
rights_manager.set_private_viewability_of_exploration(
self.user_a, self.EXP_ID, False)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, exp_rights))
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_rights))
def test_check_exploration_rights(self):
exp = exp_domain.Exploration.create_default_exploration(self.EXP_ID)
exp_services.save_new_exploration(self.user_id_a, exp)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_c,
rights_manager.ROLE_EDITOR)
rights_manager.assign_role_for_exploration(
self.user_a, self.EXP_ID, self.user_id_d,
rights_manager.ROLE_TRANSLATOR)
exp_rights = rights_manager.get_exploration_rights(self.EXP_ID)
self.assertTrue(exp_rights.is_owner(self.user_id_a))
self.assertTrue(exp_rights.is_editor(self.user_id_c))
self.assertTrue(exp_rights.is_viewer(self.user_id_b))
self.assertFalse(exp_rights.is_viewer(self.user_id_a))
self.assertFalse(exp_rights.is_owner(self.user_id_b))
self.assertFalse(exp_rights.is_editor(self.user_id_b))
self.assertTrue(exp_rights.is_translator(self.user_id_d))
self.assertFalse(exp_rights.is_translator(self.user_id_b))
def test_get_multiple_exploration_rights(self):
exp_ids = ['exp1', 'exp2', 'exp3', 'exp4']
# Save only the first 3 explorations to check that None is returned for
# the non-existent exploration.
for exp_id in exp_ids[:3]:
self.save_new_valid_exploration(exp_id, self.user_id_admin)
exp_rights = rights_manager.get_multiple_exploration_rights_by_ids(
exp_ids)
self.assertEqual(len(exp_rights), 4)
for rights_object in exp_rights[:3]:
self.assertIsNotNone(rights_object)
self.assertIsNone(exp_rights[3])
class CollectionRightsTests(test_utils.GenericTestBase):
"""Test that rights for actions on collections work as expected."""
COLLECTION_ID = 'collection_id'
EXP_ID_FOR_COLLECTION = 'exp_id_for_collection'
def setUp(self):
super(CollectionRightsTests, self).setUp()
self.signup('[email protected]', 'A')
self.signup('[email protected]', 'B')
self.signup('[email protected]', 'C')
self.signup('[email protected]', 'D')
self.signup('[email protected]', 'E')
self.signup(self.ADMIN_EMAIL, username=self.ADMIN_USERNAME)
self.signup(self.MODERATOR_EMAIL, username=self.MODERATOR_USERNAME)
self.user_id_a = self.get_user_id_from_email('[email protected]')
self.user_id_b = self.get_user_id_from_email('[email protected]')
self.user_id_c = self.get_user_id_from_email('[email protected]')
self.user_id_d = self.get_user_id_from_email('[email protected]')
self.user_id_e = self.get_user_id_from_email('[email protected]')
self.user_id_admin = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.user_id_moderator = self.get_user_id_from_email(
self.MODERATOR_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.set_moderators([self.MODERATOR_USERNAME])
self.user_a = user_services.UserActionsInfo(self.user_id_a)
self.user_b = user_services.UserActionsInfo(self.user_id_b)
self.user_c = user_services.UserActionsInfo(self.user_id_c)
self.user_d = user_services.UserActionsInfo(self.user_id_d)
self.user_e = user_services.UserActionsInfo(self.user_id_e)
self.user_admin = user_services.UserActionsInfo(self.user_id_admin)
self.user_moderator = user_services.UserActionsInfo(
self.user_id_moderator)
self.system_user = user_services.get_system_user()
def test_get_collection_rights_for_nonexistent_collection(self):
non_col_id = 'this_collection_does_not_exist_id'
with self.assertRaisesRegexp(
Exception,
'Entity for class CollectionRightsModel with id '
'this_collection_does_not_exist_id not found'
):
rights_manager.get_collection_rights(non_col_id)
self.assertIsNone(
rights_manager.get_collection_rights(non_col_id, strict=False))
def test_demo_collection(self):
collection_services.load_demo('0')
rights_manager.release_ownership_of_collection(
self.system_user, '0')
collection_rights = rights_manager.get_collection_rights('0')
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, collection_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_a, collection_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_a, collection_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_admin, collection_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_admin, collection_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_admin, collection_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_moderator, collection_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_moderator, collection_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_moderator, collection_rights))
def test_ownership_of_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
self.assertListEqual(
['A'],
rights_manager.get_collection_owner_names(
self.COLLECTION_ID))
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(collection_rights.is_owner(self.user_id_a))
self.assertFalse(collection_rights.is_owner(self.user_id_b))
self.assertFalse(collection_rights.is_owner(self.user_id_admin))
def test_newly_created_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
self.assertListEqual(
['A'],
rights_manager.get_collection_owner_names(
self.COLLECTION_ID))
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, collection_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_a, collection_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_a, collection_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_admin, collection_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_admin, collection_rights))
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_admin, collection_rights))
self.assertTrue(rights_manager.check_can_access_activity(
self.user_moderator, collection_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_moderator, collection_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_moderator, collection_rights))
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, collection_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, collection_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, collection_rights))
def test_inviting_collaborator_to_collection(self):
self.save_new_valid_collection(
self.COLLECTION_ID, self.user_id_a,
exploration_id=self.EXP_ID_FOR_COLLECTION)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
# Verify initial editor permissions for the collection.
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, collection_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_a, collection_rights))
# Verify initial editor permissions for the exploration within the
# collection.
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, collection_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, collection_rights))
# User A adds user B to the collection as an editor.
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
# Ensure User A is the only user in the owner names list.
self.assertListEqual(
['A'],
rights_manager.get_collection_owner_names(
self.COLLECTION_ID))
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
# Ensure User B is now an editor of the collection.
self.assertTrue(rights_manager.check_can_access_activity(
self.user_b, collection_rights))
self.assertTrue(rights_manager.check_can_edit_activity(
self.user_b, collection_rights))
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_b, collection_rights))
exp_for_collection_rights = rights_manager.get_exploration_rights(
self.EXP_ID_FOR_COLLECTION)
# Ensure User B is not an editor of the exploration within the
# collection.
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_for_collection_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_for_collection_rights))
def test_inviting_playtester_to_collection(self):
self.save_new_valid_collection(
self.COLLECTION_ID, self.user_id_a,
exploration_id=self.EXP_ID_FOR_COLLECTION)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
exp_for_collection_rights = rights_manager.get_exploration_rights(
self.EXP_ID_FOR_COLLECTION)
# Verify initial viewer permissions for the collection.
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, collection_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, collection_rights))
# Verify initial viewer permissions for the exploration within the
# collection.
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_for_collection_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_for_collection_rights))
# User A adds user B to the collection as a viewer.
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
exp_for_collection_rights = rights_manager.get_exploration_rights(
self.EXP_ID_FOR_COLLECTION)
# Ensure User B is now a viewer of the collection.
self.assertTrue(rights_manager.check_can_access_activity(
self.user_b, collection_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, collection_rights))
# Ensure User B cannot view the exploration just because he/she has
# access to the collection containing it.
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, exp_for_collection_rights))
self.assertFalse(rights_manager.check_can_edit_activity(
self.user_b, exp_for_collection_rights))
def test_setting_rights_of_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_VIEWER)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_collection(
self.user_b, self.COLLECTION_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_EDITOR)
with self.assertRaisesRegexp(Exception, 'Could not assign new role.'):
rights_manager.assign_role_for_collection(
self.user_b, self.COLLECTION_ID, self.user_id_c,
rights_manager.ROLE_VIEWER)
rights_manager.assign_role_for_collection(
self.user_a, self.COLLECTION_ID, self.user_id_b,
rights_manager.ROLE_OWNER)
rights_manager.assign_role_for_collection(
self.user_b, self.COLLECTION_ID, self.user_id_c,
rights_manager.ROLE_OWNER)
rights_manager.assign_role_for_collection(
self.user_b, self.COLLECTION_ID, self.user_id_d,
rights_manager.ROLE_EDITOR)
rights_manager.assign_role_for_collection(
self.user_b, self.COLLECTION_ID, self.user_id_e,
rights_manager.ROLE_VIEWER)
def test_publishing_and_unpublishing_collection(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, collection_rights))
rights_manager.publish_collection(self.user_a, self.COLLECTION_ID)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_b, collection_rights))
self.assertFalse(rights_manager.check_can_unpublish_activity(
self.user_a, collection_rights))
rights_manager.unpublish_collection(
self.user_admin, self.COLLECTION_ID)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(rights_manager.check_can_access_activity(
self.user_a, collection_rights))
self.assertFalse(rights_manager.check_can_access_activity(
self.user_b, collection_rights))
def test_can_only_delete_unpublished_collections(self):
self.save_new_default_collection(self.COLLECTION_ID, self.user_id_a)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_a, collection_rights))
rights_manager.publish_collection(self.user_a, self.COLLECTION_ID)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertFalse(rights_manager.check_can_delete_activity(
self.user_a, collection_rights))
rights_manager.unpublish_collection(
self.user_admin, self.COLLECTION_ID)
collection_rights = rights_manager.get_collection_rights(
self.COLLECTION_ID)
self.assertTrue(rights_manager.check_can_delete_activity(
self.user_a, collection_rights))
class CheckCanReleaseOwnershipTest(test_utils.GenericTestBase):
"""Tests for check_can_release_ownership function."""
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
def setUp(self):
super(CheckCanReleaseOwnershipTest, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.set_admins([self.ADMIN_USERNAME])
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.owner = user_services.UserActionsInfo(self.owner_id)
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
def test_admin_can_release_ownership_of_published_exploration(self):
self.assertTrue(rights_manager.check_can_release_ownership(
self.admin,
rights_manager.get_exploration_rights(self.published_exp_id)))
def test_owner_can_release_ownership_of_published_exploration(self):
self.assertTrue(rights_manager.check_can_release_ownership(
self.owner,
rights_manager.get_exploration_rights(self.published_exp_id)))
def test_admin_cannot_release_ownership_of_private_exploration(self):
self.assertFalse(rights_manager.check_can_release_ownership(
self.admin,
rights_manager.get_exploration_rights(self.private_exp_id)))
def test_owner_cannot_release_ownership_of_private_exploration(self):
self.assertFalse(rights_manager.check_can_release_ownership(
self.owner,
rights_manager.get_exploration_rights(self.private_exp_id)))
class CheckCanUnpublishActivityTest(test_utils.GenericTestBase):
"""Tests for check_can_unpublish_activity function."""
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
private_col_id = 'col_id_1'
published_col_id = 'col_id_2'
def setUp(self):
super(CheckCanUnpublishActivityTest, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
self.moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
self.set_admins([self.ADMIN_USERNAME])
self.set_moderators([self.MODERATOR_USERNAME])
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.admin = user_services.UserActionsInfo(self.admin_id)
self.owner = user_services.UserActionsInfo(self.owner_id)
self.moderator = user_services.UserActionsInfo(self.moderator_id)
self.save_new_valid_exploration(
self.published_exp_id, self.owner_id)
self.save_new_valid_exploration(
self.private_exp_id, self.owner_id)
self.save_new_valid_collection(
self.published_col_id, self.owner_id,
exploration_id=self.published_col_id)
self.save_new_valid_collection(
self.private_col_id, self.owner_id,
exploration_id=self.private_col_id)
rights_manager.publish_exploration(self.owner, self.published_exp_id)
rights_manager.publish_collection(self.owner, self.published_col_id)
def test_admin_can_unpublish_published_collection(self):
self.assertTrue(rights_manager.check_can_unpublish_activity(
self.admin,
rights_manager.get_collection_rights(self.published_col_id)))
def test_owner_cannot_unpublish_published_collection(self):
self.assertFalse(rights_manager.check_can_unpublish_activity(
self.owner,
rights_manager.get_collection_rights(self.published_col_id)))
def test_admin_cannot_unpublish_private_collection(self):
self.assertFalse(rights_manager.check_can_unpublish_activity(
self.admin,
rights_manager.get_collection_rights(self.private_col_id)))
def test_admin_can_unpublish_published_exploration(self):
self.assertTrue(rights_manager.check_can_unpublish_activity(
self.admin,
rights_manager.get_exploration_rights(self.published_exp_id)))
def test_owner_cannot_unpublish_published_exploration(self):
self.assertFalse(rights_manager.check_can_unpublish_activity(
self.owner,
rights_manager.get_exploration_rights(self.published_exp_id)))
def test_admin_cannot_unpublish_private_exploration(self):
self.assertFalse(rights_manager.check_can_unpublish_activity(
self.admin,
rights_manager.get_exploration_rights(self.private_exp_id)))
def test_moderator_can_unpublish_published_exploration(self):
self.assertTrue(rights_manager.check_can_unpublish_activity(
self.moderator,
rights_manager.get_exploration_rights(self.published_exp_id)))
| {
"content_hash": "30b2cbc4319a99a3e82049d4822e3b63",
"timestamp": "",
"source": "github",
"line_count": 860,
"max_line_length": 78,
"avg_line_length": 45.78139534883721,
"alnum_prop": 0.6604947678553287,
"repo_name": "AllanYangZhou/oppia",
"id": "6983dcd2540be7c55340e31dd64c73ddf32ef6a8",
"size": "39977",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/domain/rights_manager_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "82690"
},
{
"name": "HTML",
"bytes": "1128088"
},
{
"name": "JavaScript",
"bytes": "3945933"
},
{
"name": "Python",
"bytes": "4888439"
},
{
"name": "Shell",
"bytes": "50051"
}
],
"symlink_target": ""
} |
__author__ = "Fabio Tea <[email protected]>"
import feedparser
from urllib.request import urlopen
from sopel.tools import SopelMemory, SopelMemoryWithDefault
from sopel.module import commands, example, NOLIMIT, require_privilege, OP, require_admin
from sopel.config.types import (StaticSection, ValidatedAttribute, ListAttribute)
class RSSSection(StaticSection):
feeds = ListAttribute("feeds", default=[])
update_interval = ValidatedAttribute("update_interval", int, default=10)
def setup(bot):
bot.config.define_section("rss", RSSSection)
bot.memory["rss"] = SopelMemory()
bot.memory["rss"]["feeds"] = []
bot.memory["rss"]["update_interval"] = 10
if bot.config.rss.feeds:
bot.memory["rss"]["feeds"] = bot.config.rss.feeds
if bot.config.rss.update_interval:
bot.memory["rss"]["update_interval"] = bot.config.rss.update_interval
def configure(config):
config.define_section("rss", RSSSection)
config.rss.configure_setting("feeds", "Feed URLs")
config.rss.configure_setting("update_interval", "How often to check? (seconds)")
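# Illustrative sketch (not part of the original plugin): with the settings above, the
# [rss] section of the Sopel config file would look roughly like this, assuming the
# list attribute is stored comma-separated; the feed URLs are placeholders.
EXAMPLE_RSS_CONFIG = """
[rss]
feeds = http://example.com/feed.xml,http://example.org/rss
update_interval = 10
"""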
def shutdown(bot):
print("shutting down...")
bot.debug("RSS", "shutting down...", "always")
bot.config.rss.feeds = bot.memory["rss"]["feeds"]
bot.config.rss.update_interval = bot.memory["rss"]["update_interval"]
bot.config.save()
print(bot.config.rss.feeds)
bot.debug("RSS", bot.config.rss.feeds, "always")
bot.debug("RSS", bot.config.rss.update_interval, "always")
@require_admin
@commands("rssget")
def rssget(bot, trigger):
if not trigger.group(2) is None:
bot.say("coming soon")
return NOLIMIT
# rss = "http://lorem-rss.herokuapp.com/feed"
# feed = feedparser.parse(rss)
# for key in feed["entries"]:
# bot.say(unidecode.unidecode(key["title"]))
@require_admin
@commands("rsslist")
@example(".rsslist")
def rsslist(bot, trigger):
if not trigger.group(2) is None:
bot.say("expecting no parameter for this command...")
return NOLIMIT
feeds = bot.memory["rss"]["feeds"]
bot.say("RSS Feed URLs (#{}): ".format(len(feeds)))
for feed in feeds:
bot.say("{}: {}".format(feeds.index(feed) + 1, feed))
return NOLIMIT
@require_admin
@commands("rssadd")
@example(".rssadd http://google.com")
def rssadd(bot, trigger):
url = trigger.group(2)
if trigger.group(2) is None:
bot.say("expecting one parameter for this command...")
return NOLIMIT
try:
with urlopen(url) as f:
if f.status == 200:
bot.memory["rss"]["feeds"].append(url)
# bot.config.save()
bot.say("RSS feed '{}' added successfully".format(url))
except:
bot.say("Unable to add feed '{}' - Invalid URL!".format(url))
return NOLIMIT
@require_admin
@commands("rssdel")
@example(".rssdel 2")
def rssdel(bot, trigger):
idx = trigger.group(2)
if idx is None:
bot.say("expecting one parameter for this command...")
return NOLIMIT
try:
# .rsslist displays feeds with 1-based indices, so convert before removing
url = bot.memory["rss"]["feeds"].pop(int(idx) - 1)
# bot.config.save()
bot.say("RSS feed '{}' deleted successfully".format(url))
except (ValueError, IndexError):
bot.say("Unable to delete feed '{}' - No such index!".format(idx))
return NOLIMIT
@require_admin
@commands("rssclear")
@example(".rssclear")
def rssclear(bot, trigger):
if not trigger.group(2) is None:
bot.say("expecting no parameter for this command...")
return NOLIMIT
bot.memory["rss"]["feeds"].clear()
# bot.config.save()
bot.say("All RSS feeds deleted successfully")
return NOLIMIT
| {
"content_hash": "549494610b8cd6496aed2fac4c4d895f",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 89,
"avg_line_length": 30.42063492063492,
"alnum_prop": 0.6146621445343073,
"repo_name": "f4bio/sopel-rss",
"id": "5623a88f64b146f7f85e15a1de23ef1c4490ffdc",
"size": "3833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3833"
}
],
"symlink_target": ""
} |
import errno
import logging
import os
from ceph_deploy import hosts, exc
from ceph_deploy.lib import remoto
LOG = logging.getLogger(__name__)
def distro_is_supported(distro_name):
"""
An enforcer of supported distros that can differ from what ceph-deploy
supports.
"""
supported = ['centos', 'redhat', 'ubuntu', 'debian']
if distro_name in supported:
return True
return False
def connect(args):
for hostname in args.hosts:
distro = hosts.get(hostname, username=args.username)
if not distro_is_supported(distro.normalized_name):
raise exc.UnsupportedPlatform(
distro.distro_name,
distro.codename,
distro.release
)
LOG.info(
'Distro info: %s %s %s',
distro.name,
distro.release,
distro.codename
)
LOG.info('assuming that a repository with Calamari packages is already configured.')
LOG.info('Refer to the docs for examples (http://ceph.com/ceph-deploy/docs/conf.html)')
rlogger = logging.getLogger(hostname)
# Emplace minion config prior to installation so that it is present
# when the minion first starts.
minion_config_dir = os.path.join('/etc/salt/', 'minion.d')
minion_config_file = os.path.join(minion_config_dir, 'calamari.conf')
rlogger.debug('creating config dir: %s' % minion_config_dir)
distro.conn.remote_module.makedir(minion_config_dir, [errno.EEXIST])
rlogger.debug(
'creating the calamari salt config: %s' % minion_config_file
)
distro.conn.remote_module.write_file(
minion_config_file,
('master: %s\n' % args.master).encode('utf-8')
)
distro.packager.install('salt-minion')
distro.packager.install('diamond')
# redhat/centos need to get the service started
if distro.normalized_name in ['redhat', 'centos']:
remoto.process.run(
distro.conn,
['chkconfig', 'salt-minion', 'on']
)
remoto.process.run(
distro.conn,
['service', 'salt-minion', 'start']
)
distro.conn.exit()
def calamari(args):
if args.subcommand == 'connect':
connect(args)
def make(parser):
"""
Install and configure Calamari nodes. Assumes that a repository with
Calamari packages is already configured. Refer to the docs for examples
(http://ceph.com/ceph-deploy/docs/conf.html)
"""
calamari_parser = parser.add_subparsers(dest='subcommand')
calamari_parser.required = True
calamari_connect = calamari_parser.add_parser(
'connect',
help='Configure host(s) to connect to Calamari master'
)
calamari_connect.add_argument(
'--master',
nargs='?',
metavar='MASTER SERVER',
help="The domain for the Calamari master server"
)
calamari_connect.add_argument(
'hosts',
nargs='+',
)
parser.set_defaults(
func=calamari,
)
| {
"content_hash": "8228d6ec6d5dfc6301c4b636cf4a267f",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 95,
"avg_line_length": 29.055555555555557,
"alnum_prop": 0.5984703632887189,
"repo_name": "codenrhoden/ceph-deploy",
"id": "9bbea65ce29d1a6edec9c41d5141d7a83b58cdea",
"size": "3138",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ceph_deploy/calamari.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "392696"
},
{
"name": "Shell",
"bytes": "8614"
}
],
"symlink_target": ""
} |
"""
Common functions
Marco Lui, January 2013
"""
from itertools import islice
import marshal
class Enumerator(object):
"""
Enumerator object. Returns a larger number each call.
Can be used with defaultdict to enumerate a sequence of items.
"""
def __init__(self, start=0):
self.n = start
def __call__(self):
retval = self.n
self.n += 1
return retval
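# Illustrative usage sketch (not part of the original module): pairing Enumerator with
# defaultdict, as described in the docstring, to give each new item a unique integer id.
def _demo_enumerator():
    from collections import defaultdict
    ids = defaultdict(Enumerator())
    assert [ids['a'], ids['b'], ids['a']] == [0, 1, 0]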
def chunk(seq, chunksize):
"""
Break a sequence into chunks not exceeding a predetermined size
"""
seq_iter = iter(seq)
while True:
chunk = tuple(islice(seq_iter, chunksize))
if not chunk: break
yield chunk
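# Illustrative usage sketch (not part of the original module): breaking a short
# sequence into chunks of at most 2 items; the final chunk may be smaller.
def _demo_chunk():
    assert list(chunk(range(5), 2)) == [(0, 1), (2, 3), (4,)]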
def unmarshal_iter(path):
"""
Open a given path and yield an iterator over items unmarshalled from it.
"""
with open(path, 'rb') as f:
while True:
try:
yield marshal.load(f)
except EOFError:
break
import os, errno
def makedir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
import csv
def write_weights(weights, path):
w = dict(weights)
with open(path, 'w') as f:
writer = csv.writer(f)
try:
key_order = sorted(w, key=w.get, reverse=True)
except ValueError:
# Could not order keys by value, value is probably a vector.
# Order keys alphabetically in this case.
key_order = sorted(w)
for k in key_order:
row = [repr(k)]
try:
row.extend(w[k])
except TypeError:
row.append(w[k])
writer.writerow(row)
import numpy
def read_weights(path):
with open(path) as f:
reader = csv.reader(f)
retval = dict()
for row in reader:
key = eval(row[0])
#val = numpy.array( map(float,row[1:]) )
val = numpy.array( [float(v) if v != 'nan' else 0. for v in row[1:]] )
retval[key] = val
return retval
def read_features(path):
"""
Read a list of features in feature-per-line format, where each
feature is a repr and needs to be evaled.
@param path path to read from
"""
with open(path) as f:
return map(eval, f)
def write_features(features, path):
"""
Write a list of features to a file at `path`. The repr of each
feature is written on a new line.
@param features list of features to write
@param path path to write to
"""
with open(path,'w') as f:
for feat in features:
print >>f, repr(feat)
def index(seq):
"""
Build an index for a sequence of items. Assumes
that the items in the sequence are unique.
@param seq the sequence to index
@returns a dictionary from item to position in the sequence
"""
return dict((k,v) for (v,k) in enumerate(seq))
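# Illustrative usage sketch (not part of the original module): indexing a small
# sequence maps each unique item to its position.
def _demo_index():
    assert index(['a', 'b', 'c']) == {'a': 0, 'b': 1, 'c': 2}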
from itertools import imap
from contextlib import contextmanager, closing
import multiprocessing as mp
@contextmanager
def MapPool(processes=None, initializer=None, initargs=None, maxtasksperchild=None, chunksize=1):
"""
Contextmanager to express the common pattern of not using multiprocessing if
only 1 job is allocated (for example for debugging reasons)
"""
if processes is None:
processes = mp.cpu_count() + 4
if processes > 1:
with closing( mp.Pool(processes, initializer, initargs, maxtasksperchild)) as pool:
f = lambda fn, chunks: pool.imap_unordered(fn, chunks, chunksize=chunksize)
yield f
else:
if initializer is not None:
initializer(*initargs)
f = imap
yield f
if processes > 1:
pool.join()
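# Illustrative usage sketch (not part of the original module): with processes=1 the
# work is done serially via imap, which is convenient for debugging; with more
# processes the same call fans out over a multiprocessing pool.
def _demo_mappool():
    with MapPool(processes=1) as f:
        results = list(f(abs, [-3, -2, -1]))
    return results  # [3, 2, 1]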
| {
"content_hash": "cfca8098af0a19ebce260945e4d1f69b",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 97,
"avg_line_length": 24.25,
"alnum_prop": 0.655081001472754,
"repo_name": "plamenbbn/XDATA",
"id": "e7b2d18fb5b70865966d23fe7e62eba8fd88b6ce",
"size": "3395",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "textstructure/utilities/langid/langid/train/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2911"
},
{
"name": "C",
"bytes": "299272"
},
{
"name": "C++",
"bytes": "40035"
},
{
"name": "HTML",
"bytes": "13611"
},
{
"name": "Java",
"bytes": "1096166"
},
{
"name": "Makefile",
"bytes": "831"
},
{
"name": "Perl",
"bytes": "59273"
},
{
"name": "Python",
"bytes": "9060352"
},
{
"name": "Shell",
"bytes": "3032"
}
],
"symlink_target": ""
} |
"""
Django settings for rbe project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o+)#@ur4@hx78g&4(f=qw&o3l!l)l++yo+qfb+&97-hff!-nqy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'query',
'upload',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'rbe.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'rbe.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "899b7d40d5d651c7d8a7455433117b72",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 71,
"avg_line_length": 25.634615384615383,
"alnum_prop": 0.6864216054013503,
"repo_name": "install-logos/RiceBE",
"id": "2284cc3f337cba60db061c0f05b751838237b684",
"size": "2666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rbe/rbe/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12692"
}
],
"symlink_target": ""
} |
import os
import shutil
import snapcraft
class WafPlugin(snapcraft.BasePlugin):
@classmethod
def schema(cls):
schema = super().schema()
schema['properties']['configflags'] = {
'type': 'array',
'minitems': 1,
'uniqueItems': True,
'items': {
'type': 'string',
},
'default': [],
}
# Inform Snapcraft of the properties associated with building. If these
        # change in the YAML, Snapcraft will consider the build step dirty.
schema['build-properties'].append('configflags')
return schema
def __init__(self, name, options, project):
super().__init__(name, options, project)
self.build_packages.extend(['make'])
def build(self):
super().build()
# if os.path.exists(self.builddir):
# shutil.rmtree(self.builddir)
# os.mkdir(self.builddir)
# source_subdir = getattr(self.options, 'source_subdir', None)
# if source_subdir:
# sourcedir = os.path.join(self.sourcedir, source_subdir)
# else:
# sourcedir = self.sourcedir
env = self._build_environment()
# Run bootstrap.py to download waf binary
self.run(['./bootstrap.py'], env=env)
# Run waf to configure
print(env)
self.run(['./waf', '-v', 'configure', '--prefix=/usr/local'], env=env)
# Run waf to build the sources
self.run(['./waf', '-v'], env=env)
# Install
self.run(['./waf', '-v', 'install', '--destdir=' + self.installdir], env=env)
def _build_environment(self):
env = os.environ.copy()
env['QT_SELECT'] = '5'
env['LFLAGS'] = '-L ' + ' -L'.join(
['{0}/lib', '{0}/usr/lib', '{0}/lib/{1}',
'{0}/usr/lib/{1}']).format(
self.project.stage_dir, self.project.arch_triplet)
env['INCDIRS'] = ':'.join(
['{0}/include', '{0}/usr/include', '{0}/include/{1}',
'{0}/usr/include/{1}']).format(
self.project.stage_dir, self.project.arch_triplet)
env['CPATH'] = ':'.join(
['{0}/include', '{0}/usr/include', '{0}/include/{1}',
'{0}/usr/include/{1}']).format(
self.project.stage_dir, self.project.arch_triplet)
env['LIBRARY_PATH'] = '$LD_LIBRARY_PATH:' + ':'.join(
['{0}/lib', '{0}/usr/lib', '{0}/lib/{1}',
'{0}/usr/lib/{1}']).format(
self.project.stage_dir, self.project.arch_triplet)
return env
| {
"content_hash": "bc36d7f0d3673b74ec59236558b739b4",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 85,
"avg_line_length": 34.13157894736842,
"alnum_prop": 0.5169622205088666,
"repo_name": "Zap123/snappy-playpen",
"id": "01771c957103ca618daa4c8a5cb62f9b0e8d1078",
"size": "2595",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "mpv/parts/plugins/x-waf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "921"
},
{
"name": "Python",
"bytes": "19139"
},
{
"name": "Shell",
"bytes": "21752"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# Get the long description from the relevant file
# with codecs_open('README.rst', encoding='utf-8') as f:
# long_description = f.read()
setup(name='mica',
version='0.0.1',
description=u"Matplotlib Improved Color Abbreviations",
#long_description=long_description,
classifiers=[],
keywords='Matplotlib',
author=u"Julian Irwin",
author_email='[email protected]',
url='https://github.com/julianirwin/mica',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=False,
setup_requires=['nose>=1.0'],
extras_require={
'test': ['nose'],
},
test_suite = 'nose.collector',
entry_points="""
[console_scripts]
pyskel=pyskel.scripts.cli:cli
"""
)
| {
"content_hash": "ab89848cec3f45b4f6c4efabc60f0fbb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 72,
"avg_line_length": 28.806451612903224,
"alnum_prop": 0.6136618141097424,
"repo_name": "julianirwin/mica",
"id": "fe49d3b079b6661b4e72caa469d3545b551e756b",
"size": "934",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1845"
}
],
"symlink_target": ""
} |
import os
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferScene
import GafferSceneTest
class SubTreeTest( GafferSceneTest.SceneTestCase ) :
def testPassThrough( self ) :
a = GafferScene.SceneReader()
a["fileName"].setValue( os.path.dirname( __file__ ) + "/alembicFiles/groupedPlane.abc" )
s = GafferScene.SubTree()
s["in"].setInput( a["out"] )
		# We have to skip the test of built-in sets, because our alembic file contains cameras
# and alembic doesn't provide a means of flagging them upfront.
self.assertSceneValid( s["out"], assertBuiltInSetsComplete = False )
self.assertScenesEqual( a["out"], s["out"] )
self.assertSceneHashesEqual( a["out"], s["out"] )
self.assertTrue( a["out"].object( "/group/plane", _copy = False ).isSame( s["out"].object( "/group/plane", _copy = False ) ) )
def testSubTree( self ) :
a = GafferScene.SceneReader()
a["fileName"].setValue( os.path.dirname( __file__ ) + "/alembicFiles/groupedPlane.abc" )
s = GafferScene.SubTree()
s["in"].setInput( a["out"] )
s["root"].setValue( "/group" )
self.assertSceneValid( s["out"] )
self.assertScenesEqual( s["out"], a["out"], scenePlug2PathPrefix = "/group" )
self.assertTrue( a["out"].object( "/group/plane", _copy = False ).isSame( s["out"].object( "/plane", _copy = False ) ) )
def testSets( self ) :
l = GafferSceneTest.TestLight()
g = GafferScene.Group()
g["in"][0].setInput( l["out"] )
self.assertSetsValid( g["out"] )
s = GafferScene.SubTree()
s["in"].setInput( g["out"] )
s["root"].setValue( "/group" )
self.assertSetsValid( s["out"] )
def testRootHashesEqual( self ) :
a = GafferScene.SceneReader()
a["fileName"].setValue( os.path.dirname( __file__ ) + "/alembicFiles/animatedCube.abc" )
s = GafferScene.SubTree()
s["in"].setInput( a["out"] )
		# We have to skip the test of built-in sets, because our alembic file contains cameras
# and alembic doesn't provide a means of flagging them upfront.
self.assertSceneValid( s["out"], assertBuiltInSetsComplete = False )
self.assertPathHashesEqual( a["out"], "/", s["out"], "/" )
def testDisabled( self ) :
p = GafferScene.Plane()
g = GafferScene.Group()
g["in"][0].setInput( p["out"] )
s = GafferScene.SubTree()
s["in"].setInput( g["out"] )
s["root"].setValue( "/group" )
s["enabled"].setValue( False )
self.assertSceneValid( s["out"] )
self.assertScenesEqual( s["out"], g["out"] )
self.assertSceneHashesEqual( s["out"], g["out"] )
def testForwardDeclarationsFromOmittedBranchAreOmitted( self ) :
# /group
# /lightGroup1
# /light
# /lightGroup2
# /light
# /lightGroup
# /light
# /lightGroup10
# /light
l = GafferSceneTest.TestLight()
lg1 = GafferScene.Group()
lg1["name"].setValue( "lightGroup1" )
lg1["in"][0].setInput( l["out"] )
lg2 = GafferScene.Group()
lg2["name"].setValue( "lightGroup2" )
lg2["in"][0].setInput( l["out"] )
lg3 = GafferScene.Group()
lg3["name"].setValue( "lightGroup" )
lg3["in"][0].setInput( l["out"] )
lg4 = GafferScene.Group()
lg4["name"].setValue( "lightGroup10" )
lg4["in"][0].setInput( l["out"] )
g = GafferScene.Group()
g["in"][0].setInput( lg1["out"] )
g["in"][1].setInput( lg2["out"] )
g["in"][2].setInput( lg3["out"] )
g["in"][3].setInput( lg4["out"] )
self.assertSetsValid( g["out"] )
# /light
s = GafferScene.SubTree()
s["in"].setInput( g["out"] )
s["root"].setValue( "/group/lightGroup1" )
lightSet = s["out"].set( "__lights" )
self.assertEqual( lightSet.value.paths(), [ "/light" ] )
self.assertSetsValid( s["out"] )
# with includeRoot == True
s["includeRoot"].setValue( True )
lightSet = s["out"].set( "__lights" )
self.assertEqual( lightSet.value.paths(), [ "/lightGroup1/light" ] )
self.assertSetsValid( s["out"] )
def testSetsPassThroughWhenNoRoot( self ) :
l = GafferSceneTest.TestLight()
g = GafferScene.Group()
g["in"][0].setInput( l["out"] )
s = GafferScene.SubTree()
s["in"].setInput( g["out"] )
lightSet = s["out"].set( "__lights" )
self.assertEqual( lightSet.value.paths(), [ "/group/light" ] )
self.assertSetsValid( s["out"] )
s["root"].setValue( "/" )
lightSet = s["out"].set( "__lights" )
self.assertEqual( lightSet.value.paths(), [ "/group/light" ] )
self.assertSetsValid( s["out"] )
# with includeRoot == True
s["includeRoot"].setValue( True )
s["root"].setValue( "" )
lightSet = s["out"].set( "__lights" )
self.assertEqual( lightSet.value.paths(), [ "/group/light" ] )
self.assertSetsValid( s["out"] )
s["root"].setValue( "/" )
lightSet = s["out"].set( "__lights" )
self.assertEqual( lightSet.value.paths(), [ "/group/light" ] )
self.assertSetsValid( s["out"] )
def testAffects( self ) :
s = GafferScene.SubTree()
for n in [ "bound", "transform", "attributes", "object", "childNames", "set" ] :
a = s.affects( s["in"][n] )
self.assertEqual( len( a ), 1 )
self.assertTrue( a[0].isSame( s["out"][n] ) )
def testIncludeRoot( self ) :
a = GafferScene.SceneReader()
a["fileName"].setValue( os.path.dirname( __file__ ) + "/alembicFiles/groupedPlane.abc" )
s = GafferScene.SubTree()
s["in"].setInput( a["out"] )
s["root"].setValue( "/group" )
s["includeRoot"].setValue( True )
self.assertSceneValid( s["out"] )
self.assertScenesEqual( s["out"], a["out"] )
self.assertEqual( s["out"].childNames( "/" ), IECore.InternedStringVectorData( [ "group" ] ) )
self.assertEqual( s["out"].bound( "/" ), a["out"].bound( "/group" ) )
self.assertTrue( a["out"].object( "/group/plane", _copy = False ).isSame( s["out"].object( "/group/plane", _copy = False ) ) )
def testRootBoundWithTransformedChild( self ) :
a = GafferScene.SceneReader()
a["fileName"].setValue( os.path.dirname( __file__ ) + "/alembicFiles/animatedCube.abc" )
s = GafferScene.SubTree()
s["in"].setInput( a["out"] )
s["root"].setValue( "/pCube1" )
s["includeRoot"].setValue( True )
with Gaffer.Context() as c :
c.setFrame( 10 )
expectedRootBound = a["out"].bound( "/pCube1" )
expectedRootBound = expectedRootBound * a["out"].transform( "/pCube1" )
self.assertEqual( s["out"].bound( "/" ), expectedRootBound )
def testIncludeRootPassesThroughWhenNoRootSpecified( self ) :
a = GafferScene.SceneReader()
a["fileName"].setValue( os.path.dirname( __file__ ) + "/alembicFiles/animatedCube.abc" )
s = GafferScene.SubTree()
s["in"].setInput( a["out"] )
s["root"].setValue( "" )
s["includeRoot"].setValue( True )
		# We have to skip the test of built-in sets, because our alembic file contains cameras
# and alembic doesn't provide a means of flagging them upfront.
self.assertSceneValid( s["out"], assertBuiltInSetsComplete = False )
self.assertScenesEqual( a["out"], s["out"] )
self.assertSceneHashesEqual( a["out"], s["out"] )
self.assertTrue( a["out"].object( "/pCube1", _copy = False ).isSame( s["out"].object( "/pCube1", _copy = False ) ) )
def testSetsWithIncludeRoot( self ) :
l = GafferSceneTest.TestLight()
g = GafferScene.Group()
g["in"][0].setInput( l["out"] )
self.assertSetsValid( g["out"] )
s = GafferScene.SubTree()
s["in"].setInput( g["out"] )
s["root"].setValue( "/group" )
s["includeRoot"].setValue( True )
lightSet = s["out"].set( "__lights" )
self.assertEqual( lightSet.value.paths(), [ "/group/light" ] )
self.assertSetsValid( s["out"] )
def testSetsWithNoLeadingSlash( self ) :
l = GafferSceneTest.TestLight()
g = GafferScene.Group()
g["in"][0].setInput( l["out"] )
self.assertSetsValid( g["out"] )
s = GafferScene.SubTree()
s["in"].setInput( g["out"] )
s["root"].setValue( "group" )
lightSet = s["out"].set( "__lights" )
self.assertEqual( lightSet.value.paths(), [ "/light" ] )
self.assertSetsValid( s["out"] )
def testSetNamesAndGlobalsPassThrough( self ) :
l = GafferSceneTest.TestLight()
g = GafferScene.Group()
g["in"][0].setInput( l["out"] )
s = GafferScene.SubTree()
s["in"].setInput( g["out"] )
s["root"].setValue( "group" )
self.assertEqual( s["out"]["globals"].hash(), g["out"]["globals"].hash() )
self.assertTrue( s["out"]["globals"].getValue( _copy = False ).isSame( g["out"]["globals"].getValue( _copy = False ) ) )
self.assertEqual( s["out"]["setNames"].hash(), g["out"]["setNames"].hash() )
self.assertTrue( s["out"]["setNames"].getValue( _copy = False ).isSame( g["out"]["setNames"].getValue( _copy = False ) ) )
def testInvalidRoot( self ) :
p = GafferScene.Plane()
p["sets"].setValue( "A" )
g = GafferScene.Group()
g["in"][0].setInput( p["out"] )
s = GafferScene.SubTree()
s["in"].setInput( g["out"] )
# An invalid root matches nothing, so should output an empty scene
for includeRoot in ( True, False ) :
s["includeRoot"].setValue( includeRoot )
for root in ( "notAThing", "/group/stillNotAThing", "/group/definitely/not/a/thing" ) :
s["root"].setValue( root )
self.assertSceneValid( s["out"] )
self.assertEqual( s["out"].childNames( "/" ), IECore.InternedStringVectorData() )
self.assertEqual( s["out"].set( "A" ), IECore.PathMatcherData() )
self.assertEqual( s["out"].bound( "/" ), imath.Box3f() )
self.assertEqual( s["out"].attributes( "/" ), IECore.CompoundObject() )
self.assertEqual( s["out"].transform( "/" ), imath.M44f() )
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "c36108af9b3f88f446aa30fa30f49966",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 128,
"avg_line_length": 29.547169811320753,
"alnum_prop": 0.6330353341847594,
"repo_name": "ImageEngine/gaffer",
"id": "7cc4f8c5f13f6860deb0c678632443afc64818e1",
"size": "11261",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/GafferSceneTest/SubTreeTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4486"
},
{
"name": "C++",
"bytes": "5353598"
},
{
"name": "CSS",
"bytes": "28027"
},
{
"name": "GLSL",
"bytes": "6250"
},
{
"name": "Python",
"bytes": "5296193"
},
{
"name": "Shell",
"bytes": "8008"
},
{
"name": "Slash",
"bytes": "41159"
}
],
"symlink_target": ""
} |
"""Database setup and migration commands."""
from nova import utils
IMPL = utils.LazyPluggable(
'baremetal_db_backend',
sqlalchemy='nova.virt.baremetal.db.sqlalchemy.migration')
INIT_VERSION = 0
def db_sync(version=None):
"""Migrate the database to `version` or the most recent version."""
return IMPL.db_sync(version=version)
def db_version():
"""Display the current database version."""
return IMPL.db_version()
| {
"content_hash": "71f1eef4038a0520b445c13c645a2b32",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 71,
"avg_line_length": 22.7,
"alnum_prop": 0.6916299559471366,
"repo_name": "houshengbo/nova_vmware_compute_driver",
"id": "40631bf45c8218121b847352d19865a54773c20c",
"size": "1231",
"binary": false,
"copies": "3",
"ref": "refs/heads/attach-detach-VMware-iSCSI-driver",
"path": "nova/virt/baremetal/db/migration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7173520"
},
{
"name": "Shell",
"bytes": "15478"
}
],
"symlink_target": ""
} |
import logging
import math
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import sys
import cv2
class ClassifyEnv(gym.Env):
def __init__(self, trainSet, target, batch_size=1000, accuracy_mode=False):
"""
Data set is a tuple of
[0] input data: [nSamples x nInputs]
[1] labels: [nSamples x 1]
Example data sets are given at the end of this file
"""
self.t = 0 # Current batch number
self.t_limit = 0 # Number of batches if you need them
self.batch = batch_size # Number of images per batch
self.accuracy_mode = accuracy_mode
self.seed()
self.viewer = None
self.trainSet = trainSet
self.target = target
nInputs = np.shape(trainSet)[1]
high = np.array([1.0]*nInputs)
self.action_space = spaces.Box(np.array(0,dtype=np.float32), \
np.array(1,dtype=np.float32))
self.observation_space = spaces.Box(np.array(0,dtype=np.float32), \
np.array(1,dtype=np.float32))
self.state = None
self.trainOrder = None
self.currIndx = None
def seed(self, seed=None):
    '''Seed the environment's random number generator'''
self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
''' Initialize State'''
#print('Lucky number', np.random.randint(10)) # same randomness?
self.trainOrder = np.random.permutation(len(self.target))
self.t = 0 # timestep
self.currIndx = self.trainOrder[self.t:self.t+self.batch]
self.state = self.trainSet[self.currIndx,:]
return self.state
def step(self, action):
'''
Judge Classification, increment to next batch
action - [batch x output] - softmax output
'''
y = self.target[self.currIndx]
m = y.shape[0]
if self.accuracy_mode:
p = np.argmax(action, axis=1)
accuracy = (float(np.sum(p==y)) / self.batch)
reward = accuracy
else:
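      # Cross-entropy of the softmax outputs against the integer labels;
      # the reward is the negative mean loss, so higher (less negative) is better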
log_likelihood = -np.log(action[range(m),y])
loss = np.sum(log_likelihood) / m
reward = -loss
if self.t_limit > 0: # We are doing batches
reward *= (1/self.t_limit) # average
self.t += 1
done = False
if self.t >= self.t_limit:
done = True
self.currIndx = self.trainOrder[(self.t*self.batch):\
(self.t*self.batch + self.batch)]
self.state = self.trainSet[self.currIndx,:]
else:
done = True
obs = self.state
return obs, reward, done, {}
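# Illustrative sketch, not part of the original module. It assumes the
# mnist_256() helper defined below is usable (i.e. the `mnist` package is
# installed) and that actions are softmax probabilities over 10 classes.
def _classify_env_example():
  data, labels = mnist_256()
  env = ClassifyEnv(data, labels, batch_size=256)
  obs = env.reset()
  uniform = np.full((obs.shape[0], 10), 0.1)  # uninformative softmax output
  _, reward, _, _ = env.step(uniform)         # reward is -cross_entropy here
  return reward                               # roughly -ln(10), about -2.3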
# -- Data Sets ----------------------------------------------------------- -- #
def digit_raw():
'''
Converts 8x8 scikit digits to
[samples x pixels] ([N X 64])
'''
from sklearn import datasets
digits = datasets.load_digits()
z = (digits.images/16)
z = z.reshape(-1, (64))
return z, digits.target
def mnist_784():
'''
Converts 28x28 mnist digits to
[samples x pixels] ([N X 784])
'''
import mnist
z = (mnist.train_images()/255)
z = preprocess(z,(28,28))
z = z.reshape(-1, (784))
return z, mnist.train_labels()
def mnist_256():
'''
Converts 28x28 mnist digits to [16x16]
[samples x pixels] ([N X 256])
'''
import mnist
z = (mnist.train_images()/255)
z = preprocess(z,(16,16))
z = z.reshape(-1, (256))
return z, mnist.train_labels()
def mnist_256_test():
'''
Converts 28x28 mnist digits to [16x16]
[samples x pixels] ([N X 256])
'''
import mnist
z = (mnist.test_images()/255)
z = preprocess(z,(16,16))
z = z.reshape(-1, (256))
return z, mnist.test_labels()
def mnist_patch9():
'''
Crops 28x28 mnist digits to a [9x9] patch
[samples x pixels] ([N X 81])
'''
import mnist
z = (mnist.train_images()/255)
z = preprocess(z,(28,28),patchDim=(9,9),patchCorner=(12,12))
z = z.reshape(-1, (81))
return z, mnist.train_labels()
'''
This part can be put in step if we want to try classification from patches
---
if self.patchSize != None: # (add self.patchSize to class)
z = np.reshape(self.state,(len(self.currIndx),28,28))
corner = (np.random.randint(28 - self.patchSize),\
np.random.randint(28 - self.patchSize) )
#corner = (12,12)
z = preprocess(z,(28,28),patchDim=(9,9),patchCorner=corner)
z = z.reshape(-1, (81))
self.state = z
---
'''
def preprocess(img,size, patchCorner=(0,0), patchDim=None, unskew=True):
"""
  Resizes, crops, and deskews images
"""
  if patchDim is None: patchDim = size
nImg = np.shape(img)[0]
procImg = np.empty((nImg,size[0],size[1]))
# Unskew and Resize
  if unskew:
for i in range(nImg):
procImg[i,:,:] = deskew(cv2.resize(img[i,:,:],size),size)
# Crop
cropImg = np.empty((nImg,patchDim[0],patchDim[1]))
for i in range(nImg):
cropImg[i,:,:] = procImg[i,patchCorner[0]:patchCorner[0]+patchDim[0],\
patchCorner[1]:patchCorner[1]+patchDim[1]]
procImg = cropImg
return procImg
def deskew(image, image_shape, negated=True):
"""
    This method deskews an image using moments
    :param image: a numpy nd array input image
    :param image_shape: a tuple denoting the image's shape
    :param negated: a boolean flag telling whether the input image is negated
    :returns: a numpy nd array deskewed image
source: https://github.com/vsvinayak/mnist-helper
"""
# negate the image
if not negated:
image = 255-image
# calculate the moments of the image
m = cv2.moments(image)
if abs(m['mu02']) < 1e-2:
return image.copy()
    # calculating the skew
skew = m['mu11']/m['mu02']
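    # The affine matrix shears x in proportion to y; the translation term keeps
    # the digit roughly centred, and WARP_INVERSE_MAP below applies the inverse
    # of this mapping, which removes the estimated skew.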
M = np.float32([[1, skew, -0.5*image_shape[0]*skew], [0,1,0]])
img = cv2.warpAffine(image, M, image_shape, \
flags=cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR)
return img
| {
"content_hash": "68f659b33636b54e3d946c630c2a5c9f",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 79,
"avg_line_length": 26.45,
"alnum_prop": 0.6021653205018044,
"repo_name": "google/brain-tokyo-workshop",
"id": "742610c1e79b207175c68ba587616f42c9621553",
"size": "5820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WANNRelease/WANNTool/custom_envs/classify_gym.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "671"
},
{
"name": "HTML",
"bytes": "1031"
},
{
"name": "Jupyter Notebook",
"bytes": "47079538"
},
{
"name": "Python",
"bytes": "1037153"
},
{
"name": "Shell",
"bytes": "6053"
}
],
"symlink_target": ""
} |
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='oeis',
version='0.1',
description='OEIS utilities and sequence generation',
long_description=readme(),
classifiers=[
'Programming Language :: Python :: 2.7.3',
],
keywords='oeis',
url='https://github.com/GuySrinivasan/oeis',
author='Guy Srinivasan',
author_email='[email protected]',
license='MIT',
packages=['oeis'],
install_requires=[
'markdown',
],
test_suite='nose.collector',
tests_require=['nose'],
scripts=['scripts/print-factorials'],
zip_safe=False)
| {
"content_hash": "1ba677ae98548f5a92851122c06fc0b5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 59,
"avg_line_length": 25.107142857142858,
"alnum_prop": 0.5803698435277382,
"repo_name": "GuySrinivasan/oeis",
"id": "7c794f9e2339589a161df8c62ca1720302cab9e1",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "456"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('farmwork', '0008_auto_20170608_1217'),
]
operations = [
migrations.CreateModel(
name='Job',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('role', models.CharField(max_length=50)),
('person', models.CharField(max_length=50)),
],
options={
'abstract': False,
},
),
migrations.AlterField(
model_name='farmwork',
name='con_number',
field=models.CharField(max_length=10),
),
migrations.AlterField(
model_name='farmwork',
name='job_fruit',
field=models.CharField(choices=[(None, '— Select fruit type -'), ('ap', 'Apples'), ('ar', 'Apricots'), ('as', 'Asparagus'), ('ba', 'Bananas'), ('bl', 'Blueberries'), ('br', 'Brocolli'), ('ca', 'Capsicums'), ('ch', 'Cherries'), ('cn', 'Chestnuts'), ('ci', 'Citrus'), ('co', 'Colliflower'), ('eg', 'Eggplant'), ('he', 'Herbs'), ('le', 'Lemons'), ('ly', 'Lychees'), ('ma', 'Mandarins'), ('mg', 'Mangoes'), ('me', 'Melons'), ('or', 'Oranges'), ('pe', 'Pears'), ('ra', 'Raspberries'), ('st', 'Strawberries'), ('ta', 'Tomatoes'), ('zu', 'Zuchinni')], max_length=2),
),
migrations.AlterField(
model_name='farmwork',
name='slug',
field=models.SlugField(unique=True),
),
migrations.AddField(
model_name='farmwork',
name='get_job',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='farmwork.Job'),
),
]
| {
"content_hash": "b1b09877be5325ef1f86517792be72e4",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 571,
"avg_line_length": 42.765957446808514,
"alnum_prop": 0.5308457711442786,
"repo_name": "ianmilliken/rwf",
"id": "88f1c0847242b2f8da02c2dba37226b135206a8e",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/apps/farmwork/migrations/0009_auto_20170618_1335.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "441558"
},
{
"name": "HTML",
"bytes": "13521"
},
{
"name": "JavaScript",
"bytes": "1036656"
},
{
"name": "Python",
"bytes": "28122"
}
],
"symlink_target": ""
} |
import os
class DefaultConfig(object):
INSTANCE_FOLDER_PATH = '/usr/lib/camus'
# Get app root path, also can use flask.root_path.
# ../../config.py
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DEBUG = False
TESTING = False
ADMINS = ['[email protected]']
    # File upload; should be overridden in production.
    # Limit the maximum allowed payload to 16 megabytes.
# http://flask.pocoo.org/docs/patterns/fileuploads/#improving-uploads
MAX_CONTENT_LENGTH = 16 * 1024 * 1024
# Flask-babel: http://pythonhosted.org/Flask-Babel/
# ACCEPT_LANGUAGES = ['zh']
BABEL_DEFAULT_LOCALE = 'en'
# Flask-cache: http://pythonhosted.org/Flask-Cache/
CACHE_TYPE = 'simple'
CACHE_DEFAULT_TIMEOUT = 60
# Flask-mail: http://pythonhosted.org/flask-mail/
# https://bitbucket.org/danjac/flask-mail/issue/3/problem-with-gmails-smtp-server
MAIL_DEBUG = DEBUG
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USE_SSL = False
    # In production, MAIL_USERNAME and MAIL_PASSWORD should be supplied via the instance folder.
MAIL_USERNAME = '[email protected]'
MAIL_PASSWORD = 'yourpass'
MAIL_DEFAULT_SENDER = MAIL_USERNAME
_UPLOADS_FOLDER = None
class TestConfig(DefaultConfig):
INSTANCE_FOLDER_PATH = '/tmp/testing/camus'
TESTING = True
DEBUG = False
WTF_CSRF_ENABLED = False
SECRET_KEY = 'secret key'
class DevelopConfig(DefaultConfig):
DEBUG = True
INSTANCE_FOLDER_PATH = '/tmp/developer/camus'
SECRET_KEY = 'secret key'
DB_URL = 'postgres://camus:camus@localhost/camus'
| {
"content_hash": "4028d251f5b13ef14809935910b053af",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 85,
"avg_line_length": 29.210526315789473,
"alnum_prop": 0.6732732732732732,
"repo_name": "kindly/camus",
"id": "af0e2bee43a0cb0c7bc245617a83d10913d099f3",
"size": "1690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "camus/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "148356"
},
{
"name": "JavaScript",
"bytes": "2571"
},
{
"name": "Python",
"bytes": "28464"
},
{
"name": "Shell",
"bytes": "224"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.io_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os.path
import time
import contextlib
import shutil
import tempfile
import tensorflow as tf
import numpy as np
import six
from google.protobuf.any_pb2 import Any
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import queue_runner_pb2
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.platform import gfile
from tensorflow.python.training import saver as saver_module
from tensorflow.python.util import compat
def _TestDir(test_name):
test_dir = os.path.join(tf.test.get_temp_dir(), test_name)
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
gfile.MakeDirs(test_dir)
return test_dir
class SaverTest(tf.test.TestCase):
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "basics")
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, restore_sequentially=True)
init_all_op = tf.initialize_all_variables()
with self.test_session() as sess:
# Initialize all variables
sess.run(init_all_op)
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the parameter nodes
# have not been initialized either.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1})
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v0" in e.message):
sess.run(v0)
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session() as sess:
v0_2 = tf.Variable(1000.0, name="v0")
v1_2 = tf.Variable(2000.0, name="v1")
save2 = tf.train.Saver({"v0": v0_2, "v1": v1_2})
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
def testInt64(self):
save_path = os.path.join(self.get_temp_dir(), "int64")
with self.test_session() as sess:
# Build a graph with 1 node, and save and restore for them.
v = tf.Variable(np.int64(15), name="v")
save = tf.train.Saver({"v": v}, restore_sequentially=True)
tf.initialize_all_variables().run()
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session() as sess:
v = tf.Variable(np.int64(-1), name="v")
save = tf.train.Saver({"v": v})
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v" in e.message):
sess.run(v)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(np.int64(15), v.eval())
def testSomeErrors(self):
with tf.Graph().as_default():
v0 = tf.Variable([10.0], name="v0")
v1 = tf.Variable([20.0], name="v1")
v2 = tf.Variable([20.0], name="v2")
v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
      # By default the name used for "v2" will be "v1", which will raise an error.
with self.assertRaisesRegexp(ValueError, "same name: v1"):
tf.train.Saver([v0, v1, v2])
# The names are different and will work.
tf.train.Saver({"vee1": v1, "other": [v2]})
def testBasicsWithListOfVariables(self):
save_path = os.path.join(self.get_temp_dir(), "basics_with_list")
with self.test_session(graph=tf.Graph()) as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver([v0, v1])
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the initialized values in the file at "save_path"
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Start a second session. In that session the variables
# have not been initialized either.
with self.test_session(graph=tf.Graph()) as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
save = tf.train.Saver([v0, v1])
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v0" in e.message):
sess.run(v0)
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Build another graph with 2 nodes, initialized
# differently, and a Restore node for them.
with self.test_session(graph=tf.Graph()) as sess:
v0_2 = tf.Variable(1000.0, name="v0")
v1_2 = tf.Variable(2000.0, name="v1")
save2 = tf.train.Saver([v0_2, v1_2])
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(1000.0, v0_2.eval())
self.assertEqual(2000.0, v1_2.eval())
# Restore the values saved earlier in the parameter nodes.
save2.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0_2.eval())
self.assertEqual(20.0, v1_2.eval())
def _SaveAndLoad(self, var_name, var_value, other_value, save_path):
with self.test_session() as sess:
var = tf.Variable(var_value, name=var_name)
save = tf.train.Saver({var_name: var})
var.initializer.run()
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
with self.test_session() as sess:
var = tf.Variable(other_value, name=var_name)
save = tf.train.Saver({var_name: var})
save.restore(sess, save_path)
self.assertAllClose(var_value, var.eval())
def testCacheRereadsFile(self):
save_path = os.path.join(self.get_temp_dir(), "cache_rereads")
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
# Save and reload one Variable named "var1" in the same file.
# The cached readers should know to re-read the file.
self._SaveAndLoad("var1", 1.1, 2.2, save_path)
def testGPU(self):
if not tf.test.is_built_with_cuda():
return
save_path = os.path.join(self.get_temp_dir(), "gpu")
with tf.Session("", graph=tf.Graph()) as sess:
with sess.graph.device("/gpu:0"):
v0_1 = tf.Variable(123.45)
save = tf.train.Saver({"v0": v0_1})
tf.initialize_all_variables().run()
save.save(sess, save_path)
with tf.Session("", graph=tf.Graph()) as sess:
with sess.graph.device("/gpu:0"):
v0_2 = tf.Variable(543.21)
save = tf.train.Saver({"v0": v0_2})
tf.initialize_all_variables().run()
self.assertAllClose(543.21, v0_2.eval())
save.restore(sess, save_path)
self.assertAllClose(123.45, v0_2.eval())
def testVariables(self):
save_path = os.path.join(self.get_temp_dir(), "variables")
with tf.Session("", graph=tf.Graph()) as sess:
one = tf.Variable(1.0)
twos = tf.Variable([2.0, 2.0, 2.0])
init = tf.initialize_all_variables()
save = tf.train.Saver(tf.all_variables())
init.run()
save.save(sess, save_path)
with tf.Session("", graph=tf.Graph()) as sess:
one = tf.Variable(0.0)
twos = tf.Variable([0.0, 0.0, 0.0])
# Saver with no arg, defaults to 'all variables'.
save = tf.train.Saver()
save.restore(sess, save_path)
self.assertAllClose(1.0, one.eval())
self.assertAllClose([2.0, 2.0, 2.0], twos.eval())
def testSaveWithGlobalStep(self):
save_path = os.path.join(self.get_temp_dir(), "ckpt_with_global_step")
global_step_int = 5
# Save and reload one Variable named "var0".
self._SaveAndLoad("var0", 0.0, 1.0, save_path)
for use_tensor in [True, False]:
with self.test_session() as sess:
var = tf.Variable(1.0, name="var0")
save = tf.train.Saver({var.op.name: var})
var.initializer.run()
if use_tensor:
global_step = tf.constant(global_step_int)
val = save.save(sess, save_path, global_step=global_step)
else:
val = save.save(sess, save_path, global_step=global_step_int)
expected_save_path = "%s-%d" % (save_path, global_step_int)
self.assertEqual(expected_save_path, val)
class SaveRestoreShardedTest(tf.test.TestCase):
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded")
# Build a graph with 2 parameter nodes on different devices.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(10, name="v0")
with sess.graph.device("/cpu:1"):
v1 = tf.Variable(20, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
tf.initialize_all_variables().run()
val = save.save(sess, save_path)
self.assertEqual(save_path + "-?????-of-00002", val)
meta_graph_filename = save._MetaGraphFilename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
# Restore a different "v0" from shard 0 of the saved files.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
save = tf.train.Saver({"v0": v0}, sharded=True)
tf.initialize_all_variables().run()
self.assertEqual(111, v0.eval())
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, v0.eval())
# Restore a different "v1" from shard 1 of the saved files.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = tf.Variable(222)
save = tf.train.Saver({"v1": v1}, sharded=True)
tf.initialize_all_variables().run()
self.assertEqual(222, v1.eval())
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, v1.eval())
# Now try a restore with the sharded filename.
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = tf.Variable(222, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True)
tf.initialize_all_variables().run()
self.assertEqual(111, v0.eval())
self.assertEqual(222, v1.eval())
save_path = os.path.join(self.get_temp_dir(), "sharded")
save.restore(sess, save_path + "-?????-of-?????")
self.assertEqual(10, v0.eval())
self.assertEqual(20, v1.eval())
self.assertEqual(
tf.train.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded-?????-of-00002"))
def testSaverDef(self):
with self.test_session():
v0 = tf.Variable(123, name="v0")
save = tf.train.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
class MaxToKeepTest(tf.test.TestCase):
def testNonSharded(self):
save_dir = _TestDir("max_to_keep_non_sharded")
with self.test_session() as sess:
v = tf.Variable(10.0, name="v")
save = tf.train.Saver({"v": v}, max_to_keep=2)
tf.initialize_all_variables().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s3))
# Create a second helper, identical to the first.
save2 = tf.train.Saver(saver_def=save.as_saver_def())
save2.set_last_checkpoints(save.last_checkpoints)
# Create a third helper, with the same configuration but no knowledge of
# previous checkpoints.
save3 = tf.train.Saver(saver_def=save.as_saver_def())
# Exercise the first helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save.last_checkpoints)
self.assertFalse(gfile.Exists(s1))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
self.assertTrue(gfile.Exists(s3))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
# Exercise the second helper.
# Adding s2 again (old s2 is removed first, then new s2 appended)
s2 = save2.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s3, s2], save2.last_checkpoints)
# Created by the first helper.
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
# Adding s1 (s3 should now be deleted as oldest in list)
s1 = save2.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save2.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
# Exercise the third helper.
# Adding s2 again (but helper is unaware of previous s2)
s2 = save3.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s2], save3.last_checkpoints)
# Created by the first helper.
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
# Deleted by the first helper.
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
# Adding s1 (s3 should not be deleted because helper is unaware of it)
s1 = save3.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s2, s1], save3.last_checkpoints)
self.assertFalse(gfile.Exists(s3))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s3)))
self.assertTrue(gfile.Exists(s2))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertTrue(gfile.Exists(s1))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
def testSharded(self):
save_dir = _TestDir("max_to_keep_sharded")
with tf.Session(
target="",
config=tf.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = tf.Variable(111, name="v0")
with sess.graph.device("/cpu:1"):
v1 = tf.Variable(222, name="v1")
save = tf.train.Saver({"v0": v0, "v1": v1}, sharded=True, max_to_keep=2)
tf.initialize_all_variables().run()
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
self.assertEqual(2, len(gfile.Glob(s1)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
self.assertEqual(2, len(gfile.Glob(s1)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s1)))
self.assertEqual(2, len(gfile.Glob(s2)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
self.assertEqual(0, len(gfile.Glob(s1)))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
self.assertEqual(2, len(gfile.Glob(s2)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s2)))
self.assertEqual(2, len(gfile.Glob(s3)))
self.assertTrue(gfile.Exists(save._MetaGraphFilename(s3)))
def testNoMaxToKeep(self):
save_dir = _TestDir("no_max_to_keep")
save_dir2 = _TestDir("max_to_keep_0")
with self.test_session() as sess:
v = tf.Variable(10.0, name="v")
tf.initialize_all_variables().run()
# Test max_to_keep being None.
save = tf.train.Saver({"v": v}, max_to_keep=None)
self.assertEqual([], save.last_checkpoints)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([], save.last_checkpoints)
self.assertTrue(gfile.Exists(s2))
# Test max_to_keep being 0.
save2 = tf.train.Saver({"v": v}, max_to_keep=0)
self.assertEqual([], save2.last_checkpoints)
s1 = save2.save(sess, os.path.join(save_dir2, "s1"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(gfile.Exists(s1))
s2 = save2.save(sess, os.path.join(save_dir2, "s2"))
self.assertEqual([], save2.last_checkpoints)
self.assertTrue(gfile.Exists(s2))
  def testNoMetaGraph(self):
save_dir = _TestDir("no_meta_graph")
with self.test_session() as sess:
v = tf.Variable(10.0, name="v")
save = tf.train.Saver({"v": v})
tf.initialize_all_variables().run()
s1 = save.save(sess, os.path.join(save_dir, "s1"),
write_meta_graph=False)
self.assertTrue(gfile.Exists(s1))
self.assertFalse(gfile.Exists(save._MetaGraphFilename(s1)))
class KeepCheckpointEveryNHoursTest(tf.test.TestCase):
def testNonSharded(self):
save_dir = _TestDir("keep_checkpoint_every_n_hours")
with self.test_session() as sess:
v = tf.Variable([10.0], name="v")
# Run the initializer NOW to avoid the 0.5s overhead of the first Run()
# call, which throws the test timing off in fastbuild mode.
tf.initialize_all_variables().run()
# Create a saver that will keep the last 2 checkpoints plus one every 0.7
# seconds.
start_time = time.time()
save = tf.train.Saver({"v": v}, max_to_keep=2,
keep_checkpoint_every_n_hours=0.7 / 3600)
self.assertEqual([], save.last_checkpoints)
# Wait till 0.7 second have elapsed so s1 will be old enough to keep.
time.sleep((time.time() + 0.7) - start_time)
s1 = save.save(sess, os.path.join(save_dir, "s1"))
self.assertEqual([s1], save.last_checkpoints)
s2 = save.save(sess, os.path.join(save_dir, "s2"))
self.assertEqual([s1, s2], save.last_checkpoints)
      # We now have 2 'last_checkpoints': [s1, s2]. The next call to Save()
# would normally delete s1, because max_to_keep is 2. However, s1 is
# older than 0.7s so we must keep it.
s3 = save.save(sess, os.path.join(save_dir, "s3"))
self.assertEqual([s2, s3], save.last_checkpoints)
      # s1 should still be here; we are not checking it now to reduce time
# variance in the test.
# We now have 2 'last_checkpoints': [s2, s3], and s1 on disk. The next
      # call to Save() will delete s2, because max_to_keep is 2, and because
# we already kept the old s1. s2 is very close in time to s1 so it gets
# deleted.
s4 = save.save(sess, os.path.join(save_dir, "s4"))
self.assertEqual([s3, s4], save.last_checkpoints)
# Check that s1 is still here, but s2 is gone.
self.assertTrue(gfile.Exists(s1))
self.assertFalse(gfile.Exists(s2))
self.assertTrue(gfile.Exists(s3))
self.assertTrue(gfile.Exists(s4))
class SaveRestoreWithVariableNameMap(tf.test.TestCase):
def testNonReshape(self):
save_path = os.path.join(self.get_temp_dir(), "basics")
with self.test_session() as sess:
# Build a graph with 2 parameter nodes, and Save and
# Restore nodes for them.
v0 = tf.Variable(10.0, name="v0")
v1 = tf.Variable(20.0, name="v1")
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
tf.initialize_all_variables().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Save the initialized values in the file at "save_path"
# Use a variable name map to set the saved tensor names
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
# Verify that the original names are not in the Saved file
save = tf.train.Saver({"v0": v0, "v1": v1})
with self.assertRaisesOpError("not found in checkpoint"):
save.restore(sess, save_path)
# Verify that the mapped names are present in the Saved file and can be
# Restored using remapped names.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="v0")
v1 = tf.Variable(-1.0, name="v1")
with self.assertRaisesOpError("uninitialized value v0"):
sess.run(v0)
with self.assertRaisesOpError("uninitialized value v1"):
sess.run(v1)
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
# Add a prefix to the node names in the current graph and Restore using
# remapped names.
with self.test_session() as sess:
v0 = tf.Variable(-1.0, name="restore_prefix/v0")
v1 = tf.Variable(-1.0, name="restore_prefix/v1")
with self.assertRaisesOpError("uninitialized value restore_prefix/v0"):
sess.run(v0)
with self.assertRaisesOpError("uninitialized value restore_prefix/v1"):
sess.run(v1)
# Restore the saved values in the parameter nodes.
save = tf.train.Saver({"save_prefix/v0": v0, "save_prefix/v1": v1})
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
class LatestCheckpointWithRelativePaths(tf.test.TestCase):
@staticmethod
@contextlib.contextmanager
def tempWorkingDir(temppath):
cwd = os.getcwd()
os.chdir(temppath)
try:
yield
finally:
os.chdir(cwd)
@staticmethod
@contextlib.contextmanager
def tempDir():
tempdir = tempfile.mkdtemp()
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
def testRelativePath(self):
# Make sure we have a clean directory to work in.
with self.tempDir() as tempdir:
# Jump to that directory until this test is done.
with self.tempWorkingDir(tempdir):
# Save training snapshots to a relative path.
traindir = "train/"
os.mkdir(traindir)
filename = "snapshot"
filepath = os.path.join(traindir, filename)
with self.test_session() as sess:
# Build a simple graph.
v0 = tf.Variable(0.0)
inc = v0.assign_add(1.0)
save = tf.train.Saver({"v0": v0})
# Record a short training history.
tf.initialize_all_variables().run()
save.save(sess, filepath, global_step=0)
inc.eval()
save.save(sess, filepath, global_step=1)
inc.eval()
save.save(sess, filepath, global_step=2)
with self.test_session() as sess:
# Build a new graph with different initialization.
v0 = tf.Variable(-1.0)
# Create a new saver.
save = tf.train.Saver({"v0": v0})
tf.initialize_all_variables().run()
# Get the most recent checkpoint name from the training history file.
name = tf.train.latest_checkpoint(traindir)
self.assertIsNotNone(name)
# Restore "v0" from that checkpoint.
save.restore(sess, name)
self.assertEqual(v0.eval(), 2.0)
class CheckpointStateTest(tf.test.TestCase):
def testAbsPath(self):
save_dir = _TestDir("abs_paths")
abs_path = os.path.join(save_dir, "model-0")
ckpt = tf.train.generate_checkpoint_state_proto(save_dir, abs_path)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testRelPath(self):
train_dir = "train"
model = os.path.join(train_dir, "model-0")
# model_checkpoint_path should have no "train" directory part.
new_rel_path = "model-0"
ckpt = tf.train.generate_checkpoint_state_proto(train_dir, model)
self.assertEqual(ckpt.model_checkpoint_path, new_rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], new_rel_path)
def testAllModelCheckpointPaths(self):
save_dir = _TestDir("all_models_test")
abs_path = os.path.join(save_dir, "model-0")
for paths in [None, [], ["model-2"]]:
ckpt = tf.train.generate_checkpoint_state_proto(
save_dir,
abs_path,
all_model_checkpoint_paths=paths)
self.assertEqual(ckpt.model_checkpoint_path, abs_path)
self.assertTrue(os.path.isabs(ckpt.model_checkpoint_path))
self.assertEqual(
len(ckpt.all_model_checkpoint_paths), len(paths) if paths else 1)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], abs_path)
def testUpdateCheckpointState(self):
save_dir = _TestDir("update_checkpoint_state")
os.chdir(save_dir)
# Make a temporary train directory.
train_dir = "train"
os.mkdir(train_dir)
abs_path = os.path.join(save_dir, "model-0")
rel_path = "train/model-2"
tf.train.update_checkpoint_state(
train_dir,
rel_path,
all_model_checkpoint_paths=[abs_path, rel_path])
ckpt = tf.train.get_checkpoint_state(train_dir)
self.assertEqual(ckpt.model_checkpoint_path, rel_path)
self.assertEqual(len(ckpt.all_model_checkpoint_paths), 2)
self.assertEqual(ckpt.all_model_checkpoint_paths[-1], rel_path)
self.assertEqual(ckpt.all_model_checkpoint_paths[0], abs_path)
class MetaGraphTest(tf.test.TestCase):
def testNoVariables(self):
test_dir = _TestDir("no_variables")
filename = os.path.join(test_dir, "metafile")
input_feed_value = -10 # Arbitrary input value for feed_dict.
orig_graph = tf.Graph()
with self.test_session(graph=orig_graph) as sess:
# Create a minimal graph with zero variables.
input_tensor = tf.placeholder(tf.float32, shape=[], name="input")
offset = tf.constant(42, dtype=tf.float32, name="offset")
output_tensor = tf.add(input_tensor, offset, name="add_offset")
# Add input and output tensors to graph collections.
tf.add_to_collection("input_tensor", input_tensor)
tf.add_to_collection("output_tensor", output_tensor)
output_value = sess.run(output_tensor, {input_tensor: input_feed_value})
self.assertEqual(output_value, 32)
# Generates MetaGraphDef.
#
# Note that this is calling the saver *module-level* export_meta_graph and
# not the Saver.export_meta_graph instance-level method.
meta_graph_def = saver_module.export_meta_graph(
filename=filename,
graph_def=tf.get_default_graph().as_graph_def(add_shapes=True),
collection_list=["input_tensor", "output_tensor"],
saver_def=None,
)
# Create a clean graph and import the MetaGraphDef nodes.
new_graph = tf.Graph()
with self.test_session(graph=new_graph) as sess:
      # Import the previously exported meta graph.
saver_instance = saver_module.import_meta_graph(filename)
# The saver instance should be None since there are no graph variables
# to be restored in this case.
self.assertIsNone(saver_instance)
# Re-exports the current graph state for comparison to the original.
new_meta_graph_def = saver_module.export_meta_graph(filename + "_new")
self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
# Ensures that we can still get a reference to our graph collections.
new_input_tensor = tf.get_collection("input_tensor")[0]
new_output_tensor = tf.get_collection("output_tensor")[0]
# Verifies that the new graph computes the same result as the original.
new_output_value = sess.run(
new_output_tensor, {new_input_tensor: input_feed_value})
self.assertEqual(new_output_value, output_value)
def testAddCollectionDef(self):
test_dir = _TestDir("good_collection")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
# Creates a graph.
v0 = tf.Variable(10.0, name="v0")
var = tf.Variable(tf.constant(0, dtype=tf.int64))
count_up_to = var.count_up_to(3)
input_queue = tf.FIFOQueue(30, tf.float32, shared_name="collection_queue")
qr = tf.train.QueueRunner(input_queue, [count_up_to])
tf.initialize_all_variables()
# Creates a saver.
save = tf.train.Saver({"v0": v0})
# Adds a set of collections.
tf.add_to_collection("int_collection", 3)
tf.add_to_collection("float_collection", 3.5)
tf.add_to_collection("string_collection", "hello")
tf.add_to_collection("variable_collection", v0)
# Add QueueRunners.
tf.train.add_queue_runner(qr)
# Adds user_defined proto in three formats: string, bytes and Any.
queue_runner = queue_runner_pb2.QueueRunnerDef(queue_name="test_queue")
tf.add_to_collection("user_defined_string_collection", str(queue_runner))
tf.add_to_collection("user_defined_bytes_collection",
queue_runner.SerializeToString())
any_buf = Any()
any_buf.Pack(queue_runner)
tf.add_to_collection("user_defined_any_collection", any_buf)
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph(filename)
self.assertTrue(meta_graph_def.HasField("saver_def"))
self.assertTrue(meta_graph_def.HasField("graph_def"))
collection_def = meta_graph_def.collection_def
self.assertEqual(len(collection_def), 10)
with tf.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = tf.train.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
def testAddCollectionDefFails(self):
with self.test_session():
# Creates a graph.
v0 = tf.Variable(10.0, name="v0")
# Creates a saver.
save = tf.train.Saver({"v0": v0})
# Generates MetaGraphDef.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Verifies that collection with unsupported key will not be added.
tf.add_to_collection(save, 3)
save._add_collection_def(meta_graph_def, save)
self.assertEqual(len(meta_graph_def.collection_def), 0)
# Verifies that collection where item type does not match expected
# type will not be added.
tf.add_to_collection("int_collection", 3)
tf.add_to_collection("int_collection", 3.5)
save._add_collection_def(meta_graph_def, "int_collection")
self.assertEqual(len(meta_graph_def.collection_def), 0)
def _testMultiSaverCollectionSave(self):
test_dir = _TestDir("saver_collection")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Creates a graph.
v0 = tf.Variable([[1.0, 2.0],
[3.0, 4.0],
[5.0, 6.0]], name="v0")
v1 = tf.Variable(11.0, name="v1")
# Creates 2 savers.
saver0 = tf.train.Saver({"v0": v0}, name="saver0")
saver1 = tf.train.Saver({"v1": v1}, name="saver1")
tf.add_to_collection("savers", saver0)
tf.add_to_collection("savers", saver1)
tf.initialize_all_variables().run()
# Saves to different checkpoints.
saver0.save(sess, saver0_ckpt)
saver1.save(sess, saver1_ckpt)
# Generates MetaGraphDef.
meta_graph_def = tf.train.export_meta_graph(filename)
meta_graph_def0 = saver0.export_meta_graph()
meta_graph_def1 = saver1.export_meta_graph()
# Verifies that there is no saver_def in meta_graph_def.
self.assertFalse(meta_graph_def.HasField("saver_def"))
# Verifies that there is saver_def in meta_graph_def0 and 1.
self.assertTrue(meta_graph_def0.HasField("saver_def"))
self.assertTrue(meta_graph_def1.HasField("saver_def"))
# Verifies SAVERS is saved as bytes_list for meta_graph_def.
collection_def = meta_graph_def.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
# Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
# Verifies SAVERS collection is saved as bytes_list for meta_graph_def0.
collection_def = meta_graph_def0.collection_def["savers"]
kind = collection_def.WhichOneof("kind")
self.assertEqual(kind, "bytes_list")
      # Verifies that there are 2 entries in SAVERS collection.
savers = getattr(collection_def, kind)
self.assertEqual(2, len(savers.value))
def _testMultiSaverCollectionRestore(self):
test_dir = os.path.join(self.get_temp_dir(), "saver_collection")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
saver1_ckpt = os.path.join(test_dir, "saver1.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Imports from meta_graph.
tf.train.import_meta_graph(filename)
# Retrieves SAVERS collection. Verifies there are 2 entries.
savers = tf.get_collection("savers")
self.assertEqual(2, len(savers))
# Retrieves saver0. Verifies that new_saver0 can restore v0, but not v1.
new_saver0 = savers[0]
new_saver0.restore(sess, saver0_ckpt)
v0 = sess.graph.get_tensor_by_name("v0:0")
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertAllEqual([[1.0, 2.0],
[3.0, 4.0],
[5.0, 6.0]], v0.eval())
self.assertEqual([3, 2], v0.get_shape())
self.assertEqual([], v1.get_shape())
with self.assertRaisesWithPredicateMatch(
tf.OpError, lambda e: "uninitialized value v1" in e.message):
sess.run(v1)
# Retrieves saver1. Verifies that new_saver1 can restore v1.
new_saver1 = savers[1]
new_saver1.restore(sess, saver1_ckpt)
v1 = sess.graph.get_tensor_by_name("v1:0")
self.assertEqual(11.0, v1.eval())
def testMultiSaverCollection(self):
self._testMultiSaverCollectionSave()
self._testMultiSaverCollectionRestore()
def testBinaryAndTextFormat(self):
test_dir = _TestDir("binary_and_text")
filename = os.path.join(test_dir, "metafile")
with self.test_session(graph=tf.Graph()):
# Creates a graph.
tf.Variable(10.0, name="v0")
# Exports the graph as binary format.
tf.train.export_meta_graph(filename, as_text=False)
with self.test_session(graph=tf.Graph()):
# Imports the binary format graph.
saver = tf.train.import_meta_graph(filename)
self.assertIsNotNone(saver)
# Exports the graph as text format.
saver.export_meta_graph(filename, as_text=True)
with self.test_session(graph=tf.Graph()):
# Imports the text format graph.
tf.train.import_meta_graph(filename)
# Writes wrong contents to the file.
tf.train.write_graph(saver.as_saver_def(), os.path.dirname(filename),
os.path.basename(filename))
with self.test_session(graph=tf.Graph()):
# Import should fail.
with self.assertRaisesWithPredicateMatch(
          IOError, lambda e: "Cannot parse file" in str(e)):
tf.train.import_meta_graph(filename)
# Deletes the file
gfile.Remove(filename)
with self.assertRaisesWithPredicateMatch(
        IOError, lambda e: "does not exist" in str(e)):
tf.train.import_meta_graph(filename)
def testSliceVariable(self):
test_dir = _TestDir("slice_saver")
filename = os.path.join(test_dir, "metafile")
with self.test_session():
v1 = tf.Variable([20.0], name="v1")
v2 = tf.Variable([20.0], name="v2")
v2._set_save_slice_info(tf.Variable.SaveSliceInfo("v1", [1], [0], [1]))
# The names are different and will work.
slice_saver = tf.train.Saver({"first": v1, "second": v2})
tf.initialize_all_variables().run()
# Exports to meta_graph
meta_graph_def = slice_saver.export_meta_graph(filename)
with tf.Graph().as_default():
# Restores from MetaGraphDef.
new_saver = tf.train.import_meta_graph(filename)
self.assertIsNotNone(new_saver)
# Generates a new MetaGraphDef.
new_meta_graph_def = new_saver.export_meta_graph()
# It should be the same as the original.
self.assertProtoEquals(meta_graph_def, new_meta_graph_def)
def _testGraphExtensionSave(self):
test_dir = _TestDir("graph_extension")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
# Creates an inference graph.
# Hidden 1
images = tf.constant(1.2, tf.float32, shape=[100, 28])
with tf.name_scope("hidden1"):
weights = tf.Variable(
tf.truncated_normal([28, 128],
stddev=1.0 / math.sqrt(float(28))),
name="weights")
biases = tf.Variable(tf.zeros([128]),
name="biases")
hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
# Hidden 2
with tf.name_scope("hidden2"):
weights = tf.Variable(
tf.truncated_normal([128, 32],
stddev=1.0 / math.sqrt(float(128))),
name="weights")
biases = tf.Variable(tf.zeros([32]),
name="biases")
hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
# Linear
with tf.name_scope("softmax_linear"):
weights = tf.Variable(
tf.truncated_normal([32, 10],
stddev=1.0 / math.sqrt(float(32))),
name="weights")
biases = tf.Variable(tf.zeros([10]),
name="biases")
logits = tf.matmul(hidden2, weights) + biases
tf.add_to_collection("logits", logits)
init_all_op = tf.initialize_all_variables()
with self.test_session() as sess:
# Initializes all the variables.
sess.run(init_all_op)
# Runs to logit.
sess.run(logits)
# Creates a saver.
saver0 = tf.train.Saver()
saver0.save(sess, saver0_ckpt)
# Generates MetaGraphDef.
saver0.export_meta_graph(filename)
def _testGraphExtensionRestore(self):
test_dir = os.path.join(self.get_temp_dir(), "graph_extension")
filename = os.path.join(test_dir, "metafile")
saver0_ckpt = os.path.join(test_dir, "saver0.ckpt")
with self.test_session(graph=tf.Graph()) as sess:
# Restores from MetaGraphDef.
new_saver = tf.train.import_meta_graph(filename)
# Generates a new MetaGraphDef.
new_saver.export_meta_graph()
# Restores from checkpoint.
new_saver.restore(sess, saver0_ckpt)
      # Adds loss and train ops.
labels = tf.constant(0, tf.int32, shape=[100], name="labels")
batch_size = tf.size(labels)
labels = tf.expand_dims(labels, 1)
indices = tf.expand_dims(tf.range(0, batch_size), 1)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, 10]), 1.0, 0.0)
logits = tf.get_collection("logits")[0]
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits,
onehot_labels,
name="xentropy")
loss = tf.reduce_mean(cross_entropy, name="xentropy_mean")
tf.scalar_summary(loss.op.name, loss)
# Creates the gradient descent optimizer with the given learning rate.
optimizer = tf.train.GradientDescentOptimizer(0.01)
# Runs train_op.
train_op = optimizer.minimize(loss)
sess.run(train_op)
def testGraphExtension(self):
self._testGraphExtensionSave()
self._testGraphExtensionRestore()
def testStrippedOpListDef(self):
with self.test_session():
# Creates a graph.
v0 = tf.Variable(0.0)
var = tf.Variable(10.0)
tf.add(v0, var)
@function.Defun(x=tf.float32)
def minus_one(x):
return x - 1
minus_one(tf.identity(v0))
save = tf.train.Saver({"v0": v0})
tf.initialize_all_variables()
# Generates MetaGraphDef.
meta_graph_def = save.export_meta_graph()
ops = [o.name for o in meta_graph_def.meta_info_def.stripped_op_list.op]
self.assertEqual(ops, ["Add", "Assign", "Const", "Identity", "NoOp",
"RestoreSlice", "SaveSlices", "Sub", "Variable"])
# Test calling stripped_op_list_for_graph directly
op_list = tf.contrib.util.stripped_op_list_for_graph(
meta_graph_def.graph_def)
self.assertEqual(ops, [o.name for o in op_list.op])
for o in op_list.op:
self.assertEqual(o.summary, "")
self.assertEqual(o.description, "")
def testStrippedOpListNestedFunctions(self):
with self.test_session():
# Square two levels deep
def f0(x):
return tf.square(x)
f0 = function.define_function(f0, {"x": tf.int32})
def f1(x):
return function.call_function(f0, x)
f1 = function.define_function(f1, {"x": tf.int32})
# At this point we've defined two functions but haven't called them, so
# there should be no used ops.
op_list = tf.contrib.util.stripped_op_list_for_graph(
tf.get_default_graph().as_graph_def())
self.assertEquals(len(op_list.op), 0)
# If we call the function on a constant, there should be two ops
function.call_function(f1, tf.constant(7))
op_list = tf.contrib.util.stripped_op_list_for_graph(
tf.get_default_graph().as_graph_def())
self.assertEquals(["Const", "Square"], [op.name for op in op_list.op])
def testStrippedOpListRecursiveFunctions(self):
# The function module doesn't support recursive functions, so we build a
# recursive function situation by ourselves: A calls B calls A and Const.
graph = graph_pb2.GraphDef()
a = graph.library.function.add()
b = graph.library.function.add()
a.signature.name = "A"
b.signature.name = "B"
a.node.add().op = "B"
b.node.add().op = "Const"
b.node.add().op = "A"
# Use A in the graph
graph.node.add().op = "A"
# The stripped op list should contain just Const.
op_list = tf.contrib.util.stripped_op_list_for_graph(graph)
self.assertEquals(["Const"], [op.name for op in op_list.op])
class CheckpointReaderTest(tf.test.TestCase):
def testDebugString(self):
# Builds a graph.
v0 = tf.Variable([[1, 2, 3], [4, 5, 6]], dtype=tf.float32, name="v0")
v1 = tf.Variable([[[1], [2]], [[3], [4]], [[5], [6]]], dtype=tf.float32,
name="v1")
init_all_op = tf.initialize_all_variables()
save = tf.train.Saver({"v0": v0, "v1": v1})
save_path = os.path.join(self.get_temp_dir(), "ckpt_for_debug_string")
with self.test_session() as sess:
sess.run(init_all_op)
# Saves a checkpoint.
save.save(sess, save_path)
# Creates a reader.
reader = tf.train.NewCheckpointReader(save_path)
# Verifies that the tensors exist.
self.assertTrue(reader.has_tensor("v0"))
self.assertTrue(reader.has_tensor("v1"))
debug_string = reader.debug_string()
# Verifies that debug string contains the right strings.
self.assertTrue(compat.as_bytes("v0 (DT_FLOAT) [2,3]") in debug_string)
self.assertTrue(compat.as_bytes("v1 (DT_FLOAT) [3,2,1]") in debug_string)
# Verifies get_variable_to_shape_map() returns the correct information.
var_map = reader.get_variable_to_shape_map()
self.assertEquals([2, 3], var_map["v0"])
self.assertEquals([3, 2, 1], var_map["v1"])
# Verifies get_tensor() returns the tensor value.
v0_tensor = reader.get_tensor("v0")
v1_tensor = reader.get_tensor("v1")
self.assertAllEqual(v0.eval(), v0_tensor)
self.assertAllEqual(v1.eval(), v1_tensor)
# Verifies get_tensor() fails for non-existent tensors.
with self.assertRaisesRegexp(errors.NotFoundError,
"v3 not found in checkpoint file"):
reader.get_tensor("v3")
def testNonexistentPath(self):
with self.assertRaisesRegexp(errors.NotFoundError,
"Unsuccessful TensorSliceReader"):
tf.train.NewCheckpointReader("non-existent")
if __name__ == "__main__":
tf.test.main()
| {
"content_hash": "7338f57963d079673f63b9dc759412d6",
"timestamp": "",
"source": "github",
"line_count": 1217,
"max_line_length": 80,
"avg_line_length": 39.75842235004109,
"alnum_prop": 0.6402265118009342,
"repo_name": "ibab/tensorflow",
"id": "88839d421e1a83b7f795075c1af213534f05f6f6",
"size": "49064",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/saver_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156010"
},
{
"name": "C++",
"bytes": "9133299"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "773228"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "Protocol Buffer",
"bytes": "111555"
},
{
"name": "Python",
"bytes": "6393079"
},
{
"name": "Shell",
"bytes": "164997"
},
{
"name": "TypeScript",
"bytes": "409165"
}
],
"symlink_target": ""
} |
import os
from build_swift.build_swift.constants import MULTIROOT_DATA_FILE_PATH
from . import cmark
from . import foundation
from . import libcxx
from . import libdispatch
from . import libicu
from . import llbuild
from . import llvm
from . import product
from . import swift
from . import swiftpm
from . import xctest
from .. import shell
class SwiftSyntax(product.Product):
@classmethod
def product_source_name(cls):
"""product_source_name() -> str
The name of the source code directory of this product.
"""
return "swift-syntax"
@classmethod
def is_build_script_impl_product(cls):
return False
@classmethod
def is_swiftpm_unified_build_product(cls):
return True
def run_swiftsyntax_build_script(self, target, additional_params=[]):
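        # The LLVM build directory sits next to this product's build
        # directory; its FileCheck binary is passed to build-script.py via
        # --filecheck-exec for the test runs.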
llvm_build_dir = os.path.join(self.build_dir, '..', 'llvm-' + target)
llvm_build_dir = os.path.realpath(llvm_build_dir)
script_path = os.path.join(self.source_dir, 'build-script.py')
build_cmd = [
script_path,
'--build-dir', self.build_dir,
'--multiroot-data-file', MULTIROOT_DATA_FILE_PATH,
'--toolchain', self.install_toolchain_path(target),
'--filecheck-exec', os.path.join(llvm_build_dir, 'bin',
'FileCheck'),
]
if self.is_release():
build_cmd.append('--release')
if self.args.swiftsyntax_verify_generated_files:
build_cmd.append('--verify-generated-files')
build_cmd.extend(additional_params)
if self.args.verbose_build:
build_cmd.append('--verbose')
shell.call(build_cmd)
def should_build(self, host_target):
return True
def build(self, host_target):
self.run_swiftsyntax_build_script(target=host_target)
def should_test(self, host_target):
return self.args.test_swiftsyntax
def test(self, host_target):
self.run_swiftsyntax_build_script(target=host_target,
additional_params=['--test'])
def should_install(self, host_target):
return self.args.install_swiftsyntax
def install(self, target_name):
install_prefix = self.args.install_destdir + self.args.install_prefix
dylib_dir = os.path.join(install_prefix, 'lib')
additional_params = [
'--dylib-dir', dylib_dir,
'--install'
]
self.run_swiftsyntax_build_script(target=target_name,
additional_params=additional_params)
@classmethod
def get_dependencies(cls):
return [cmark.CMark,
llvm.LLVM,
libcxx.LibCXX,
libicu.LibICU,
swift.Swift,
libdispatch.LibDispatch,
foundation.Foundation,
xctest.XCTest,
llbuild.LLBuild,
swiftpm.SwiftPM]
| {
"content_hash": "8513a767127484219f7bb47ea7a81ff7",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 78,
"avg_line_length": 28.990384615384617,
"alnum_prop": 0.5887230514096186,
"repo_name": "CodaFi/swift",
"id": "00cdef0dd0c5c7007e9e6395b230cc285d963783",
"size": "3523",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "utils/swift_build_support/swift_build_support/products/swiftsyntax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13716"
},
{
"name": "C",
"bytes": "272549"
},
{
"name": "C++",
"bytes": "38990943"
},
{
"name": "CMake",
"bytes": "560004"
},
{
"name": "D",
"bytes": "1107"
},
{
"name": "DTrace",
"bytes": "2593"
},
{
"name": "Emacs Lisp",
"bytes": "57457"
},
{
"name": "LLVM",
"bytes": "70652"
},
{
"name": "MATLAB",
"bytes": "2576"
},
{
"name": "Makefile",
"bytes": "1841"
},
{
"name": "Objective-C",
"bytes": "428976"
},
{
"name": "Objective-C++",
"bytes": "244095"
},
{
"name": "Python",
"bytes": "1794381"
},
{
"name": "Roff",
"bytes": "3495"
},
{
"name": "Ruby",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "188874"
},
{
"name": "Swift",
"bytes": "33839781"
},
{
"name": "Vim Script",
"bytes": "19900"
},
{
"name": "sed",
"bytes": "1050"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/mission/quest_item/shared_attunement_grid.iff"
result.attribute_template_id = -1
result.stfName("item_n","attunement_grid")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "7a4f2a095273d9e7d4801410d7549074",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 24.23076923076923,
"alnum_prop": 0.7015873015873015,
"repo_name": "anhstudios/swganh",
"id": "ddf605b5d2e37eb365cbf7a0808f0e7b443f5323",
"size": "460",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/mission/quest_item/shared_attunement_grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import buildtool.command
COMMAND = 'test_command_example'
CUSTOM_ARG_NAME = 'custom_test_arg'
CUSTOM_ARG_DEFAULT_VALUE = 'Custom Default Value'
class TestCommand(buildtool.command.CommandProcessor):
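  """Minimal CommandProcessor that counts how many times it executes."""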
@property
def calls(self):
return self.__calls
def __init__(self, factory, options):
super(TestCommand, self).__init__(factory, options)
self.__calls = 0
def _do_command(self):
self.__calls += 1
class TestCommandFactory(buildtool.command.CommandFactory):
def __init__(self):
super(TestCommandFactory, self).__init__(
COMMAND, TestCommand, 'My Test Command')
def init_argparser(self, parser, defaults):
super(TestCommandFactory, self).init_argparser(parser, defaults)
TestCommandFactory.add_argument(
parser, CUSTOM_ARG_NAME, defaults, CUSTOM_ARG_DEFAULT_VALUE)
def register_commands(registry, subparsers, defaults):
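  """Registers the TestCommandFactory with the given registry and parsers."""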
TestCommandFactory().register(registry, subparsers, defaults)
| {
"content_hash": "766e227184e84795193e766bba101df1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 68,
"avg_line_length": 29.5,
"alnum_prop": 0.722457627118644,
"repo_name": "skim1420/spinnaker",
"id": "1cdba67ec569eb9c0a4646a9fe8e83c5d5042a89",
"size": "1578",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "unittest/buildtool/custom_test_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1199"
},
{
"name": "Go",
"bytes": "2745"
},
{
"name": "HTML",
"bytes": "422"
},
{
"name": "Makefile",
"bytes": "276"
},
{
"name": "Python",
"bytes": "1221215"
},
{
"name": "Shell",
"bytes": "182975"
},
{
"name": "Smarty",
"bytes": "2087"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import with_statement
import errno
import os
import resource
import signal
from mock import Mock, patch
from celery import current_app
from celery import platforms
from celery.platforms import (
get_fdmax,
shellsplit,
ignore_EBADF,
set_process_title,
signals,
maybe_drop_privileges,
setuid,
setgid,
seteuid,
setegid,
initgroups,
parse_uid,
parse_gid,
detached,
DaemonContext,
create_pidlock,
PIDFile,
LockFailed,
setgroups,
_setgroups_hack
)
from celery.tests.utils import Case, WhateverIO, override_stdouts
class test_ignore_EBADF(Case):
def test_raises_EBADF(self):
with ignore_EBADF():
exc = OSError()
exc.errno = errno.EBADF
raise exc
def test_otherwise(self):
with self.assertRaises(OSError):
with ignore_EBADF():
exc = OSError()
exc.errno = errno.ENOENT
raise exc
class test_shellsplit(Case):
def test_split(self):
self.assertEqual(shellsplit("the 'quick' brown fox"),
["the", "quick", "brown", "fox"])
class test_set_process_title(Case):
def when_no_setps(self):
prev = platforms._setproctitle = platforms._setproctitle, None
try:
set_process_title("foo")
finally:
platforms._setproctitle = prev
class test_Signals(Case):
@patch("signal.getsignal")
def test_getitem(self, getsignal):
signals["SIGINT"]
getsignal.assert_called_with(signal.SIGINT)
def test_supported(self):
self.assertTrue(signals.supported("INT"))
self.assertFalse(signals.supported("SIGIMAGINARY"))
def test_signum(self):
self.assertEqual(signals.signum(13), 13)
self.assertEqual(signals.signum("INT"), signal.SIGINT)
self.assertEqual(signals.signum("SIGINT"), signal.SIGINT)
with self.assertRaises(TypeError):
signals.signum("int")
signals.signum(object())
@patch("signal.signal")
def test_ignore(self, set):
signals.ignore("SIGINT")
set.assert_called_with(signals.signum("INT"), signals.ignored)
signals.ignore("SIGTERM")
set.assert_called_with(signals.signum("TERM"), signals.ignored)
@patch("signal.signal")
def test_setitem(self, set):
handle = lambda *a: a
signals["INT"] = handle
set.assert_called_with(signal.SIGINT, handle)
@patch("signal.signal")
def test_setitem_raises(self, set):
set.side_effect = ValueError()
signals["INT"] = lambda *a: a
if not current_app.IS_WINDOWS:
class test_get_fdmax(Case):
@patch("resource.getrlimit")
def test_when_infinity(self, getrlimit):
getrlimit.return_value = [None, resource.RLIM_INFINITY]
default = object()
self.assertIs(get_fdmax(default), default)
@patch("resource.getrlimit")
def test_when_actual(self, getrlimit):
getrlimit.return_value = [None, 13]
self.assertEqual(get_fdmax(None), 13)
class test_maybe_drop_privileges(Case):
@patch("celery.platforms.parse_uid")
@patch("pwd.getpwuid")
@patch("celery.platforms.setgid")
@patch("celery.platforms.setuid")
@patch("celery.platforms.initgroups")
def test_with_uid(self, initgroups, setuid, setgid,
getpwuid, parse_uid):
class pw_struct(object):
pw_gid = 50001
getpwuid.return_value = pw_struct()
parse_uid.return_value = 5001
maybe_drop_privileges(uid="user")
parse_uid.assert_called_with("user")
getpwuid.assert_called_with(5001)
setgid.assert_called_with(50001)
initgroups.assert_called_with(5001, 50001)
setuid.assert_called_with(5001)
@patch("celery.platforms.parse_uid")
@patch("celery.platforms.parse_gid")
@patch("celery.platforms.setgid")
@patch("celery.platforms.setuid")
@patch("celery.platforms.initgroups")
def test_with_guid(self, initgroups, setuid, setgid,
parse_gid, parse_uid):
parse_uid.return_value = 5001
parse_gid.return_value = 50001
maybe_drop_privileges(uid="user", gid="group")
parse_uid.assert_called_with("user")
parse_gid.assert_called_with("group")
setgid.assert_called_with(50001)
initgroups.assert_called_with(5001, 50001)
setuid.assert_called_with(5001)
@patch("celery.platforms.setuid")
@patch("celery.platforms.setgid")
@patch("celery.platforms.parse_gid")
def test_only_gid(self, parse_gid, setgid, setuid):
parse_gid.return_value = 50001
maybe_drop_privileges(gid="group")
parse_gid.assert_called_with("group")
setgid.assert_called_with(50001)
self.assertFalse(setuid.called)
class test_setget_uid_gid(Case):
@patch("celery.platforms.parse_uid")
@patch("os.setuid")
def test_setuid(self, _setuid, parse_uid):
parse_uid.return_value = 5001
setuid("user")
parse_uid.assert_called_with("user")
_setuid.assert_called_with(5001)
@patch("celery.platforms.parse_uid")
@patch("os.geteuid")
@patch("os.seteuid")
def test_seteuid(self, _seteuid, _geteuid, parse_uid):
parse_uid.return_value = 5001
_geteuid.return_value = 5001
seteuid("user")
parse_uid.assert_called_with("user")
self.assertFalse(_seteuid.called)
_geteuid.return_value = 1
seteuid("user")
_seteuid.assert_called_with(5001)
@patch("celery.platforms.parse_gid")
@patch("os.setgid")
def test_setgid(self, _setgid, parse_gid):
parse_gid.return_value = 50001
setgid("group")
parse_gid.assert_called_with("group")
_setgid.assert_called_with(50001)
@patch("celery.platforms.parse_gid")
@patch("os.getegid")
@patch("os.setegid")
def test_setegid(self, _setegid, _getegid, parse_gid):
parse_gid.return_value = 50001
_getegid.return_value = 50001
setegid("group")
parse_gid.assert_called_with("group")
self.assertFalse(_setegid.called)
_getegid.return_value = 1
setegid("group")
_setegid.assert_called_with(50001)
def test_parse_uid_when_int(self):
self.assertEqual(parse_uid(5001), 5001)
@patch("pwd.getpwnam")
def test_parse_uid_when_existing_name(self, getpwnam):
class pwent(object):
pw_uid = 5001
getpwnam.return_value = pwent()
self.assertEqual(parse_uid("user"), 5001)
@patch("pwd.getpwnam")
def test_parse_uid_when_nonexisting_name(self, getpwnam):
getpwnam.side_effect = KeyError("user")
with self.assertRaises(KeyError):
parse_uid("user")
def test_parse_gid_when_int(self):
self.assertEqual(parse_gid(50001), 50001)
@patch("grp.getgrnam")
def test_parse_gid_when_existing_name(self, getgrnam):
class grent(object):
gr_gid = 50001
getgrnam.return_value = grent()
self.assertEqual(parse_gid("group"), 50001)
@patch("grp.getgrnam")
def test_parse_gid_when_nonexisting_name(self, getgrnam):
getgrnam.side_effect = KeyError("group")
with self.assertRaises(KeyError):
parse_gid("group")
class test_initgroups(Case):
@patch("pwd.getpwuid")
@patch("os.initgroups", create=True)
def test_with_initgroups(self, initgroups_, getpwuid):
getpwuid.return_value = ["user"]
initgroups(5001, 50001)
initgroups_.assert_called_with("user", 50001)
@patch("celery.platforms.setgroups")
@patch("grp.getgrall")
@patch("pwd.getpwuid")
def test_without_initgroups(self, getpwuid, getgrall, setgroups):
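        # Temporarily drop os.initgroups so the fallback path that builds
        # the group list from grp.getgrall() is exercised.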
prev = getattr(os, "initgroups", None)
try:
delattr(os, "initgroups")
except AttributeError:
pass
try:
getpwuid.return_value = ["user"]
class grent(object):
gr_mem = ["user"]
def __init__(self, gid):
self.gr_gid = gid
getgrall.return_value = [grent(1), grent(2), grent(3)]
initgroups(5001, 50001)
setgroups.assert_called_with([1, 2, 3])
finally:
if prev:
os.initgroups = prev
class test_detached(Case):
def test_without_resource(self):
prev, platforms.resource = platforms.resource, None
try:
with self.assertRaises(RuntimeError):
detached()
finally:
platforms.resource = prev
@patch("celery.platforms.create_pidlock")
@patch("celery.platforms.signals")
@patch("celery.platforms.maybe_drop_privileges")
@patch("os.geteuid")
@patch("__builtin__.open")
def test_default(self, open, geteuid, maybe_drop, signals, pidlock):
geteuid.return_value = 0
context = detached(uid="user", gid="group")
self.assertIsInstance(context, DaemonContext)
signals.reset.assert_called_with("SIGCLD")
maybe_drop.assert_called_with(uid="user", gid="group")
open.return_value = Mock()
geteuid.return_value = 5001
context = detached(uid="user", gid="group", logfile="/foo/bar")
self.assertIsInstance(context, DaemonContext)
open.assert_called_with("/foo/bar", "a")
open.return_value.close.assert_called_with()
context = detached(pidfile="/foo/bar/pid")
self.assertIsInstance(context, DaemonContext)
pidlock.assert_called_with("/foo/bar/pid")
class test_DaemonContext(Case):
@patch("os.fork")
@patch("os.setsid")
@patch("os._exit")
@patch("os.chdir")
@patch("os.umask")
@patch("os.close")
@patch("os.open")
@patch("os.dup2")
def test_open(self, dup2, open, close, umask, chdir, _exit, setsid,
fork):
x = DaemonContext(workdir="/opt/workdir")
fork.return_value = 0
with x:
self.assertTrue(x._is_open)
with x:
pass
self.assertEqual(fork.call_count, 2)
setsid.assert_called_with()
self.assertFalse(_exit.called)
chdir.assert_called_with(x.workdir)
umask.assert_called_with(x.umask)
open.assert_called_with(platforms.DAEMON_REDIRECT_TO, os.O_RDWR)
self.assertEqual(dup2.call_args_list[0], [(0, 1), {}])
self.assertEqual(dup2.call_args_list[1], [(0, 2), {}])
fork.reset_mock()
fork.return_value = 1
x = DaemonContext(workdir="/opt/workdir")
with x:
pass
self.assertEqual(fork.call_count, 1)
_exit.assert_called_with(0)
x = DaemonContext(workdir="/opt/workdir", fake=True)
x._detach = Mock()
with x:
pass
self.assertFalse(x._detach.called)
class test_PIDFile(Case):
@patch("celery.platforms.PIDFile")
def test_create_pidlock(self, PIDFile):
p = PIDFile.return_value = Mock()
p.is_locked.return_value = True
p.remove_if_stale.return_value = False
with self.assertRaises(SystemExit):
create_pidlock("/var/pid")
p.remove_if_stale.return_value = True
ret = create_pidlock("/var/pid")
self.assertIs(ret, p)
def test_context(self):
p = PIDFile("/var/pid")
p.write_pid = Mock()
p.remove = Mock()
with p as _p:
self.assertIs(_p, p)
p.write_pid.assert_called_with()
p.remove.assert_called_with()
def test_acquire_raises_LockFailed(self):
p = PIDFile("/var/pid")
p.write_pid = Mock()
p.write_pid.side_effect = OSError()
with self.assertRaises(LockFailed):
with p:
pass
@patch("os.path.exists")
def test_is_locked(self, exists):
p = PIDFile("/var/pid")
exists.return_value = True
self.assertTrue(p.is_locked())
exists.return_value = False
self.assertFalse(p.is_locked())
@patch("__builtin__.open")
def test_read_pid(self, open_):
s = open_.return_value = WhateverIO()
s.write("1816\n")
s.seek(0)
p = PIDFile("/var/pid")
self.assertEqual(p.read_pid(), 1816)
@patch("__builtin__.open")
def test_read_pid_partially_written(self, open_):
s = open_.return_value = WhateverIO()
s.write("1816")
s.seek(0)
p = PIDFile("/var/pid")
with self.assertRaises(ValueError):
p.read_pid()
@patch("__builtin__.open")
def test_read_pid_raises_ENOENT(self, open_):
exc = IOError()
exc.errno = errno.ENOENT
open_.side_effect = exc
p = PIDFile("/var/pid")
self.assertIsNone(p.read_pid())
@patch("__builtin__.open")
def test_read_pid_raises_IOError(self, open_):
exc = IOError()
exc.errno = errno.EAGAIN
open_.side_effect = exc
p = PIDFile("/var/pid")
with self.assertRaises(IOError):
p.read_pid()
@patch("__builtin__.open")
def test_read_pid_bogus_pidfile(self, open_):
s = open_.return_value = WhateverIO()
s.write("eighteensixteen\n")
s.seek(0)
p = PIDFile("/var/pid")
with self.assertRaises(ValueError):
p.read_pid()
@patch("os.unlink")
def test_remove(self, unlink):
unlink.return_value = True
p = PIDFile("/var/pid")
p.remove()
unlink.assert_called_with(p.path)
@patch("os.unlink")
def test_remove_ENOENT(self, unlink):
exc = OSError()
exc.errno = errno.ENOENT
unlink.side_effect = exc
p = PIDFile("/var/pid")
p.remove()
unlink.assert_called_with(p.path)
@patch("os.unlink")
def test_remove_EACCES(self, unlink):
exc = OSError()
exc.errno = errno.EACCES
unlink.side_effect = exc
p = PIDFile("/var/pid")
p.remove()
unlink.assert_called_with(p.path)
@patch("os.unlink")
def test_remove_OSError(self, unlink):
exc = OSError()
exc.errno = errno.EAGAIN
unlink.side_effect = exc
p = PIDFile("/var/pid")
with self.assertRaises(OSError):
p.remove()
unlink.assert_called_with(p.path)
@patch("os.kill")
def test_remove_if_stale_process_alive(self, kill):
p = PIDFile("/var/pid")
p.read_pid = Mock()
p.read_pid.return_value = 1816
kill.return_value = 0
self.assertFalse(p.remove_if_stale())
kill.assert_called_with(1816, 0)
p.read_pid.assert_called_with()
kill.side_effect = OSError()
kill.side_effect.errno = errno.ENOENT
self.assertFalse(p.remove_if_stale())
@patch("os.kill")
def test_remove_if_stale_process_dead(self, kill):
with override_stdouts():
p = PIDFile("/var/pid")
p.read_pid = Mock()
p.read_pid.return_value = 1816
p.remove = Mock()
exc = OSError()
exc.errno = errno.ESRCH
kill.side_effect = exc
self.assertTrue(p.remove_if_stale())
kill.assert_called_with(1816, 0)
p.remove.assert_called_with()
def test_remove_if_stale_broken_pid(self):
with override_stdouts():
p = PIDFile("/var/pid")
p.read_pid = Mock()
p.read_pid.side_effect = ValueError()
p.remove = Mock()
self.assertTrue(p.remove_if_stale())
p.remove.assert_called_with()
def test_remove_if_stale_no_pidfile(self):
p = PIDFile("/var/pid")
p.read_pid = Mock()
p.read_pid.return_value = None
p.remove = Mock()
self.assertTrue(p.remove_if_stale())
p.remove.assert_called_with()
@patch("os.fsync")
@patch("os.getpid")
@patch("os.open")
@patch("os.fdopen")
@patch("__builtin__.open")
def test_write_pid(self, open_, fdopen, osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
w = fdopen.return_value = WhateverIO()
w.close = Mock()
r = open_.return_value = WhateverIO()
r.write("1816\n")
r.seek(0)
p = PIDFile("/var/pid")
p.write_pid()
w.seek(0)
self.assertEqual(w.readline(), "1816\n")
self.assertTrue(w.close.called)
getpid.assert_called_with()
osopen.assert_called_with(p.path, platforms.PIDFILE_FLAGS,
platforms.PIDFILE_MODE)
fdopen.assert_called_with(13, "w")
fsync.assert_called_with(13)
open_.assert_called_with(p.path)
@patch("os.fsync")
@patch("os.getpid")
@patch("os.open")
@patch("os.fdopen")
@patch("__builtin__.open")
def test_write_reread_fails(self, open_, fdopen,
osopen, getpid, fsync):
getpid.return_value = 1816
osopen.return_value = 13
w = fdopen.return_value = WhateverIO()
w.close = Mock()
r = open_.return_value = WhateverIO()
r.write("11816\n")
r.seek(0)
p = PIDFile("/var/pid")
with self.assertRaises(LockFailed):
p.write_pid()
class test_setgroups(Case):
@patch("os.setgroups", create=True)
def test_setgroups_hack_ValueError(self, setgroups):
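        # Simulate a platform where os.setgroups() rejects lists longer than
        # 200 entries, so _setgroups_hack has to retry with a shorter list.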
def on_setgroups(groups):
if len(groups) <= 200:
setgroups.return_value = True
return
raise ValueError()
setgroups.side_effect = on_setgroups
_setgroups_hack(range(400))
setgroups.side_effect = ValueError()
with self.assertRaises(ValueError):
_setgroups_hack(range(400))
@patch("os.setgroups", create=True)
def test_setgroups_hack_OSError(self, setgroups):
exc = OSError()
exc.errno = errno.EINVAL
def on_setgroups(groups):
if len(groups) <= 200:
setgroups.return_value = True
return
raise exc
setgroups.side_effect = on_setgroups
_setgroups_hack(range(400))
setgroups.side_effect = exc
with self.assertRaises(OSError):
_setgroups_hack(range(400))
exc2 = OSError()
        exc2.errno = errno.ESRCH
setgroups.side_effect = exc2
with self.assertRaises(OSError):
_setgroups_hack(range(400))
@patch("os.sysconf")
@patch("celery.platforms._setgroups_hack")
def test_setgroups(self, hack, sysconf):
sysconf.return_value = 100
setgroups(range(400))
hack.assert_called_with(range(100))
@patch("os.sysconf")
@patch("celery.platforms._setgroups_hack")
def test_setgroups_sysconf_raises(self, hack, sysconf):
sysconf.side_effect = ValueError()
setgroups(range(400))
hack.assert_called_with(range(400))
@patch("os.getgroups")
@patch("os.sysconf")
@patch("celery.platforms._setgroups_hack")
def test_setgroups_raises_ESRCH(self, hack, sysconf, getgroups):
sysconf.side_effect = ValueError()
esrch = OSError()
esrch.errno = errno.ESRCH
hack.side_effect = esrch
with self.assertRaises(OSError):
setgroups(range(400))
@patch("os.getgroups")
@patch("os.sysconf")
@patch("celery.platforms._setgroups_hack")
def test_setgroups_raises_EPERM(self, hack, sysconf, getgroups):
sysconf.side_effect = ValueError()
eperm = OSError()
eperm.errno = errno.EPERM
hack.side_effect = eperm
getgroups.return_value = range(400)
setgroups(range(400))
getgroups.assert_called_with()
getgroups.return_value = [1000]
with self.assertRaises(OSError):
setgroups(range(400))
getgroups.assert_called_with()
| {
"content_hash": "ee0b18c4868471e02c116884d9069f31",
"timestamp": "",
"source": "github",
"line_count": 658,
"max_line_length": 76,
"avg_line_length": 33.209726443769,
"alnum_prop": 0.5417810726706938,
"repo_name": "couchbaselabs/celery",
"id": "de5494414af49fba2ed3642acfc5cfc9952f56f0",
"size": "21852",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "celery/tests/utilities/test_platforms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Sensor from an SQL Query."""
import datetime
import decimal
import logging
import re
import sqlalchemy
from sqlalchemy.orm import scoped_session, sessionmaker
import voluptuous as vol
from homeassistant.components.recorder import CONF_DB_URL, DEFAULT_DB_FILE, DEFAULT_URL
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_NAME, CONF_UNIT_OF_MEASUREMENT, CONF_VALUE_TEMPLATE
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_COLUMN_NAME = "column"
CONF_QUERIES = "queries"
CONF_QUERY = "query"
DB_URL_RE = re.compile("//.*:.*@")
def redact_credentials(data):
"""Redact credentials from string data."""
return DB_URL_RE.sub("//****:****@", data)
def validate_sql_select(value):
"""Validate that value is a SQL SELECT query."""
if not value.lstrip().lower().startswith("select"):
raise vol.Invalid("Only SELECT queries allowed")
return value
_QUERY_SCHEME = vol.Schema(
{
vol.Required(CONF_COLUMN_NAME): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_QUERY): vol.All(cv.string, validate_sql_select),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_QUERIES): [_QUERY_SCHEME], vol.Optional(CONF_DB_URL): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SQL sensor platform."""
db_url = config.get(CONF_DB_URL)
if not db_url:
db_url = DEFAULT_URL.format(hass_config_path=hass.config.path(DEFAULT_DB_FILE))
sess = None
try:
engine = sqlalchemy.create_engine(db_url)
sessmaker = scoped_session(sessionmaker(bind=engine))
# Run a dummy query just to test the db_url
sess = sessmaker()
sess.execute("SELECT 1;")
except sqlalchemy.exc.SQLAlchemyError as err:
_LOGGER.error(
"Couldn't connect using %s DB_URL: %s",
redact_credentials(db_url),
redact_credentials(str(err)),
)
return
finally:
if sess:
sess.close()
queries = []
for query in config.get(CONF_QUERIES):
name = query.get(CONF_NAME)
query_str = query.get(CONF_QUERY)
unit = query.get(CONF_UNIT_OF_MEASUREMENT)
value_template = query.get(CONF_VALUE_TEMPLATE)
column_name = query.get(CONF_COLUMN_NAME)
if value_template is not None:
value_template.hass = hass
# MSSQL uses TOP and not LIMIT
if not ("LIMIT" in query_str or "SELECT TOP" in query_str):
query_str = (
query_str.replace("SELECT", "SELECT TOP 1")
if "mssql" in db_url
else query_str.replace(";", " LIMIT 1;")
)
sensor = SQLSensor(
name, sessmaker, query_str, column_name, unit, value_template
)
queries.append(sensor)
add_entities(queries, True)
class SQLSensor(SensorEntity):
"""Representation of an SQL sensor."""
def __init__(self, name, sessmaker, query, column, unit, value_template):
"""Initialize the SQL sensor."""
self._name = name
self._query = query
self._unit_of_measurement = unit
self._template = value_template
self._column_name = column
self.sessionmaker = sessmaker
self._state = None
self._attributes = None
@property
def name(self):
"""Return the name of the query."""
return self._name
@property
def native_value(self):
"""Return the query's current state."""
return self._state
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def update(self):
"""Retrieve sensor data from the query."""
data = None
try:
sess = self.sessionmaker()
result = sess.execute(self._query)
self._attributes = {}
if not result.returns_rows or result.rowcount == 0:
_LOGGER.warning("%s returned no results", self._query)
self._state = None
return
for res in result.mappings():
_LOGGER.debug("result = %s", res.items())
data = res[self._column_name]
for key, value in res.items():
if isinstance(value, decimal.Decimal):
value = float(value)
if isinstance(value, datetime.date):
value = str(value)
self._attributes[key] = value
except sqlalchemy.exc.SQLAlchemyError as err:
_LOGGER.error(
"Error executing query %s: %s",
self._query,
redact_credentials(str(err)),
)
return
finally:
sess.close()
if data is not None and self._template is not None:
self._state = self._template.async_render_with_possible_json_value(
data, None
)
else:
self._state = data
| {
"content_hash": "9a4f14e2d508fa1e7bf008a633c25919",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 88,
"avg_line_length": 30.528089887640448,
"alnum_prop": 0.5901729849098271,
"repo_name": "sander76/home-assistant",
"id": "1b0ae5a9076af075c29e4ff63678f62c932a33d0",
"size": "5434",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sql/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import urllib
import pytest
from mitmproxy.test import taddons
from mitmproxy.test import tflow
import mitmproxy.test.tutils
from mitmproxy.addons import serverplayback
from mitmproxy import options
from mitmproxy import exceptions
from mitmproxy import io
def tdump(path, flows):
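    """Dump the given flows to path with mitmproxy's FlowWriter."""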
w = io.FlowWriter(open(path, "wb"))
for i in flows:
w.add(i)
def test_config(tmpdir):
s = serverplayback.ServerPlayback()
with taddons.context() as tctx:
fpath = str(tmpdir.join("flows"))
tdump(fpath, [tflow.tflow(resp=True)])
tctx.configure(s, server_replay=[fpath])
with pytest.raises(exceptions.OptionsError):
tctx.configure(s, server_replay=[str(tmpdir)])
def test_tick():
s = serverplayback.ServerPlayback()
with taddons.context() as tctx:
s.stop = True
s.final_flow = tflow.tflow()
s.final_flow.live = False
s.tick()
assert tctx.master.has_event("processing_complete")
def test_server_playback():
sp = serverplayback.ServerPlayback()
sp.configure(options.Options(), [])
f = tflow.tflow(resp=True)
assert not sp.flowmap
sp.load_flows([f])
assert sp.flowmap
assert sp.next_flow(f)
assert not sp.flowmap
sp.load_flows([f])
assert sp.flowmap
sp.clear()
assert not sp.flowmap
def test_ignore_host():
sp = serverplayback.ServerPlayback()
sp.configure(options.Options(server_replay_ignore_host=True), [])
r = tflow.tflow(resp=True)
r2 = tflow.tflow(resp=True)
r.request.host = "address"
r2.request.host = "address"
assert sp._hash(r) == sp._hash(r2)
r2.request.host = "wrong_address"
assert sp._hash(r) == sp._hash(r2)
def test_ignore_content():
s = serverplayback.ServerPlayback()
s.configure(options.Options(server_replay_ignore_content=False), [])
r = tflow.tflow(resp=True)
r2 = tflow.tflow(resp=True)
r.request.content = b"foo"
r2.request.content = b"foo"
assert s._hash(r) == s._hash(r2)
r2.request.content = b"bar"
assert not s._hash(r) == s._hash(r2)
s.configure(options.Options(server_replay_ignore_content=True), [])
r = tflow.tflow(resp=True)
r2 = tflow.tflow(resp=True)
r.request.content = b"foo"
r2.request.content = b"foo"
assert s._hash(r) == s._hash(r2)
r2.request.content = b"bar"
assert s._hash(r) == s._hash(r2)
r2.request.content = b""
assert s._hash(r) == s._hash(r2)
r2.request.content = None
assert s._hash(r) == s._hash(r2)
def test_ignore_content_wins_over_params():
s = serverplayback.ServerPlayback()
s.configure(
options.Options(
server_replay_ignore_content=True,
server_replay_ignore_payload_params=[
"param1", "param2"
]
),
[]
)
# NOTE: parameters are mutually exclusive in options
r = tflow.tflow(resp=True)
r.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
r.request.content = b"paramx=y"
r2 = tflow.tflow(resp=True)
r2.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
r2.request.content = b"paramx=x"
# same parameters
assert s._hash(r) == s._hash(r2)
def test_ignore_payload_params_other_content_type():
s = serverplayback.ServerPlayback()
with taddons.context() as tctx:
tctx.configure(
s,
server_replay_ignore_content=False,
server_replay_ignore_payload_params=[
"param1", "param2"
]
)
r = tflow.tflow(resp=True)
r.request.headers["Content-Type"] = "application/json"
r.request.content = b'{"param1":"1"}'
r2 = tflow.tflow(resp=True)
r2.request.headers["Content-Type"] = "application/json"
r2.request.content = b'{"param1":"1"}'
# same content
assert s._hash(r) == s._hash(r2)
        # distinct content (note only x-www-form-urlencoded payload is analysed)
r2.request.content = b'{"param1":"2"}'
assert not s._hash(r) == s._hash(r2)
def test_hash():
s = serverplayback.ServerPlayback()
s.configure(options.Options(), [])
r = tflow.tflow()
r2 = tflow.tflow()
assert s._hash(r)
assert s._hash(r) == s._hash(r2)
r.request.headers["foo"] = "bar"
assert s._hash(r) == s._hash(r2)
r.request.path = "voing"
assert s._hash(r) != s._hash(r2)
r.request.path = "path?blank_value"
r2.request.path = "path?"
assert s._hash(r) != s._hash(r2)
def test_headers():
s = serverplayback.ServerPlayback()
s.configure(options.Options(server_replay_use_headers=["foo"]), [])
r = tflow.tflow(resp=True)
r.request.headers["foo"] = "bar"
r2 = tflow.tflow(resp=True)
assert not s._hash(r) == s._hash(r2)
r2.request.headers["foo"] = "bar"
assert s._hash(r) == s._hash(r2)
r2.request.headers["oink"] = "bar"
assert s._hash(r) == s._hash(r2)
r = tflow.tflow(resp=True)
r2 = tflow.tflow(resp=True)
assert s._hash(r) == s._hash(r2)
def test_load():
s = serverplayback.ServerPlayback()
s.configure(options.Options(), [])
r = tflow.tflow(resp=True)
r.request.headers["key"] = "one"
r2 = tflow.tflow(resp=True)
r2.request.headers["key"] = "two"
s.load_flows([r, r2])
assert s.count() == 2
n = s.next_flow(r)
assert n.request.headers["key"] == "one"
assert s.count() == 1
n = s.next_flow(r)
assert n.request.headers["key"] == "two"
assert not s.flowmap
assert s.count() == 0
assert not s.next_flow(r)
def test_load_with_server_replay_nopop():
s = serverplayback.ServerPlayback()
s.configure(options.Options(server_replay_nopop=True), [])
r = tflow.tflow(resp=True)
r.request.headers["key"] = "one"
r2 = tflow.tflow(resp=True)
r2.request.headers["key"] = "two"
s.load_flows([r, r2])
assert s.count() == 2
s.next_flow(r)
assert s.count() == 2
def test_ignore_params():
s = serverplayback.ServerPlayback()
s.configure(
options.Options(
server_replay_ignore_params=["param1", "param2"]
),
[]
)
r = tflow.tflow(resp=True)
r.request.path = "/test?param1=1"
r2 = tflow.tflow(resp=True)
r2.request.path = "/test"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param1=2"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param2=1"
assert s._hash(r) == s._hash(r2)
r2.request.path = "/test?param3=2"
assert not s._hash(r) == s._hash(r2)
def thash(r, r2, setter):
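    """Check _hash equality for r and r2 with param1/param2 ignored,
    using setter to populate the request payloads."""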
s = serverplayback.ServerPlayback()
s.configure(
options.Options(
server_replay_ignore_payload_params=["param1", "param2"]
),
[]
)
setter(r, paramx="x", param1="1")
setter(r2, paramx="x", param1="1")
# same parameters
assert s._hash(r) == s._hash(r2)
# ignored parameters !=
setter(r2, paramx="x", param1="2")
assert s._hash(r) == s._hash(r2)
# missing parameter
setter(r2, paramx="x")
assert s._hash(r) == s._hash(r2)
# ignorable parameter added
setter(r2, paramx="x", param1="2")
assert s._hash(r) == s._hash(r2)
# not ignorable parameter changed
setter(r2, paramx="y", param1="1")
assert not s._hash(r) == s._hash(r2)
# not ignorable parameter missing
setter(r2, param1="1")
r2.request.content = b"param1=1"
assert not s._hash(r) == s._hash(r2)
def test_ignore_payload_params():
def urlencode_setter(r, **kwargs):
r.request.content = urllib.parse.urlencode(kwargs).encode()
r = tflow.tflow(resp=True)
r.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
r2 = tflow.tflow(resp=True)
r2.request.headers["Content-Type"] = "application/x-www-form-urlencoded"
thash(r, r2, urlencode_setter)
boundary = 'somefancyboundary'
def multipart_setter(r, **kwargs):
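        # Build a multipart/form-data body by hand from the keyword
        # arguments, using the boundary defined above.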
b = "--{0}\n".format(boundary)
parts = []
for k, v in kwargs.items():
parts.append(
"Content-Disposition: form-data; name=\"%s\"\n\n"
"%s\n" % (k, v)
)
c = b + b.join(parts) + b
r.request.content = c.encode()
r.request.headers["content-type"] = 'multipart/form-data; boundary=' +\
boundary
r = tflow.tflow(resp=True)
r2 = tflow.tflow(resp=True)
thash(r, r2, multipart_setter)
def test_server_playback_full():
s = serverplayback.ServerPlayback()
with taddons.context() as tctx:
tctx.configure(
s,
refresh_server_playback = True,
)
f = tflow.tflow()
f.response = mitmproxy.test.tutils.tresp(content=f.request.content)
s.load_flows([f, f])
tf = tflow.tflow()
assert not tf.response
s.request(tf)
assert tf.response == f.response
tf = tflow.tflow()
tf.request.content = b"gibble"
assert not tf.response
s.request(tf)
assert not tf.response
assert not s.stop
s.tick()
assert not s.stop
tf = tflow.tflow()
s.request(tflow.tflow())
assert s.stop
def test_server_playback_kill():
s = serverplayback.ServerPlayback()
with taddons.context() as tctx:
tctx.configure(
s,
refresh_server_playback = True,
replay_kill_extra=True
)
f = tflow.tflow()
f.response = mitmproxy.test.tutils.tresp(content=f.request.content)
s.load_flows([f])
f = tflow.tflow()
f.request.host = "nonexistent"
tctx.cycle(s, f)
assert f.reply.value == exceptions.Kill
| {
"content_hash": "f7bd8dadf8b2e5365e9ffe64464267ed",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 79,
"avg_line_length": 27.094444444444445,
"alnum_prop": 0.5952429772401067,
"repo_name": "xaxa89/mitmproxy",
"id": "7078b66e35af392762c8ab07d327deaf24637f4d",
"size": "9754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mitmproxy/addons/test_serverplayback.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17714"
},
{
"name": "HTML",
"bytes": "4270"
},
{
"name": "JavaScript",
"bytes": "150625"
},
{
"name": "PowerShell",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1535155"
},
{
"name": "Shell",
"bytes": "3660"
}
],
"symlink_target": ""
} |
"""Global registry for OpDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import op_def_pb2
_registered_ops = {}
def register_op_list(op_list):
"""Register all the ops in an op_def_pb2.OpList."""
if not isinstance(op_list, op_def_pb2.OpList):
raise TypeError("%s is %s, not an op_def_pb2.OpList" %
(op_list, type(op_list)))
for op_def in op_list.op:
if op_def.name in _registered_ops:
assert _registered_ops[op_def.name] == op_def
else:
_registered_ops[op_def.name] = op_def
def get_registered_ops():
"""Returns a dictionary mapping names to OpDefs."""
return _registered_ops
| {
"content_hash": "991e33a3ecdd5c11b6714e66a1230a21",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 58,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.6639566395663956,
"repo_name": "kcartier/tensorflow-toe-in-the-water",
"id": "28e55a47a444909185d74890734c49147a76a50a",
"size": "738",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/op_def_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127901"
},
{
"name": "C++",
"bytes": "4986624"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "638824"
},
{
"name": "Java",
"bytes": "44388"
},
{
"name": "JavaScript",
"bytes": "4876"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "45325"
},
{
"name": "Python",
"bytes": "2494316"
},
{
"name": "Shell",
"bytes": "1714"
},
{
"name": "TypeScript",
"bytes": "238945"
}
],
"symlink_target": ""
} |
import zasim
import sys
ca_init, display_init, histogram_init = sys.argv[1:]
if ca_init == "constructor":
binrule = zasim.ca.BinRule(
rule = 110,
initial = zasim.conf.RandomConfiguration)
# this API must support setting the size manually and then initializing it
# from random data, zeros, ones, whatever as well as initializing it from
# a file
binrule.size = 300
binrule.init()
elif ca_init == "properties":
    binrule = zasim.ca.BinRule()
# several attributes would actually be properties with getters and setters
binrule.rule = 110
binrule.size = (400,)
binrule.init(zasim.conf.RandomConfiguration)
if display_init == "constructor":
binrule_display = zasim.display.HistoryDisplay(
ca=binrule,
lines=300,
scale=2)
elif display_init == "properties":
binrule_display = zasim.display.HistoryDisplay()
binrule_display.ca = binrule
binrule_display.size = (800, 600)
elif display_init == "strange":
# maybe it would be nice if the different ca classes knew what kind of display
# is right for them
binrule_display = binrule.create_display(800, 600)
if histogram_init == "one_zero_distribution":
histogram = zasim.histogram.DistributionDisplay(binrule)
elif histogram_init == "activity_history":
histogram = zasim.histogram.ActivityHistoryDisplay(binrule)
elif histogram_init == "distribution_history":
histogram = zasim.histogram.DistributionHistoryDisplay(binrule)
histogram.show()
# open up the window
binrule_display.show()
# step once
# also update all guis related
binrule.step()
# do ten steps, update the gui as fast as possible
binrule.step(10)
# fast-forward 1000 steps without updating the gui
binrule.fast_step(1000)
# let the gui drive the stepping with delays
binrule_display.animate_steps(steps=1000, delay=0.01)
# why not also let the user say for how long it should animate?
binrule_display.animate_steps(steps=1000, time=10)
# displays should be able to save out images
binrule_display.screenshot("pretty.png")
# cas should be able to snapshot configurations or clone themselves
snap1 = binrule.snapshot()
binrule.step(100)
binrule.restore(snap1)
# maybe something like this is interesting
_snaps = []
def step_hook(ca):
_snaps.append(ca.snapshot())
binrule.step_hook(step_hook)
binrule.step(100)
binrule.step_hook(None)
# comparing configurations of different automatons - as long as they are
# comparable at all - is pretty neat, i suppose.
other_ca = binrule.shallow_clone()
# manipulation of cells should be possible from the gui as well as the shell
other_ca[10:15] = [0, 1, 0, 1, 1]
other_ca.step(100)
binrule.step(100)
# displaying a comparison between two configurations might work like this
diff = other_ca.diff(binrule)
diff_disp = zasim.display.DiffDisplay(diff)
# TODO cellular automaton game of life
# TODO moebius band transformation
# TODO copy arbitrary walls onto one another
# 1. investigate qt speed: 1x1 pixel per cell, 3x3 pixels, images
# 1d automaton with history, 2d automaton (e.g. game of life)
# 2. implement vision
| {
"content_hash": "2a532d0cd31886cacec42d3f37535495",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 82,
"avg_line_length": 30.398058252427184,
"alnum_prop": 0.7304375598850208,
"repo_name": "timo/zasim",
"id": "b1ef79f0f897afce4502489d1369a3a35ec2c6da",
"size": "3131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/vision.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "423165"
},
{
"name": "Shell",
"bytes": "4509"
}
],
"symlink_target": ""
} |
from neutron.tests.tempest.api import base
class BaseRouterTest(base.BaseAdminNetworkTest):
# NOTE(salv-orlando): This class inherits from BaseAdminNetworkTest
# as some router operations, such as enabling or disabling SNAT
# require admin credentials by default
def _cleanup_router(self, router):
self.delete_router(router)
self.routers.remove(router)
def _create_router(self, name, admin_state_up=False,
external_network_id=None, enable_snat=None):
# associate a cleanup with created routers to avoid quota limits
router = self.create_router(name, admin_state_up,
external_network_id, enable_snat)
self.addCleanup(self._cleanup_router, router)
return router
def _delete_router(self, router_id, network_client=None):
client = network_client or self.client
client.delete_router(router_id)
# Asserting that the router is not found in the list
# after deletion
list_body = self.client.list_routers()
routers_list = list()
for router in list_body['routers']:
routers_list.append(router['id'])
self.assertNotIn(router_id, routers_list)
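# Illustrative sketch (not part of the original module): a hypothetical
# concrete test built on BaseRouterTest, assuming the create_router helper
# inherited from base.BaseAdminNetworkTest behaves as it is used above. The
# class and router names are placeholders.
#
#   class RoutersTestExample(BaseRouterTest):
#
#       def test_create_router(self):
#           # deletion is handled by the cleanup registered in _create_router
#           router = self._create_router('example-router',
#                                        admin_state_up=True)
#           self.assertTrue(router['admin_state_up'])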
| {
"content_hash": "e79b61497d566e346448b3f5aa64998b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 72,
"avg_line_length": 41.46666666666667,
"alnum_prop": 0.655144694533762,
"repo_name": "igor-toga/local-snat",
"id": "8b0b5a477051b70151f956c5512214212294be72",
"size": "1880",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/tests/tempest/api/base_routers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9636936"
},
{
"name": "Shell",
"bytes": "14072"
}
],
"symlink_target": ""
} |
import decimal
import datetime
import logging
from pyramid.renderers import JSON
from pyramid.events import NewRequest
logger = logging.getLogger('seth.tenancy')
class ValidationError(Exception):
pass
def _register_resource(config, view, path, *args, **kwargs):
route_name = getattr(view, '__qualname__', view.__name__)
attr = 'dispatch'
renderer = kwargs.pop('renderer', 'json')
web = kwargs.pop('web', False)
config.add_route(route_name, path)
if not web:
config.add_view(
view, route_name=route_name,
attr=attr, *args, renderer=renderer, **kwargs
)
else:
# if this is a web resource we optionally register json renderer
# which renders context as json object
if not renderer == 'json':
config.add_view(
view, route_name=route_name,
attr=attr, *args, renderer=renderer,
accept="text/html", **kwargs
)
config.add_view(
view, route_name=route_name,
attr=attr, *args, renderer='json',
accept="application/json", **kwargs
)
def _register_export(config, view, path, *args, **kwargs):
route_name = getattr(view, '__qualname__', view.__name__)
config.add_route(route_name, path)
config.add_view(
view, route_name=route_name,
attr='get', *args, renderer='json',
**kwargs
)
for renderer in ['pdf', 'csv']:
if path.endswith('/'):
path_ = '{0}{1}/'.format(path, renderer)
else:
path_ = '{0}/{1}/'.format(path, renderer)
route_name_ = "{0}_{1}".format(route_name, renderer)
config.add_route(route_name_, path_)
config.add_view(
view, route_name=route_name_,
attr=renderer, *args, renderer=renderer,
**kwargs
)
def get_adapted_json_renderer():
json_renderer = JSON()
def datetime_adapter(obj, request):
return obj.isoformat()
def decimal_adapter(obj, request):
return str(obj)
json_renderer.add_adapter(datetime.datetime, datetime_adapter)
json_renderer.add_adapter(datetime.date, datetime_adapter)
json_renderer.add_adapter(datetime.time, datetime_adapter)
json_renderer.add_adapter(decimal.Decimal, decimal_adapter)
return json_renderer
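# Illustrative note (an assumption, not from the original module): the adapted
# renderer above is typically registered on a Pyramid Configurator under a
# renderer name so that datetime/date/time and Decimal values serialize
# cleanly, e.g.:
#
#   config.add_renderer('json', get_adapted_json_renderer())
#
# where `config` is assumed to be a pyramid.config.Configurator instance.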
def _register_query_listener(config, engine, threshold=10):
from seth.ext.sa import setup_query_listener
setup_query_listener(engine, threshold)
def _register_tenancy(config, TenantModel):
from seth import db
from seth import tenancy
tenancy.Meta.TenantModel = TenantModel
session = db.get_session()
dialect = session.connection().engine.url.get_dialect()
if dialect.name in tenancy.supported_dialects:
config.add_subscriber(tenancy.set_search_path, NewRequest)
else:
msg = 'Cannot register tenancy. Dialect: {0} is not supported'.format(
dialect.name
)
logger.error(msg)
raise RuntimeError(msg)
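# Illustrative sketch (an assumption, not part of the original module): the
# private _register_* helpers above have the shape of Pyramid directives, so a
# package-level includeme might expose them roughly as follows. The directive
# names are hypothetical.
#
#   def includeme(config):
#       config.add_directive('register_resource', _register_resource)
#       config.add_directive('register_export', _register_export)
#       config.add_directive('register_tenancy', _register_tenancy)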
| {
"content_hash": "6b8972f63054956981dee3355f1b4c6d",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 27.348214285714285,
"alnum_prop": 0.6173685928827947,
"repo_name": "jnosal/seth",
"id": "71007895cc58768979df1eea8bf8de304d900ea7",
"size": "3063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seth/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155107"
}
],
"symlink_target": ""
} |
"""
This is a module that contains the tool for python to interact with our JFrog
artifactory API.
"""
import hashlib
import json
import os
import requests
class JFrogArtifactory(object):
"""
This is a class that holds the interacting method for JFrog Artifactory
"""
def __init__(self, user_cred,
artifactory_loc=
"http://afeossand1.cec.lab.emc.com/artifactory"):
"""
Initialize this artifact interaction class
:param artifactory_loc: the url for artifactory
:param user_cred: the credential that is enough to execute the
work required
:return:
"""
self.__artifactory_base = artifactory_loc
self.__credential = user_cred
self.__session = requests.session()
self.__session.auth = self.__credential
def __del__(self):
"""
Object destroyer, close file IO and HTTP session handler on destroy
:return:
"""
self.__session.close()
def __str__(self):
"""
String representation of this class
:return: "Interface to JFrog artifactory with {url}."
"""
str = "Interface to JFrog artifactory at: {url}".\
format(url=self.__artifactory_base)
return str
def get_package(self, repo_name, package_type, package_name):
"""
Return the packages list with specific package name
:param repo_name: Artifactory Repo name
:param package_type: example, for debian package, it's "deb"
:param package_name: example, on-http
"""
uri = "{uri_base}/api/search/prop?{pkg_type}.name={pkg_name}&repos={repo_name}".format(uri_base=self.__artifactory_base, pkg_type=package_type, pkg_name=package_name,repo_name=repo_name)
response = self.__session.get(uri)
if response.status_code != 200:
print "Did not get a 200 in your request: ", uri
return None
list = response.json()
#print "repo list is:\n{0}".format(list)
return list
def is_version_exist( self, repo_name, package_type, package_name, version_string ):
"""
Check if a version for specific package exist, by checking remote file names
"""
ret_json = self.get_package( repo_name, package_type, package_name )
if ret_json is None:
return False
pkg_list = ret_json['results']
desired_ver = package_name+"_"+version_string # this should align the package file name , instead of the version naming
for p in pkg_list:
if 'uri' in p.keys() :
if desired_ver in str(p['uri']):
return True
return False
def get_repo_list(self):
uri = "{uri_base}/api/repositories".format(uri_base=self.__artifactory_base)
response = self.__session.get(uri)
if response.status_code != 200:
print "Did not get a 200 in your request: ", uri
return None
list = response.json()
#print "repo list is:\n{0}".format(list)
return list
def get_artifactory_url(self):
"""
Getter for artifactory base url
:return: string based artifactory url
"""
return self.__artifactory_base
def repo_exists(self, rname):
"""
Return the existence status of the named repository
:param rname: name of the repo to check
:return: True (if rname exists), False otherwise
"""
repolist = self.get_repo_list();
for repo in repolist:
if 'key' in repo and repo['key'] == rname:
return True
return False
def new_local_repo(self, rname, description, repo_type="debian"):
"""
Creates a local repo at pre-given artifactory
:param rname: repository name
:param description: description of the artifactory
:param repo_type: optional -- the type of artifactory
default to debian
:return: return response instance
raise and return any other errors if encounters
"""
dict_artifact_config = {
"key": rname,
"rclass": "local",
"packageType": repo_type,
"description": description,
"enableDebianSupport": True,
"snapshotVersionBehavior": "unique",
"propertySets":["artifactory"]
}
uri = "{uri_base}/api/repositories/{repo_name}".format(
uri_base=self.__artifactory_base, repo_name=rname)
print "Trying to PUT\n{data}\nto\n{uri}.\n". \
format(data=json.dumps(dict_artifact_config), uri=uri)
try:
response = self.__session.put(
uri,
data=json.dumps(dict_artifact_config),
headers={"Content-Type": "application/json"}
)
if response.status_code != 200:
print "Did not get a 200 in your request: "
finally:
print "Successfully created new repo at artifactory."
return response
def upload_one_file(self, file_path, repository, dir_path,distribution, component, architecture ):
"""
This function uploads one file to target repository in artifactory.
:param file_path: The path to the file to be uploaded
:param repository: The repository folder name that the file
will be uploaded to
:param dir_path: The directory path that will have in artifactory
repository
:return: instance of response
"""
if os.path.exists(file_path):
file_name = os.path.basename(file_path)
else:
raise ValueError("The file path provided\n\t{path}\n"
"is not a file.".format(path=file_path))
url = "{uri_base}/{rname}/{dir_path}/{fname}"\
.format(uri_base=self.__artifactory_base, rname=repository,
dir_path=dir_path, fname=file_name)
# Only let debians have metadata
if file_path.endswith(".deb"):
url += ";deb.distribution={dist};deb.component={comp};" \
"deb.architecture={arch};".format(dist=distribution, comp=component, arch=architecture)
print "Trying to PUT\n{data}\n\tto\n{uri}".format(
data=file_path, uri=url)
try:
with open(file_path, 'rb') as fp:
file_data = fp.read()
finally:
fp.close()
response = self.__session.put(url, file_data)
if response.status_code != 201:
print "Did not get a 201 (Successfully Created) in upload request: "
return response
# There is successfully created code returned, verify the hashcodes
res_content = response.json()
md5 = hashlib.md5(file_data).hexdigest()
sha1 = hashlib.sha1(file_data).hexdigest()
if res_content['checksums']['md5'] != md5 or \
res_content['checksums']['sha1'] != sha1:
raise ValueError(
'Upload failure, the md5 or sha1 code returned'
' does not match the local version.')
else:
print "{file} is uploaded successfully.".format(file=file_name)
return response
def remove_repository(self, repo_name):
"""
remove all the contents under repository.
:param repo_name: the repository that will be deleted
:return:
instance of response
Raise any exceptions if encountered
"""
url = "{base}/api/repositories/{repo}"\
.format(base=self.__artifactory_base, repo=repo_name)
response = self.__session.delete(url)
if response.status_code == 200:
print "Repository {repo} deleted successfully."\
.format(repo=repo_name)
else:
print "Did not delete the repository successfully."
return response
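# Illustrative usage sketch (an assumption; the credentials, repository name
# and package path below are placeholders, not taken from the original code):
#
#   jfrog = JFrogArtifactory(("user", "password"))
#   if not jfrog.repo_exists("deb-local-example"):
#       jfrog.new_local_repo("deb-local-example", "example debian repo")
#   jfrog.upload_one_file("./on-http_1.0.0_amd64.deb", "deb-local-example",
#                         "pool/o", "trusty", "main", "amd64")
#   print jfrog.is_version_exist("deb-local-example", "deb", "on-http", "1.0.0")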
| {
"content_hash": "c809671eb4fee8419ba03e823e8d1b94",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 194,
"avg_line_length": 34.61802575107296,
"alnum_prop": 0.5771138110587651,
"repo_name": "sunnyqianzhang/on-build-config",
"id": "c20396215f29c822928f207c704ad1307c971faf",
"size": "8066",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "build-release-tools/lib/ArtifactoryTools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "83363"
},
{
"name": "Python",
"bytes": "312494"
},
{
"name": "Ruby",
"bytes": "3801"
},
{
"name": "Shell",
"bytes": "146040"
},
{
"name": "XSLT",
"bytes": "1843"
}
],
"symlink_target": ""
} |
import warnings
from sympy.matrices.expressions import Identity
from ..utils import PyDyUserWarning
from .visualization_frame import VisualizationFrame
__all__ = ['PerspectiveCamera', 'OrthoGraphicCamera']
warnings.simplefilter('once', PyDyUserWarning)
class PerspectiveCamera(VisualizationFrame):
"""Creates a Perspective Camera for visualization. The camera is
inherited from VisualizationFrame. It can be attached to dynamics
objects, hence we can get a moving camera. All the transformation matrix
generation methods are applicable to a Perspective Camera.
Like VisualizationFrame, it can also be initialized using:
1. Rigidbody
2. ReferenceFrame, Point
3. ReferenceFrame, Particle
Either one of these must be supplied during initialization. Unlike
VisualizationFrame, it does not require a Shape argument.
Parameters
==========
name : str
A name for the PerspectiveCamera(optional). Default is 'unnamed'
fov : float, default=45.0
Field Of View, It determines the angle between the top and bottom of
the viewable area (in degrees).
    near : float, default=1.0
        The distance of the near plane of the PerspectiveCamera. All objects
        closer to this distance are not displayed.
    far : float, default=1000.0
        The distance of the far plane of the PerspectiveCamera. All objects
        farther than this distance are not displayed.
"""
def __init__(self, *args, **kwargs):
"""Initialises a PerspectiveCamera object. To initialize a
visualization frame, one needs to supply a name (optional), a
reference frame, a point, field of view (fov) (optional), near plane
distance (optional) and far plane distance (optional).
Examples
========
        >>> from pydy.viz import PerspectiveCamera, Shape
>>> from sympy.physics.mechanics import (ReferenceFrame, Point,
... RigidBody, Particle,
... inertia)
>>> from sympy import symbols
>>> I = ReferenceFrame('I')
>>> O = Point('O')
>>> shape = Shape()
>>> # initializing with reference frame, point
>>> camera1 = PerspectiveCamera('frame1', I, O)
>>> Ixx, Iyy, Izz, mass = symbols('Ixx Iyy Izz mass')
>>> i = inertia(I, Ixx, Iyy, Izz)
        >>> rbody = RigidBody('rbody', O, I, mass, (i, O))
>>> # Initializing with a rigidbody ..
>>> camera2 = PerspectiveCamera('frame2', rbody)
>>> Pa = Particle('Pa', O, mass)
>>> # initializing with Particle, reference_frame ...
>>> camera3 = PerspectiveCamera('frame3', I, Pa)
"""
msg = ("Rotation of Perspective Camera does not work "
"properly in the visualiser.")
warnings.warn(msg, PyDyUserWarning)
try:
self._fov = kwargs['fov']
except KeyError:
self._fov = 45.0
try:
self._near = kwargs['near']
except KeyError:
self._near = 1.0
try:
self._far = kwargs['far']
except KeyError:
self._far = 1000.0
# Now we use same approach as in VisualizationFrame for setting
# reference_frame and origin
i = 0
# If first arg is not str, name the visualization frame 'unnamed'
if isinstance(args[i], str):
self._name = args[i]
i += 1
else:
self._name = 'unnamed'
try:
self._reference_frame = args[i].get_frame()
self._origin = args[i].get_masscenter()
except AttributeError:
# It is not a rigidbody, hence this arg should be a reference
# frame
try:
# TODO : dcm is never used.
dcm = args[i]._dcm_dict
self._reference_frame = args[i]
i += 1
except AttributeError:
raise TypeError('A ReferenceFrame is to be supplied '
'before a Particle/Point.')
# Now next arg can either be a Particle or point
try:
self._origin = args[i].get_point()
except AttributeError:
self._origin = args[i]
# basic thing required, transform matrix
self._transform = Identity(4).as_mutable()
def __str__(self):
return 'PerspectiveCamera: ' + self._name
def __repr__(self):
return 'PerspectiveCamera'
@property
def fov(self):
"""
attribute for Field Of view of a PerspectiveCamera.
Default value is 45 degrees
"""
return self._fov
@fov.setter
def fov(self, new_fov):
        if not isinstance(new_fov, (int, float)):
raise TypeError('fov should be supplied in int or float')
else:
self._fov = new_fov
@property
def near(self):
"""
attribute for Near Plane distance of a PerspectiveCamera.
Default value is 1
"""
return self._near
@near.setter
def near(self, new_near):
        if not isinstance(new_near, (int, float)):
raise TypeError('near should be supplied in int or float')
else:
self._near = new_near
@property
def far(self):
"""
Attribute for Far Plane distance of a PerspectiveCamera. The default
value is ``1000.0``.
"""
return self._far
@far.setter
def far(self, new_far):
        if not isinstance(new_far, (int, float)):
raise TypeError('far should be supplied in int or float')
else:
self._far = new_far
def generate_scene_dict(self):
"""This method generates information for a static visualization in
the initial conditions, in the form of dictionary. This contains
camera parameters followed by an init_orientation Key.
Before calling this method, all the transformation matrix generation
methods should be called, or it will give an error.
Returns
=======
A dict with following Keys:
1. name: name for the camera
2. fov: Field of View value of the camera
3. near: near value of the camera
4. far: far value of the camera
5. init_orientation: Initial orientation of the camera
"""
scene_dict = {id(self): {}}
scene_dict[id(self)]['name'] = self.name
scene_dict[id(self)]['type'] = self.__repr__()
scene_dict[id(self)]['fov'] = self.fov
scene_dict[id(self)]['near'] = self.near
scene_dict[id(self)]['far'] = self.far
scene_dict[id(self)]["simulation_id"] = id(self)
scene_dict[id(self)]["init_orientation"] = self._visualization_matrix[0]
return scene_dict
def generate_simulation_dict(self):
"""Generates the simulation information for this visualization
frame. It maps the simulation data information to the scene
information via a unique id.
Before calling this method, all the transformation matrix generation
methods should be called, or it will give an error.
Returns
=======
simulation_dict : dictionary
A dictionary containing list of 4x4 matrices mapped to the
unique id as the key.
"""
simulation_dict = {}
try:
simulation_dict[id(self)] = self._visualization_matrix
except:
raise RuntimeError("Please call the numerical "
"transformation methods, "
"before generating visualization dict.")
return simulation_dict
class OrthoGraphicCamera(VisualizationFrame):
"""Creates a OrthoGraphic Camera for visualization. The camera is
inherited from ``VisualizationFrame``. It can be attached to dynamics
objects, hence we can get a moving camera. All the transformation matrix
generation methods are applicable to a Perspective Camera.
Like VisualizationFrame, it can also be initialized using:
    1. Rigidbody
2. ReferenceFrame, Point
3. ReferenceFrame, Particle
Either one of these must be supplied during initialization. Unlike
    VisualizationFrame, it doesn't require a Shape argument.
Parameters
==========
name : str, optional, default='unnamed'
        A name for the OrthoGraphicCamera.
    near : float, optional, default=1.0
        The distance of the near plane of the OrthoGraphicCamera. All objects
        closer to this distance are not displayed.
    far : float, optional, default=1000.0
        The distance of the far plane of the OrthoGraphicCamera. All objects
        farther than this distance are not displayed.
"""
def __init__(self, *args, **kwargs):
"""
Initialises an OrthoGraphicCamera object. To initialize a
visualization frame, we need to supply a name (optional), a
reference frame, a point, near plane distance (optional) and far
plane distance (optional).
Examples
========
        >>> from pydy.viz import OrthoGraphicCamera, Shape
>>> from sympy.physics.mechanics import (ReferenceFrame, Point,
... RigidBody, Particle,
... inertia)
>>> from sympy import symbols
>>> I = ReferenceFrame('I')
>>> O = Point('O')
>>> shape = Shape()
>>> # Initializing with ReferenceFrame, Point
>>> camera1 = OrthoGraphicCamera('frame1', I, O)
>>> Ixx, Iyy, Izz, mass = symbols('Ixx Iyy Izz mass')
>>> i = inertia(I, Ixx, Iyy, Izz)
        >>> rbody = RigidBody('rbody', O, I, mass, (i, O))
>>> # Initializing with a Rigidbody
>>> camera2 = OrthoGraphicCamera('frame2', rbody)
>>> Pa = Particle('Pa', O, mass)
>>> # Initializing with Particle, ReferenceFrame
>>> camera3 = OrthoGraphicCamera('frame3', I, Pa)
"""
try:
self._near = kwargs['near']
except KeyError:
self._near = 1
try:
self._far = kwargs['far']
except KeyError:
self._far = 1000
# Now we use same approach as in VisualizationFrame for setting
# reference_frame and origin
i = 0
# If first arg is not str, name the visualization frame 'unnamed'
if isinstance(args[i], str):
self._name = args[i]
i += 1
else:
self._name = 'unnamed'
try:
self._reference_frame = args[i].get_frame()
self._origin = args[i].get_masscenter()
except AttributeError:
# It is not a rigidbody, hence this arg should be a reference
# frame.
self._reference_frame = args[i]
i += 1
# Now next arg can either be a Particle or point
try:
self._origin = args[i].get_point()
except AttributeError:
self._origin = args[i]
# basic thing required, transform matrix
self._transform = Identity(4).as_mutable()
def __str__(self):
return 'OrthoGraphicCamera: ' + self._name
def __repr__(self):
return 'OrthoGraphicCamera'
@property
def near(self):
"""Attribute for Near Plane distance of an OrthoGraphicCamera.
Default value is 1.0
"""
return self._near
@near.setter
def near(self, new_near):
        if not isinstance(new_near, (int, float)):
raise TypeError('near should be supplied in int or float')
else:
self._near = new_near
@property
def far(self):
"""Attribute for Far Plane distance of an OrthoGraphicCamera.
Default value is 1000.0.
"""
return self._far
@far.setter
def far(self, new_far):
        if not isinstance(new_far, (int, float)):
raise TypeError('far should be supplied in int or float')
else:
self._far = new_far
def generate_scene_dict(self):
"""
This method generates information for a static visualization in the
initial conditions, in the form of dictionary. This contains camera
parameters followed by an init_orientation Key.
Returns
=======
scene_dict : dictionary
A dict with following Keys:
1. name: name for the camera
2. near: near value of the camera
3. far: far value of the camera
4. init_orientation: Initial orientation of the camera
"""
scene_dict = {id(self): {}}
scene_dict[id(self)]['name'] = self.name
scene_dict[id(self)]['type'] = self.__repr__()
scene_dict[id(self)]['near'] = self.near
scene_dict[id(self)]['far'] = self.far
scene_dict[id(self)]["simulation_id"] = id(self)
scene_dict[id(self)]["init_orientation"] = self._visualization_matrix[0]
return scene_dict
def generate_simulation_dict(self):
"""Generates the simulation information for this visualization
frame. It maps the simulation data information to the scene
information via a unique id.
Returns
=======
A dictionary containing list of 4x4 matrices mapped to the unique id
as the key.
"""
simulation_dict = {}
try:
simulation_dict[id(self)] = self._visualization_matrix
except:
raise RuntimeError("Please call the numerical ",
"transformation methods, ",
"before generating visualization dict")
return simulation_dict
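# Illustrative note (not part of the original module): once the transformation
# matrices have been generated, generate_scene_dict() returns a mapping keyed
# by id(self); for a PerspectiveCamera the entry has roughly this shape (the
# id and matrix values below are placeholders):
#
#   {140234567890: {'name': 'unnamed',
#                   'type': 'PerspectiveCamera',
#                   'fov': 45.0,
#                   'near': 1.0,
#                   'far': 1000.0,
#                   'simulation_id': 140234567890,
#                   'init_orientation': [1, 0, 0, 0,
#                                        0, 1, 0, 0,
#                                        0, 0, 1, 0,
#                                        0, 0, 0, 1]}}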
| {
"content_hash": "5afef5f7d5b88478e7877f9ea97ee988",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 80,
"avg_line_length": 33.86829268292683,
"alnum_prop": 0.5774161025493303,
"repo_name": "Shekharrajak/pydy",
"id": "8eb86b71515766d800b2893d598cd3b83d5ec6e3",
"size": "13886",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pydy/viz/camera.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "102"
},
{
"name": "CSS",
"bytes": "14810"
},
{
"name": "HTML",
"bytes": "15405"
},
{
"name": "JavaScript",
"bytes": "49934"
},
{
"name": "Python",
"bytes": "279080"
},
{
"name": "Shell",
"bytes": "76"
}
],
"symlink_target": ""
} |
"""MetaGraph and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os.path
import re
from collections import OrderedDict, deque
import six
from google.protobuf.any_pb2 import Any
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import versions
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
# Prefix to be added to unbound input names so they are easily identifiable.
_UNBOUND_INPUT_PREFIX = "$unbound_inputs_"
def _node_def(from_node_def, export_scope, unbound_inputs, clear_devices=False):
"""Create a `NodeDef` proto with export_scope stripped.
Args:
from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
export_scope: A `string` representing the name scope to remove.
unbound_inputs: An array of unbound input names if they exist.
clear_devices: Boolean which controls whether to clear device information
from node_def. Default false.
Returns:
A `node_def_pb2.NodeDef` protocol buffer.
"""
node_def = copy.deepcopy(from_node_def)
for i, v in enumerate(node_def.input):
if (export_scope and
not node_def.input[i].lstrip("^").startswith(export_scope)):
# Adds "$unbound_inputs_" prefix to the unbound name so they are easily
# identifiable.
node_def.input[i] = re.sub(r"([\^]|^)(.*)",
r"\1" + _UNBOUND_INPUT_PREFIX + r"\2",
compat.as_str(v))
unbound_inputs.append(node_def.input[i])
else:
node_def.input[i] = ops.strip_name_scope(v, export_scope)
node_def.name = compat.as_bytes(
ops.strip_name_scope(from_node_def.name, export_scope))
for k, v in six.iteritems(from_node_def.attr):
if k == "_class":
new_s = [compat.as_bytes(
ops.strip_name_scope(s, export_scope)) for s in v.list.s
if not export_scope or
compat.as_str(s).split("@")[1].startswith(export_scope)]
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
else:
node_def.attr[k].CopyFrom(v)
if clear_devices:
node_def.device = ""
return node_def
def _read_file(filename):
"""Reads a file containing `GraphDef` and returns the protocol buffer.
Args:
filename: `graph_def` filename including the path.
Returns:
A `GraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
graph_def = graph_pb2.GraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
graph_def.ParseFromString(file_content)
return graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content, graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return graph_def
def ops_used_by_graph_def(graph_def):
"""Collect the list of ops used by a graph.
Does not validate that the ops are all registered.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
A list of strings, each naming an op used by the graph.
"""
# Map function names to definitions
name_to_function = {}
for fun in graph_def.library.function:
name_to_function[fun.signature.name] = fun
# Collect the list of op names. Since functions can reference functions, we
# need a recursive traversal.
used_ops = set() # Includes both primitive ops and functions
functions_to_process = [] # A subset of used_ops
def mark_op_as_used(op):
if op not in used_ops and op in name_to_function:
functions_to_process.append(name_to_function[op])
used_ops.add(op)
for node in graph_def.node:
mark_op_as_used(node.op)
while functions_to_process:
fun = functions_to_process.pop()
for node in fun.node_def:
mark_op_as_used(node.op)
return [op for op in used_ops if op not in name_to_function]
def stripped_op_list_for_graph(graph_def):
"""Collect the stripped OpDefs for ops used by a graph.
This function computes the `stripped_op_list` field of `MetaGraphDef` and
similar protos. The result can be communicated from the producer to the
consumer, which can then use the C++ function
`RemoveNewDefaultAttrsFromGraphDef` to improve forwards compatibility.
Args:
graph_def: A `GraphDef` proto, as from `graph.as_graph_def()`.
Returns:
An `OpList` of ops used by the graph.
Raises:
ValueError: If an unregistered op is used.
"""
# This is the Python equivalent of StrippedOpListForGraph in C++.
# Unfortunately, since the Python op registry can differ from that in C++, we
# can't remove the duplication using swig (at least naively).
# TODO(irving): Support taking graphs directly.
used_ops = ops_used_by_graph_def(graph_def)
# Verify that all used ops are registered.
registered_ops = op_def_registry.get_registered_ops()
# These internal ops used by functions are not registered, so we need to
# whitelist them. # TODO(irving): Do something better here.
op_whitelist = ("_Arg", "_Retval", "_ListToArray", "_ArrayToList")
for op in used_ops:
if op not in registered_ops and op not in op_whitelist:
raise ValueError("Op %s is used by the graph, but is not registered" % op)
# Build the stripped op list in sorted order
return op_def_pb2.OpList(op=[registered_ops[op] for op in sorted(used_ops)
if op in registered_ops])
def _get_kind_name(item):
"""Returns the kind name in CollectionDef.
Args:
item: A data item.
Returns:
The string representation of the kind in CollectionDef.
"""
if isinstance(item, (six.string_types, six.binary_type)):
kind = "bytes_list"
elif isinstance(item, six.integer_types):
kind = "int64_list"
elif isinstance(item, float):
kind = "float_list"
elif isinstance(item, Any):
kind = "any_list"
else:
kind = "node_list"
return kind
def _should_include_node(node_or_node_name, export_scope):
"""Returns `True` if a node should be included.
Args:
node_or_node_name: A node or `string` node name.
export_scope: `string`. Name scope under which to extract the subgraph. The
      scope name will be stripped from the node definitions for easy import later
into new name scopes.
Returns:
`True` if the node should be included.
"""
if not isinstance(node_or_node_name, six.string_types):
try:
node_name = node_or_node_name.name
except AttributeError:
# Keep the object that we don't know how to process.
return True
else:
node_name = node_or_node_name
return (node_name.startswith(_UNBOUND_INPUT_PREFIX) or
(not export_scope or node_name.startswith(export_scope)))
def add_collection_def(meta_graph_def, key, graph=None,
export_scope=None):
"""Adds a collection to MetaGraphDef protocol buffer.
Args:
meta_graph_def: MetaGraphDef protocol buffer.
key: One of the GraphKeys or user-defined string.
graph: The `Graph` from which to get collections.
export_scope: Optional `string`. Name scope to remove.
"""
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if not isinstance(key, six.string_types) and not isinstance(key, bytes):
logging.warning("Only collections with string type keys will be "
"serialized. This key has %s", type(key))
return
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
collection_list = graph.get_collection(key)
# Remove nodes that should not be exported from the collection list.
collection_list = [x for x in collection_list if
_should_include_node(x, export_scope)]
if not collection_list:
return
try:
col_def = meta_graph_def.collection_def[key]
to_proto = ops.get_to_proto_function(key)
proto_type = ops.get_collection_proto_type(key)
if to_proto:
kind = "bytes_list"
for x in collection_list:
# Additional type check to make sure the returned proto is indeed
# what we expect.
proto = to_proto(x, export_scope=export_scope)
if proto:
assert isinstance(proto, proto_type)
getattr(col_def, kind).value.append(proto.SerializeToString())
else:
kind = _get_kind_name(collection_list[0])
if kind == "node_list":
for x in collection_list:
if not export_scope or x.name.startswith(export_scope):
getattr(col_def, kind).value.append(
ops.strip_name_scope(x.name, export_scope))
elif kind == "bytes_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python3 distinguishes between bytes and strings.
getattr(col_def, kind).value.extend(
[compat.as_bytes(x) for x in collection_list])
else:
getattr(col_def, kind).value.extend([x for x in collection_list])
except Exception as e: # pylint: disable=broad-except
logging.warning("Error encountered when serializing %s.\n"
"Type is unsupported, or the types of the items don't "
"match field type in CollectionDef.\n%s", key, str(e))
if key in meta_graph_def.collection_def:
del meta_graph_def.collection_def[key]
return
def create_meta_graph_def(meta_info_def=None,
graph_def=None,
saver_def=None,
add_collections=True,
collection_list=None,
graph=None,
export_scope=None):
"""Construct and returns a `MetaGraphDef` protocol buffer.
Args:
meta_info_def: `MetaInfoDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
    saver_def: `SaverDef` protocol buffer.
    add_collections: Whether to add the graph's collections to the exported
      `MetaGraphDef`. Defaults to `True`.
collection_list: List of string keys to collect.
graph: The `Graph` to create `MetaGraphDef` out of.
export_scope: Optional `string`. Name scope to remove.
Returns:
MetaGraphDef protocol buffer.
Raises:
TypeError: If the arguments are not of the correct proto buffer type.
"""
# Type check.
if graph and not isinstance(graph, ops.Graph):
raise TypeError("graph must be of type Graph, not %s", type(graph))
if meta_info_def and not isinstance(meta_info_def,
meta_graph_pb2.MetaGraphDef.MetaInfoDef):
raise TypeError("meta_info_def must be of type MetaInfoDef, not %s",
type(meta_info_def))
if graph_def and not isinstance(graph_def, graph_pb2.GraphDef):
raise TypeError("graph_def must be of type GraphDef, not %s",
type(graph_def))
if saver_def and not isinstance(saver_def, saver_pb2.SaverDef):
raise TypeError("saver_def must be of type SaverDef, not %s",
type(saver_def))
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Creates a MetaGraphDef proto.
meta_graph_def = meta_graph_pb2.MetaGraphDef()
# Adds meta_info_def.
if not meta_info_def:
meta_info_def = meta_graph_pb2.MetaGraphDef.MetaInfoDef()
# Set the tf version strings to the current tf build.
meta_info_def.tensorflow_version = versions.__version__
meta_info_def.tensorflow_git_version = versions.__git_version__
meta_graph_def.meta_info_def.MergeFrom(meta_info_def)
# Adds graph_def or the default.
if not graph_def:
meta_graph_def.graph_def.MergeFrom(graph.as_graph_def(add_shapes=True))
else:
meta_graph_def.graph_def.MergeFrom(graph_def)
# Fills in meta_info_def.stripped_op_list using the ops from graph_def.
# pylint: disable=g-explicit-length-test
if len(meta_graph_def.meta_info_def.stripped_op_list.op) == 0:
meta_graph_def.meta_info_def.stripped_op_list.MergeFrom(
stripped_op_list_for_graph(meta_graph_def.graph_def))
# pylint: enable=g-explicit-length-test
# Adds saver_def.
if saver_def:
meta_graph_def.saver_def.MergeFrom(saver_def)
# Adds collection_list.
if add_collections:
if collection_list:
clist = collection_list
else:
clist = graph.get_all_collection_keys()
for ctype in clist:
add_collection_def(meta_graph_def, ctype,
graph=graph,
export_scope=export_scope)
return meta_graph_def
def read_meta_graph_file(filename):
"""Reads a file containing `MetaGraphDef` and returns the protocol buffer.
Args:
filename: `meta_graph_def` filename including the path.
Returns:
A `MetaGraphDef` protocol buffer.
Raises:
IOError: If the file doesn't exist, or cannot be successfully parsed.
"""
meta_graph_def = meta_graph_pb2.MetaGraphDef()
if not file_io.file_exists(filename):
raise IOError("File %s does not exist." % filename)
# First try to read it as a binary file.
file_content = file_io.FileIO(filename, "rb").read()
try:
meta_graph_def.ParseFromString(file_content)
return meta_graph_def
except Exception: # pylint: disable=broad-except
pass
# Next try to read it as a text file.
try:
text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
except text_format.ParseError as e:
raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
return meta_graph_def
def import_scoped_meta_graph(meta_graph_or_file,
clear_devices=False,
graph=None,
import_scope=None,
input_map=None,
unbound_inputs_col_name="unbound_inputs"):
"""Recreates a`Graph` saved in a `MetaGraphDef` proto.
This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
it constructs a protocol buffer from the file content. The function
then adds all the nodes from the `graph_def` field to the
current graph, recreates all the collections, and returns a saver
constructed from the `saver_def` field.
In combination with `export_scoped_meta_graph()`, this function can be used to
* Serialize a graph along with other Python objects such as `QueueRunner`,
`Variable` into a `MetaGraphDef`.
* Restart training from a saved graph and checkpoints.
* Run inference from a saved graph and checkpoints.
Args:
meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
the path) containing a `MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
from graph_def. Default false.
graph: The `Graph` to import into. If `None`, use the default graph.
import_scope: Optional `string`. Name scope into which to import the
subgraph. If `None`, the graph is imported to the root name scope.
input_map: A dictionary mapping input names (as strings) in `graph_def` to
`Tensor` objects. The values of the named input tensors in the imported
graph will be re-mapped to the respective `Tensor` values.
unbound_inputs_col_name: Collection name for looking up unbound inputs.
Returns:
A dictionary of all the `Variables` imported into the name scope.
Raises:
ValueError: If the graph_def contains unbound inputs.
"""
if isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
meta_graph_def = meta_graph_or_file
else:
meta_graph_def = read_meta_graph_file(meta_graph_or_file)
if unbound_inputs_col_name:
for key, col_def in meta_graph_def.collection_def.items():
if key == unbound_inputs_col_name:
kind = col_def.WhichOneof("kind")
field = getattr(col_def, kind)
if field.value and (
not input_map or
sorted(set([compat.as_str(v) for v in field.value])) !=
sorted(input_map)):
raise ValueError("Graph contains unbound inputs: %s. Must "
"provide these inputs through input_map." %
",".join([compat.as_str(v) for v in field.value
if not input_map or v not in input_map]))
break
# Sets graph to default graph if it's not passed in.
graph = graph or ops.get_default_graph()
# Gathers the list of nodes we are interested in.
with graph.as_default():
producer_op_list = None
if meta_graph_def.meta_info_def.HasField("stripped_op_list"):
producer_op_list = meta_graph_def.meta_info_def.stripped_op_list
input_graph_def = meta_graph_def.graph_def
# Remove all the explicit device specifications for this node. This helps to
# make the graph more portable.
if clear_devices:
for node in input_graph_def.node:
node.device = ""
importer.import_graph_def(
input_graph_def, name=(import_scope or ""), input_map=input_map,
producer_op_list=producer_op_list)
# Restores all the other collections.
for key, col_def in meta_graph_def.collection_def.items():
# Don't add unbound_inputs to the new graph.
if key == unbound_inputs_col_name:
continue
kind = col_def.WhichOneof("kind")
if kind is None:
logging.error("Cannot identify data type for collection %s. Skipping.",
key)
continue
from_proto = ops.get_from_proto_function(key)
if from_proto:
assert kind == "bytes_list"
proto_type = ops.get_collection_proto_type(key)
for value in col_def.bytes_list.value:
proto = proto_type()
proto.ParseFromString(value)
graph.add_to_collection(
key, from_proto(proto, import_scope=import_scope))
else:
field = getattr(col_def, kind)
if kind == "node_list":
for value in field.value:
col_op = graph.as_graph_element(
ops.prepend_name_scope(value, import_scope))
graph.add_to_collection(key, col_op)
elif kind == "int64_list":
# NOTE(opensource): This force conversion is to work around the fact
# that Python2 distinguishes between int and long, while Python3 has
# only int.
for value in field.value:
graph.add_to_collection(key, int(value))
else:
for value in field.value:
graph.add_to_collection(
key, ops.prepend_name_scope(value, import_scope))
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=import_scope)
for v in variables:
var_list[ops.strip_name_scope(v.name, import_scope)] = v
return var_list
def export_scoped_meta_graph(filename=None,
graph_def=None,
graph=None,
export_scope=None,
as_text=False,
unbound_inputs_col_name="unbound_inputs",
clear_devices=False,
**kwargs):
"""Returns `MetaGraphDef` proto. Optionally writes it to filename.
This function exports the graph, saver, and collection objects into
`MetaGraphDef` protocol buffer with the intention of it being imported
at a later time or location to restart training, run inference, or be
a subgraph.
Args:
filename: Optional filename including the path for writing the
generated `MetaGraphDef` protocol buffer.
graph_def: `GraphDef` protocol buffer.
graph: The `Graph` to import into. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
for easy import later into new name scopes. If `None`, the whole graph
is exported. graph_def and export_scope cannot both be specified.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
unbound_inputs_col_name: Optional `string`. If provided, a string collection
with the given name will be added to the returned `MetaGraphDef`,
containing the names of tensors that must be remapped when importing the
`MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
before exporting the graph.
**kwargs: Optional keyed arguments, including meta_info_def,
saver_def, collection_list.
Returns:
A `MetaGraphDef` proto and dictionary of `Variables` in the exported
name scope.
Raises:
ValueError: When the `GraphDef` is larger than 2GB.
"""
graph = graph or ops.get_default_graph()
unbound_inputs = []
if export_scope or clear_devices:
if graph_def:
new_graph_def = graph_pb2.GraphDef()
new_graph_def.versions.CopyFrom(graph_def.versions)
for node_def in graph_def.node:
if _should_include_node(node_def.name, export_scope):
new_node_def = _node_def(node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
new_graph_def.node.extend([new_node_def])
graph_def = new_graph_def
else:
# Only do this complicated work if we want to remove a name scope.
graph_def = graph_pb2.GraphDef()
# pylint: disable=protected-access
graph_def.versions.CopyFrom(graph.graph_def_versions)
bytesize = 0
for key in sorted(graph._nodes_by_id):
if _should_include_node(graph._nodes_by_id[key].name, export_scope):
value = graph._nodes_by_id[key]
# pylint: enable=protected-access
node_def = _node_def(value.node_def, export_scope, unbound_inputs,
clear_devices=clear_devices)
graph_def.node.extend([node_def])
if value.outputs:
assert "_output_shapes" not in graph_def.node[-1].attr
graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
output.get_shape().as_proto() for output in value.outputs])
bytesize += value.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
# It's possible that not all the inputs are in the export_scope.
# If we would like such information included in the exported meta_graph,
# add them to a special unbound_inputs collection.
if unbound_inputs_col_name:
# Clears the unbound_inputs collections.
graph.clear_collection(unbound_inputs_col_name)
for k in unbound_inputs:
graph.add_to_collection(unbound_inputs_col_name, k)
var_list = {}
variables = graph.get_collection(ops.GraphKeys.GLOBAL_VARIABLES,
scope=export_scope)
for v in variables:
if _should_include_node(v, export_scope):
var_list[ops.strip_name_scope(v.name, export_scope)] = v
scoped_meta_graph_def = create_meta_graph_def(
graph_def=graph_def,
graph=graph,
export_scope=export_scope,
**kwargs)
if filename:
graph_io.write_graph(
scoped_meta_graph_def,
os.path.dirname(filename),
os.path.basename(filename),
as_text=as_text)
return scoped_meta_graph_def, var_list
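# Illustrative sketch (an assumption, not part of the original module): a
# typical round trip with the two helpers above, assuming the variables were
# built under the name scope 'model' and that `tf` is the top-level
# tensorflow package. The filename is a placeholder.
#
#   meta_graph_def, var_list = export_scoped_meta_graph(
#       filename='/tmp/model.meta', export_scope='model')
#
#   with tf.Graph().as_default():
#     imported_vars = import_scoped_meta_graph('/tmp/model.meta',
#                                              import_scope='new_model')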
def copy_scoped_meta_graph(from_scope, to_scope,
from_graph=None, to_graph=None):
"""Copies a sub-meta_graph from one scope to another.
Args:
from_scope: `String` name scope containing the subgraph to be copied.
to_scope: `String` name scope under which the copied subgraph will reside.
from_graph: Optional `Graph` from which to copy the subgraph. If `None`, the
      default graph is used.
to_graph: Optional `Graph` to which to copy the subgraph. If `None`, the
default graph is used.
Returns:
A dictionary of `Variables` that has been copied into `to_scope`.
Raises:
ValueError: If `from_scope` and `to_scope` are the same while
`from_graph` and `to_graph` are also the same.
"""
from_graph = from_graph or ops.get_default_graph()
to_graph = to_graph or ops.get_default_graph()
if from_graph == to_graph and from_scope == to_scope:
raise ValueError("'from_scope' and 'to_scope' need to be different "
"when performing copy in the same graph.")
orig_meta_graph, var_list = export_scoped_meta_graph(
export_scope=from_scope, graph=from_graph)
var_list = import_scoped_meta_graph(orig_meta_graph,
graph=to_graph,
import_scope=to_scope)
return var_list
def _unbound_name(name):
return re.sub(r"([\^]|loc:@|^)(.*?)", r"\1" + _UNBOUND_INPUT_PREFIX + r"\2",
compat.as_str(name))
def _node_def_unbound(from_node_def, export_scope, unbound_inputs,
as_unbound_inputs, clear_devices=False):
"""Create a `NodeDef` proto with export_scope stripped given input names
that are treated as unbound.
Args:
from_node_def: A `node_def_pb2.NodeDef` protocol buffer.
export_scope: A `string` representing the name scope to remove.
unbound_inputs: An array of unbound input names if they exist.
as_unbound_inputs: A list of `String`s. Input names that are treated as
unbound when exporting Operations.
clear_devices: Boolean which controls whether to clear device information
from node_def. Default false.
Returns:
A `node_def_pb2.NodeDef` protocol buffer.
"""
node_def = copy.deepcopy(from_node_def)
as_unbound_inputs = set(as_unbound_inputs)
for i, v in enumerate(node_def.input):
if node_def.input[i] in as_unbound_inputs:
# Adds "$unbound_inputs_" prefix to the unbound name so they are easily
# identifiable.
node_def.input[i] = _unbound_name(v)
unbound_inputs.append(node_def.input[i])
else:
node_def.input[i] = ops.strip_name_scope(v, export_scope)
node_def.name = compat.as_bytes(
ops.strip_name_scope(from_node_def.name, export_scope))
for k, v in six.iteritems(from_node_def.attr):
if k == "_class":
new_s = []
for s in v.list.s:
if compat.as_str(s) in as_unbound_inputs:
new_s.append(compat.as_bytes(_unbound_name(s)))
else:
new_s.append(compat.as_bytes(ops.strip_name_scope(s, export_scope)))
node_def.attr[k].CopyFrom(attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(s=new_s)))
else:
node_def.attr[k].CopyFrom(v)
if clear_devices:
node_def.device = ""
return node_def
def export_ops_meta_graph(op_list,
graph=None,
export_scope="",
as_unbound_inputs=None,
as_text=False,
unbound_inputs_col_name="unbound_inputs",
clear_devices=False,
**kwargs):
"""This function exports a list of `Operation` objects into `MetaGraphDef`
protocol buffer with the intention of it being imported at a later time or
location.
Args:
op_list: A list of `Operation` objects to export.
graph: The `Graph` to import into. If `None`, use the default graph.
export_scope: Optional `string`. Name scope under which to extract the ops.
      The scope name will be stripped from the node definitions for easy import
later into new name scopes.
as_unbound_inputs: A list of `String`s. Input names that are treated as
unbound when exporting Operations.
as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
unbound_inputs_col_name: Optional `string`. If provided, a string collection
with the given name will be added to the returned `MetaGraphDef`,
containing the names of tensors that must be remapped when importing the
`MetaGraphDef`.
clear_devices: Boolean which controls whether to clear device information
before exporting the graph.
**kwargs: Optional keyed arguments, including meta_info_def,
saver_def, collection_list.
Returns:
A `MetaGraphDef` proto.
"""
op_list = set(op_list)
for op in op_list:
if not op.name.startswith(export_scope):
raise ValueError("The Operation (%s) to export is not under "
"'export_scope'." % op.name)
graph = graph or ops.get_default_graph()
as_unbound_inputs = as_unbound_inputs or []
unbound_inputs = []
graph_def = graph_pb2.GraphDef()
# pylint: disable=protected-access
graph_def.versions.CopyFrom(graph.graph_def_versions)
bytesize = 0
for key in sorted(graph._nodes_by_id):
if graph._nodes_by_id[key] in op_list:
op = graph._nodes_by_id[key]
node_def = _node_def_unbound(
op.node_def, export_scope, unbound_inputs, as_unbound_inputs,
clear_devices=clear_devices)
graph_def.node.extend([node_def])
if op.outputs:
assert "_output_shapes" not in graph_def.node[-1].attr
graph_def.node[-1].attr["_output_shapes"].list.shape.extend([
output.get_shape().as_proto() for output in op.outputs])
bytesize += op.node_def.ByteSize()
if bytesize >= (1 << 31) or bytesize < 0:
raise ValueError("GraphDef cannot be larger than 2GB.")
# It's possible that not all the inputs are in the export_scope.
# If we would like such information included in the exported meta_graph,
# add them to a special unbound_inputs collection.
if unbound_inputs_col_name:
# Clears the unbound_inputs collections.
graph.clear_collection(unbound_inputs_col_name)
for k in unbound_inputs:
graph.add_to_collection(unbound_inputs_col_name, k)
scoped_meta_graph_def = create_meta_graph_def(
graph_def=graph_def,
graph=graph,
add_collections=False,
**kwargs)
return scoped_meta_graph_def
def copy_ops_meta_graph(op_list, from_scope, to_scope, replace=None):
"""Copies a list of `Operation`s from one scope to another, with variables
shared between them.
Args:
op_list: A list of `Operation` objects to be copied.
from_scope: `String` name scope containing the ops to be copied.
to_scope: `String` name scope under which the copied ops will reside.
replace: A dictionary containing the mapping from input Tensors of these
ops to their replacements.
Returns:
    A dictionary containing the mapping from the original ops to their copies
    in `to_scope`.
Raises:
ValueError: If `from_scope` and `to_scope` are the same.
"""
if from_scope == to_scope:
raise ValueError("'from_scope' and 'to_scope' need to be different "
"when performing copy in the same graph.")
  op_list = set(op_list)
  replace = replace or {}
op_names = set(op.name for op in op_list)
op_outputs = set()
for op in op_list:
if not op.name.startswith(from_scope):
raise ValueError("The Operation (%s) to copy is not under "
"'from_scope'." % op.name)
op_outputs.update(set(op.outputs))
input_map = {}
as_unbound_inputs = []
for op in op_list:
for tensor in op.inputs:
if not (tensor in op_outputs) or (tensor in replace):
name = tensor.name[:-2] if tensor.name[-2:] == ":0" else tensor.name
as_unbound_inputs.append(name)
if tensor in replace:
input_map[_unbound_name(name)] = replace[tensor]
else:
input_map[_unbound_name(name)] = tensor
for dep in op.control_inputs:
if dep not in op_list:
name = "^" + dep.name
as_unbound_inputs.append(name)
input_map[_unbound_name(name)] = dep
for name in op.colocation_groups():
if name[5:] not in op_names:
as_unbound_inputs.append(name)
input_map[_unbound_name(name)] = ops.get_default_graph(). \
as_graph_element(name[5:])
orig_meta_graph = export_ops_meta_graph(
op_list, export_scope=from_scope, as_unbound_inputs=as_unbound_inputs)
_ = import_scoped_meta_graph(orig_meta_graph,
import_scope=to_scope,
input_map=input_map)
copied_ops = {}
for op in op_list:
new_op_name = ops.prepend_name_scope(
ops.strip_name_scope(op.name, from_scope), to_scope)
new_op = ops.get_default_graph().as_graph_element(new_op_name,
allow_tensor=False)
copied_ops[op] = new_op
return copied_ops
def _get_backward_ops(seed_tensors, as_inputs=None):
"""Get backward ops from inputs to `seed_tensors` by topological order.
Args:
seed_tensors: A list of `Tensor`s, for which to get all preceding ops.
as_inputs: A list of `Tensor`s that are treated as inputs during the
search (where to stop searching the backward graph).
Returns:
A list of `Operation`s in topological order.
"""
as_inputs = set(as_inputs or [])
seed_tensors = [t for t in seed_tensors if t not in as_inputs]
seed_ops = list(OrderedDict.fromkeys(t.op for t in seed_tensors))
q = deque(seed_ops)
seen = set()
done = set()
ret = []
while q:
op = q[0]
if op not in seen:
seen.add(op)
for tensor in reversed(op.inputs):
if tensor not in as_inputs:
q.appendleft(tensor.op)
q.extendleft(reversed(op.control_inputs))
else:
# have seen this op before
q.popleft()
if op not in done:
done.add(op)
ret.append(op)
return ret
def clone(outputs, to_scope, from_scope="", replace=None):
"""Copy the subgraph that generates `outputs` from one scope to another,
with Tensors in `replace` being replaced by their corresponding values. with
variables shared between them.
Args:
outputs: A `Tensor` or a list of `Tensor`s.
to_scope: `String` name scope under which the copied subgraph will reside.
from_scope: `String` name scope containing the subgraph to be copied.
replace: A dictionary containing the mapping from Tensors in the subgraph
to their replacements.
Returns:
    A copy or a list of the copies of `outputs` in `to_scope`.
"""
if from_scope == to_scope:
raise ValueError("'from_scope' and 'to_scope' need to be different "
"when performing copying in the same graph.")
seed_tensors = outputs
if not isinstance(outputs, (list, tuple)):
seed_tensors = [outputs]
seed_tensors_set = set(seed_tensors)
replace = replace or {}
for k, v in six.iteritems(replace):
try:
assert isinstance(k, ops.Tensor)
v = ops.convert_to_tensor(v)
except Exception:
raise TypeError(
"The 'replace' argument should consist of Tensor pairs. "
"Error type: (%s, %s)" % (type(k), type(v)))
try:
k.get_shape().merge_with(v.get_shape())
except ValueError:
raise ValueError(
"Key-value pairs in 'replace' should have the same "
"shape (%s vs %s). Error pair: (%s, %s)" % (
k.get_shape(), v.get_shape(), k, v))
as_inputs = list(replace.keys())
backward_ops = _get_backward_ops(seed_tensors, as_inputs)
copied_ops = set()
copied_tensors = set()
for op in backward_ops:
if any((t in replace or t in copied_tensors) for t in op.inputs) or \
any(dep in copied_ops for dep in op.control_inputs) or \
any(t in seed_tensors_set for t in op.outputs):
copied_ops.add(op)
copied_tensors.update(set(op.outputs))
new_ops = copy_ops_meta_graph(list(copied_ops), from_scope,
to_scope, replace=replace)
new_tensors = []
for tensor in seed_tensors:
if tensor in replace:
new_tensors.append(replace[tensor])
elif tensor.op in new_ops:
new_tensors.append(new_ops[tensor.op].outputs[tensor.value_index])
else:
new_tensors.append(tensor)
if len(new_tensors) == 1:
new_tensors = new_tensors[0]
return new_tensors
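# Illustrative usage sketch (an assumption, not part of the original module):
# cloning a small subgraph while substituting one of its inputs; `tf` is
# assumed to be the top-level tensorflow package.
#
#   x = tf.placeholder(tf.float32, shape=[], name='x')
#   y = tf.exp(x) + 1.0
#   x_new = tf.constant(3.0)
#   y_new = clone(y, 'copy', replace={x: x_new})
#   # y_new computes exp(3.0) + 1.0, rebuilt under the 'copy' name scope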
| {
"content_hash": "665a98244db58d05bc479058fceb8693",
"timestamp": "",
"source": "github",
"line_count": 974,
"max_line_length": 80,
"avg_line_length": 37.84907597535934,
"alnum_prop": 0.6518106605181067,
"repo_name": "thjashin/tensorflow",
"id": "0c1303edcaef36f90521355aa95dd4b5215aff41",
"size": "37555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/meta_graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7583"
},
{
"name": "C",
"bytes": "175009"
},
{
"name": "C++",
"bytes": "21512044"
},
{
"name": "CMake",
"bytes": "130133"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "786880"
},
{
"name": "HTML",
"bytes": "557007"
},
{
"name": "Java",
"bytes": "277432"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833840"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "36990"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64764"
},
{
"name": "Protocol Buffer",
"bytes": "197812"
},
{
"name": "Python",
"bytes": "17881706"
},
{
"name": "Shell",
"bytes": "319872"
},
{
"name": "TypeScript",
"bytes": "775401"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
input_name = '../examples/homogenization/linear_elastic_mM.py'
output_name = 'test_linear_elastic_mM.vtk'
from tests_basic import TestInput
class Test( TestInput ):
pass
| {
"content_hash": "d8cbdac713921d92c541177e63cb3d78",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 62,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.7523364485981309,
"repo_name": "lokik/sfepy",
"id": "5ac80deb276cf8a4d6ec9f4a633b5250cbc3ba48",
"size": "214",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_input_linear_elastic_mM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "448969"
},
{
"name": "C++",
"bytes": "37842"
},
{
"name": "GLSL",
"bytes": "6058"
},
{
"name": "Makefile",
"bytes": "184"
},
{
"name": "PowerShell",
"bytes": "3118"
},
{
"name": "Python",
"bytes": "2701733"
},
{
"name": "Shell",
"bytes": "71"
}
],
"symlink_target": ""
} |
import mysql.connector
'''
(C)2015 [email protected]
'''
class Dmdb:
""" DB operation of drinking running server"""
#class variables
USER = "root"
PASSWORD = "123456"
DATABASE = "db_drinking_man"
ADD_DRINK = '''
INSERT INTO `db_drinking_man`.`drinks`
(
`id_string`,
`name`,
`image`,
`score`,
`gold`,
`config`,
`cost_rmb`,
`pass_md5`)
VALUES
(%(id_string)s,%(name)s,%(image)s,%(score)s,%(gold)s,%(config)s,%(cost_rmb)s,%(pass_md5)s)
'''
SELECT_BY_ID = '''
SELECT id, id_string, name, image, score, gold, config, cost_rmb, pass_md5
FROM `db_drinking_man`.`drinks` WHERE id=%s
'''
SELECT_BY_ID_STR = '''
SELECT id, id_string, name, image, score, gold, config, cost_rmb, pass_md5
FROM `db_drinking_man`.`drinks` WHERE id_string=%s
'''
SELECT_TOP_3 = '''
SELECT id, id_string, name, image, score, gold, config, cost_rmb, pass_md5
FROM `db_drinking_man`.`drinks` ORDER BY score DESC LIMIT 0,3
'''
COUNT_SCORE_RANK = '''
SELECT count(*) as co
FROM `db_drinking_man`.`drinks`
WHERE score>%s
'''
SELECT_A_B = '''
SELECT id, id_string, name, image, score, gold, config, cost_rmb, pass_md5
FROM `db_drinking_man`.`drinks` ORDER BY score DESC LIMIT %s,%s
'''
UPDATE = '''
UPDATE `db_drinking_man`.`drinks`
SET id_string = %s,
name = %s,
image = %s,
score = %s,
gold = %s,
config = %s,
cost_rmb = %s,
pass_md5 = %s
WHERE id = %s
'''
IS_STRING_EXIST = '''
SELECT id_string, name FROM db_drinking_man.drinks WHERE id_string = %s
'''
def __init__(self):
#connect to DB
print("connect to DB")
self.context = mysql.connector.connect(user=self.USER, password=self.PASSWORD, database=self.DATABASE)
def __del__(self):
print("disconnect to DB")
self.context.close()
def insert(self, dict):
''' insert may raise exception because of duplicate id_string '''
try:
cursor = self.context.cursor()
cursor.execute(self.ADD_DRINK, dict)
self.context.commit()
cursor.close()
return True
except mysql.connector.errors.IntegrityError:
return False
def update(self, dict):
cursor = self.context.cursor()
        print(dict)
cursor.execute(self.UPDATE, (
dict["id_string"],
dict["name"],
dict["image"],
dict["score"],
dict["gold"],
dict["config"],
dict["cost_rmb"],
dict["pass_md5"],
dict["id"]
))
cursor.close()
self.context.commit()
def turple2dict(self, turp):
if (turp == None):
return None
else:
res = {}
res["id"] = turp[0]
res["id_string"] = turp[1]
res["name"] = turp[2]
res["image"] = turp[3]
res["score"] = turp[4]
res["gold"] = turp[5]
res["config"] = turp[6]
res["cost_rmb"] = turp[7]
res["pass_md5"] = turp[8]
return res
def getById(self, id):
''' get data by Id, if not exist, return None '''
cursor = self.context.cursor()
cursor.execute(self.SELECT_BY_ID, (id,))
res = None
for turp in cursor:
res = self.turple2dict(turp)
break
cursor.close()
return res
def getByIdstring(self, id_string):
cursor = self.context.cursor()
cursor.execute(self.SELECT_BY_ID_STR, (id_string,))
res = None
for turp in cursor:
res = self.turple2dict(turp)
break
cursor.close()
return res
def isIdStringExist(self, id_string):
''' check if id_string exist '''
cursor = self.context.cursor()
cursor.execute(self.IS_STRING_EXIST, (id_string,))
res = False
for tp in cursor:
res = True
break
cursor.close()
return res
def getTopAB(self, a, b):
''' get limit a, b '''
cursor = self.context.cursor()
cursor.execute(self.SELECT_A_B, (a, b))
res = []
for turp in cursor:
res.append(self.turple2dict(turp))
cursor.close()
return res
def getTopScore3(self):
''' get top 3 record '''
return self.getTopAB(0, 3)
def countScoreRank(self, score):
''' count (record > score) '''
cursor = self.context.cursor()
cursor.execute(self.COUNT_SCORE_RANK, (score,))
res = 0
for turp in cursor:
res = turp[0]
break
cursor.close()
return res
def getNear6ByScore(self, score):
rank = self.countScoreRank(score)
if rank < 3:
rank = 3
return self.getTopAB(rank-3, rank+3)
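# Illustrative note (not part of the original module): getNear6ByScore builds a
# leaderboard window around a score. Assuming a score that ranks 10th,
# countScoreRank returns 10 and getTopAB(7, 13) is issued, which in MySQL terms
# is LIMIT 7,13 - skip the 7 higher-ranked rows, then return up to 13 rows
# (the second argument of SELECT_A_B is a row count, not an end offset).
#
#     db = Dmdb()
#     nearby = db.getNear6ByScore(3243)   # list of dicts built by turple2dict
#     del db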
if __name__ == "__main__":
''' TEST CODE '''
db = Dmdb()
drink = {
"id_string":"my_id_string",
"name":"my_name",
"image":"image",
"score":3243,
"gold":23432,
"config":'{"sdfsdgijoi324325":23432}',
"cost_rmb":"2343",
"pass_md5":"sdfsdfs234"
}
'''
for i in xrange(100):
drink["score"] = i
drink["id_string"] = "id_string_%s" % (i,)
#db.insert(drink)
'''
#db.insert(drink)
#db.insert(drink)
#print db.getById(1)
# print db.isIdStringExist("my_id_string")
# a = db.getById(1)
# print a
# a["name"] = "abc"
# print a
# db.update(a)
# print db.getById(1)
del db | {
"content_hash": "581aca09864eb40ad31c039fe9d42999",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 110,
"avg_line_length": 27.10900473933649,
"alnum_prop": 0.5145104895104895,
"repo_name": "arisecbf/dringkingrun_server",
"id": "4358527f67dc285c170fdab4c13650d14b43fffa",
"size": "5720",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17169"
}
],
"symlink_target": ""
} |
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
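# Minimal illustration (not from the original script) of why FakeSecHead exists:
# bitcoin.conf has no [section] headers, so SafeConfigParser cannot parse it
# directly. With the fake "[all]" header prepended, a file such as
#
#     rpcuser=alice
#     rpcpassword=secret   # inline comments are stripped by readline()
#
# is returned as {'rpcuser': 'alice', 'rpcpassword': 'secret'}.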
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 14342 if testnet else 4342
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
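# Shape of the dictionary returned by list_available (values are illustrative):
#
#     {
#         "1ExampleAddr...": {
#             "total":   Decimal("1.5"),     # sum of the unspent output values
#             "outputs": [ {...}, {...} ],   # raw listunspent entries
#             "account": "savings",          # "" when the address has no account
#         },
#     }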
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
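# Worked example with hypothetical numbers: for needed = Decimal("1.0") and
# inputs carrying amounts 0.4, 0.8 and 0.3, the loop stops after the second
# input (0.4 + 0.8 = 1.2 >= 1.0) and returns the two selected outpoints plus
# the change, i.e. ([{"txid": ..., "vout": ...}, {"txid": ..., "vout": ...}],
# Decimal("0.2")).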
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and total_in-total_out < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and total_in-total_out < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| {
"content_hash": "b0843ed2cdcd8a68577844e6f899a6a2",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.392857142857146,
"alnum_prop": 0.6155038759689923,
"repo_name": "GENESISCOIN/GENESISCOIN_SCRYPT",
"id": "758889b8c6ea37f183b7393492cdeea5633e8b24",
"size": "10053",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32092"
},
{
"name": "C++",
"bytes": "2606911"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18284"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "102375"
},
{
"name": "NSIS",
"bytes": "6022"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "Python",
"bytes": "69729"
},
{
"name": "QMake",
"bytes": "14726"
},
{
"name": "Shell",
"bytes": "13173"
}
],
"symlink_target": ""
} |
direct_links = r'meta property="og:image" content="(.+?)"'
# same_filename (default=False): if True, uses filename specified on remote link. Otherwise, creates own filename with incremental index.
| {
"content_hash": "7ee9b0be6688549fac525e44c2e16e3e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 139,
"avg_line_length": 66.66666666666667,
"alnum_prop": 0.74,
"repo_name": "regosen/gallery_get",
"id": "1a448621d4b418cb598fdeecb0d893637a5db8a9",
"size": "1032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gallery_plugins/plugin_imagebam.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66870"
}
],
"symlink_target": ""
} |
import turtle
def draw_square():
window = turtle.Screen()
window.bgcolor('red')
joe = turtle.Turtle()
joe.shape('turtle')
joe.color('green')
joe.speed(1) # on a scale of 1 to 10, where 10 is fastest
for i in range(0,4):
joe.forward(100)
joe.right(90)
window.exitonclick()
draw_square() | {
"content_hash": "97cd87cc45765fea54c1a2cb56e8748a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 18.77777777777778,
"alnum_prop": 0.6035502958579881,
"repo_name": "tai271828/courses",
"id": "c547dc9b49e43f325d6faf9ec0ddb3b0bb68dec8",
"size": "338",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "software-development/udacity/ud036-python-foundations/code/lesson2/square.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2692"
},
{
"name": "HTML",
"bytes": "9304"
},
{
"name": "JavaScript",
"bytes": "312081"
},
{
"name": "Python",
"bytes": "151460"
}
],
"symlink_target": ""
} |
"""
Created on Sun May 28 14:51:28 2017
@author: azkei
Using lxml library to read XML data
"""
# We want to take the data structure inside the XML file and convert it into
# a DataFrame.
# For that, we need objectify.
from lxml import objectify
import pandas as pd
# Now we can do the parser of the XML file using parse()
xml = objectify.parse('books.xml')
xml
# Define a root structure
root = xml.getroot()
# Access different nodes in the root structure
root.Book.Author
# Access various elements corresponding to a node
root.Book.getchildren()
# Get the various tags in the structure
[child.tag for child in root.Book.getchildren()]
# Get the corresponding value
[child.text for child in root.Book.getchildren()]
# Converting the Tree Structure into a DataFrame
def etree2df(root):
column_names=[]
for i in range(0,len(root.getchildren()[0].getchildren())):
column_names.append(root.getchildren()[0].getchildren()[i].tag)
xmlframe = pd.DataFrame(columns=column_names)
for j in range(0,len(root.getchildren())):
obj = root.getchildren()[j].getchildren()
texts = []
for k in range(0,len(column_names)):
texts.append(obj[k].text)
row = dict(zip(column_names,texts))
row_s = pd.Series(row)
row_s.name = j
xmlframe = xmlframe.append(row_s)
return xmlframe
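# Quick self-contained check of the same idea (hypothetical XML, not books.xml):
# objectify.fromstring builds the same kind of tree in memory, so
#
#     sample = objectify.fromstring(
#         '<Catalog><Book><Author>A</Author><Title>T</Title></Book></Catalog>')
#     etree2df(sample)
#
# would yield a one-row DataFrame with columns ['Author', 'Title'].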
# Running the function
etree2df(root) | {
"content_hash": "b6087e4ca55112694d2b608d1157b0ef",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 76,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.6944444444444444,
"repo_name": "jjsalomon/python-analytics",
"id": "b3307ae810705dd8b51397264612c60b8c7642cf",
"size": "1392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas2 - Reading & Writing Data/pandas3 - Reading Data from XML.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "835"
},
{
"name": "Python",
"bytes": "75839"
}
],
"symlink_target": ""
} |
__author__="Ashish Hunnargikar"
__date__ ="$Jun 13, 2014 12:33:33 PM$"
import time
import os
import subprocess
from datetime import date, timedelta
from elasticsearch.transport import Transport
from elasticsearch import (Elasticsearch, RoundRobinSelector, ImproperlyConfigured, ElasticsearchException,
SerializationError, TransportError, NotFoundError, ConflictError, RequestError, ConnectionError)
import simplejson as json
from kazoo.client import KazooClient
from kazoo.exceptions import (KazooException)
os.environ['DEBUG'] = 'true'
#os.environ['CRON'] = '180'
#os.environ['DAYS'] = '1'
#
##Zookeeper
#os.environ['ZK_ADDRESS'] = 'zookeeper1:2181,zookeeper2:2181,zookeeper3:2181'
#
##Elasticsearch
#os.environ['ES_CLUSTER'] = 'elasticsearch'
#os.environ['ES_ANALYTICS_INDEX'] = 'analytics'
#os.environ['ES_ANALYTICS_TYPE'] = 'data'
#os.environ['ES_REGISTRY_INDEX'] = 'docker_registry'
#os.environ['ES_REGISTRY_TAG_TYPE'] = 'tags'
#os.environ['ES_REGISTRY_IMAGE_TYPE'] = 'images'
swift_env={
"OS_TENANT_ID":os.environ["OS_TENANT_ID"],
"OS_USERNAME": os.environ["OS_USERNAME"],
"OS_AUTH_URL": os.environ["OS_AUTH_URL"],
"OS_TENANT_NAME": os.environ["OS_TENANT_NAME"],
"OS_CONTAINER": os.environ["OS_CONTAINER"],
"OS_REGION_NAME": os.environ["OS_REGION_NAME"],
"OS_PASSWORD": os.environ["OS_PASSWORD"],
"STORAGE_PATH": os.environ["STORAGE_PATH"]
}
es = None
import traceback
def log(data):
"""
Print debug output
"""
if (os.environ['DEBUG'] == 'true'):
print(data + '\n')
def multi_get_from_es_index(index, doc_type, body, _source, fields):
"""
    Multi-get documents from the Elasticsearch index by id (wraps es.mget with retries)
    @param body: Dict of the form {"ids": [...]} with the document ids to fetch
    @return The raw es.mget response, or None if all attempts failed
"""
response = None
#Try 3 times to read the document from ES, each time picking a random ES node address in case of failure
for retries in range(3):
try:
response = es.mget(index=index, doc_type=doc_type, body=body, _source=_source, fields=fields)
#log("ES Get Response :: " + json.dumps(response))
except ImproperlyConfigured:
log("ES ImproperlyConfigured!" + traceback.format_exc())
continue
except ElasticsearchException:
log("ES ElasticsearchException!" + traceback.format_exc())
continue
except TransportError:
log("ES TransportError!" + traceback.format_exc())
continue
except NotFoundError:
log("ES NotFoundError!" + traceback.format_exc())
continue
except ConflictError:
log("ES ConflictError!" + traceback.format_exc())
continue
except RequestError:
log("ES RequestError!" + traceback.format_exc())
continue
except SerializationError:
log("ES SerializationError!" + traceback.format_exc())
continue
except ConnectionError:
log("ES ConnectionError!" + traceback.format_exc())
continue
except Exception:
log("ES Exception!" + traceback.format_exc())
continue
finally:
log("Total number of ES read attempts: " + str(retries + 1))
#Exit for loop if ES transaction is successful otherwise pick another node and continue retrying
break
return response
def set_in_index(es, document, index, type):
"""
    Store a single document in the Elasticsearch index via HTTP APIs
    @type document: Dict
    @param document: JSON document to index (its 'id' field is used as the ES document id)
"""
response = None
#Try 3 times to store the document in ES, each time picking a random ES node address in case of failure
for retries in range(3):
try:
document['epoch'] = int(time.time())
log('ES Set Request :: ' + json.dumps(document) + ' : ' + index + ':' + type)
response = es.index(index=index, doc_type=type, id=document['id'], body=document)
log("ES Set Response :: " + json.dumps(response))
except ImproperlyConfigured:
log("ES ImproperlyConfigured!" + traceback.format_exc())
continue
except ElasticsearchException:
log("ES ElasticsearchException!" + traceback.format_exc())
continue
except TransportError:
log("ES TransportError!" + traceback.format_exc())
continue
except NotFoundError:
log("ES NotFoundError!" + traceback.format_exc())
continue
except ConflictError:
log("ES ConflictError!" + traceback.format_exc())
continue
except RequestError:
log("ES RequestError!" + traceback.format_exc())
continue
except SerializationError:
log("ES SerializationError!" + traceback.format_exc())
continue
except ConnectionError:
log("ES ConnectionError!" + traceback.format_exc())
continue
except Exception:
log("ES Exception!" + traceback.format_exc())
continue
finally:
log("Total number of ES write attempts: " + str(retries + 1))
#Exit for loop if ES transaction is successful otherwise pick another node and continue retrying
break
if response is None or response == '':
log('Failed to store document ' + document['id'] + ' into the ES index')
return 'false'
else:
log('Successfully stored document ' + document['id'] + ' into the ES index')
return 'true'
def get_es_node_addresses():
"""
Get the Elasticsearch node addresses via Zookeeper
@return List of Elasticsearch node ip addresses and ports
"""
zk = KazooClient(hosts=os.environ['ZK_ADDRESS'], timeout=10.0, randomize_hosts=True)
zk.start()
esNodes = []
try:
#Fetch the list of ES cluster node names from Zookeeper
zkPath = '/es/clusters/' + os.environ['ES_CLUSTER'] + '/json'
children = zk.get_children(zkPath)
#Retrieve the JSON metadata associated with each ephemeral ES node
for node in children:
zookeeperAddr = zkPath + '/' + node
esNodeInfo = zk.get(zookeeperAddr)
jsonData = json.loads(esNodeInfo[0])
#Collect each node ip address and port
esNodes.append(jsonData['address'] + ':' + jsonData['port'])
except KazooException:
log('Kazoo Exception: Unable to fetch Zookeeper data from ' + zkPath + ' : ' + traceback.format_exc());
zk.stop()
zk.close()
log('ES Node list retrieved from Zookeeper :: ' + json.dumps(esNodes))
return esNodes
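# Data flow above, with hypothetical values: each ephemeral child under
# /es/clusters/<ES_CLUSTER>/json stores JSON like {"address": "10.0.0.5", "port": "9200"},
# and the function flattens those into plain "host:port" strings, e.g.
# ["10.0.0.5:9200", "10.0.0.6:9200"], which is the form passed to Elasticsearch()
# in the __main__ block below.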
def get_image_checksums_from_swift(namespace, repository):
"""
Get the registry image layer checksums JSON from Swift
@return Image layer JSON object
"""
#swift download community-registry registry/images/09690291212c69ac94df172ed35634b5cacd8b52e015e9e81c974cecb8ecde05/json --output -
swiftCommand = 'swift download ' + swift_env['OS_CONTAINER'] + ' ' + os.environ['SWIFT_REGISTRY_PREFIX'] + '/repositories/' + namespace + '/' + repository + '/_index_images --output -'
p = subprocess.Popen([swiftCommand], stdout=subprocess.PIPE, shell=True, env=swift_env)
output, error = p.communicate()
log('Checksums json from Swift for image ' + namespace + '/' + repository + ' received....' + output)
return json.loads(output)
def get_image_json_from_swift(checksum):
"""
Get the registry image layer JSON from Swift
@return Image layer JSON object
"""
#swift download community-registry registry/images/09690291212c69ac94df172ed35634b5cacd8b52e015e9e81c974cecb8ecde05/json --output -
swiftCommand = 'swift download ' + swift_env['OS_CONTAINER'] + ' ' + os.environ['SWIFT_REGISTRY_PREFIX'] + '/images/' + checksum + '/json --output -'
p = subprocess.Popen([swiftCommand], stdout=subprocess.PIPE, shell=True, env=swift_env)
output, error = p.communicate()
log('Image json from Swift for checksum ' + checksum + ' received....')
return json.loads(output)
def get_tag_checksum_from_swift(namespace, repository, tag):
"""
Get the registry Tag checksum from Swift
@return Tag JSON object
"""
#swift download community-registry registry/repositories/paas/fe15c3d73f634f59904cde910500958b/tag_1408139458.3264 --output -
swiftCommand = 'swift download ' + swift_env['OS_CONTAINER'] + ' ' + os.environ['SWIFT_REGISTRY_PREFIX'] + '/repositories/' + namespace + '/' + repository + '/tag_' + tag + ' --output -'
p = subprocess.Popen([swiftCommand], stdout=subprocess.PIPE, shell=True, env=swift_env)
output, error = p.communicate()
log('Tag from Swift for ' + namespace + '/' + repository + '/' + tag + ' received....')
return output
def find_missing_tags_in_es_via_swift(es, days):
"""
Get the registry Tag names that are present in Swift but absent in the registry ES index
@return List of Tag names
"""
#Get the list of tag paths in Swift created N days ago
tagsList = get_swift_tags(es, days)
#Now extract the namespace, repository and tag from each path into a JSON document
docs = generate_tag_docs(tagsList)
docsNotFound = {}
if len(docs) > 0:
#Get all the corresponding Tag ids available in the ES registry index
#response = es.mget(index=os.environ['ES_REGISTRY_INDEX'], doc_type=os.environ['ES_REGISTRY_TAG_TYPE'], body={"ids" : docs.keys()}, _source=False, fields=[])
response = multi_get_from_es_index(index=os.environ['ES_REGISTRY_INDEX'], doc_type=os.environ['ES_REGISTRY_TAG_TYPE'], body={"ids" : docs.keys()}, _source=False, fields=[])
#Iterate over the ES response docs and find the registry tags that haven't been located in the ES index
#ES sends us a "found=true/false" response attribute per doc so we only need the false ones
for item in response['docs']:
if item['found'] == False:
docsNotFound[item['_id']] = docs.get(item['_id'])
log(str(len(docsNotFound)) + ' missing tags identified in ES....')
return docsNotFound
def find_missing_images_in_es_via_swift(es, days):
"""
Get the registry Image names that are present in Swift but absent in the registry ES index
@return List of image ids
"""
#Get the list of tag paths in Swift created N days ago
imageList = get_swift_images(es, days)
#Now extract the namespace and repository from each image path into a JSON document
docs = generate_image_docs(imageList)
docsNotFound = {}
if len(docs) > 0:
#Get all the corresponding image ids available in the ES registry index
#response = es.mget(index=os.environ['ES_REGISTRY_INDEX'], doc_type=os.environ['ES_REGISTRY_IMAGE_TYPE'], body={"ids" : docs.keys()}, _source=False)
response = multi_get_from_es_index(index=os.environ['ES_REGISTRY_INDEX'], doc_type=os.environ['ES_REGISTRY_IMAGE_TYPE'], body={"ids" : docs.keys()}, _source=False, fields=[])
#Iterate over the ES response docs and find the registry images that haven't been located in the ES index
#ES sends us a "found=true/false" response attribute per doc so we only need the false ones
for item in response['docs']:
if item['found'] == False:
docsNotFound[item['_id']] = docs.get(item['_id'])
log(str(len(docsNotFound)) + ' missing images identified in ES....')
return docsNotFound
def generate_tag_docs(tagsList):
"""
Extract the registry tag names from the list of Swift tag paths. Ex.
#"paas_c71ffca6470f4f1495d17c729459c8a3_1408369211.39146": {"tag": "1408369211.39146", "namespace": "paas", "repository": "c71ffca6470f4f1495d17c729459c8a3"},
#"paas_453873689c51492c88341ffea425b3ac_1408322491.23110": {"tag": "1408322491.23110", "namespace": "paas", "repository": "453873689c51492c88341ffea425b3ac"},
#........
@return List of JSON documents with the namespace, repository and tag info
"""
docs = {}
for item in tagsList:
if (item != ''):
temp = item.split('/')
#Generate a document for the missing Tag
doc = {}
doc['namespace'] = temp[2]
doc['repository'] = temp[3]
doc['tag'] = temp[4].replace('tag_', '')
#Add the missing tag info to the ids list and docs dict resp.
docs[temp[2] + '_' + temp[3] + '_' + doc['tag']] = doc
log('Documents generated for ' + str(len(docs)) + ' tags....')
return docs
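# Walk-through of the split above for one hypothetical Swift path:
#     "registry/repositories/paas/c71ffca.../tag_1408369211.39146".split('/')
# gives ['registry', 'repositories', 'paas', 'c71ffca...', 'tag_1408369211.39146'],
# so temp[2] is the namespace, temp[3] the repository and temp[4] (with the
# 'tag_' prefix removed) the tag, keyed as 'paas_c71ffca..._1408369211.39146'.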
def generate_image_docs(imageList):
"""
Extract the registry image docs from the list of Swift image paths. Ex.
#"paas_c71ffca6470f4f1495d17c729459c8a3": {"namespace": "paas", "repository": "c71ffca6470f4f1495d17c729459c8a3"},
#"paas_453873689c51492c88341ffea425b3ac": {"namespace": "paas", "repository": "453873689c51492c88341ffea425b3ac"},
#........
@return List of JSON documents with the namespace, repository and tag info
"""
docs = {}
for item in imageList:
if (item != ''):
temp = item.split('/')
#Generate a document for the missing Tag
doc = {}
doc['namespace'] = temp[2]
doc['repository'] = temp[3]
#Add the missing tag info to the ids list and docs dict resp.
docs[temp[2] + '_' + temp[3]] = doc
log('Documents generated for ' + str(len(docs)) + ' images....')
return docs
def get_swift_tags(es, days):
"""
Get all the registry Tags created in Swift during the past N days
#registry/repositories/paas/01a66295ebd74d9199817940531c1d46/tag_1408320061.71133
#........
#........
@return List of Swift Tag location paths
"""
#Generate the date to grep for in yyyy-mm-dd format in the Swift output
dateString=str(date.today()-timedelta(days=days))
#Get the list of tag paths from Swift via the Swift cli
swiftCommand = 'swift list ' + swift_env['OS_CONTAINER'] + ' --prefix "' + os.environ['SWIFT_REGISTRY_PREFIX'] + '/repositories" --long | grep "tag_" | grep "' + dateString + '" | sed "s#.* ##g"'
p = subprocess.Popen([swiftCommand], stdout=subprocess.PIPE, shell=True, env=swift_env)
output, error = p.communicate()
#Convert the Tag list string into an array
tagsList = output.split('\n')
tagsList = filter(None, tagsList)
    log(str(len(tagsList)) + ' tags created in Swift ' + str(days) + ' days ago (on ' + dateString + ')....')
return tagsList
def get_swift_images(es, days):
"""
Get all the registry images created in Swift during the past N days. Ex.
# registry/repositories/paas/0782049940714c6f9269b2879073d707/_index_images
# registry/repositories/paas/10bad43969474fec80ba5465bec62412/_index_images
# ........
# ........
@return List of Swift image location paths
"""
#Generate the date to grep for in yyyy-mm-dd format in the Swift output
dateString=str(date.today()-timedelta(days=days))
#Get the list of tag paths from Swift via the Swift cli
swiftCommand = 'swift list ' + swift_env['OS_CONTAINER'] + ' --prefix "' + os.environ['SWIFT_REGISTRY_PREFIX'] + '/repositories" --long | grep "_index_images" | grep "' + dateString + '" | sed "s#.* ##g"'
p = subprocess.Popen([swiftCommand], stdout=subprocess.PIPE, shell=True, env=swift_env)
output, error = p.communicate()
#Convert the image list string into an array
imageList = output.split('\n')
imageList = filter(None, imageList)
log(str(len(imageList)) + ' images/repos created in Swift ' + str(days) + ' days ago....')
return imageList
def generate_tag_document(namespace, repository, tag, checksum, author):
"""
Creates a Tag in the registry index
"""
    #Manufacture the tag document to be written into the index
document={}
#Concatenate the <namespace>_<repository>_imageid to generate a unique primary key id
document['id'] = namespace + '_' + repository + '_' + tag
document['namespace'] = namespace
document['tag'] = tag
document['checksum'] = checksum
document['repository'] = repository
document['description'] = author
log('Generated ES document for Tag ' + document['id'] + '....')
return document
def generate_image_document(namespace, repository, checksums):
"""
Creates an Image document in the registry index
"""
    #Manufacture the image document to be written into the index
document={}
#Concatenate the <namespace>_<repository> to generate a unique primary key id
document['id'] = namespace + '_' + repository
document['namespace'] = namespace
document['imageLayers'] = checksums
document['repository'] = repository
log('Generated ES document for Image ' + document['id'] + '....')
return document
#Overriding the default ES Sniffing mechanism with Zookeeper
class ZookeeperTransport(Transport):
def get_es_node_addresses(self):
"""
Get the Elasticsearch node addresses via Zookeeper
@return List of Elasticsearch node ip addresses and ports
"""
esNodes = []
        #Initiate the Zookeeper Kazoo connection
#kz_retry = KazooRetry(max_tries=3, delay=0.5, backoff=2)
zk = KazooClient(hosts=os.environ['ZK_ADDRESS'], timeout=10.0, randomize_hosts=True)
zk.start()
try:
#Fetch the list of ES cluster node names from Zookeeper
zkPath = '/es/clusters/' + os.environ['ES_CLUSTER'] + '/json'
children = zk.get_children(zkPath)
#Retrieve the JSON metadata associated with each ephemeral ES node
for node in children:
zookeeperAddr = zkPath + '/' + node
esNodeInfo = zk.get(zookeeperAddr)
jsonData = json.loads(esNodeInfo[0])
#Collect each node ip address and port
host = {'host':jsonData['address'], 'port': int(jsonData['port'])}
esNodes.append(host)
except KazooException:
log('Kazoo Exception: Unable to fetch Zookeeper data from ' + zkPath + ' : ' + traceback.format_exc());
#Close and Zookeeper connection
zk.stop()
zk.close()
return esNodes
def sniff_hosts(self):
"""
Obtain a list of nodes from the cluster and create a new connection
pool using the information retrieved.
To extract the node connection parameters use the `nodes_to_host_callback`.
"""
previous_sniff = self.last_sniff
hosts = []
try:
# reset last_sniff timestamp
self.last_sniff = time.time()
try:
hosts = self.get_es_node_addresses()
except Exception:
raise TransportError("N/A", "Unable to sniff hosts." + traceback.format_exc())
except:
# keep the previous value on error
self.last_sniff = previous_sniff
raise
# we weren't able to get any nodes, maybe using an incompatible
# transport_schema or host_info_callback blocked all - raise error.
if not hosts:
raise TransportError("N/A", "Unable to sniff hosts - no viable hosts found." + traceback.format_exc())
self.set_connections(hosts)
if __name__ == "__main__":
#Initiate the ES connection pool
es = Elasticsearch(get_es_node_addresses(), sniff_on_start=True, sniff_on_connection_fail=True, max_retries=3, sniffer_timeout=180, selector_class=RoundRobinSelector, sniff_timeout=1, transport_class=ZookeeperTransport)
while True:
#Find missing tags
missingTagDocs = find_missing_tags_in_es_via_swift(es, int(os.environ['DAYS']))
#Iterate over the missing tag docs and restore them in the ES index
counter = 1
total = len(missingTagDocs.items())
for key, value in missingTagDocs.items():
log('**** Restoring Tag ' + str(counter) + '/' + str(total) + ' --> ' + value['namespace'] + '/' + value['repository'] + '/' + value['tag'] + ' ****\n')
checksum = get_tag_checksum_from_swift(value['namespace'], value['repository'], value['tag'])
data = get_image_json_from_swift(checksum)
document = generate_tag_document(value['namespace'], value['repository'], value['tag'], data.get('id'), data.get('author', ''))
set_in_index(es, document, os.environ['ES_REGISTRY_INDEX'], os.environ['ES_REGISTRY_TAG_TYPE'])
counter = counter + 1
log('************************************************************************************************************\n')
#Find missing images from Swift
missingImageDocs = find_missing_images_in_es_via_swift(es, int(os.environ['DAYS']))
#Iterate over the missing image docs and restore them in the ES index
counter = 1
total = len(missingImageDocs.items())
for key, value in missingImageDocs.items():
            log('**** Restoring Image ' + str(counter) + '/' + str(total) + ' --> ' + value['namespace'] + '/' + value['repository'] + ' ****\n')
checksumDict = get_image_checksums_from_swift(value['namespace'], value['repository'])
checksumList = []
for checksum in checksumDict:
checksumList.append(checksum['id'])
document = generate_image_document(value['namespace'], value['repository'], checksumList)
set_in_index(es, document, os.environ['ES_REGISTRY_INDEX'], os.environ['ES_REGISTRY_IMAGE_TYPE'])
counter = counter + 1
log('**********************************************************************************************\n')
log('Sleeping for ' + os.environ['CRON'] + ' secs.....\n')
time.sleep(int(os.environ['CRON']))
| {
"content_hash": "9750582c0e19524d8991ff3342513d28",
"timestamp": "",
"source": "github",
"line_count": 529,
"max_line_length": 223,
"avg_line_length": 41.54253308128544,
"alnum_prop": 0.6260921004732435,
"repo_name": "misho-kr/elasticsearchindex",
"id": "98b260ed06c14414edfe4f282aeb420ebf5156b0",
"size": "21996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loadtest/restore_tags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95411"
}
],
"symlink_target": ""
} |
"""
Script to get read counts distribution
@author: Alicia Schep
"""
##### IMPORT MODULES #####
# import necessary for python
import os
from pyatac.chunk import ChunkList
from pysam import AlignmentFile
import numpy as np
def _between(x,start,end):
if x >= start and x < end:
return True
else:
return False
def get_counts(args):
"""function to get fragment sizes
"""
if args.out is None:
args.out = '.'.join(os.path.basename(args.bed).split('.')[0:-1])
chunks = ChunkList.read(args.bed)
mat = np.zeros(len(chunks), dtype=np.int)
bamHandle = AlignmentFile(args.bam)
j = 0
for chunk in chunks:
for read in bamHandle.fetch(chunk.chrom, max(0, chunk.start - args.upper), chunk.end + args.upper):
if read.is_proper_pair and not read.is_reverse:
if args.atac:
#get left position
l_pos = read.pos + 4
#get insert size
                    #correct by 8 base pairs to be insertion to insertion
ilen = abs(read.template_length) - 8
else:
l_pos = read.pos
ilen = abs(read.template_length)
r_pos = l_pos + ilen - 1
if _between(ilen, args.lower, args.upper) and (_between(l_pos, chunk.start, chunk.end) or _between(r_pos, chunk.start, chunk.end)):
mat[j] += 1
j += 1
bamHandle.close()
np.savetxt(args.out + ".counts.txt.gz", mat, delimiter="\n", fmt='%i')
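# Note on the ATAC-seq correction above (standard Tn5 offset handling, added
# here as commentary): the left read start is shifted +4 bp and the template
# length reduced by 8 bp so that both fragment ends mark Tn5 insertion centres.
# For example, a proper pair starting at pos 100 with template_length 108 is
# counted as an insertion-to-insertion fragment from 104 to 203 (length 100).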
| {
"content_hash": "00275bb90c55549833720a90cb0cc253",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 147,
"avg_line_length": 31.14,
"alnum_prop": 0.5555555555555556,
"repo_name": "GreenleafLab/NucleoATAC",
"id": "978b1440d0a1bbfec857059660f058206b6abd68",
"size": "1557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyatac/get_counts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "194662"
}
],
"symlink_target": ""
} |
"""This module provides the TimeSeq class.
"""
import logging, csv, copy
from Transforms import SeqXform
class TimeSeq:
"""A class for representing data indexed by time.
"""
def __init__(self,columnNames,data):
self.__colIndexByName = {}
self.__columnNames = list(columnNames)
self.__ResetNameForColumnDict()
        self.data = map(list, data)  # convert from list of tuples to list of lists
assert 0==len(self.data) or (
len(self.data[0])==len(self.__columnNames)),"""
Wrong number of columnNames for data. """
def __ResetNameForColumnDict(self):
"""
This function recreates the __colIndexByName dictionary. The
__colIndexByName dictionary maps names to column indexes and is used
by the GetColIndex function quickly look up the index for a
column name
"""
self.__colIndexByName = {}
for i in range(len(self.__columnNames)):
self.__colIndexByName[self.__columnNames[i]] = i
def RemoveAllFieldsExcept(self,fieldsToKeep):
"""Remove all columns except for ones with the given indexes.
INPUTS:
-- fieldsToKeep: List of integer columns to keep.
-------------------------------------------------------
PURPOSE: Removes all fields except the ones given.
"""
fieldsToKeep = sorted([(
f if isinstance(f, (int, long)) else self.GetColIndex(f))
for f in fieldsToKeep])
oldColumnNames = self.__columnNames
self.__columnNames = []
for i in fieldsToKeep:
self.__columnNames.append(oldColumnNames[i])
oldData = self.data
self.data = [None]*len(oldData)
append = self.data.append
for row in range(len(oldData)):
oldRow = oldData[row]
self.data[row] = []
append = self.data[row].append
for i in fieldsToKeep:
append(oldRow[i])
del oldData
del oldColumnNames
self.__ResetNameForColumnDict()
def RemoveFieldsNamed(self,fieldsToRemove):
"""Remove columns with the given names.
INPUTS:
-- fieldsToRemove: List of strings representing columns to remove.
-------------------------------------------------------
PURPOSE: Removes the given columns as shown in the following
example:
>>> exampleQ = TimeSeq(['day','qnt','price','dm'],[[1,2,2,4],[2,5,5,6]])
>>> print exampleQ.data
[[1, 2, 2, 4], [2, 5, 5, 6]]
>>> exampleQ.RemoveFieldsNamed(['qnt','dm'])
>>> print exampleQ.GetColumnNames()
['day', 'price']
>>> print exampleQ.data
[[1, 2], [2, 5]]
"""
fieldsToRemove = set(fieldsToRemove)
fieldsToKeep = [i for i in range(len(self.__columnNames))
if self.__columnNames[i] not in fieldsToRemove]
return self.RemoveAllFieldsExcept(fieldsToKeep)
@classmethod
def ReadFromSimpleCSVFile(
cls, filename):
"""
Creates a TimeSeq object by reading data from a simple format
CSV file.
The format of the file is simply one line of header, and the
rest are data. There is no blank line between the header and
the data.
INPUTS:
-- fileName; string name of CSV file to read
RETURNS:
-- a TimeSeq object.
"""
fd = open(filename, "r")
reader = csv.reader(fd)
fields = reader.next()
rows = []
for line in reader:
if len(line) == 0:
continue
if len(line) < len(fields):
line.extend([None]*(len(fields)-len(line)))
rows.append(line)
return TimeSeq(fields, rows)
def WriteToSimpleCSVFile(self, filename):
"""Writes a TimeSeq object to a simple CSV file format
The format of the file is simply one line of header, and the
rest are data. There is no blank line between the header and
the data.
INPUTS:
-- fileName; string name of CSV file to write to.
"""
fd = open(filename, "w")
writer = csv.writer(fd)
writer.writerow(self.GetColumnNames())
for line in self.data:
writer.writerow(line)
def AddFields(self,transformList,lines=None):
"""
INPUTS:
-- transformList: A list of SeqXform objects to add
to this sequence.
-- lines: A generator indicating which lines of
of self.data to process. If this is None,
then all lines in self.data are processed.
-------------------------------------------------------
PURPOSE: Add the fields corresponding to the transforms in
transformList to this sequence. Specifically, this function
adds columns to this sequence corresponding to the outputFields
for the transfomrs in transformList and then populates these
fields by calling the transforms sequentially on every row.
Only those lines indicating by lines are processed.
Note that you can have later transforms refer to earlier
transforms.
The following example illustrates usage how the lines argument can
be combined with a generator such as the WeeklyDateJumper to process
data on a weekly level:
>>> import datetime
>>> import Sequence
>>> from Transforms import SeqXform
>>> exampleQ = Sequence.TimeSeq(['day','price','quantity'],
... [[datetime.date(2000,1,1)+datetime.timedelta(i),i,i+1] for i in range(5)])
>>> class ExampleTransform(SeqXform):
... def ProcessRow(self,args): return [args['price'] * args['quantity']]
...
>>> exampleQ.AddFields([ExampleTransform([],['product_v2'])],
... (num for (num, line) in enumerate(exampleQ.data) if line[0].weekday() == 1))
>>> print '\\n'.join(map(str,exampleQ.data))
[datetime.date(2000, 1, 1), 0, 1, None]
[datetime.date(2000, 1, 2), 1, 2, None]
[datetime.date(2000, 1, 3), 2, 3, None]
[datetime.date(2000, 1, 4), 3, 4, 12]
[datetime.date(2000, 1, 5), 4, 5, None]
"""
transformList = [t for t in transformList
if not getattr(t,'doNotProcess',False)]
if (len(transformList)==0): return
self.ComplainAboutNonTransforms(transformList)
logging.debug('Applying transforms: %s.' %
', '.join([str(t) for t in transformList]))
nameList = sum([t.outputFields for t in transformList],[])
adderList =sum([[str(t)]*len(t.outputFields) for t in transformList],[])
self.AddBlankColumns(nameList,adderList)
txRange = range(len(transformList))
txSlice = []
for transform in transformList:
startCol = self.GetColIndex(transform.outputFields[0])
endCol = self.GetColIndex(transform.outputFields[-1])
assert startCol is not None and endCol is not None
txSlice.append(slice(startCol,endCol+1))
for t in transformList: t.Startup(self)
numCols = len(self.__columnNames)
if (None == lines): lines = xrange(len(self.data))
for i in lines:
args = {}
for field in range(numCols):
args[self.__columnNames[field]] = self.data[i][field]
for txNum in txRange:
result = SeqXform.ProcessTransformList(
[transformList[txNum]],args,self,i)
self.data[i][txSlice[txNum]] = result
for t in transformList: t.Shutdown(self)
@staticmethod
def ComplainAboutNonTransforms(transformList):
"Complain about things in input list not instances of SeqXform"
bads = [(i,t) for (i,t) in enumerate(transformList)
if not isinstance(t, SeqXform)]
if (bads):
raise TypeError('''
The following elements were not SeqXform instances:\n%s
''' % '\n'.join(['element %i: %s' % (i, t) for (i,t) in bads]))
def _regr_test_AddFields(self):
"""
>>> import datetime
>>> from Transforms import SeqXform
>>> from Sequence import *
>>> exampleQ = TimeSeq(['day','price','quantity'],
... [[datetime.date(2000,1,1)+datetime.timedelta(i),i,i+1] for i in range(11)])
>>> class ExampleTransform(SeqXform):
... def ProcessRow(self,args): return [args['price'] * args['quantity']]
>>> exampleQ.AddFields([ExampleTransform([],['product'])])
>>> print '\\n'.join(map(str,exampleQ.data))
[datetime.date(2000, 1, 1), 0, 1, 0]
[datetime.date(2000, 1, 2), 1, 2, 2]
[datetime.date(2000, 1, 3), 2, 3, 6]
[datetime.date(2000, 1, 4), 3, 4, 12]
[datetime.date(2000, 1, 5), 4, 5, 20]
[datetime.date(2000, 1, 6), 5, 6, 30]
[datetime.date(2000, 1, 7), 6, 7, 42]
[datetime.date(2000, 1, 8), 7, 8, 56]
[datetime.date(2000, 1, 9), 8, 9, 72]
[datetime.date(2000, 1, 10), 9, 10, 90]
[datetime.date(2000, 1, 11), 10, 11, 110]
>>> exampleQ.AddFields([ExampleTransform([],['product_v2'])],[0,2])
>>> print '\\n'.join(map(str,exampleQ.data))
[datetime.date(2000, 1, 1), 0, 1, 0, 0]
[datetime.date(2000, 1, 2), 1, 2, 2, None]
[datetime.date(2000, 1, 3), 2, 3, 6, 6]
[datetime.date(2000, 1, 4), 3, 4, 12, None]
[datetime.date(2000, 1, 5), 4, 5, 20, None]
[datetime.date(2000, 1, 6), 5, 6, 30, None]
[datetime.date(2000, 1, 7), 6, 7, 42, None]
[datetime.date(2000, 1, 8), 7, 8, 56, None]
[datetime.date(2000, 1, 9), 8, 9, 72, None]
[datetime.date(2000, 1, 10), 9, 10, 90, None]
[datetime.date(2000, 1, 11), 10, 11, 110, None]
>>> exampleQ.AddFields([ExampleTransform([],['product_v3'])],
... lines=(n for (n, line) in enumerate(exampleQ.data) if line[0].weekday()==1))
>>> print '\\n'.join(map(str,exampleQ.data))
[datetime.date(2000, 1, 1), 0, 1, 0, 0, None]
[datetime.date(2000, 1, 2), 1, 2, 2, None, None]
[datetime.date(2000, 1, 3), 2, 3, 6, 6, None]
[datetime.date(2000, 1, 4), 3, 4, 12, None, 12]
[datetime.date(2000, 1, 5), 4, 5, 20, None, None]
[datetime.date(2000, 1, 6), 5, 6, 30, None, None]
[datetime.date(2000, 1, 7), 6, 7, 42, None, None]
[datetime.date(2000, 1, 8), 7, 8, 56, None, None]
[datetime.date(2000, 1, 9), 8, 9, 72, None, None]
[datetime.date(2000, 1, 10), 9, 10, 90, None, None]
[datetime.date(2000, 1, 11), 10, 11, 110, None, 110]
"""
def GetColIndex(self,name):
"""
INPUTS:
-- name: String representing name of a column to lookup.
RETURNS: Integer representing index for the named column or
None if the column is not present.
"""
simpleCol = self.__colIndexByName.get(name,None)
return simpleCol
def NameForColumn(self,index):
"self.NameForColumn(index) returns the name of the column at index."
return self.__columnNames[index]
def GetColumnNames(self):
"GetColumnNames(self): Returns names of columns in this sequence."
return copy.deepcopy(self.__columnNames)
def AddBlankColumns(self,nameList,adderList=None,default=None,
startingPos=-1):
"""Add blank columns to this sequence.
INPUTS:
-- nameList: List of names for the columns to add.
-- adderList: List of strings (one for each element in nameList)
indicating who is adding the given name. This is
optional and can be left as None.
-- default: Value to add for new columns.
-- startingPos: Integer indicating the column number at which
to insert the new columns:
-1 indicates the last column.
0 indicates the first column.
For example, inserting 'NewColumn' to a columnList of
['event_date', 'val1'] and at startingPos of 0 makes
the column List ['NewColumn', 'event_date', 'val1']
-------------------------------------------------------
RETURNS: List of indexes for the new columns.
-------------------------------------------------------
PURPOSE: This function is useful if you want to add new
columns of data. First you call this to create the
columns and then you can set the values accordingly.
The following illustrates example usage:
>>> exampleQ = TimeSeq(['day','qnt','price'],[[1,2,4],[8,5,9],[7,0,6]])
>>> exampleQ.AddBlankColumns(['foo','bar'])
[3, 4]
>>> print exampleQ.GetColumnNames()
['day', 'qnt', 'price', 'foo', 'bar']
>>> print exampleQ.data[-1]
[7, 0, 6, None, None]
>>> exampleQ.AddBlankColumns(['test1', 'test2'], startingPos = 1)
[1, 2]
>>> print exampleQ.GetColumnNames()
['day', 'test1', 'test2', 'qnt', 'price', 'foo', 'bar']
>>> print exampleQ.data[-1]
[7, None, None, 0, 6, None, None]
"""
if (None == adderList): adderList = ['unknown']*len(nameList)
numNames = len(nameList)
if (numNames != len(set(nameList))): # duplicates exist in nameList
for i in range(len(nameList)):
if (nameList[i] in nameList[(i+1):]):
dupInd = nameList[(i+1):].index(nameList[i])
raise Exception(
"Name %s added at i=%i by %s and at i=%i by %s." % (
nameList[i],i,adderList[i],dupInd,adderList[dupInd]))
raise Exception("Duplicate indexes in nameList.")
for name in nameList:
assert not isinstance(name,(tuple,list)),'Names should be strings.'
index = self.GetColIndex(name)
if (None != index):
raise Exception(
"Column %s can't be added; it already exists at index %i."
% (name,index))
assert startingPos in [-1] + range(len(self.__columnNames))
if startingPos == -1:
startingPos = len(self.__columnNames)
for name in nameList:
self.__columnNames.insert(startingPos, name)
for line in self.data:
line.insert(startingPos, default)
startingPos+=1
self.__ResetNameForColumnDict() # reset the column name dict
return range(startingPos- len(nameList), startingPos)
def RenameColumns(self,oldNames,newNames):
"""Rename columns.
INPUTS:
-- oldNames: List of old names for columns.
-- newNames: List of new names for columns.
-------------------------------------------------------
PURPOSE: For each i, this function renames column oldNames[i]
to have the name newNames[i] as illustrated below:
>>> import Sequence
>>> exampleQ = Sequence.TimeSeq(['x','y','z'],[[0,1,2]])
>>> exampleQ.RenameColumns(['x','z'],['a','b'])
>>> print exampleQ.GetColumnNames()
['a', 'y', 'b']
"""
if (len(oldNames)!=len(newNames)):
raise Exception("oldNames and newNames must have the same length")
if (len(oldNames) != len(set(oldNames))):
raise Exception("oldNames list contains duplicates")
if (len(newNames) != len(set(newNames))):
raise Exception("newNames list contains duplicates")
indexes = [self.GetColIndex(n) for n in oldNames]
if (None in indexes):
raise Exception("No column named %s."%oldNames[indexes.index(None)])
for i in range(len(indexes)):
if (not isinstance(newNames[i],str)):
raise Exception('Name %s is not a string.' % str(newNames[i]))
self.__columnNames[indexes[i]] = newNames[i]
self.__ResetNameForColumnDict() # reset the column name dict
def CopyColumns(self,oldNames,newNames):
"""Copy columns.
INPUTS:
-- oldNames: List of old names for columns.
-- newNames: List of new names for columns.
-------------------------------------------------------
PURPOSE: For each i, this function copies column oldNames[i]
into a new column named newNames[i] as shown below:
>>> import Sequence
>>> exampleQ = Sequence.TimeSeq(['x','y','z'],[[0,1,2],[3,4,5]])
>>> exampleQ.CopyColumns(['x','z'],['a','b'])
>>> print exampleQ.GetColumnNames()
['x', 'y', 'z', 'a', 'b']
>>> print exampleQ.data
[[0, 1, 2, 0, 2], [3, 4, 5, 3, 5]]
"""
if (len(oldNames)!=len(newNames)):
raise Exception("oldNames and newNames must have the same length")
if (len(oldNames) != len(set(oldNames))):
raise Exception("oldNames list contains duplicates")
if (len(newNames) != len(set(newNames))):
raise Exception("newNames list contains duplicates")
indexes = [self.GetColIndex(n) for n in oldNames]
if (None in indexes):
raise Exception("No column named %s."%oldNames[indexes.index(None)])
for name in newNames:
assert not isinstance(name,(tuple,list)),'Names should be strings.'
index = self.GetColIndex(name)
if (None != index):
raise Exception(
"Column %s can't be added; it already exists at index %i."
% (name,index))
self.__columnNames.extend(newNames) # add the new names
self.__ResetNameForColumnDict() # reset the column name dict
self.data = [line + [line[i] for i in indexes] for line in self.data]
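# Doctest-style sketch (kept as a comment, not executed) of a CSV round trip
# using the two helpers defined above; the file name is hypothetical:
#
#     >>> seq = TimeSeq(['day', 'price'], [[1, 2], [3, 4]])
#     >>> seq.WriteToSimpleCSVFile('/tmp/example.csv')
#     >>> again = TimeSeq.ReadFromSimpleCSVFile('/tmp/example.csv')
#     >>> again.GetColumnNames()
#     ['day', 'price']
#
# Values read back are strings, since csv.reader performs no type coercion.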
def _test():
"Test docstrings in module."
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
print 'Test finished.'
| {
"content_hash": "82d4b127e30778db2826b815ed8c3520",
"timestamp": "",
"source": "github",
"line_count": 479,
"max_line_length": 80,
"avg_line_length": 38.979123173277664,
"alnum_prop": 0.539606876974988,
"repo_name": "eiriks/pyvol",
"id": "638856db6ba3005e493f6f0aed688d3ac2b84cc7",
"size": "18671",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyvol/tseries/Sequence.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "139522"
}
],
"symlink_target": ""
} |
import json
import os
import socket
import time
import uuid
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.html import escape
import requests
from urllib.parse import quote, urlencode
from hc.accounts.models import Profile
from hc.api.schemas import telegram_migration
from hc.front.templatetags.hc_extras import sortchecks
from hc.lib import emails, jsonschema
from hc.lib.date import format_duration
from hc.lib.string import replace
try:
import apprise
except ImportError:
    # Apprise is not installed, make sure the integration stays disabled
settings.APPRISE_ENABLED = False
def tmpl(template_name, **ctx) -> str:
template_path = "integrations/%s" % template_name
# \xa0 is non-breaking space. It causes SMS messages to use UCS2 encoding
# and cost twice the money.
return render_to_string(template_path, ctx).strip().replace("\xa0", " ")
class TransportError(Exception):
def __init__(self, message, permanent=False) -> None:
self.message = message
self.permanent = permanent
class Transport(object):
def __init__(self, channel):
self.channel = channel
def notify(self, check, notification=None) -> None:
""" Send notification about current status of the check.
This method raises TransportError on error, and returns None
on success.
"""
raise NotImplementedError()
def is_noop(self, check) -> bool:
""" Return True if transport will ignore check's current status.
This method is overridden in Webhook subclass where the user can
configure webhook urls for "up" and "down" events, and both are
optional.
"""
return False
def down_checks(self, check):
""" Return a sorted list of other checks in the same project that are down.
        If there are no other checks in the project, return None instead of an empty list.
Templates can check for None to decide whether to show or not show the
"All other checks are up" note.
"""
siblings = self.channel.project.check_set.exclude(id=check.id)
if not siblings.exists():
return None
down_siblings = list(siblings.filter(status="down"))
sortchecks(down_siblings, "name")
return down_siblings
class Email(Transport):
def notify(self, check, notification=None) -> None:
if not self.channel.email_verified:
raise TransportError("Email not verified")
unsub_link = self.channel.get_unsub_link()
headers = {
"List-Unsubscribe": "<%s>" % unsub_link,
"List-Unsubscribe-Post": "List-Unsubscribe=One-Click",
}
if notification:
headers["X-Status-Url"] = notification.status_url()
from hc.accounts.models import Profile
# If this email address has an associated account, include
# a summary of projects the account has access to
try:
profile = Profile.objects.get(user__email=self.channel.email_value)
projects = list(profile.projects())
except Profile.DoesNotExist:
projects = None
ctx = {
"check": check,
"ping": check.ping_set.order_by("created").last(),
"projects": projects,
"unsub_link": unsub_link,
}
emails.alert(self.channel.email_value, ctx, headers)
def is_noop(self, check) -> bool:
if check.status == "down":
return not self.channel.email_notify_down
else:
return not self.channel.email_notify_up
class Shell(Transport):
def prepare(self, template: str, check) -> str:
""" Replace placeholders with actual values. """
ctx = {
"$CODE": str(check.code),
"$STATUS": check.status,
"$NOW": timezone.now().replace(microsecond=0).isoformat(),
"$NAME": check.name,
"$TAGS": check.tags,
}
for i, tag in enumerate(check.tags_list()):
ctx["$TAG%d" % (i + 1)] = tag
return replace(template, ctx)
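    # Hedged usage sketch (not part of the original module; the check values
    # are hypothetical). Assuming check.name == "backups",
    # check.status == "down" and check.tags_list() == ["prod", "db"]:
    #
    #   >>> Shell(channel).prepare("logger $NAME is $STATUS ($TAG1, $TAG2)", check)
    #   'logger backups is down (prod, db)'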
def is_noop(self, check) -> bool:
if check.status == "down" and not self.channel.cmd_down:
return True
if check.status == "up" and not self.channel.cmd_up:
return True
return False
def notify(self, check, notification=None) -> None:
if not settings.SHELL_ENABLED:
raise TransportError("Shell commands are not enabled")
if check.status == "up":
cmd = self.channel.cmd_up
elif check.status == "down":
cmd = self.channel.cmd_down
cmd = self.prepare(cmd, check)
code = os.system(cmd)
if code != 0:
raise TransportError("Command returned exit code %d" % code)
class HttpTransport(Transport):
@classmethod
def raise_for_response(cls, response):
# Subclasses can override this method to produce a more specific message.
raise TransportError(f"Received status code {response.status_code}")
@classmethod
def _request(cls, method, url, **kwargs) -> None:
options = dict(kwargs)
options["timeout"] = 10
if "headers" not in options:
options["headers"] = {}
if "User-Agent" not in options["headers"]:
options["headers"]["User-Agent"] = "healthchecks.io"
try:
r = requests.request(method, url, **options)
if r.status_code not in (200, 201, 202, 204):
cls.raise_for_response(r)
except requests.exceptions.Timeout:
# Well, we tried
raise TransportError("Connection timed out")
except requests.exceptions.ConnectionError:
raise TransportError("Connection failed")
@classmethod
def _request_with_retries(cls, method, url, use_retries=True, **kwargs) -> None:
start = time.time()
tries_left = 3 if use_retries else 1
while True:
try:
return cls._request(method, url, **kwargs)
except TransportError as e:
tries_left = 0 if e.permanent else tries_left - 1
# If we have no tries left *or* have already used more than
# 15 seconds of time then abort the retry loop by re-raising
# the exception:
if tries_left == 0 or time.time() - start > 15:
raise e
@classmethod
def get(cls, url, **kwargs) -> None:
cls._request_with_retries("get", url, **kwargs)
@classmethod
def post(cls, url, **kwargs) -> None:
cls._request_with_retries("post", url, **kwargs)
@classmethod
def put(cls, url, **kwargs) -> None:
cls._request_with_retries("put", url, **kwargs)
class Webhook(HttpTransport):
def prepare(self, template: str, check, urlencode=False, latin1=False) -> str:
""" Replace variables with actual values. """
def safe(s: str) -> str:
return quote(s) if urlencode else s
ctx = {
"$CODE": str(check.code),
"$STATUS": check.status,
"$NOW": safe(timezone.now().replace(microsecond=0).isoformat()),
"$NAME": safe(check.name),
"$TAGS": safe(check.tags),
}
for i, tag in enumerate(check.tags_list()):
ctx["$TAG%d" % (i + 1)] = safe(tag)
result = replace(template, ctx)
if latin1:
# Replace non-latin-1 characters with XML character references.
result = result.encode("latin-1", "xmlcharrefreplace").decode("latin-1")
return result
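    # Hedged illustration (not part of the original module; values are made
    # up). With urlencode=True a check named "db backup" expands inside a URL
    # as "db%20backup"; with latin1=True, characters outside latin-1, e.g.
    # "→" (U+2192), are emitted as XML character references:
    #
    #   >>> "status → down".encode("latin-1", "xmlcharrefreplace").decode("latin-1")
    #   'status &#8594; down'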
def is_noop(self, check) -> bool:
if check.status == "down" and not self.channel.url_down:
return True
if check.status == "up" and not self.channel.url_up:
return True
return False
def notify(self, check, notification=None) -> None:
if not settings.WEBHOOKS_ENABLED:
raise TransportError("Webhook notifications are not enabled.")
spec = self.channel.webhook_spec(check.status)
if not spec["url"]:
raise TransportError("Empty webhook URL")
url = self.prepare(spec["url"], check, urlencode=True)
headers = {}
for key, value in spec["headers"].items():
            # Header values may contain latin-1 (incl. ASCII) characters only
headers[key] = self.prepare(value, check, latin1=True)
body = spec["body"]
if body:
body = self.prepare(body, check).encode()
# When sending a test notification, don't retry on failures.
use_retries = True
if notification and notification.owner is None:
use_retries = False # this is a test notification
if spec["method"] == "GET":
self.get(url, use_retries=use_retries, headers=headers)
elif spec["method"] == "POST":
self.post(url, use_retries=use_retries, data=body, headers=headers)
elif spec["method"] == "PUT":
self.put(url, use_retries=use_retries, data=body, headers=headers)
class Slack(HttpTransport):
@classmethod
def raise_for_response(cls, response):
message = f"Received status code {response.status_code}"
# If Slack returns 404, this endpoint is unlikely to ever work again
# https://api.slack.com/messaging/webhooks#handling_errors
permanent = response.status_code == 404
raise TransportError(message, permanent=permanent)
def notify(self, check, notification=None) -> None:
if self.channel.kind == "slack" and not settings.SLACK_ENABLED:
raise TransportError("Slack notifications are not enabled.")
if self.channel.kind == "mattermost" and not settings.MATTERMOST_ENABLED:
raise TransportError("Mattermost notifications are not enabled.")
text = tmpl("slack_message.json", check=check)
payload = json.loads(text)
self.post(self.channel.slack_webhook_url, json=payload)
class HipChat(HttpTransport):
def is_noop(self, check) -> bool:
return True
class Opsgenie(HttpTransport):
@classmethod
def raise_for_response(cls, response):
message = f"Received status code {response.status_code}"
try:
details = response.json().get("message")
if isinstance(details, str):
message += f' with a message: "{details}"'
except ValueError:
pass
raise TransportError(message)
def notify(self, check, notification=None) -> None:
if not settings.OPSGENIE_ENABLED:
raise TransportError("Opsgenie notifications are not enabled.")
headers = {
"Conent-Type": "application/json",
"Authorization": "GenieKey %s" % self.channel.opsgenie_key,
}
payload = {"alias": str(check.code), "source": settings.SITE_NAME}
if check.status == "down":
payload["tags"] = check.tags_list()
payload["message"] = tmpl("opsgenie_message.html", check=check)
payload["note"] = tmpl("opsgenie_note.html", check=check)
payload["description"] = tmpl("opsgenie_description.html", check=check)
url = "https://api.opsgenie.com/v2/alerts"
if self.channel.opsgenie_region == "eu":
url = "https://api.eu.opsgenie.com/v2/alerts"
if check.status == "up":
url += "/%s/close?identifierType=alias" % check.code
self.post(url, json=payload, headers=headers)
class PagerDuty(HttpTransport):
URL = "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
def notify(self, check, notification=None) -> None:
if not settings.PD_ENABLED:
raise TransportError("PagerDuty notifications are not enabled.")
details = {
"Project": check.project.name,
"Total pings": check.n_pings,
"Last ping": tmpl("pd_last_ping.html", check=check),
}
if check.desc:
details["Description"] = check.desc
if check.tags:
details["Tags"] = ", ".join(check.tags_list())
if check.kind == "simple":
details["Period"] = format_duration(check.timeout)
if check.kind == "cron":
details["Schedule"] = check.schedule
description = tmpl("pd_description.html", check=check)
payload = {
"service_key": self.channel.pd_service_key,
"incident_key": str(check.code),
"event_type": "trigger" if check.status == "down" else "resolve",
"description": description,
"client": settings.SITE_NAME,
"client_url": check.details_url(),
"details": details,
}
self.post(self.URL, json=payload)
class PagerTree(HttpTransport):
def notify(self, check, notification=None) -> None:
if not settings.PAGERTREE_ENABLED:
raise TransportError("PagerTree notifications are not enabled.")
url = self.channel.value
headers = {"Conent-Type": "application/json"}
payload = {
"incident_key": str(check.code),
"event_type": "trigger" if check.status == "down" else "resolve",
"title": tmpl("pagertree_title.html", check=check),
"description": tmpl("pagertree_description.html", check=check),
"client": settings.SITE_NAME,
"client_url": settings.SITE_ROOT,
"tags": ",".join(check.tags_list()),
}
self.post(url, json=payload, headers=headers)
class PagerTeam(HttpTransport):
def is_noop(self, check) -> bool:
return True
class Pushbullet(HttpTransport):
def notify(self, check, notification=None) -> None:
text = tmpl("pushbullet_message.html", check=check)
url = "https://api.pushbullet.com/v2/pushes"
headers = {
"Access-Token": self.channel.value,
"Conent-Type": "application/json",
}
payload = {"type": "note", "title": settings.SITE_NAME, "body": text}
self.post(url, json=payload, headers=headers)
class Pushover(HttpTransport):
URL = "https://api.pushover.net/1/messages.json"
CANCEL_TMPL = "https://api.pushover.net/1/receipts/cancel_by_tag/%s.json"
def notify(self, check, notification=None) -> None:
pieces = self.channel.value.split("|")
user_key, down_prio = pieces[0], pieces[1]
# The third element, if present, is the priority for "up" events
up_prio = down_prio
if len(pieces) == 3:
up_prio = pieces[2]
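        # Hedged example (not part of the original module; the key is fake):
        # channel.value is expected to look like "<user_key>|<down_priority>"
        # or "<user_key>|<down_priority>|<up_priority>", e.g.
        #
        #   "uXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX|2|0"
        #
        # which sends "down" alerts with emergency priority (2) and "up"
        # alerts with normal priority (0).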
from hc.api.models import TokenBucket
if not TokenBucket.authorize_pushover(user_key):
raise TransportError("Rate limit exceeded")
# If down events have the emergency priority,
# send a cancel call first
if check.status == "up" and down_prio == "2":
url = self.CANCEL_TMPL % check.unique_key
cancel_payload = {"token": settings.PUSHOVER_API_TOKEN}
self.post(url, data=cancel_payload)
ctx = {"check": check, "down_checks": self.down_checks(check)}
text = tmpl("pushover_message.html", **ctx)
title = tmpl("pushover_title.html", **ctx)
prio = up_prio if check.status == "up" else down_prio
payload = {
"token": settings.PUSHOVER_API_TOKEN,
"user": user_key,
"message": text,
"title": title,
"html": 1,
"priority": int(prio),
"tags": check.unique_key,
}
# Emergency notification
if prio == "2":
payload["retry"] = settings.PUSHOVER_EMERGENCY_RETRY_DELAY
payload["expire"] = settings.PUSHOVER_EMERGENCY_EXPIRATION
self.post(self.URL, data=payload)
class VictorOps(HttpTransport):
def notify(self, check, notification=None) -> None:
if not settings.VICTOROPS_ENABLED:
raise TransportError("Splunk On-Call notifications are not enabled.")
description = tmpl("victorops_description.html", check=check)
mtype = "CRITICAL" if check.status == "down" else "RECOVERY"
payload = {
"entity_id": str(check.code),
"message_type": mtype,
"entity_display_name": check.name_then_code(),
"state_message": description,
"monitoring_tool": settings.SITE_NAME,
}
self.post(self.channel.value, json=payload)
class Matrix(HttpTransport):
def get_url(self):
s = quote(self.channel.value)
url = settings.MATRIX_HOMESERVER
url += "/_matrix/client/r0/rooms/%s/send/m.room.message?" % s
url += urlencode({"access_token": settings.MATRIX_ACCESS_TOKEN})
return url
def notify(self, check, notification=None) -> None:
plain = tmpl("matrix_description.html", check=check)
formatted = tmpl("matrix_description_formatted.html", check=check)
payload = {
"msgtype": "m.text",
"body": plain,
"format": "org.matrix.custom.html",
"formatted_body": formatted,
}
self.post(self.get_url(), json=payload)
class Discord(HttpTransport):
def notify(self, check, notification=None) -> None:
text = tmpl("slack_message.json", check=check)
payload = json.loads(text)
url = self.channel.discord_webhook_url + "/slack"
self.post(url, json=payload)
class MigrationRequiredError(TransportError):
def __init__(self, message, new_chat_id: int):
super().__init__(message, permanent=True)
self.new_chat_id = new_chat_id
class Telegram(HttpTransport):
SM = "https://api.telegram.org/bot%s/sendMessage" % settings.TELEGRAM_TOKEN
@classmethod
def raise_for_response(cls, response):
message = f"Received status code {response.status_code}"
try:
doc = response.json()
except ValueError:
raise TransportError(message)
# If the error payload contains the migrate_to_chat_id field,
# raise MigrationRequiredError, with the new chat_id included
try:
jsonschema.validate(doc, telegram_migration)
description = doc["description"]
chat_id = doc["parameters"]["migrate_to_chat_id"]
raise MigrationRequiredError(description, chat_id)
except jsonschema.ValidationError:
pass
permanent = False
description = doc.get("description")
if isinstance(description, str):
message += f' with a message: "{description}"'
if description == "Forbidden: the group chat was deleted":
permanent = True
raise TransportError(message, permanent=permanent)
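    # Hedged sketch (not part of the original module) of the kind of error
    # payload the Bot API sends when a group chat is upgraded to a supergroup;
    # the wording and ids below are illustrative only:
    #
    #   {"ok": false,
    #    "error_code": 400,
    #    "description": "Bad Request: group chat was upgraded to a supergroup chat",
    #    "parameters": {"migrate_to_chat_id": -1001234567890}}
    #
    # raise_for_response() turns it into MigrationRequiredError so notify()
    # can retry with the new chat id.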
@classmethod
def send(cls, chat_id, text):
# Telegram.send is a separate method because it is also used in
# hc.front.views.telegram_bot to send invite links.
cls.post(cls.SM, json={"chat_id": chat_id, "text": text, "parse_mode": "html"})
def notify(self, check, notification=None) -> None:
from hc.api.models import TokenBucket
if not TokenBucket.authorize_telegram(self.channel.telegram_id):
raise TransportError("Rate limit exceeded")
ctx = {"check": check, "down_checks": self.down_checks(check)}
text = tmpl("telegram_message.html", **ctx)
try:
self.send(self.channel.telegram_id, text)
except MigrationRequiredError as e:
# Save the new chat_id, then try sending again:
self.channel.update_telegram_id(e.new_chat_id)
self.send(self.channel.telegram_id, text)
class Sms(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
def is_noop(self, check) -> bool:
if check.status == "down":
return not self.channel.sms_notify_down
else:
return not self.channel.sms_notify_up
def notify(self, check, notification=None) -> None:
profile = Profile.objects.for_user(self.channel.project.owner)
if not profile.authorize_sms():
profile.send_sms_limit_notice("SMS")
raise TransportError("Monthly SMS limit exceeded")
url = self.URL % settings.TWILIO_ACCOUNT
auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH)
text = tmpl("sms_message.html", check=check, site_name=settings.SITE_NAME)
data = {
"From": settings.TWILIO_FROM,
"To": self.channel.phone_number,
"Body": text,
}
if notification:
data["StatusCallback"] = notification.status_url()
self.post(url, data=data, auth=auth)
class Call(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Calls.json"
def is_noop(self, check) -> bool:
return check.status != "down"
def notify(self, check, notification=None) -> None:
profile = Profile.objects.for_user(self.channel.project.owner)
if not profile.authorize_call():
profile.send_call_limit_notice()
raise TransportError("Monthly phone call limit exceeded")
url = self.URL % settings.TWILIO_ACCOUNT
auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH)
twiml = tmpl("call_message.html", check=check, site_name=settings.SITE_NAME)
data = {
"From": settings.TWILIO_FROM,
"To": self.channel.phone_number,
"Twiml": twiml,
}
if notification:
data["StatusCallback"] = notification.status_url()
self.post(url, data=data, auth=auth)
class WhatsApp(HttpTransport):
URL = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json"
def is_noop(self, check) -> bool:
if check.status == "down":
return not self.channel.whatsapp_notify_down
else:
return not self.channel.whatsapp_notify_up
def notify(self, check, notification=None) -> None:
profile = Profile.objects.for_user(self.channel.project.owner)
if not profile.authorize_sms():
profile.send_sms_limit_notice("WhatsApp")
raise TransportError("Monthly message limit exceeded")
url = self.URL % settings.TWILIO_ACCOUNT
auth = (settings.TWILIO_ACCOUNT, settings.TWILIO_AUTH)
text = tmpl("whatsapp_message.html", check=check, site_name=settings.SITE_NAME)
data = {
"From": "whatsapp:%s" % settings.TWILIO_FROM,
"To": "whatsapp:%s" % self.channel.phone_number,
"Body": text,
}
if notification:
data["StatusCallback"] = notification.status_url()
self.post(url, data=data, auth=auth)
class Trello(HttpTransport):
URL = "https://api.trello.com/1/cards"
def is_noop(self, check) -> bool:
return check.status != "down"
def notify(self, check, notification=None) -> None:
params = {
"idList": self.channel.trello_list_id,
"name": tmpl("trello_name.html", check=check),
"desc": tmpl("trello_desc.html", check=check),
"key": settings.TRELLO_APP_KEY,
"token": self.channel.trello_token,
}
self.post(self.URL, params=params)
class Apprise(HttpTransport):
def notify(self, check, notification=None) -> None:
if not settings.APPRISE_ENABLED:
# Not supported and/or enabled
raise TransportError("Apprise is disabled and/or not installed")
a = apprise.Apprise()
title = tmpl("apprise_title.html", check=check)
body = tmpl("apprise_description.html", check=check)
a.add(self.channel.value)
notify_type = (
apprise.NotifyType.SUCCESS
if check.status == "up"
else apprise.NotifyType.FAILURE
)
if not a.notify(body=body, title=title, notify_type=notify_type):
raise TransportError("Failed")
class MsTeams(HttpTransport):
def escape_md(self, s):
# Escape special HTML characters
s = escape(s)
# Escape characters that have special meaning in Markdown
for c in r"\`*_{}[]()#+-.!|":
s = s.replace(c, "\\" + c)
return s
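    # Hedged doctest-style sketch (not part of the original module):
    #
    #   >>> MsTeams(channel).escape_md("<b>*50% done*</b>")
    #   '&lt;b&gt;\\*50% done\\*&lt;/b&gt;'
    #
    # HTML specials become entities first, then Markdown specials get a
    # literal backslash so Teams renders them verbatim.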
def notify(self, check, notification=None) -> None:
if not settings.MSTEAMS_ENABLED:
raise TransportError("MS Teams notifications are not enabled.")
text = tmpl("msteams_message.json", check=check)
payload = json.loads(text)
# MS Teams escapes HTML special characters in the summary field.
# It does not interpret summary content as Markdown.
name = check.name_then_code()
payload["summary"] = f"“{name}” is {check.status.upper()}."
        # MS Teams *strips* HTML special characters from the title field.
# To avoid that, we use escape().
# It does not interpret title as Markdown.
safe_name = escape(name)
payload["title"] = f"“{safe_name}” is {check.status.upper()}."
        # MS Teams allows some HTML in the section text.
# It also interprets the section text as Markdown.
# We want to display the raw content, angle brackets and all,
# so we run escape() and then additionally escape Markdown:
payload["sections"][0]["text"] = self.escape_md(check.desc)
self.post(self.channel.value, json=payload)
class Zulip(HttpTransport):
@classmethod
def raise_for_response(cls, response):
message = f"Received status code {response.status_code}"
try:
details = response.json().get("msg")
if isinstance(details, str):
message += f' with a message: "{details}"'
except ValueError:
pass
raise TransportError(message)
def notify(self, check, notification=None) -> None:
if not settings.ZULIP_ENABLED:
raise TransportError("Zulip notifications are not enabled.")
url = self.channel.zulip_site + "/api/v1/messages"
auth = (self.channel.zulip_bot_email, self.channel.zulip_api_key)
data = {
"type": self.channel.zulip_type,
"to": self.channel.zulip_to,
"topic": tmpl("zulip_topic.html", check=check),
"content": tmpl("zulip_content.html", check=check),
}
self.post(url, data=data, auth=auth)
class Spike(HttpTransport):
def notify(self, check, notification=None) -> None:
if not settings.SPIKE_ENABLED:
raise TransportError("Spike notifications are not enabled.")
url = self.channel.value
headers = {"Conent-Type": "application/json"}
payload = {
"check_id": str(check.code),
"title": tmpl("spike_title.html", check=check),
"message": tmpl("spike_description.html", check=check),
"status": check.status,
}
self.post(url, json=payload, headers=headers)
class LineNotify(HttpTransport):
URL = "https://notify-api.line.me/api/notify"
def notify(self, check, notification=None) -> None:
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Bearer %s" % self.channel.linenotify_token,
}
payload = {"message": tmpl("linenotify_message.html", check=check)}
self.post(self.URL, headers=headers, params=payload)
class Signal(Transport):
def is_noop(self, check) -> bool:
if check.status == "down":
return not self.channel.signal_notify_down
else:
return not self.channel.signal_notify_up
def send(self, recipient, message):
payload = {
"jsonrpc": "2.0",
"method": "send",
"params": {"recipient": [recipient], "message": message},
"id": str(uuid.uuid4()),
}
payload_bytes = (json.dumps(payload) + "\n").encode()
for reply_bytes in self._read_replies(payload_bytes):
try:
reply = json.loads(reply_bytes.decode())
except ValueError:
raise TransportError("signal-cli call failed (unexpected response)")
if reply.get("id") == payload["id"]:
if "error" not in reply:
# success!
break
message = reply["error"].get("message", "")
if "UnregisteredUserException" in message:
raise TransportError("Recipient not found")
code = reply["error"].get("code")
raise TransportError("signal-cli call failed (%s)" % code)
def _read_replies(self, payload_bytes):
"""Send a request to signal-cli over UNIX socket. Read and yield replies.
This method:
* opens UNIX socket
* sends the request data (JSON RPC data encoded as bytes)
* reads newline-terminated responses and yields them
Individual sendall and recv operations have a timeout of 15 seconds.
This method also keeps track of total time spent in the method, and raises
an exception when the total time exceeds 15 seconds.
"""
start = time.time()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.settimeout(15)
try:
s.connect(settings.SIGNAL_CLI_SOCKET)
s.sendall(payload_bytes)
s.shutdown(socket.SHUT_WR) # we are done sending
buffer = []
while True:
ch = s.recv(1)
buffer.append(ch)
if ch in (b"\n", b""):
yield b"".join(buffer)
buffer = []
if time.time() - start > 15:
raise TransportError("signal-cli call timed out")
except OSError as e:
raise TransportError("signal-cli call failed (%s)" % e)
def notify(self, check, notification=None) -> None:
if not settings.SIGNAL_CLI_SOCKET:
raise TransportError("Signal notifications are not enabled")
from hc.api.models import TokenBucket
if not TokenBucket.authorize_signal(self.channel.phone_number):
raise TransportError("Rate limit exceeded")
ctx = {"check": check, "down_checks": self.down_checks(check)}
text = tmpl("signal_message.html", **ctx)
self.send(self.channel.phone_number, text)
| {
"content_hash": "dd65955edbed6677ddf24309e6bfce59",
"timestamp": "",
"source": "github",
"line_count": 889,
"max_line_length": 87,
"avg_line_length": 34.514060742407196,
"alnum_prop": 0.5966496105335202,
"repo_name": "iphoting/healthchecks",
"id": "248bcf3c2e8927cc64f3252a9fbadf45fb5ba5e2",
"size": "30691",
"binary": false,
"copies": "1",
"ref": "refs/heads/heroku",
"path": "hc/api/transports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "64145"
},
{
"name": "Dockerfile",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "595497"
},
{
"name": "JavaScript",
"bytes": "55883"
},
{
"name": "Less",
"bytes": "14135"
},
{
"name": "Python",
"bytes": "894208"
},
{
"name": "Shell",
"bytes": "4382"
}
],
"symlink_target": ""
} |