# -*- coding: cp1252 -*-

##
# <p> Portions copyright © 2005-2013 Stephen John Machin, Lingfo Pty Ltd</p>
# <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p>
##

# 2010-04-25 SJM fix zoom factors cooking logic
# 2010-04-15 CW  r4253 fix zoom factors cooking logic
# 2010-04-09 CW  r4248 add a flag so xlutils knows whether or not to write a PANE record
# 2010-03-29 SJM Fixed bug in adding new empty rows in put_cell_ragged
# 2010-03-28 SJM Tailored put_cell method for each of ragged_rows=False (fixed speed regression) and =True (faster)
# 2010-03-25 CW  r4236 Slight refactoring to remove method calls
# 2010-03-25 CW  r4235 Collapse expand_cells into put_cell and enhance the raggedness. This should save even more memory!
# 2010-03-25 CW  r4234 remove duplicate chunks for extend_cells; refactor to remove put_number_cell and put_blank_cell which essentially duplicated the code of put_cell
# 2010-03-10 SJM r4222 Added reading of the PANE record.
# 2010-03-10 SJM r4221 Preliminary work on "cooked" mag factors; use at own peril
# 2010-03-01 SJM Reading SCL record
# 2010-03-01 SJM Added ragged_rows functionality
# 2009-08-23 SJM Reduced CPU time taken by parsing MULBLANK records.
# 2009-08-18 SJM Used __slots__ and sharing to reduce memory consumed by Rowinfo instances
# 2009-05-31 SJM Fixed problem with no CODEPAGE record on extremely minimal BIFF2.x 3rd-party file
# 2009-04-27 SJM Integrated on_demand patch by Armando Serrano Lombillo
# 2008-02-09 SJM Excel 2.0: build XFs on the fly from cell attributes
# 2007-12-04 SJM Added support for Excel 2.x (BIFF2) files.
# 2007-10-11 SJM Added missing entry for blank cell type to ctype_text
# 2007-07-11 SJM Allow for BIFF2/3-style FORMAT record in BIFF4/8 file
# 2007-04-22 SJM Remove experimental "trimming" facility.

from __future__ import print_function

from array import array
from struct import unpack, calcsize

from .biffh import *
from .timemachine import *
from .formula import dump_formula, decompile_formula, rangename2d, FMLA_TYPE_CELL, FMLA_TYPE_SHARED
from .formatting import nearest_colour_index, Format

DEBUG = 0
OBJ_MSO_DEBUG = 0

_WINDOW2_options = (
    # Attribute names and initial values to use in case
    # a WINDOW2 record is not written.
    ("show_formulas", 0),
    ("show_grid_lines", 1),
    ("show_sheet_headers", 1),
    ("panes_are_frozen", 0),
    ("show_zero_values", 1),
    ("automatic_grid_line_colour", 1),
    ("columns_from_right_to_left", 0),
    ("show_outline_symbols", 1),
    ("remove_splits_if_pane_freeze_is_removed", 0),
    # Multiple sheets can be selected, but only one can be active
    # (hold down Ctrl and click multiple tabs in the file in OOo)
    ("sheet_selected", 0),
    # "sheet_visible" should really be called "sheet_active"
    # and is 1 when this sheet is the sheet displayed when the file
    # is open. More than likely only one sheet should ever be set as
    # visible.
    # This would correspond to the Book's sheet_active attribute, but
    # that doesn't exist as WINDOW1 records aren't currently processed.
    # The real thing is the visibility attribute from the BOUNDSHEET record.
    ("sheet_visible", 0),
    ("show_in_page_break_preview", 0),
)
##
# <p>Contains the data for one worksheet.</p>
#
# <p>In the cell access functions, "rowx" is a row index, counting from zero, and "colx" is a
# column index, counting from zero.
# Negative values for row/column indexes and slice positions are supported in the
# expected fashion.</p>
#
# <p>For information about cell types and cell values, refer to the documentation
# of the {@link #Cell} class.</p>
#
# <p>WARNING: You don't call this class yourself. You access Sheet objects via the Book object that
# was returned when you called xlrd.open_workbook("myfile.xls").</p>


class Sheet(BaseObject):
    ##
    # Name of sheet.
    name = ''

    ##
    # A reference to the Book object to which this sheet belongs.
    # Example usage: some_sheet.book.datemode
    book = None

    ##
    # Number of rows in sheet. A row index is in range(thesheet.nrows).
    nrows = 0

    ##
    # Nominal number of columns in sheet. It is 1 + the maximum column index
    # found, ignoring trailing empty cells. See also open_workbook(ragged_rows=?)
    # and Sheet.{@link #Sheet.row_len}(row_index).
    ncols = 0

    ##
    # The map from a column index to a {@link #Colinfo} object. Often there is an entry
    # in COLINFO records for all column indexes in range(257).
    # Note that xlrd ignores the entry for the non-existent
    # 257th column. On the other hand, there may be no entry for unused columns.
    # <br /> -- New in version 0.6.1. Populated only if open_workbook(formatting_info=True).
    colinfo_map = {}

    ##
    # The map from a row index to a {@link #Rowinfo} object. Note that it is possible
    # to have missing entries -- at least one source of XLS files doesn't
    # bother writing ROW records.
    # <br /> -- New in version 0.6.1. Populated only if open_workbook(formatting_info=True).
    rowinfo_map = {}

    ##
    # List of address ranges of cells containing column labels.
    # These are set up in Excel by Insert > Name > Labels > Columns.
    # <br> -- New in version 0.6.0
    # <br>How to deconstruct the list:
    # <pre>
    # for crange in thesheet.col_label_ranges:
    #     rlo, rhi, clo, chi = crange
    #     for rx in xrange(rlo, rhi):
    #         for cx in xrange(clo, chi):
    #             print "Column label at (rowx=%d, colx=%d) is %r" \
    #                 % (rx, cx, thesheet.cell_value(rx, cx))
    # </pre>
    col_label_ranges = []

    ##
    # List of address ranges of cells containing row labels.
    # For more details, see <i>col_label_ranges</i> above.
    # <br> -- New in version 0.6.0
    row_label_ranges = []

    ##
    # List of address ranges of cells which have been merged.
    # These are set up in Excel by Format > Cells > Alignment, then ticking
    # the "Merge cells" box.
    # <br> Note that the upper limits are exclusive: i.e. <tt>[2, 3, 7, 9]</tt> only
    # spans two cells.
    # <br> -- New in version 0.6.1. Extracted only if open_workbook(formatting_info=True).
    # <br>How to deconstruct the list:
    # <pre>
    # for crange in thesheet.merged_cells:
    #     rlo, rhi, clo, chi = crange
    #     for rowx in xrange(rlo, rhi):
    #         for colx in xrange(clo, chi):
    #             # cell (rlo, clo) (the top left one) will carry the data
    #             # and formatting info; the remainder will be recorded as
    #             # blank cells, but a renderer will apply the formatting info
    #             # for the top left cell (e.g. border, pattern) to all cells in
    #             # the range.
    # </pre>
    merged_cells = []

    ##
    # Mapping of (rowx, colx) to list of (offset, font_index) tuples. The offset
    # defines where in the string the font begins to be used.
    # Offsets are expected to be in ascending order.
    # If the first offset is not zero, the meaning is that the cell's XF's font should
    # be used from offset 0.
    # <br /> This is a sparse mapping. There is no entry for cells that are not formatted with
    # rich text.
    # <br>How to use:
    # <pre>
    # runlist = thesheet.rich_text_runlist_map.get((rowx, colx))
    # if runlist:
    #     for offset, font_index in runlist:
    #         # do work here.
    #         pass
    # </pre>
    # Populated only if open_workbook(formatting_info=True).
    # <br /> -- New in version 0.7.2.
    # <br /> &nbsp;
    rich_text_runlist_map = {}

    ##
    # Default column width from DEFCOLWIDTH record, else None.
    # From the OOo docs:<br />
    # """Column width in characters, using the width of the zero character
    # from default font (first FONT record in the file). Excel adds some
    # extra space to the default width, depending on the default font and
    # default font size. The algorithm how to exactly calculate the resulting
    # column width is not known.<br />
    # Example: The default width of 8 set in this record results in a column
    # width of 8.43 using Arial font with a size of 10 points."""<br />
    # For the default hierarchy, refer to the {@link #Colinfo} class.
    # <br /> -- New in version 0.6.1
    defcolwidth = None

    ##
    # Default column width from STANDARDWIDTH record, else None.
    # From the OOo docs:<br />
    # """Default width of the columns in 1/256 of the width of the zero
    # character, using default font (first FONT record in the file)."""<br />
    # For the default hierarchy, refer to the {@link #Colinfo} class.
    # <br /> -- New in version 0.6.1
    standardwidth = None

    ##
    # Default value to be used for a row if there is
    # no ROW record for that row.
    # From the <i>optional</i> DEFAULTROWHEIGHT record.
    default_row_height = None

    ##
    # Default value to be used for a row if there is
    # no ROW record for that row.
    # From the <i>optional</i> DEFAULTROWHEIGHT record.
    default_row_height_mismatch = None

    ##
    # Default value to be used for a row if there is
    # no ROW record for that row.
    # From the <i>optional</i> DEFAULTROWHEIGHT record.
    default_row_hidden = None

    ##
    # Default value to be used for a row if there is
    # no ROW record for that row.
    # From the <i>optional</i> DEFAULTROWHEIGHT record.
    default_additional_space_above = None

    ##
    # Default value to be used for a row if there is
    # no ROW record for that row.
    # From the <i>optional</i> DEFAULTROWHEIGHT record.
    default_additional_space_below = None

    ##
    # Visibility of the sheet. 0 = visible, 1 = hidden (can be unhidden
    # by user -- Format/Sheet/Unhide), 2 = "very hidden" (can be unhidden
    # only by VBA macro).
    visibility = 0

    ##
    # A 256-element tuple corresponding to the contents of the GCW record for this sheet.
    # If no such record, treat as all bits zero.
    # Applies to BIFF4-7 only. See docs of the {@link #Colinfo} class for discussion.
    gcw = (0, ) * 256

    ##
    # <p>A list of {@link #Hyperlink} objects corresponding to HLINK records found
    # in the worksheet.<br />-- New in version 0.7.2 </p>
    hyperlink_list = []

    ##
    # <p>A sparse mapping from (rowx, colx) to an item in {@link #Sheet.hyperlink_list}.
    # Cells not covered by a hyperlink are not mapped.
    # It is possible using the Excel UI to set up a hyperlink that
    # covers a larger-than-1x1 rectangle of cells.
    # Hyperlink rectangles may overlap (Excel doesn't check).
    # When a multiply-covered cell is clicked on, the hyperlink that is activated
    # (and the one that is mapped here) is the last in hyperlink_list.
    # <br />-- New in version 0.7.2 </p>
    hyperlink_map = {}

    ##
    # <p>A sparse mapping from (rowx, colx) to a {@link #Note} object.
    # Cells not containing a note ("comment") are not mapped.
    # <br />-- New in version 0.7.2 </p>
    cell_note_map = {}

    ##
    # Number of columns in left pane (frozen panes; for split panes, see comments below in code)
    vert_split_pos = 0

    ##
    # Number of rows in top pane (frozen panes; for split panes, see comments below in code)
    horz_split_pos = 0

    ##
    # Index of first visible row in bottom frozen/split pane
    horz_split_first_visible = 0

    ##
    # Index of first visible column in right frozen/split pane
    vert_split_first_visible = 0

    ##
    # Frozen panes: ignore it. Split panes: explanation and diagrams in OOo docs.
    split_active_pane = 0

    ##
    # Boolean specifying if a PANE record was present; ignore unless you're xlutils.copy
    has_pane_record = 0

    ##
    # A list of the horizontal page breaks in this sheet.
    # Breaks are tuples in the form (index of row after break, start col index, end col index).
    # Populated only if open_workbook(formatting_info=True).
    # <br /> -- New in version 0.7.2
    horizontal_page_breaks = []

    ##
    # A list of the vertical page breaks in this sheet.
    # Breaks are tuples in the form (index of col after break, start row index, end row index).
    # Populated only if open_workbook(formatting_info=True).
    # <br /> -- New in version 0.7.2
    vertical_page_breaks = []
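    # Illustrative sketch, not part of the original module: looking up the
    # hyperlink (if any) covering a given cell via the sparse hyperlink_map
    # documented above. The names thesheet, rowx and colx are placeholders.
    # <pre>
    # h = thesheet.hyperlink_map.get((rowx, colx))
    # if h is not None:
    #     print(h.type, h.url_or_path)   # e.g. 'url', 'http://example.com/'
    # </pre>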
    def __init__(self, book, position, name, number):
        self.book = book
        self.biff_version = book.biff_version
        self._position = position
        self.logfile = book.logfile
        self.bt = array('B', [XL_CELL_EMPTY])
        self.bf = array('h', [-1])
        self.name = name
        self.number = number
        self.verbosity = book.verbosity
        self.formatting_info = book.formatting_info
        self.ragged_rows = book.ragged_rows
        if self.ragged_rows:
            self.put_cell = self.put_cell_ragged
        else:
            self.put_cell = self.put_cell_unragged
        self._xf_index_to_xl_type_map = book._xf_index_to_xl_type_map
        self.nrows = 0 # actual, including possibly empty cells
        self.ncols = 0
        self._maxdatarowx = -1 # highest rowx containing a non-empty cell
        self._maxdatacolx = -1 # highest colx containing a non-empty cell
        self._dimnrows = 0 # as per DIMENSIONS record
        self._dimncols = 0
        self._cell_values = []
        self._cell_types = []
        self._cell_xf_indexes = []
        self.defcolwidth = None
        self.standardwidth = None
        self.default_row_height = None
        self.default_row_height_mismatch = 0
        self.default_row_hidden = 0
        self.default_additional_space_above = 0
        self.default_additional_space_below = 0
        self.colinfo_map = {}
        self.rowinfo_map = {}
        self.col_label_ranges = []
        self.row_label_ranges = []
        self.merged_cells = []
        self.rich_text_runlist_map = {}
        self.horizontal_page_breaks = []
        self.vertical_page_breaks = []
        self._xf_index_stats = [0, 0, 0, 0]
        self.visibility = book._sheet_visibility[number] # from BOUNDSHEET record
        for attr, defval in _WINDOW2_options:
            setattr(self, attr, defval)
        self.first_visible_rowx = 0
        self.first_visible_colx = 0
        self.gridline_colour_index = 0x40
        self.gridline_colour_rgb = None # pre-BIFF8
        self.hyperlink_list = []
        self.hyperlink_map = {}
        self.cell_note_map = {}

        # Values calculated by xlrd to predict the mag factors that
        # will actually be used by Excel to display your worksheet.
        # Pass these values to xlwt when writing XLS files.
        # Warning 1: Behaviour of OOo Calc and Gnumeric has been observed to differ from Excel's.
        # Warning 2: A value of zero means almost exactly what it says. Your sheet will be
        # displayed as a very tiny speck on the screen. xlwt will reject attempts to set
        # a mag_factor that is not (10 <= mag_factor <= 400).
        self.cooked_page_break_preview_mag_factor = 60
        self.cooked_normal_view_mag_factor = 100

        # Values (if any) actually stored on the XLS file
        self.cached_page_break_preview_mag_factor = 0 # default (60%), from WINDOW2 record
        self.cached_normal_view_mag_factor = 0 # default (100%), from WINDOW2 record
        self.scl_mag_factor = None # from SCL record

        self._ixfe = None # BIFF2 only
        self._cell_attr_to_xfx = {} # BIFF2.0 only

        #### Don't initialise this here, use class attribute initialisation.
        #### self.gcw = (0, ) * 256 ####

        if self.biff_version >= 80:
            self.utter_max_rows = 65536
        else:
            self.utter_max_rows = 16384
        self.utter_max_cols = 256

        self._first_full_rowx = -1

        # self._put_cell_exceptions = 0
        # self._put_cell_row_widenings = 0
        # self._put_cell_rows_appended = 0
        # self._put_cell_cells_appended = 0

    ##
    # {@link #Cell} object in the given row and column.
    def cell(self, rowx, colx):
        if self.formatting_info:
            xfx = self.cell_xf_index(rowx, colx)
        else:
            xfx = None
        return Cell(
            self._cell_types[rowx][colx],
            self._cell_values[rowx][colx],
            xfx,
            )

    ##
    # Value of the cell in the given row and column.
    def cell_value(self, rowx, colx):
        return self._cell_values[rowx][colx]

    ##
    # Type of the cell in the given row and column.
    # Refer to the documentation of the {@link #Cell} class.
    def cell_type(self, rowx, colx):
        return self._cell_types[rowx][colx]

    ##
    # XF index of the cell in the given row and column.
    # This is an index into Book.{@link #Book.xf_list}.
    # <br /> -- New in version 0.6.1
    def cell_xf_index(self, rowx, colx):
        self.req_fmt_info()
        xfx = self._cell_xf_indexes[rowx][colx]
        if xfx > -1:
            self._xf_index_stats[0] += 1
            return xfx
        # Check for a row xf_index
        try:
            xfx = self.rowinfo_map[rowx].xf_index
            if xfx > -1:
                self._xf_index_stats[1] += 1
                return xfx
        except KeyError:
            pass
        # Check for a column xf_index
        try:
            xfx = self.colinfo_map[colx].xf_index
            if xfx == -1:
                xfx = 15
            self._xf_index_stats[2] += 1
            return xfx
        except KeyError:
            # If all else fails, 15 is used as hardwired global default xf_index.
            self._xf_index_stats[3] += 1
            return 15

    ##
    # Returns the effective number of cells in the given row. For use with
    # open_workbook(ragged_rows=True) which is likely to produce rows
    # with fewer than {@link #Sheet.ncols} cells.
    # <br /> -- New in version 0.7.2
    def row_len(self, rowx):
        return len(self._cell_values[rowx])

    ##
    # Returns a sequence of the {@link #Cell} objects in the given row.
    def row(self, rowx):
        return [
            self.cell(rowx, colx)
            for colx in xrange(len(self._cell_values[rowx]))
            ]

    ##
    # Returns a generator for iterating through each row.
    def get_rows(self):
        return (self.row(index) for index in range(self.nrows))

    ##
    # Returns a slice of the types
    # of the cells in the given row.
    def row_types(self, rowx, start_colx=0, end_colx=None):
        if end_colx is None:
            return self._cell_types[rowx][start_colx:]
        return self._cell_types[rowx][start_colx:end_colx]

    ##
    # Returns a slice of the values
    # of the cells in the given row.
    def row_values(self, rowx, start_colx=0, end_colx=None):
        if end_colx is None:
            return self._cell_values[rowx][start_colx:]
        return self._cell_values[rowx][start_colx:end_colx]
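    # Illustrative sketch, not part of the original module: a typical
    # read-only traversal using the row-level accessors defined above.
    # thesheet is a placeholder for a Sheet obtained from an open Book.
    # <pre>
    # for rowx in xrange(thesheet.nrows):
    #     values = thesheet.row_values(rowx)   # list of cell values
    #     types = thesheet.row_types(rowx)     # parallel list of cell type codes
    #     for c in thesheet.row(rowx):         # or Cell objects ...
    #         print(c.ctype, c.value)
    # </pre>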
    ##
    # Returns a slice of the {@link #Cell} objects in the given row.
    def row_slice(self, rowx, start_colx=0, end_colx=None):
        nc = len(self._cell_values[rowx])
        if start_colx < 0:
            start_colx += nc
            if start_colx < 0:
                start_colx = 0
        if end_colx is None or end_colx > nc:
            end_colx = nc
        elif end_colx < 0:
            end_colx += nc
        return [
            self.cell(rowx, colx)
            for colx in xrange(start_colx, end_colx)
            ]

    ##
    # Returns a slice of the {@link #Cell} objects in the given column.
    def col_slice(self, colx, start_rowx=0, end_rowx=None):
        nr = self.nrows
        if start_rowx < 0:
            start_rowx += nr
            if start_rowx < 0:
                start_rowx = 0
        if end_rowx is None or end_rowx > nr:
            end_rowx = nr
        elif end_rowx < 0:
            end_rowx += nr
        return [
            self.cell(rowx, colx)
            for rowx in xrange(start_rowx, end_rowx)
            ]

    ##
    # Returns a slice of the values of the cells in the given column.
    def col_values(self, colx, start_rowx=0, end_rowx=None):
        nr = self.nrows
        if start_rowx < 0:
            start_rowx += nr
            if start_rowx < 0:
                start_rowx = 0
        if end_rowx is None or end_rowx > nr:
            end_rowx = nr
        elif end_rowx < 0:
            end_rowx += nr
        return [
            self._cell_values[rowx][colx]
            for rowx in xrange(start_rowx, end_rowx)
            ]

    ##
    # Returns a slice of the types of the cells in the given column.
    def col_types(self, colx, start_rowx=0, end_rowx=None):
        nr = self.nrows
        if start_rowx < 0:
            start_rowx += nr
            if start_rowx < 0:
                start_rowx = 0
        if end_rowx is None or end_rowx > nr:
            end_rowx = nr
        elif end_rowx < 0:
            end_rowx += nr
        return [
            self._cell_types[rowx][colx]
            for rowx in xrange(start_rowx, end_rowx)
            ]

    ##
    # Returns a sequence of the {@link #Cell} objects in the given column.
    def col(self, colx):
        return self.col_slice(colx)
    # Above two lines just for the docs. Here's the real McCoy:
    col = col_slice
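    # Illustrative sketch, not part of the original module: the slicing
    # methods above clamp negative indexes and bounds the same way Python
    # sequence slicing does. thesheet is a placeholder.
    # <pre>
    # last_ten_values = thesheet.col_values(0, start_rowx=-10)  # last 10 rows of column A
    # last_two_cells = thesheet.row_slice(0, start_colx=-2)     # last 2 Cells of row 0
    # </pre>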
    # === Following methods are used in building the worksheet.
    # === They are not part of the API.

    def tidy_dimensions(self):
        if self.verbosity >= 3:
            fprintf(self.logfile,
                "tidy_dimensions: nrows=%d ncols=%d \n",
                self.nrows, self.ncols,
                )
        if 1 and self.merged_cells:
            nr = nc = 0
            umaxrows = self.utter_max_rows
            umaxcols = self.utter_max_cols
            for crange in self.merged_cells:
                rlo, rhi, clo, chi = crange
                if not (0 <= rlo < rhi <= umaxrows) \
                or not (0 <= clo < chi <= umaxcols):
                    fprintf(self.logfile,
                        "*** WARNING: sheet #%d (%r), MERGEDCELLS bad range %r\n",
                        self.number, self.name, crange)
                if rhi > nr: nr = rhi
                if chi > nc: nc = chi
            if nc > self.ncols:
                self.ncols = nc
                self._first_full_rowx = -2
            if nr > self.nrows:
                # we put one empty cell at (nr-1,0) to make sure
                # we have the right number of rows. The ragged rows
                # will sort out the rest if needed.
                self.put_cell(nr-1, 0, XL_CELL_EMPTY, UNICODE_LITERAL(''), -1)
        if self.verbosity >= 1 \
        and (self.nrows != self._dimnrows or self.ncols != self._dimncols):
            fprintf(self.logfile,
                "NOTE *** sheet %d (%r): DIMENSIONS R,C = %d,%d should be %d,%d\n",
                self.number,
                self.name,
                self._dimnrows,
                self._dimncols,
                self.nrows,
                self.ncols,
                )
        if not self.ragged_rows:
            # fix ragged rows
            ncols = self.ncols
            s_cell_types = self._cell_types
            s_cell_values = self._cell_values
            s_cell_xf_indexes = self._cell_xf_indexes
            s_fmt_info = self.formatting_info
            # for rowx in xrange(self.nrows):
            if self._first_full_rowx == -2:
                ubound = self.nrows
            else:
                ubound = self._first_full_rowx
            for rowx in xrange(ubound):
                trow = s_cell_types[rowx]
                rlen = len(trow)
                nextra = ncols - rlen
                if nextra > 0:
                    s_cell_values[rowx][rlen:] = [UNICODE_LITERAL('')] * nextra
                    trow[rlen:] = self.bt * nextra
                    if s_fmt_info:
                        s_cell_xf_indexes[rowx][rlen:] = self.bf * nextra

    def put_cell_ragged(self, rowx, colx, ctype, value, xf_index):
        if ctype is None:
            # we have a number, so look up the cell type
            ctype = self._xf_index_to_xl_type_map[xf_index]
        assert 0 <= colx < self.utter_max_cols
        assert 0 <= rowx < self.utter_max_rows
        fmt_info = self.formatting_info
        try:
            nr = rowx + 1
            if self.nrows < nr:
                scta = self._cell_types.append
                scva = self._cell_values.append
                scxa = self._cell_xf_indexes.append
                bt = self.bt
                bf = self.bf
                for _unused in xrange(self.nrows, nr):
                    scta(bt * 0)
                    scva([])
                    if fmt_info:
                        scxa(bf * 0)
                self.nrows = nr

            types_row = self._cell_types[rowx]
            values_row = self._cell_values[rowx]
            if fmt_info:
                fmt_row = self._cell_xf_indexes[rowx]
            ltr = len(types_row)
            if colx >= self.ncols:
                self.ncols = colx + 1
            num_empty = colx - ltr
            if not num_empty:
                # most common case: colx == previous colx + 1
                # self._put_cell_cells_appended += 1
                types_row.append(ctype)
                values_row.append(value)
                if fmt_info:
                    fmt_row.append(xf_index)
                return
            if num_empty > 0:
                num_empty += 1
                # self._put_cell_row_widenings += 1
                # types_row.extend(self.bt * num_empty)
                # values_row.extend([UNICODE_LITERAL('')] * num_empty)
                # if fmt_info:
                #     fmt_row.extend(self.bf * num_empty)
                types_row[ltr:] = self.bt * num_empty
                values_row[ltr:] = [UNICODE_LITERAL('')] * num_empty
                if fmt_info:
                    fmt_row[ltr:] = self.bf * num_empty
            types_row[colx] = ctype
            values_row[colx] = value
            if fmt_info:
                fmt_row[colx] = xf_index
        except:
            print("put_cell", rowx, colx, file=self.logfile)
            raise

    def put_cell_unragged(self, rowx, colx, ctype, value, xf_index):
        if ctype is None:
            # we have a number, so look up the cell type
            ctype = self._xf_index_to_xl_type_map[xf_index]
        # assert 0 <= colx < self.utter_max_cols
        # assert 0 <= rowx < self.utter_max_rows
        try:
            self._cell_types[rowx][colx] = ctype
            self._cell_values[rowx][colx] = value
            if self.formatting_info:
                self._cell_xf_indexes[rowx][colx] = xf_index
        except IndexError:
            # print >> self.logfile, "put_cell extending", rowx, colx
            # self.extend_cells(rowx+1, colx+1)
            # self._put_cell_exceptions += 1
            nr = rowx + 1
            nc = colx + 1
            assert 1 <= nc <= self.utter_max_cols
            assert 1 <= nr <= self.utter_max_rows
            if nc > self.ncols:
                self.ncols = nc
                # The row self._first_full_rowx and all subsequent rows
                # are guaranteed to have length == self.ncols. Thus the
                # "fix ragged rows" section of the tidy_dimensions method
                # doesn't need to examine them.
                if nr < self.nrows:
                    # cell data is not in non-descending row order *AND*
                    # self.ncols has been bumped up.
                    # This very rare case ruins this optimisation.
                    self._first_full_rowx = -2
                elif rowx > self._first_full_rowx > -2:
                    self._first_full_rowx = rowx
            if nr <= self.nrows:
                # New cell is in an existing row, so extend that row (if necessary).
                # Note that nr < self.nrows means that the cell data
                # is not in ascending row order!!
                trow = self._cell_types[rowx]
                nextra = self.ncols - len(trow)
                if nextra > 0:
                    # self._put_cell_row_widenings += 1
                    trow.extend(self.bt * nextra)
                    if self.formatting_info:
                        self._cell_xf_indexes[rowx].extend(self.bf * nextra)
                    self._cell_values[rowx].extend([UNICODE_LITERAL('')] * nextra)
            else:
                scta = self._cell_types.append
                scva = self._cell_values.append
                scxa = self._cell_xf_indexes.append
                fmt_info = self.formatting_info
                nc = self.ncols
                bt = self.bt
                bf = self.bf
                for _unused in xrange(self.nrows, nr):
                    # self._put_cell_rows_appended += 1
                    scta(bt * nc)
                    scva([UNICODE_LITERAL('')] * nc)
                    if fmt_info:
                        scxa(bf * nc)
                self.nrows = nr
            # === end of code from extend_cells()
            try:
                self._cell_types[rowx][colx] = ctype
                self._cell_values[rowx][colx] = value
                if self.formatting_info:
                    self._cell_xf_indexes[rowx][colx] = xf_index
            except:
                print("put_cell", rowx, colx, file=self.logfile)
                raise
        except:
            print("put_cell", rowx, colx, file=self.logfile)
            raise

    # === Methods after this line neither know nor care about how cells are stored.

    def read(self, bk):
        global rc_stats
        DEBUG = 0
        blah = DEBUG or self.verbosity >= 2
        blah_rows = DEBUG or self.verbosity >= 4
        blah_formulas = 0 and blah
        r1c1 = 0
        oldpos = bk._position
        bk._position = self._position
        XL_SHRFMLA_ETC_ETC = (
            XL_SHRFMLA, XL_ARRAY, XL_TABLEOP, XL_TABLEOP2,
            XL_ARRAY2, XL_TABLEOP_B2,
            )
        self_put_cell = self.put_cell
        local_unpack = unpack
        bk_get_record_parts = bk.get_record_parts
        bv = self.biff_version
        fmt_info = self.formatting_info
        do_sst_rich_text = fmt_info and bk._rich_text_runlist_map
        rowinfo_sharing_dict = {}
        txos = {}
        eof_found = 0
        while 1:
            # if DEBUG: print "SHEET.READ: about to read from position %d" % bk._position
            rc, data_len, data = bk_get_record_parts()
            # if rc in rc_stats:
            #     rc_stats[rc] += 1
            # else:
            #     rc_stats[rc] = 1
            # if DEBUG: print "SHEET.READ: op 0x%04x, %d bytes %r" % (rc, data_len, data)
            if rc == XL_NUMBER:
                # [:14] in following stmt ignores extraneous rubbish at end of record.
                # Sample file testEON-8.xls supplied by Jan Kraus.
                rowx, colx, xf_index, d = local_unpack('<HHHd', data[:14])
                # if xf_index == 0:
                #     fprintf(self.logfile,
                #         "NUMBER: r=%d c=%d xfx=%d %f\n", rowx, colx, xf_index, d)
                self_put_cell(rowx, colx, None, d, xf_index)
            elif rc == XL_LABELSST:
                rowx, colx, xf_index, sstindex = local_unpack('<HHHi', data)
                # print "LABELSST", rowx, colx, sstindex, bk._sharedstrings[sstindex]
                self_put_cell(rowx, colx, XL_CELL_TEXT, bk._sharedstrings[sstindex], xf_index)
                if do_sst_rich_text:
                    runlist = bk._rich_text_runlist_map.get(sstindex)
                    if runlist:
                        self.rich_text_runlist_map[(rowx, colx)] = runlist
            elif rc == XL_LABEL:
                rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
                if bv < BIFF_FIRST_UNICODE:
                    strg = unpack_string(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
                else:
                    strg = unpack_unicode(data, 6, lenlen=2)
                self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
            elif rc == XL_RSTRING:
                rowx, colx, xf_index = local_unpack('<HHH', data[0:6])
                if bv < BIFF_FIRST_UNICODE:
                    strg, pos = unpack_string_update_pos(data, 6, bk.encoding or bk.derive_encoding(), lenlen=2)
                    nrt = BYTES_ORD(data[pos])
                    pos += 1
                    runlist = []
                    for _unused in xrange(nrt):
                        runlist.append(unpack('<BB', data[pos:pos+2]))
                        pos += 2
                    assert pos == len(data)
                else:
                    strg, pos = unpack_unicode_update_pos(data, 6, lenlen=2)
                    nrt = unpack('<H', data[pos:pos+2])[0]
                    pos += 2
                    runlist = []
                    for _unused in xrange(nrt):
                        runlist.append(unpack('<HH', data[pos:pos+4]))
                        pos += 4
                    assert pos == len(data)
                self_put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
                self.rich_text_runlist_map[(rowx, colx)] = runlist
            elif rc == XL_RK:
                rowx, colx, xf_index = local_unpack('<HHH', data[:6])
                d = unpack_RK(data[6:10])
                self_put_cell(rowx, colx, None, d, xf_index)
            elif rc == XL_MULRK:
                mulrk_row, mulrk_first = local_unpack('<HH', data[0:4])
                mulrk_last, = local_unpack('<H', data[-2:])
                pos = 4
                for colx in xrange(mulrk_first, mulrk_last+1):
                    xf_index, = local_unpack('<H', data[pos:pos+2])
                    d = unpack_RK(data[pos+2:pos+6])
                    pos += 6
                    self_put_cell(mulrk_row, colx, None, d, xf_index)
            elif rc == XL_ROW:
                # Version 0.6.0a3: ROW records are just not worth using (for memory allocation).
                # Version 0.6.1: now used for formatting info.
                if not fmt_info: continue
                rowx, bits1, bits2 = local_unpack('<H4xH4xi', data[0:16])
                if not(0 <= rowx < self.utter_max_rows):
                    print("*** NOTE: ROW record has row index %d; "
                        "should have 0 <= rowx < %d -- record ignored!"
                        % (rowx, self.utter_max_rows), file=self.logfile)
                    continue
                key = (bits1, bits2)
                r = rowinfo_sharing_dict.get(key)
                if r is None:
                    rowinfo_sharing_dict[key] = r = Rowinfo()
                    # Using upkbits() is far too slow on a file
                    # with 30 sheets each with 10K rows :-(
                    #   upkbits(r, bits1, (
                    #       ( 0, 0x7FFF, 'height'),
                    #       (15, 0x8000, 'has_default_height'),
                    #       ))
                    #   upkbits(r, bits2, (
                    #       ( 0, 0x00000007, 'outline_level'),
                    #       ( 4, 0x00000010, 'outline_group_starts_ends'),
                    #       ( 5, 0x00000020, 'hidden'),
                    #       ( 6, 0x00000040, 'height_mismatch'),
                    #       ( 7, 0x00000080, 'has_default_xf_index'),
                    #       (16, 0x0FFF0000, 'xf_index'),
                    #       (28, 0x10000000, 'additional_space_above'),
                    #       (29, 0x20000000, 'additional_space_below'),
                    #       ))
                    # So:
                    r.height = bits1 & 0x7fff
                    r.has_default_height = (bits1 >> 15) & 1
                    r.outline_level = bits2 & 7
                    r.outline_group_starts_ends = (bits2 >> 4) & 1
                    r.hidden = (bits2 >> 5) & 1
                    r.height_mismatch = (bits2 >> 6) & 1
                    r.has_default_xf_index = (bits2 >> 7) & 1
                    r.xf_index = (bits2 >> 16) & 0xfff
                    r.additional_space_above = (bits2 >> 28) & 1
                    r.additional_space_below = (bits2 >> 29) & 1
                    if not r.has_default_xf_index:
                        r.xf_index = -1
                self.rowinfo_map[rowx] = r
                if 0 and r.xf_index > -1:
                    fprintf(self.logfile,
                        "**ROW %d %d %d\n",
                        self.number, rowx, r.xf_index)
                if blah_rows:
                    print('ROW', rowx, bits1, bits2, file=self.logfile)
                    r.dump(self.logfile,
                        header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
            elif rc in XL_FORMULA_OPCODES: # 06, 0206, 0406
                # DEBUG = 1
                # if DEBUG: print "FORMULA: rc: 0x%04x data: %r" % (rc, data)
                if bv >= 50:
                    rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
                    lenlen = 2
                    tkarr_offset = 20
                elif bv >= 30:
                    rowx, colx, xf_index, result_str, flags = local_unpack('<HHH8sH', data[0:16])
                    lenlen = 2
                    tkarr_offset = 16
                else: # BIFF2
                    rowx, colx, cell_attr, result_str, flags = local_unpack('<HH3s8sB', data[0:16])
                    xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx)
                    lenlen = 1
                    tkarr_offset = 16
                if blah_formulas: # testing formula dumper
                    #### XXXX FIXME
                    fprintf(self.logfile, "FORMULA: rowx=%d colx=%d\n", rowx, colx)
                    fmlalen = local_unpack("<H", data[20:22])[0]
                    decompile_formula(bk, data[22:], fmlalen, FMLA_TYPE_CELL,
                        browx=rowx, bcolx=colx, blah=1, r1c1=r1c1)
                if result_str[6:8] == b"\xFF\xFF":
                    first_byte = BYTES_ORD(result_str[0])
                    if first_byte == 0:
                        # need to read next record (STRING)
                        gotstring = 0
                        # if flags & 8:
                        if 1: # "flags & 8" applies only to SHRFMLA
                            # actually there's an optional SHRFMLA or ARRAY etc record to skip over
                            rc2, data2_len, data2 = bk.get_record_parts()
                            if rc2 == XL_STRING or rc2 == XL_STRING_B2:
                                gotstring = 1
                            elif rc2 == XL_ARRAY:
                                row1x, rownx, col1x, colnx, array_flags, tokslen = \
                                    local_unpack("<HHBBBxxxxxH", data2[:14])
                                if blah_formulas:
                                    fprintf(self.logfile, "ARRAY: %d %d %d %d %d\n",
                                        row1x, rownx, col1x, colnx, array_flags)
                                    # dump_formula(bk, data2[14:], tokslen, bv, reldelta=0, blah=1)
                            elif rc2 == XL_SHRFMLA:
                                row1x, rownx, col1x, colnx, nfmlas, tokslen = \
                                    local_unpack("<HHBBxBH", data2[:10])
                                if blah_formulas:
                                    fprintf(self.logfile, "SHRFMLA (sub): %d %d %d %d %d\n",
                                        row1x, rownx, col1x, colnx, nfmlas)
                                    decompile_formula(bk, data2[10:], tokslen, FMLA_TYPE_SHARED,
                                        blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
                            elif rc2 not in XL_SHRFMLA_ETC_ETC:
                                raise XLRDError(
                                    "Expected SHRFMLA, ARRAY, TABLEOP* or STRING record; found 0x%04x" % rc2)
                            # if DEBUG: print "gotstring:", gotstring
                        # now for the STRING record
                        if not gotstring:
                            rc2, _unused_len, data2 = bk.get_record_parts()
                            if rc2 not in (XL_STRING, XL_STRING_B2):
                                raise XLRDError("Expected STRING record; found 0x%04x" % rc2)
                        # if DEBUG: print "STRING: data=%r BIFF=%d cp=%d" % (data2, self.biff_version, bk.encoding)
                        strg = self.string_record_contents(data2)
                        self.put_cell(rowx, colx, XL_CELL_TEXT, strg, xf_index)
                        # if DEBUG: print "FORMULA strg %r" % strg
                    elif first_byte == 1:
                        # boolean formula result
                        value = BYTES_ORD(result_str[2])
                        self_put_cell(rowx, colx, XL_CELL_BOOLEAN, value, xf_index)
                    elif first_byte == 2:
                        # Error in cell
                        value = BYTES_ORD(result_str[2])
                        self_put_cell(rowx, colx, XL_CELL_ERROR, value, xf_index)
                    elif first_byte == 3:
                        # empty ... i.e. empty (zero-length) string, NOT an empty cell.
                        self_put_cell(rowx, colx, XL_CELL_TEXT, "", xf_index)
                    else:
                        raise XLRDError("unexpected special case (0x%02x) in FORMULA" % first_byte)
                else:
                    # it is a number
                    d = local_unpack('<d', result_str)[0]
                    self_put_cell(rowx, colx, None, d, xf_index)
            elif rc == XL_BOOLERR:
                rowx, colx, xf_index, value, is_err = local_unpack('<HHHBB', data[:8])
                # Note OOo Calc 2.0 writes 9-byte BOOLERR records.
                # OOo docs say 8. Excel writes 8.
                cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
                # if DEBUG: print "XL_BOOLERR", rowx, colx, xf_index, value, is_err
                self_put_cell(rowx, colx, cellty, value, xf_index)
            elif rc == XL_COLINFO:
                if not fmt_info: continue
                c = Colinfo()
                first_colx, last_colx, c.width, c.xf_index, flags \
                    = local_unpack("<HHHHH", data[:10])
                #### Colinfo.width is denominated in 256ths of a character,
                #### *not* in characters.
                if not(0 <= first_colx <= last_colx <= 256):
                    # Note: 256 instead of 255 is a common mistake.
                    # We silently ignore the non-existing 257th column in that case.
                    print("*** NOTE: COLINFO record has first col index %d, last %d; "
                        "should have 0 <= first <= last <= 255 -- record ignored!"
                        % (first_colx, last_colx), file=self.logfile)
                    del c
                    continue
                upkbits(c, flags, (
                    ( 0, 0x0001, 'hidden'),
                    ( 1, 0x0002, 'bit1_flag'),
                    # *ALL* colinfos created by Excel in "default" cases are 0x0002!!
                    # Maybe it's "locked" by analogy with XFProtection data.
                    ( 8, 0x0700, 'outline_level'),
                    (12, 0x1000, 'collapsed'),
                    ))
                for colx in xrange(first_colx, last_colx+1):
                    if colx > 255: break # Excel does 0 to 256 inclusive
                    self.colinfo_map[colx] = c
                    if 0:
                        fprintf(self.logfile,
                            "**COL %d %d %d\n",
                            self.number, colx, c.xf_index)
                if blah:
                    fprintf(
                        self.logfile,
                        "COLINFO sheet #%d cols %d-%d: wid=%d xf_index=%d flags=0x%04x\n",
                        self.number, first_colx, last_colx, c.width, c.xf_index, flags,
                        )
                    c.dump(self.logfile, header='===')
            elif rc == XL_DEFCOLWIDTH:
                self.defcolwidth, = local_unpack("<H", data[:2])
                if 0: print('DEFCOLWIDTH', self.defcolwidth, file=self.logfile)
            elif rc == XL_STANDARDWIDTH:
                if data_len != 2:
                    print('*** ERROR *** STANDARDWIDTH', data_len, repr(data), file=self.logfile)
                self.standardwidth, = local_unpack("<H", data[:2])
                if 0: print('STANDARDWIDTH', self.standardwidth, file=self.logfile)
            elif rc == XL_GCW:
                if not fmt_info: continue # useless w/o COLINFO
                assert data_len == 34
                assert data[0:2] == b"\x20\x00"
                iguff = unpack("<8i", data[2:34])
                gcw = []
                for bits in iguff:
                    for j in xrange(32):
                        gcw.append(bits & 1)
                        bits >>= 1
                self.gcw = tuple(gcw)
                if 0:
                    showgcw = "".join(map(lambda x: "F "[x], gcw)).rstrip().replace(' ', '.')
                    print("GCW:", showgcw, file=self.logfile)
            elif rc == XL_BLANK:
                if not fmt_info: continue
                rowx, colx, xf_index = local_unpack('<HHH', data[:6])
                # if 0: print >> self.logfile, "BLANK", rowx, colx, xf_index
                self_put_cell(rowx, colx, XL_CELL_BLANK, '', xf_index)
            elif rc == XL_MULBLANK: # 00BE
                if not fmt_info: continue
                nitems = data_len >> 1
                result = local_unpack("<%dH" % nitems, data)
                rowx, mul_first = result[:2]
                mul_last = result[-1]
                # print >> self.logfile, "MULBLANK", rowx, mul_first, mul_last, data_len, nitems, mul_last + 4 - mul_first
                assert nitems == mul_last + 4 - mul_first
                pos = 2
                for colx in xrange(mul_first, mul_last + 1):
                    self_put_cell(rowx, colx, XL_CELL_BLANK, '', result[pos])
                    pos += 1
            elif rc == XL_DIMENSION or rc == XL_DIMENSION2:
                if data_len == 0:
                    # Four zero bytes after some other record. See github issue 64.
                    continue
                # if data_len == 10:
                # Was crashing on BIFF 4.0 file w/o the two trailing unused bytes.
                # Reported by Ralph Heimburger.
                if bv < 80:
                    dim_tuple = local_unpack('<HxxH', data[2:8])
                else:
                    dim_tuple = local_unpack('<ixxH', data[4:12])
                self.nrows, self.ncols = 0, 0
                self._dimnrows, self._dimncols = dim_tuple
                if bv in (21, 30, 40) and self.book.xf_list and not self.book._xf_epilogue_done:
                    self.book.xf_epilogue()
                if blah:
                    fprintf(self.logfile,
                        "sheet %d(%r) DIMENSIONS: ncols=%d nrows=%d\n",
                        self.number, self.name, self._dimncols, self._dimnrows
                        )
            elif rc == XL_HLINK:
                self.handle_hlink(data)
            elif rc == XL_QUICKTIP:
                self.handle_quicktip(data)
            elif rc == XL_EOF:
                DEBUG = 0
                if DEBUG: print("SHEET.READ: EOF", file=self.logfile)
                eof_found = 1
                break
            elif rc == XL_OBJ:
                # handle SHEET-level objects; note there's a separate Book.handle_obj
                saved_obj = self.handle_obj(data)
                if saved_obj:
                    saved_obj_id = saved_obj.id
                else:
                    saved_obj_id = None
            elif rc == XL_MSO_DRAWING:
                self.handle_msodrawingetc(rc, data_len, data)
            elif rc == XL_TXO:
                txo = self.handle_txo(data)
                if txo and saved_obj_id:
                    txos[saved_obj_id] = txo
                    saved_obj_id = None
            elif rc == XL_NOTE:
                self.handle_note(data, txos)
            elif rc == XL_FEAT11:
                self.handle_feat11(data)
            elif rc in bofcodes: ##### EMBEDDED BOF #####
                version, boftype = local_unpack('<HH', data[0:4])
                if boftype != 0x20: # embedded chart
                    print("*** Unexpected embedded BOF (0x%04x) at offset %d: version=0x%04x type=0x%04x"
                        % (rc, bk._position - data_len - 4, version, boftype), file=self.logfile)
                while 1:
                    code, data_len, data = bk.get_record_parts()
                    if code == XL_EOF:
                        break
                if DEBUG: print("---> found EOF", file=self.logfile)
            elif rc == XL_COUNTRY:
                bk.handle_country(data)
            elif rc == XL_LABELRANGES:
                pos = 0
                pos = unpack_cell_range_address_list_update_pos(
                    self.row_label_ranges, data, pos, bv, addr_size=8,
                    )
                pos = unpack_cell_range_address_list_update_pos(
                    self.col_label_ranges, data, pos, bv, addr_size=8,
                    )
                assert pos == data_len
            elif rc == XL_ARRAY:
                row1x, rownx, col1x, colnx, array_flags, tokslen = \
                    local_unpack("<HHBBBxxxxxH", data[:14])
                if blah_formulas:
                    print("ARRAY:", row1x, rownx, col1x, colnx, array_flags, file=self.logfile)
                    # dump_formula(bk, data[14:], tokslen, bv, reldelta=0, blah=1)
            elif rc == XL_SHRFMLA:
                row1x, rownx, col1x, colnx, nfmlas, tokslen = \
                    local_unpack("<HHBBxBH", data[:10])
                if blah_formulas:
                    print("SHRFMLA (main):", row1x, rownx, col1x, colnx, nfmlas, file=self.logfile)
                    decompile_formula(bk, data[10:], tokslen, FMLA_TYPE_SHARED,
                        blah=1, browx=rowx, bcolx=colx, r1c1=r1c1)
            elif rc == XL_CONDFMT:
                if not fmt_info: continue
                assert bv >= 80
                num_CFs, needs_recalc, browx1, browx2, bcolx1, bcolx2 = \
                    unpack("<6H", data[0:12])
                if self.verbosity >= 1:
                    fprintf(self.logfile,
                        "\n*** WARNING: Ignoring CONDFMT (conditional formatting) record\n"
                        "*** in Sheet %d (%r).\n"
                        "*** %d CF record(s); needs_recalc_or_redraw = %d\n"
                        "*** Bounding box is %s\n",
                        self.number, self.name, num_CFs, needs_recalc,
                        rangename2d(browx1, browx2+1, bcolx1, bcolx2+1),
                        )
                olist = [] # updated by the function
                pos = unpack_cell_range_address_list_update_pos(
                    olist, data, 12, bv, addr_size=8)
                # print >> self.logfile, repr(result), len(result)
                if self.verbosity >= 1:
                    fprintf(self.logfile,
                        "*** %d individual range(s):\n"
                        "*** %s\n",
                        len(olist),
                        ", ".join([rangename2d(*coords) for coords in olist]),
                        )
            elif rc == XL_CF:
                if not fmt_info: continue
                cf_type, cmp_op, sz1, sz2, flags = unpack("<BBHHi", data[0:10])
                font_block = (flags >> 26) & 1
                bord_block = (flags >> 28) & 1
                patt_block = (flags >> 29) & 1
                if self.verbosity >= 1:
                    fprintf(self.logfile,
                        "\n*** WARNING: Ignoring CF (conditional formatting) sub-record.\n"
                        "*** cf_type=%d, cmp_op=%d, sz1=%d, sz2=%d, flags=0x%08x\n"
                        "*** optional data blocks: font=%d, border=%d, pattern=%d\n",
                        cf_type, cmp_op, sz1, sz2, flags,
                        font_block, bord_block, patt_block,
                        )
                # hex_char_dump(data, 0, data_len, fout=self.logfile)
                pos = 12
                if font_block:
                    (font_height, font_options, weight, escapement, underline,
                    font_colour_index, two_bits, font_esc, font_underl) = \
                        unpack("<64x i i H H B 3x i 4x i i i 18x", data[pos:pos+118])
                    font_style = (two_bits > 1) & 1
                    posture = (font_options > 1) & 1
                    font_canc = (two_bits > 7) & 1
                    cancellation = (font_options > 7) & 1
                    if self.verbosity >= 1:
                        fprintf(self.logfile,
                            "*** Font info: height=%d, weight=%d, escapement=%d,\n"
                            "*** underline=%d, colour_index=%d, esc=%d, underl=%d,\n"
                            "*** style=%d, posture=%d, canc=%d, cancellation=%d\n",
                            font_height, weight, escapement, underline,
                            font_colour_index, font_esc, font_underl,
                            font_style, posture, font_canc, cancellation,
                            )
                    pos += 118
                if bord_block:
                    pos += 8
                if patt_block:
                    pos += 4
                fmla1 = data[pos:pos+sz1]
                pos += sz1
                if blah and sz1:
                    fprintf(self.logfile, "*** formula 1:\n", )
                    dump_formula(bk, fmla1, sz1, bv, reldelta=0, blah=1)
                fmla2 = data[pos:pos+sz2]
                pos += sz2
                assert pos == data_len
                if blah and sz2:
                    fprintf(self.logfile, "*** formula 2:\n", )
                    dump_formula(bk, fmla2, sz2, bv, reldelta=0, blah=1)
            elif rc == XL_DEFAULTROWHEIGHT:
                if data_len == 4:
                    bits, self.default_row_height = unpack("<HH", data[:4])
                elif data_len == 2:
                    self.default_row_height, = unpack("<H", data)
                    bits = 0
                    fprintf(self.logfile,
                        "*** WARNING: DEFAULTROWHEIGHT record len is 2, "
                        "should be 4; assuming BIFF2 format\n")
                else:
                    bits = 0
                    fprintf(self.logfile,
                        "*** WARNING: DEFAULTROWHEIGHT record len is %d, "
                        "should be 4; ignoring this record\n",
                        data_len)
                self.default_row_height_mismatch = bits & 1
                self.default_row_hidden = (bits >> 1) & 1
                self.default_additional_space_above = (bits >> 2) & 1
                self.default_additional_space_below = (bits >> 3) & 1
            elif rc == XL_MERGEDCELLS:
                if not fmt_info: continue
                pos = unpack_cell_range_address_list_update_pos(
                    self.merged_cells, data, 0, bv, addr_size=8)
                if blah:
                    fprintf(self.logfile,
                        "MERGEDCELLS: %d ranges\n", (pos - 2) // 8)
                assert pos == data_len, \
                    "MERGEDCELLS: pos=%d data_len=%d" % (pos, data_len)
            elif rc == XL_WINDOW2:
                if bv >= 80 and data_len >= 14:
                    (options,
                        self.first_visible_rowx, self.first_visible_colx,
                        self.gridline_colour_index,
                        self.cached_page_break_preview_mag_factor,
                        self.cached_normal_view_mag_factor
                    ) = unpack("<HHHHxxHH", data[:14])
                else:
                    assert bv >= 30 # BIFF3-7
                    (options,
                        self.first_visible_rowx, self.first_visible_colx,
                    ) = unpack("<HHH", data[:6])
                    self.gridline_colour_rgb = unpack("<BBB", data[6:9])
                    self.gridline_colour_index = nearest_colour_index(
                        self.book.colour_map, self.gridline_colour_rgb, debug=0)
                # options -- Bit, Mask, Contents:
                # 0 0001H 0 = Show formula results 1 = Show formulas
                # 1 0002H 0 = Do not show grid lines 1 = Show grid lines
                # 2 0004H 0 = Do not show sheet headers 1 = Show sheet headers
                # 3 0008H 0 = Panes are not frozen 1 = Panes are frozen (freeze)
                # 4 0010H 0 = Show zero values as empty cells 1 = Show zero values
                # 5 0020H 0 = Manual grid line colour 1 = Automatic grid line colour
                # 6 0040H 0 = Columns from left to right 1 = Columns from right to left
                # 7 0080H 0 = Do not show outline symbols 1 = Show outline symbols
                # 8 0100H 0 = Keep splits if pane freeze is removed 1 = Remove splits if pane freeze is removed
                # 9 0200H 0 = Sheet not selected 1 = Sheet selected (BIFF5-BIFF8)
                # 10 0400H 0 = Sheet not visible 1 = Sheet visible (BIFF5-BIFF8)
                # 11 0800H 0 = Show in normal view 1 = Show in page break preview (BIFF8)
                # The freeze flag specifies, if a following PANE record (6.71) describes unfrozen or frozen panes.
                for attr, _unused_defval in _WINDOW2_options:
                    setattr(self, attr, options & 1)
                    options >>= 1
            elif rc == XL_SCL:
                num, den = unpack("<HH", data)
                result = 0
                if den:
                    result = (num * 100) // den
                if not(10 <= result <= 400):
                    if DEBUG or self.verbosity >= 0:
                        print((
                            "WARNING *** SCL rcd sheet %d: should have 0.1 <= num/den <= 4; got %d/%d"
                            % (self.number, num, den)
                            ), file=self.logfile)
                    result = 100
                self.scl_mag_factor = result
            elif rc == XL_PANE:
                (
                self.vert_split_pos,
                self.horz_split_pos,
                self.horz_split_first_visible,
                self.vert_split_first_visible,
                self.split_active_pane,
                ) = unpack("<HHHHB", data[:9])
                self.has_pane_record = 1
            elif rc == XL_HORIZONTALPAGEBREAKS:
                if not fmt_info: continue
                num_breaks, = local_unpack("<H", data[:2])
                assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
                pos = 2
                if bv < 80:
                    while pos < data_len:
                        self.horizontal_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 255))
                        pos += 2
                else:
                    while pos < data_len:
                        self.horizontal_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
                        pos += 6
            elif rc == XL_VERTICALPAGEBREAKS:
                if not fmt_info: continue
                num_breaks, = local_unpack("<H", data[:2])
                assert num_breaks * (2 + 4 * (bv >= 80)) + 2 == data_len
                pos = 2
                if bv < 80:
                    while pos < data_len:
                        self.vertical_page_breaks.append((local_unpack("<H", data[pos:pos+2])[0], 0, 65535))
                        pos += 2
                else:
                    while pos < data_len:
                        self.vertical_page_breaks.append(local_unpack("<HHH", data[pos:pos+6]))
                        pos += 6
            #### all of the following are for BIFF <= 4W
            elif bv <= 45:
                if rc == XL_FORMAT or rc == XL_FORMAT2:
                    bk.handle_format(data, rc)
                elif rc == XL_FONT or rc == XL_FONT_B3B4:
                    bk.handle_font(data)
                elif rc == XL_STYLE:
                    if not self.book._xf_epilogue_done:
                        self.book.xf_epilogue()
                    bk.handle_style(data)
                elif rc == XL_PALETTE:
                    bk.handle_palette(data)
                elif rc == XL_BUILTINFMTCOUNT:
                    bk.handle_builtinfmtcount(data)
                elif rc == XL_XF4 or rc == XL_XF3 or rc == XL_XF2: #### N.B. not XL_XF
                    bk.handle_xf(data)
                elif rc == XL_DATEMODE:
                    bk.handle_datemode(data)
                elif rc == XL_CODEPAGE:
                    bk.handle_codepage(data)
                elif rc == XL_FILEPASS:
                    bk.handle_filepass(data)
                elif rc == XL_WRITEACCESS:
                    bk.handle_writeaccess(data)
                elif rc == XL_IXFE:
                    self._ixfe = local_unpack('<H', data)[0]
                elif rc == XL_NUMBER_B2:
                    rowx, colx, cell_attr, d = local_unpack('<HH3sd', data)
                    self_put_cell(rowx, colx, None, d, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
                elif rc == XL_INTEGER:
                    rowx, colx, cell_attr, d = local_unpack('<HH3sH', data)
                    self_put_cell(rowx, colx, None, float(d), self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
                elif rc == XL_LABEL_B2:
                    rowx, colx, cell_attr = local_unpack('<HH3s', data[0:7])
                    strg = unpack_string(data, 7, bk.encoding or bk.derive_encoding(), lenlen=1)
                    self_put_cell(rowx, colx, XL_CELL_TEXT, strg, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
                elif rc == XL_BOOLERR_B2:
                    rowx, colx, cell_attr, value, is_err = local_unpack('<HH3sBB', data)
                    cellty = (XL_CELL_BOOLEAN, XL_CELL_ERROR)[is_err]
                    # if DEBUG: print "XL_BOOLERR_B2", rowx, colx, cell_attr, value, is_err
                    self_put_cell(rowx, colx, cellty, value, self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
                elif rc == XL_BLANK_B2:
                    if not fmt_info: continue
                    rowx, colx, cell_attr = local_unpack('<HH3s', data[:7])
                    self_put_cell(rowx, colx, XL_CELL_BLANK, '', self.fixed_BIFF2_xfindex(cell_attr, rowx, colx))
                elif rc == XL_EFONT:
                    bk.handle_efont(data)
                elif rc == XL_ROW_B2:
                    if not fmt_info: continue
                    rowx, bits1, bits2 = local_unpack('<H4xH2xB', data[0:11])
                    if not(0 <= rowx < self.utter_max_rows):
                        print("*** NOTE: ROW_B2 record has row index %d; "
                            "should have 0 <= rowx < %d -- record ignored!"
                            % (rowx, self.utter_max_rows), file=self.logfile)
                        continue
                    if not (bits2 & 1): # has_default_xf_index is false
                        xf_index = -1
                    elif data_len == 18:
                        # Seems the XF index in the cell_attr is dodgy
                        xfx = local_unpack('<H', data[16:18])[0]
                        xf_index = self.fixed_BIFF2_xfindex(cell_attr=None, rowx=rowx, colx=-1, true_xfx=xfx)
                    else:
                        cell_attr = data[13:16]
                        xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx, colx=-1)
                    key = (bits1, bits2, xf_index)
                    r = rowinfo_sharing_dict.get(key)
                    if r is None:
                        rowinfo_sharing_dict[key] = r = Rowinfo()
                        r.height = bits1 & 0x7fff
                        r.has_default_height = (bits1 >> 15) & 1
                        r.has_default_xf_index = bits2 & 1
                        r.xf_index = xf_index
                        # r.outline_level = 0             # set in __init__
                        # r.outline_group_starts_ends = 0 # set in __init__
                        # r.hidden = 0                    # set in __init__
                        # r.height_mismatch = 0           # set in __init__
                        # r.additional_space_above = 0    # set in __init__
                        # r.additional_space_below = 0    # set in __init__
                    self.rowinfo_map[rowx] = r
                    if 0 and r.xf_index > -1:
                        fprintf(self.logfile,
                            "**ROW %d %d %d\n",
                            self.number, rowx, r.xf_index)
                    if blah_rows:
                        print('ROW_B2', rowx, bits1, bits2, file=self.logfile)
                        r.dump(self.logfile,
                            header="--- sh #%d, rowx=%d ---" % (self.number, rowx))
                elif rc == XL_COLWIDTH: # BIFF2 only
                    if not fmt_info: continue
                    first_colx, last_colx, width \
                        = local_unpack("<BBH", data[:4])
                    if not(first_colx <= last_colx):
                        print("*** NOTE: COLWIDTH record has first col index %d, last %d; "
                            "should have first <= last -- record ignored!"
                            % (first_colx, last_colx), file=self.logfile)
                        continue
                    for colx in xrange(first_colx, last_colx+1):
                        if colx in self.colinfo_map:
                            c = self.colinfo_map[colx]
                        else:
                            c = Colinfo()
                            self.colinfo_map[colx] = c
                        c.width = width
                    if blah:
                        fprintf(
                            self.logfile,
                            "COLWIDTH sheet #%d cols %d-%d: wid=%d\n",
                            self.number, first_colx, last_colx, width
                            )
                elif rc == XL_COLUMNDEFAULT: # BIFF2 only
                    if not fmt_info: continue
                    first_colx, last_colx = local_unpack("<HH", data[:4])
                    #### Warning OOo docs wrong; first_colx <= colx < last_colx
                    if blah:
                        fprintf(
                            self.logfile,
                            "COLUMNDEFAULT sheet #%d cols in range(%d, %d)\n",
                            self.number, first_colx, last_colx
                            )
                    if not(0 <= first_colx < last_colx <= 256):
                        print("*** NOTE: COLUMNDEFAULT record has first col index %d, last %d; "
                            "should have 0 <= first < last <= 256"
                            % (first_colx, last_colx), file=self.logfile)
                        last_colx = min(last_colx, 256)
                    for colx in xrange(first_colx, last_colx):
                        offset = 4 + 3 * (colx - first_colx)
                        cell_attr = data[offset:offset+3]
                        xf_index = self.fixed_BIFF2_xfindex(cell_attr, rowx=-1, colx=colx)
                        if colx in self.colinfo_map:
                            c = self.colinfo_map[colx]
                        else:
                            c = Colinfo()
                            self.colinfo_map[colx] = c
                        c.xf_index = xf_index
                elif rc == XL_WINDOW2_B2: # BIFF 2 only
                    attr_names = ("show_formulas", "show_grid_lines", "show_sheet_headers",
                        "panes_are_frozen", "show_zero_values")
                    for attr, char in zip(attr_names, data[0:5]):
                        setattr(self, attr, int(char != b'\0'))
                    (self.first_visible_rowx, self.first_visible_colx,
                    self.automatic_grid_line_colour,
                    ) = unpack("<HHB", data[5:10])
                    self.gridline_colour_rgb = unpack("<BBB", data[10:13])
                    self.gridline_colour_index = nearest_colour_index(
                        self.book.colour_map, self.gridline_colour_rgb, debug=0)
            else:
                # if DEBUG: print "SHEET.READ: Unhandled record type %02x %d bytes %r" % (rc, data_len, data)
                pass
        if not eof_found:
            raise XLRDError("Sheet %d (%r) missing EOF record"
                % (self.number, self.name))
        self.tidy_dimensions()
        self.update_cooked_mag_factors()
        bk._position = oldpos
        return 1

    def string_record_contents(self, data):
        bv = self.biff_version
        bk = self.book
        lenlen = (bv >= 30) + 1
        nchars_expected = unpack("<" + "BH"[lenlen - 1], data[:lenlen])[0]
        offset = lenlen
        if bv < 80:
            enc = bk.encoding or bk.derive_encoding()
        nchars_found = 0
        result = UNICODE_LITERAL("")
        while 1:
            if bv >= 80:
                flag = BYTES_ORD(data[offset]) & 1
                enc = ("latin_1", "utf_16_le")[flag]
                offset += 1
            chunk = unicode(data[offset:], enc)
            result += chunk
            nchars_found += len(chunk)
            if nchars_found == nchars_expected:
                return result
            if nchars_found > nchars_expected:
                msg = ("STRING/CONTINUE: expected %d chars, found %d"
                    % (nchars_expected, nchars_found))
                raise XLRDError(msg)
            rc, _unused_len, data = bk.get_record_parts()
            if rc != XL_CONTINUE:
                raise XLRDError(
                    "Expected CONTINUE record; found record-type 0x%04X" % rc)
            offset = 0
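    # Illustrative sketch, not part of the original module: consuming the
    # "cooked" magnification factors that update_cooked_mag_factors (below)
    # derives from the SCL and WINDOW2 records. Per the comments in __init__,
    # these are the values to pass to xlwt when writing the sheet back out;
    # thesheet is a placeholder.
    # <pre>
    # zoom = thesheet.cooked_normal_view_mag_factor            # intended range 10..400
    # preview_zoom = thesheet.cooked_page_break_preview_mag_factor
    # </pre>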
    def update_cooked_mag_factors(self):
        # Cached values are used ONLY for the non-active view mode.
        # When the user switches to the non-active view mode,
        # if the cached value for that mode is not valid,
        # Excel pops up a window which says:
        # "The number must be between 10 and 400. Try again by entering a number in this range."
        # When the user hits OK, it drops into the non-active view mode
        # but uses the magnification from the active mode.
        # NOTE: definition of "valid" depends on mode ... see below
        blah = DEBUG or self.verbosity > 0
        if self.show_in_page_break_preview:
            if self.scl_mag_factor is None: # no SCL record
                self.cooked_page_break_preview_mag_factor = 100 # Yes, 100, not 60, NOT a typo
            else:
                self.cooked_page_break_preview_mag_factor = self.scl_mag_factor
            zoom = self.cached_normal_view_mag_factor
            if not (10 <= zoom <= 400):
                if blah:
                    print((
                        "WARNING *** WINDOW2 rcd sheet %d: Bad cached_normal_view_mag_factor: %d"
                        % (self.number, self.cached_normal_view_mag_factor)
                        ), file=self.logfile)
                zoom = self.cooked_page_break_preview_mag_factor
            self.cooked_normal_view_mag_factor = zoom
        else:
            # normal view mode
            if self.scl_mag_factor is None: # no SCL record
                self.cooked_normal_view_mag_factor = 100
            else:
                self.cooked_normal_view_mag_factor = self.scl_mag_factor
            zoom = self.cached_page_break_preview_mag_factor
            if not zoom:
                # VALID, defaults to 60
                zoom = 60
            elif not (10 <= zoom <= 400):
                if blah:
                    print((
                        "WARNING *** WINDOW2 rcd sheet %r: Bad cached_page_break_preview_mag_factor: %r"
                        % (self.number, self.cached_page_break_preview_mag_factor)
                        ), file=self.logfile)
                zoom = self.cooked_normal_view_mag_factor
            self.cooked_page_break_preview_mag_factor = zoom

    def fixed_BIFF2_xfindex(self, cell_attr, rowx, colx, true_xfx=None):
        DEBUG = 0
        blah = DEBUG or self.verbosity >= 2
        if self.biff_version == 21:
            if self.book.xf_list:
                if true_xfx is not None:
                    xfx = true_xfx
                else:
                    xfx = BYTES_ORD(cell_attr[0]) & 0x3F
                if xfx == 0x3F:
                    if self._ixfe is None:
                        raise XLRDError("BIFF2 cell record has XF index 63 but no preceding IXFE record.")
                    xfx = self._ixfe
                    # OOo docs are capable of interpretation that each
                    # cell record is preceded immediately by its own IXFE record.
                    # Empirical evidence is that (sensibly) an IXFE record applies to all
                    # following cell records until another IXFE comes along.
                return xfx
            # Have either Excel 2.0, or broken 2.1 w/o XF records -- same effect.
            self.biff_version = self.book.biff_version = 20
        #### check that XF slot in cell_attr is zero
        xfx_slot = BYTES_ORD(cell_attr[0]) & 0x3F
        assert xfx_slot == 0
        xfx = self._cell_attr_to_xfx.get(cell_attr)
        if xfx is not None:
            return xfx
        if blah:
            fprintf(self.logfile, "New cell_attr %r at (%r, %r)\n", cell_attr, rowx, colx)
        if not self.book.xf_list:
            for xfx in xrange(16):
                self.insert_new_BIFF20_xf(cell_attr=b"\x40\x00\x00", style=xfx < 15)
        xfx = self.insert_new_BIFF20_xf(cell_attr=cell_attr)
        return xfx

    def insert_new_BIFF20_xf(self, cell_attr, style=0):
        DEBUG = 0
        blah = DEBUG or self.verbosity >= 2
        book = self.book
        xfx = len(book.xf_list)
        xf = self.fake_XF_from_BIFF20_cell_attr(cell_attr, style)
        xf.xf_index = xfx
        book.xf_list.append(xf)
        if blah:
            xf.dump(self.logfile, header="=== Faked XF %d ===" % xfx, footer="======")
        if xf.format_key not in book.format_map:
            if xf.format_key:
                msg = "ERROR *** XF[%d] unknown format key (%d, 0x%04x)\n"
                fprintf(self.logfile, msg,
                    xf.xf_index, xf.format_key, xf.format_key)
            fmt = Format(xf.format_key, FUN, UNICODE_LITERAL("General"))
            book.format_map[xf.format_key] = fmt
            book.format_list.append(fmt)
        cellty_from_fmtty = {
            FNU: XL_CELL_NUMBER,
            FUN: XL_CELL_NUMBER,
            FGE: XL_CELL_NUMBER,
            FDT: XL_CELL_DATE,
            FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text.
            }
        fmt = book.format_map[xf.format_key]
        cellty = cellty_from_fmtty[fmt.type]
        self._xf_index_to_xl_type_map[xf.xf_index] = cellty
        self._cell_attr_to_xfx[cell_attr] = xfx
        return xfx

    def fake_XF_from_BIFF20_cell_attr(self, cell_attr, style=0):
        from .formatting import XF, XFAlignment, XFBorder, XFBackground, XFProtection
        xf = XF()
        xf.alignment = XFAlignment()
        xf.alignment.indent_level = 0
        xf.alignment.shrink_to_fit = 0
        xf.alignment.text_direction = 0
        xf.border = XFBorder()
        xf.border.diag_up = 0
        xf.border.diag_down = 0
        xf.border.diag_colour_index = 0
        xf.border.diag_line_style = 0 # no line
        xf.background = XFBackground()
        xf.protection = XFProtection()
        (prot_bits, font_and_format, halign_etc) = unpack('<BBB', cell_attr)
        xf.format_key = font_and_format & 0x3F
        xf.font_index = (font_and_format & 0xC0) >> 6
        upkbits(xf.protection, prot_bits, (
            (6, 0x40, 'cell_locked'),
            (7, 0x80, 'formula_hidden'),
            ))
        xf.alignment.hor_align = halign_etc & 0x07
        for mask, side in ((0x08, 'left'), (0x10, 'right'), (0x20, 'top'), (0x40, 'bottom')):
            if halign_etc & mask:
                colour_index, line_style = 8, 1 # black, thin
            else:
                colour_index, line_style = 0, 0 # none, none
            setattr(xf.border, side + '_colour_index', colour_index)
            setattr(xf.border, side + '_line_style', line_style)
        bg = xf.background
        if halign_etc & 0x80:
            bg.fill_pattern = 17
        else:
            bg.fill_pattern = 0
        bg.background_colour_index = 9 # white
        bg.pattern_colour_index = 8 # black
        xf.parent_style_index = (0x0FFF, 0)[style]
        xf.alignment.vert_align = 2 # bottom
        xf.alignment.rotation = 0
        for attr_stem in \
            "format font alignment border background protection".split():
            attr = "_" + attr_stem + "_flag"
            setattr(xf, attr, 1)
        return xf

    def req_fmt_info(self):
        if not self.formatting_info:
            raise XLRDError("Feature requires open_workbook(..., formatting_info=True)")

    ##
    # Determine column display width.
    # <br /> -- New in version 0.6.1
    # <br />
    # @param colx Index of the queried column, range 0 to 255.
    # Note that it is possible to find out the width that will be used to display
    # columns with no cell information e.g. column IV (colx=255).
    # @return The column width that will be used for displaying
    # the given column by Excel, in units of 1/256th of the width of a
    # standard character (the digit zero in the first font).
    def computed_column_width(self, colx):
        self.req_fmt_info()
        if self.biff_version >= 80:
            colinfo = self.colinfo_map.get(colx, None)
            if colinfo is not None:
                return colinfo.width
            if self.standardwidth is not None:
                return self.standardwidth
        elif self.biff_version >= 40:
            if self.gcw[colx]:
                if self.standardwidth is not None:
                    return self.standardwidth
            else:
                colinfo = self.colinfo_map.get(colx, None)
                if colinfo is not None:
                    return colinfo.width
        elif self.biff_version == 30:
            colinfo = self.colinfo_map.get(colx, None)
            if colinfo is not None:
                return colinfo.width
        # All roads lead to Rome and the DEFCOLWIDTH ...
        if self.defcolwidth is not None:
            return self.defcolwidth * 256
        return 8 * 256 # 8 is what Excel puts in a DEFCOLWIDTH record
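    # Illustrative sketch, not part of the original module: converting the
    # 1/256-of-a-character units returned by computed_column_width into an
    # approximate character count. Requires open_workbook(formatting_info=True);
    # thesheet and colx are placeholders.
    # <pre>
    # width_units = thesheet.computed_column_width(colx)
    # approx_chars = width_units / 256.0   # e.g. 2048 units -> width of 8 '0' characters
    # </pre>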
object dump ...") print("offset=%d record_size=%d" % (offset, record_size)) extra_nbytes = record_size - offset if extra_nbytes > 0: fprintf( self.logfile, "*** WARNING: hyperlink at r=%d c=%d has %d extra data bytes: %s\n", h.frowx, h.fcolx, extra_nbytes, REPR(data[-extra_nbytes:]) ) # Seen: b"\x00\x00" also b"A\x00", b"V\x00" elif extra_nbytes < 0: raise XLRDError("Bug or corrupt file, send copy of input file for debugging") self.hyperlink_list.append(h) for rowx in xrange(h.frowx, h.lrowx+1): for colx in xrange(h.fcolx, h.lcolx+1): self.hyperlink_map[rowx, colx] = h def handle_quicktip(self, data): rcx, frowx, lrowx, fcolx, lcolx = unpack('<5H', data[:10]) assert rcx == XL_QUICKTIP assert self.hyperlink_list h = self.hyperlink_list[-1] assert (frowx, lrowx, fcolx, lcolx) == (h.frowx, h.lrowx, h.fcolx, h.lcolx) assert data[-2:] == b'\x00\x00' h.quicktip = unicode(data[10:-2], 'utf_16_le') def handle_msodrawingetc(self, recid, data_len, data): if not OBJ_MSO_DEBUG: return DEBUG = 1 if self.biff_version < 80: return o = MSODrawing() pos = 0 while pos < data_len: tmp, fbt, cb = unpack('<HHI', data[pos:pos+8]) ver = tmp & 0xF inst = (tmp >> 4) & 0xFFF if ver == 0xF: ndb = 0 # container else: ndb = cb if DEBUG: hex_char_dump(data, pos, ndb + 8, base=0, fout=self.logfile) fprintf(self.logfile, "fbt:0x%04X inst:%d ver:0x%X cb:%d (0x%04X)\n", fbt, inst, ver, cb, cb) if fbt == 0xF010: # Client Anchor assert ndb == 18 (o.anchor_unk, o.anchor_colx_lo, o.anchor_rowx_lo, o.anchor_colx_hi, o.anchor_rowx_hi) = unpack('<Hiiii', data[pos+8:pos+8+ndb]) elif fbt == 0xF011: # Client Data # must be followed by an OBJ record assert cb == 0 assert pos + 8 == data_len else: pass pos += ndb + 8 else: # didn't break out of while loop assert pos == data_len if DEBUG: o.dump(self.logfile, header="=== MSODrawing ===", footer= " ") def handle_obj(self, data): if self.biff_version < 80: return None o = MSObj() data_len = len(data) pos = 0 if OBJ_MSO_DEBUG: fprintf(self.logfile, "... OBJ record len=%d...\n", data_len) while pos < data_len: ft, cb = unpack('<HH', data[pos:pos+4]) if OBJ_MSO_DEBUG: fprintf(self.logfile, "pos=%d ft=0x%04X cb=%d\n", pos, ft, cb) hex_char_dump(data, pos, cb + 4, base=0, fout=self.logfile) if pos == 0 and not (ft == 0x15 and cb == 18): if self.verbosity: fprintf(self.logfile, "*** WARNING Ignoring antique or corrupt OBJECT record\n") return None if ft == 0x15: # ftCmo ... s/b first assert pos == 0 o.type, o.id, option_flags = unpack('<HHH', data[pos+4:pos+10]) upkbits(o, option_flags, ( ( 0, 0x0001, 'locked'), ( 4, 0x0010, 'printable'), ( 8, 0x0100, 'autofilter'), # not documented in Excel 97 dev kit ( 9, 0x0200, 'scrollbar_flag'), # not documented in Excel 97 dev kit (13, 0x2000, 'autofill'), (14, 0x4000, 'autoline'), )) elif ft == 0x00: if data[pos:data_len] == b'\0' * (data_len - pos): # ignore "optional reserved" data at end of record break msg = "Unexpected data at end of OBJECT record" fprintf(self.logfile, "*** ERROR %s\n" % msg) hex_char_dump(data, pos, data_len - pos, base=0, fout=self.logfile) raise XLRDError(msg) elif ft == 0x0C: # Scrollbar values = unpack('<5H', data[pos+8:pos+18]) for value, tag in zip(values, ('value', 'min', 'max', 'inc', 'page')): setattr(o, 'scrollbar_' + tag, value) elif ft == 0x0D: # "Notes structure" [used for cell comments] # not documented in Excel 97 dev kit if OBJ_MSO_DEBUG: fprintf(self.logfile, "*** OBJ record has ft==0x0D 'notes' structure\n") elif ft == 0x13: # list box data if o.autofilter: # non standard exit. 
NOT documented break else: pass pos += cb + 4 else: # didn't break out of while loop pass if OBJ_MSO_DEBUG: o.dump(self.logfile, header="=== MSOBj ===", footer= " ") return o def handle_note(self, data, txos): if OBJ_MSO_DEBUG: fprintf(self.logfile, '... NOTE record ...\n') hex_char_dump(data, 0, len(data), base=0, fout=self.logfile) o = Note() data_len = len(data) if self.biff_version < 80: o.rowx, o.colx, expected_bytes = unpack('<HHH', data[:6]) nb = len(data) - 6 assert nb <= expected_bytes pieces = [data[6:]] expected_bytes -= nb while expected_bytes > 0: rc2, data2_len, data2 = self.book.get_record_parts() assert rc2 == XL_NOTE dummy_rowx, nb = unpack('<H2xH', data2[:6]) assert dummy_rowx == 0xFFFF assert nb == data2_len - 6 pieces.append(data2[6:]) expected_bytes -= nb assert expected_bytes == 0 enc = self.book.encoding or self.book.derive_encoding() o.text = unicode(b''.join(pieces), enc) o.rich_text_runlist = [(0, 0)] o.show = 0 o.row_hidden = 0 o.col_hidden = 0 o.author = UNICODE_LITERAL('') o._object_id = None self.cell_note_map[o.rowx, o.colx] = o return # Excel 8.0+ o.rowx, o.colx, option_flags, o._object_id = unpack('<4H', data[:8]) o.show = (option_flags >> 1) & 1 o.row_hidden = (option_flags >> 7) & 1 o.col_hidden = (option_flags >> 8) & 1 # XL97 dev kit book says NULL [sic] bytes padding between string count and string data # to ensure that string is word-aligned. Appears to be nonsense. o.author, endpos = unpack_unicode_update_pos(data, 8, lenlen=2) # There is a random/undefined byte after the author string (not counted in the # string length). # Issue 4 on github: Google Spreadsheet doesn't write the undefined byte. assert (data_len - endpos) in (0, 1) if OBJ_MSO_DEBUG: o.dump(self.logfile, header="=== Note ===", footer= " ") txo = txos.get(o._object_id) if txo: o.text = txo.text o.rich_text_runlist = txo.rich_text_runlist self.cell_note_map[o.rowx, o.colx] = o def handle_txo(self, data): if self.biff_version < 80: return o = MSTxo() data_len = len(data) fmt = '<HH6sHHH' fmtsize = calcsize(fmt) option_flags, o.rot, controlInfo, cchText, cbRuns, o.ifntEmpty = unpack(fmt, data[:fmtsize]) o.fmla = data[fmtsize:] upkbits(o, option_flags, ( ( 3, 0x000E, 'horz_align'), ( 6, 0x0070, 'vert_align'), ( 9, 0x0200, 'lock_text'), (14, 0x4000, 'just_last'), (15, 0x8000, 'secret_edit'), )) totchars = 0 o.text = UNICODE_LITERAL('') while totchars < cchText: rc2, data2_len, data2 = self.book.get_record_parts() assert rc2 == XL_CONTINUE if OBJ_MSO_DEBUG: hex_char_dump(data2, 0, data2_len, base=0, fout=self.logfile) nb = BYTES_ORD(data2[0]) # 0 means latin1, 1 means utf_16_le nchars = data2_len - 1 if nb: assert nchars % 2 == 0 nchars //= 2 utext, endpos = unpack_unicode_update_pos(data2, 0, known_len=nchars) assert endpos == data2_len o.text += utext totchars += nchars o.rich_text_runlist = [] totruns = 0 while totruns < cbRuns: # counts of BYTES, not runs rc3, data3_len, data3 = self.book.get_record_parts() # print totruns, cbRuns, rc3, data3_len, repr(data3) assert rc3 == XL_CONTINUE assert data3_len % 8 == 0 for pos in xrange(0, data3_len, 8): run = unpack('<HH4x', data3[pos:pos+8]) o.rich_text_runlist.append(run) totruns += 8 # remove trailing entries that point to the end of the string while o.rich_text_runlist and o.rich_text_runlist[-1][0] == cchText: del o.rich_text_runlist[-1] if OBJ_MSO_DEBUG: o.dump(self.logfile, header="=== MSTxo ===", footer= " ") print(o.rich_text_runlist, file=self.logfile) return o def handle_feat11(self, data): if not OBJ_MSO_DEBUG: return # rt: Record 
type; this matches the BIFF rt in the first two bytes of the record; =0872h # grbitFrt: FRT cell reference flag (see table below for details) # Ref0: Range reference to a worksheet cell region if grbitFrt=1 (bitFrtRef). Otherwise blank. # isf: Shared feature type index =5 for Table # fHdr: =0 since this is for feat not feat header # reserved0: Reserved for future use =0 for Table # cref: Count of ref ranges this feature is on # cbFeatData: Count of bytes for the current feature data. # reserved1: =0 currently not used # Ref1: Repeat of Ref0. UNDOCUMENTED rt, grbitFrt, Ref0, isf, fHdr, reserved0, cref, cbFeatData, reserved1, Ref1 = unpack('<HH8sHBiHiH8s', data[0:35]) assert reserved0 == 0 assert reserved1 == 0 assert isf == 5 assert rt == 0x872 assert fHdr == 0 assert Ref1 == Ref0 fprintf(self.logfile, "FEAT11: grbitFrt=%d Ref0=%r cref=%d cbFeatData=%d\n", grbitFrt, Ref0, cref, cbFeatData) # lt: Table data source type: # =0 for Excel Worksheet Table =1 for read-write SharePoint linked List # =2 for XML mapper Table =3 for Query Table # idList: The ID of the Table (unique per worksheet) # crwHeader: How many header/title rows the Table has at the top # crwTotals: How many total rows the Table has at the bottom # idFieldNext: Next id to try when assigning a unique id to a new field # cbFSData: The size of the Fixed Data portion of the Table data structure. # rupBuild: the rupBuild that generated the record # unusedShort: UNUSED short that can be used later. The value is reserved during round-tripping. # listFlags: Collection of bit flags: (see listFlags' bit setting table below for detail.) # lPosStmCache: Table data stream position of cached data # cbStmCache: Count of bytes of cached data # cchStmCache: Count of characters of uncompressed cached data in the stream # lem: Table edit mode (see List (Table) Editing Mode (lem) setting table below for details.) # rgbHashParam: Hash value for SharePoint Table # cchName: Count of characters in the Table name string rgbName (lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData, rupBuild, unusedShort, listFlags, lPosStmCache, cbStmCache, cchStmCache, lem, rgbHashParam, cchName) = unpack('<iiiiiiHHiiiii16sH', data[35:35+66]) print("lt=%d idList=%d crwHeader=%d crwTotals=%d idFieldNext=%d cbFSData=%d\n"\ "rupBuild=%d unusedShort=%d listFlags=%04X lPosStmCache=%d cbStmCache=%d\n"\ "cchStmCache=%d lem=%d rgbHashParam=%r cchName=%d" % ( lt, idList, crwHeader, crwTotals, idFieldNext, cbFSData, rupBuild, unusedShort,listFlags, lPosStmCache, cbStmCache, cchStmCache, lem, rgbHashParam, cchName), file=self.logfile) class MSODrawing(BaseObject): pass class MSObj(BaseObject): pass class MSTxo(BaseObject): pass ## # <p> Represents a user "comment" or "note". # Note objects are accessible through Sheet.{@link #Sheet.cell_note_map}. # <br />-- New in version 0.7.2 # </p> class Note(BaseObject): ## # Author of note author = UNICODE_LITERAL('') ## # True if the containing column is hidden col_hidden = 0 ## # Column index colx = 0 ## # List of (offset_in_string, font_index) tuples. # Unlike Sheet.{@link #Sheet.rich_text_runlist_map}, the first offset should always be 0. rich_text_runlist = None ## # True if the containing row is hidden row_hidden = 0 ## # Row index rowx = 0 ## # True if note is always shown show = 0 ## # Text of the note text = UNICODE_LITERAL('') ## # <p>Contains the attributes of a hyperlink. # Hyperlink objects are accessible through Sheet.{@link #Sheet.hyperlink_list} # and Sheet.{@link #Sheet.hyperlink_map}.
# <br />-- New in version 0.7.2 # </p> class Hyperlink(BaseObject): ## # Index of first row frowx = None ## # Index of last row lrowx = None ## # Index of first column fcolx = None ## # Index of last column lcolx = None ## # Type of hyperlink. Unicode string, one of 'url', 'unc', # 'local file', 'workbook', 'unknown' type = None ## # The URL or file-path, depending in the type. Unicode string, except # in the rare case of a local but non-existent file with non-ASCII # characters in the name, in which case only the "8.3" filename is available, # as a bytes (3.x) or str (2.x) string, <i>with unknown encoding.</i> url_or_path = None ## # Description ... this is displayed in the cell, # and should be identical to the cell value. Unicode string, or None. It seems # impossible NOT to have a description created by the Excel UI. desc = None ## # Target frame. Unicode string. Note: I have not seen a case of this. # It seems impossible to create one in the Excel UI. target = None ## # "Textmark": the piece after the "#" in # "http://docs.python.org/library#struct_module", or the Sheet1!A1:Z99 # part when type is "workbook". textmark = None ## # The text of the "quick tip" displayed when the cursor # hovers over the hyperlink. quicktip = None # === helpers === def unpack_RK(rk_str): flags = BYTES_ORD(rk_str[0]) if flags & 2: # There's a SIGNED 30-bit integer in there! i, = unpack('<i', rk_str) i >>= 2 # div by 4 to drop the 2 flag bits if flags & 1: return i / 100.0 return float(i) else: # It's the most significant 30 bits of an IEEE 754 64-bit FP number d, = unpack('<d', b'\0\0\0\0' + BYTES_LITERAL(chr(flags & 252)) + rk_str[1:4]) if flags & 1: return d / 100.0 return d ##### =============== Cell ======================================== ##### cellty_from_fmtty = { FNU: XL_CELL_NUMBER, FUN: XL_CELL_NUMBER, FGE: XL_CELL_NUMBER, FDT: XL_CELL_DATE, FTX: XL_CELL_NUMBER, # Yes, a number can be formatted as text. } ctype_text = { XL_CELL_EMPTY: 'empty', XL_CELL_TEXT: 'text', XL_CELL_NUMBER: 'number', XL_CELL_DATE: 'xldate', XL_CELL_BOOLEAN: 'bool', XL_CELL_ERROR: 'error', XL_CELL_BLANK: 'blank', } ## # <p>Contains the data for one cell.</p> # # <p>WARNING: You don't call this class yourself. You access Cell objects # via methods of the {@link #Sheet} object(s) that you found in the {@link #Book} object that # was returned when you called xlrd.open_workbook("myfile.xls").</p> # <p> Cell objects have three attributes: <i>ctype</i> is an int, <i>value</i> # (which depends on <i>ctype</i>) and <i>xf_index</i>. # If "formatting_info" is not enabled when the workbook is opened, xf_index will be None. 
# The following table describes the types of cells and how their values # are represented in Python.</p> # # <table border="1" cellpadding="7"> # <tr> # <th>Type symbol</th> # <th>Type number</th> # <th>Python value</th> # </tr> # <tr> # <td>XL_CELL_EMPTY</td> # <td align="center">0</td> # <td>empty string u''</td> # </tr> # <tr> # <td>XL_CELL_TEXT</td> # <td align="center">1</td> # <td>a Unicode string</td> # </tr> # <tr> # <td>XL_CELL_NUMBER</td> # <td align="center">2</td> # <td>float</td> # </tr> # <tr> # <td>XL_CELL_DATE</td> # <td align="center">3</td> # <td>float</td> # </tr> # <tr> # <td>XL_CELL_BOOLEAN</td> # <td align="center">4</td> # <td>int; 1 means TRUE, 0 means FALSE</td> # </tr> # <tr> # <td>XL_CELL_ERROR</td> # <td align="center">5</td> # <td>int representing internal Excel codes; for a text representation, # refer to the supplied dictionary error_text_from_code</td> # </tr> # <tr> # <td>XL_CELL_BLANK</td> # <td align="center">6</td> # <td>empty string u''. Note: this type will appear only when # open_workbook(..., formatting_info=True) is used.</td> # </tr> # </table> #<p></p> class Cell(BaseObject): __slots__ = ['ctype', 'value', 'xf_index'] def __init__(self, ctype, value, xf_index=None): self.ctype = ctype self.value = value self.xf_index = xf_index def __repr__(self): if self.xf_index is None: return "%s:%r" % (ctype_text[self.ctype], self.value) else: return "%s:%r (XF:%r)" % (ctype_text[self.ctype], self.value, self.xf_index) empty_cell = Cell(XL_CELL_EMPTY, UNICODE_LITERAL('')) ##### =============== Colinfo and Rowinfo ============================== ##### ## # Width and default formatting information that applies to one or # more columns in a sheet. Derived from COLINFO records. # # <p> Here is the default hierarchy for width, according to the OOo docs: # # <br />"""In BIFF3, if a COLINFO record is missing for a column, # the width specified in the record DEFCOLWIDTH is used instead. # # <br />In BIFF4-BIFF7, the width set in this [COLINFO] record is only used, # if the corresponding bit for this column is cleared in the GCW # record, otherwise the column width set in the DEFCOLWIDTH record # is used (the STANDARDWIDTH record is always ignored in this case [see footnote!]). # # <br />In BIFF8, if a COLINFO record is missing for a column, # the width specified in the record STANDARDWIDTH is used. # If this [STANDARDWIDTH] record is also missing, # the column width of the record DEFCOLWIDTH is used instead.""" # <br /> # # Footnote: The docs on the GCW record say this: # """<br /> # If a bit is set, the corresponding column uses the width set in the STANDARDWIDTH # record. If a bit is cleared, the corresponding column uses the width set in the # COLINFO record for this column. # <br />If a bit is set, and the worksheet does not contain the STANDARDWIDTH record, or if # the bit is cleared, and the worksheet does not contain the COLINFO record, the DEFCOLWIDTH # record of the worksheet will be used instead. # <br />"""<br /> # At the moment (2007-01-17) xlrd is going with the GCW version of the story. # Reference to the source may be useful: see the computed_column_width(colx) method # of the Sheet class. # <br />-- New in version 0.6.1 # </p> class Colinfo(BaseObject): ## # Width of the column in 1/256 of the width of the zero character, # using default font (first FONT record in the file). width = 0 ## # XF index to be used for formatting empty cells. 
xf_index = -1 ## # 1 = column is hidden hidden = 0 ## # Value of a 1-bit flag whose purpose is unknown # but is often seen set to 1 bit1_flag = 0 ## # Outline level of the column, in range(7). # (0 = no outline) outline_level = 0 ## # 1 = column is collapsed collapsed = 0 _USE_SLOTS = 1 ## # <p>Height and default formatting information that applies to a row in a sheet. # Derived from ROW records. # <br /> -- New in version 0.6.1</p> # # <p><b>height</b>: Height of the row, in twips. One twip == 1/20 of a point.</p> # # <p><b>has_default_height</b>: 0 = Row has custom height; 1 = Row has default height.</p> # # <p><b>outline_level</b>: Outline level of the row (0 to 7) </p> # # <p><b>outline_group_starts_ends</b>: 1 = Outline group starts or ends here (depending on where the # outline buttons are located, see WSBOOL record [TODO ??]), # <i>and</i> is collapsed </p> # # <p><b>hidden</b>: 1 = Row is hidden (manually, or by a filter or outline group) </p> # # <p><b>height_mismatch</b>: 1 = Row height and default font height do not match </p> # # <p><b>has_default_xf_index</b>: 1 = the xf_index attribute is usable; 0 = ignore it </p> # # <p><b>xf_index</b>: Index to default XF record for empty cells in this row. # Don't use this if has_default_xf_index == 0. </p> # # <p><b>additional_space_above</b>: This flag is set, if the upper border of at least one cell in this row # or if the lower border of at least one cell in the row above is # formatted with a thick line style. Thin and medium line styles are not # taken into account. </p> # # <p><b>additional_space_below</b>: This flag is set, if the lower border of at least one cell in this row # or if the upper border of at least one cell in the row below is # formatted with a medium or thick line style. Thin line styles are not # taken into account. </p> class Rowinfo(BaseObject): if _USE_SLOTS: __slots__ = ( "height", "has_default_height", "outline_level", "outline_group_starts_ends", "hidden", "height_mismatch", "has_default_xf_index", "xf_index", "additional_space_above", "additional_space_below", ) def __init__(self): self.height = None self.has_default_height = None self.outline_level = None self.outline_group_starts_ends = None self.hidden = None self.height_mismatch = None self.has_default_xf_index = None self.xf_index = None self.additional_space_above = None self.additional_space_below = None def __getstate__(self): return ( self.height, self.has_default_height, self.outline_level, self.outline_group_starts_ends, self.hidden, self.height_mismatch, self.has_default_xf_index, self.xf_index, self.additional_space_above, self.additional_space_below, ) def __setstate__(self, state): ( self.height, self.has_default_height, self.outline_level, self.outline_group_starts_ends, self.hidden, self.height_mismatch, self.has_default_xf_index, self.xf_index, self.additional_space_above, self.additional_space_below, ) = state
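A note on the RK decoding above: unpack_RK packs two flag bits into the low bits of a 4-byte value (bit 1 selects a signed 30-bit integer vs. a truncated IEEE double, bit 0 means "divide by 100"). A minimal standalone sketch of the same logic, assuming Python 3 and only the standard library (decode_rk is a hypothetical name, not xlrd's API):

from struct import unpack

def decode_rk(rk4):
    # rk4: the 4-byte little-endian RK value from a BIFF RK/MULRK record
    flags = rk4[0]
    if flags & 2:
        # a signed 30-bit integer lives in the top 30 bits
        i = unpack('<i', rk4)[0] >> 2
        return i / 100.0 if flags & 1 else float(i)
    # otherwise: the most significant 30 bits of an IEEE 754 double
    d, = unpack('<d', b'\0\0\0\0' + bytes([flags & 252]) + rk4[1:4])
    return d / 100.0 if flags & 1 else d

# round-trip check: 1234 stored as a "x100 integer" RK decodes to 12.34
assert decode_rk(((1234 << 2) | 3).to_bytes(4, 'little')) == 12.34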
kingvuplus/nn-gui
refs/heads/master
lib/python/Components/ConditionalWidget.py
33
from GUIComponent import GUIComponent from enigma import eTimer class ConditionalWidget(GUIComponent): def __init__(self, withTimer = True): GUIComponent.__init__(self) self.setConnect(None) if (withTimer): self.conditionCheckTimer = eTimer() self.conditionCheckTimer.callback.append(self.update) self.conditionCheckTimer.start(1000) def postWidgetCreate(self, instance): self.visible = 0 def setConnect(self, conditionalFunction): self.conditionalFunction = conditionalFunction def activateCondition(self, condition): if condition: self.visible = 1 else: self.visible = 0 def update(self): if (self.conditionalFunction != None): try: self.activateCondition(self.conditionalFunction()) except: self.conditionalFunction = None self.activateCondition(False) class BlinkingWidget(GUIComponent): def __init__(self): GUIComponent.__init__(self) self.blinking = False self.setBlinkTime(500) self.timer = eTimer() self.timer.callback.append(self.blink) def setBlinkTime(self, time): self.blinktime = time def blink(self): if self.blinking == True: self.visible = not self.visible def startBlinking(self): self.blinking = True self.timer.start(self.blinktime) def stopBlinking(self): self.blinking = False if self.visible: self.hide() self.timer.stop() class BlinkingWidgetConditional(BlinkingWidget, ConditionalWidget): def __init__(self): BlinkingWidget.__init__(self) ConditionalWidget.__init__(self) def activateCondition(self, condition): if (condition): if not self.blinking: # not blinking yet, so start self.startBlinking() else: if self.blinking: # we are blinking self.stopBlinking()
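The widgets above poll a condition callable once a second (via eTimer) and map its truth value onto visibility, dropping the callable if it ever raises. Outside the enigma2 runtime the same pattern can be sketched in plain Python; everything below is a hypothetical stand-in (no GUIComponent or eTimer):

class FakeConditionalWidget:
    # plain-Python stand-in for the polling pattern above
    def __init__(self):
        self.visible = False
        self.conditional_function = None

    def set_connect(self, fn):
        self.conditional_function = fn

    def update(self):
        # mirror ConditionalWidget.update: a raising condition disables itself
        if self.conditional_function is not None:
            try:
                self.visible = bool(self.conditional_function())
            except Exception:
                self.conditional_function = None
                self.visible = False

w = FakeConditionalWidget()
w.set_connect(lambda: 2 + 2 == 4)
w.update()
assert w.visible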
acetcom/cellwire
refs/heads/master
lib/pfcp/support/cache/tlv-msg-list.py
2
msg_list["PFCP Heartbeat Request"] = { "type" : "1" } msg_list["PFCP Heartbeat Response"] = { "type" : "2" } msg_list["PFCP PFD Management Request"] = { "type" : "3" } msg_list["PFCP PFD Management Response"] = { "type" : "4" } msg_list["PFCP Association Setup Request"] = { "type" : "5" } msg_list["PFCP Association Setup Response"] = { "type" : "6" } msg_list["PFCP Association Update Request"] = { "type" : "7" } msg_list["PFCP Association Update Response"] = { "type" : "8" } msg_list["PFCP Association Release Request"] = { "type" : "9" } msg_list["PFCP Association Release Response"] = { "type" : "10" } msg_list["PFCP Version Not Supported Response"] = { "type" : "11" } msg_list["PFCP Node Report Request"] = { "type" : "12" } msg_list["PFCP Node Report Response"] = { "type" : "13" } msg_list["PFCP Session Set Deletion Request"] = { "type" : "14" } msg_list["PFCP Session Set Deletion Response"] = { "type" : "15" } msg_list["PFCP Session Establishment Request"] = { "type" : "50" } msg_list["PFCP Session Establishment Response"] = { "type" : "51" } msg_list["PFCP Session Modification Request"] = { "type" : "52" } msg_list["PFCP Session Modification Response"] = { "type" : "53" } msg_list["PFCP Session Deletion Request"] = { "type" : "54" } msg_list["PFCP Session Deletion Response"] = { "type" : "55" } msg_list["PFCP Session Report Request"] = { "type" : "56" } msg_list["PFCP Session Report Response"] = { "type" : "57" }
MarcosCommunity/odoo
refs/heads/marcos-8.0
addons/warning/warning.py
243
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields,osv from openerp.tools.translate import _ WARNING_MESSAGE = [ ('no-message','No Message'), ('warning','Warning'), ('block','Blocking Message') ] WARNING_HELP = _('Selecting the "Warning" option will notify user with the message, Selecting "Blocking Message" will throw an exception with the message and block the flow. The Message has to be written in the next field.') class res_partner(osv.osv): _inherit = 'res.partner' _columns = { 'sale_warn' : fields.selection(WARNING_MESSAGE, 'Sales Order', help=WARNING_HELP, required=True), 'sale_warn_msg' : fields.text('Message for Sales Order'), 'purchase_warn' : fields.selection(WARNING_MESSAGE, 'Purchase Order', help=WARNING_HELP, required=True), 'purchase_warn_msg' : fields.text('Message for Purchase Order'), 'picking_warn' : fields.selection(WARNING_MESSAGE, 'Stock Picking', help=WARNING_HELP, required=True), 'picking_warn_msg' : fields.text('Message for Stock Picking'), 'invoice_warn' : fields.selection(WARNING_MESSAGE, 'Invoice', help=WARNING_HELP, required=True), 'invoice_warn_msg' : fields.text('Message for Invoice'), } _defaults = { 'sale_warn' : 'no-message', 'purchase_warn' : 'no-message', 'picking_warn' : 'no-message', 'invoice_warn' : 'no-message', } class sale_order(osv.osv): _inherit = 'sale.order' def onchange_partner_id(self, cr, uid, ids, part, context=None): if not part: return {'value':{'partner_invoice_id': False, 'partner_shipping_id':False, 'payment_term' : False}} warning = {} title = False message = False partner = self.pool.get('res.partner').browse(cr, uid, part, context=context) if partner.sale_warn != 'no-message': title = _("Warning for %s") % partner.name message = partner.sale_warn_msg warning = { 'title': title, 'message': message, } if partner.sale_warn == 'block': return {'value': {'partner_id': False}, 'warning': warning} result = super(sale_order, self).onchange_partner_id(cr, uid, ids, part, context=context) if result.get('warning',False): warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title'] warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message'] if warning: result['warning'] = warning return result class purchase_order(osv.osv): _inherit = 'purchase.order' def onchange_partner_id(self, cr, uid, ids, part, context=None): if not part: return {'value':{'partner_address_id': False}} warning = {} title = False message = False partner = self.pool.get('res.partner').browse(cr, uid, part, context=context) if partner.purchase_warn != 'no-message': title = _("Warning for 
%s") % partner.name message = partner.purchase_warn_msg warning = { 'title': title, 'message': message } if partner.purchase_warn == 'block': return {'value': {'partner_id': False}, 'warning': warning} result = super(purchase_order, self).onchange_partner_id(cr, uid, ids, part, context=context) if result.get('warning',False): warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title'] warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message'] if warning: result['warning'] = warning return result class account_invoice(osv.osv): _inherit = 'account.invoice' def onchange_partner_id(self, cr, uid, ids, type, partner_id, date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False, context=None): if not partner_id: return {'value': { 'account_id': False, 'payment_term': False, } } warning = {} title = False message = False partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context) if partner.invoice_warn != 'no-message': title = _("Warning for %s") % partner.name message = partner.invoice_warn_msg warning = { 'title': title, 'message': message } if partner.invoice_warn == 'block': return {'value': {'partner_id': False}, 'warning': warning} result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id, date_invoice=date_invoice, payment_term=payment_term, partner_bank_id=partner_bank_id, company_id=company_id, context=context) if result.get('warning',False): warning['title'] = title and title +' & '+ result['warning']['title'] or result['warning']['title'] warning['message'] = message and message + ' ' + result['warning']['message'] or result['warning']['message'] if warning: result['warning'] = warning return result class stock_picking(osv.osv): _inherit = 'stock.picking' def onchange_partner_in(self, cr, uid, ids, partner_id=None, context=None): if not partner_id: return {} partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context) warning = {} title = False message = False if partner.picking_warn != 'no-message': title = _("Warning for %s") % partner.name message = partner.picking_warn_msg warning = { 'title': title, 'message': message } if partner.picking_warn == 'block': return {'value': {'partner_id': False}, 'warning': warning} result = {'value': {}} if warning: result['warning'] = warning return result class product_product(osv.osv): _inherit = 'product.template' _columns = { 'sale_line_warn' : fields.selection(WARNING_MESSAGE,'Sales Order Line', help=WARNING_HELP, required=True), 'sale_line_warn_msg' : fields.text('Message for Sales Order Line'), 'purchase_line_warn' : fields.selection(WARNING_MESSAGE,'Purchase Order Line', help=WARNING_HELP, required=True), 'purchase_line_warn_msg' : fields.text('Message for Purchase Order Line'), } _defaults = { 'sale_line_warn' : 'no-message', 'purchase_line_warn' : 'no-message', } class sale_order_line(osv.osv): _inherit = 'sale.order.line' def product_id_change_with_wh(self, cr, uid, ids, pricelist, product, qty=0, uom=False, qty_uos=0, uos=False, name='', partner_id=False, lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, warehouse_id=False, context=None): warning = {} if not product: return {'value': {'th_weight' : 0, 'product_packaging': False, 'product_uos_qty': qty}, 'domain': {'product_uom': [], 'product_uos': []}} product_obj = self.pool.get('product.product') product_info = product_obj.browse(cr, uid, 
product) title = False message = False if product_info.sale_line_warn != 'no-message': title = _("Warning for %s") % product_info.name message = product_info.sale_line_warn_msg warning['title'] = title warning['message'] = message if product_info.sale_line_warn == 'block': return {'value': {'product_id': False}, 'warning': warning} result = super(sale_order_line, self).product_id_change_with_wh( cr, uid, ids, pricelist, product, qty, uom, qty_uos, uos, name, partner_id, lang, update_tax, date_order, packaging, fiscal_position, flag, warehouse_id=warehouse_id, context=context) if result.get('warning',False): warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title'] warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message'] if warning: result['warning'] = warning return result class purchase_order_line(osv.osv): _inherit = 'purchase.order.line' def onchange_product_id(self,cr, uid, ids, pricelist, product, qty, uom, partner_id, date_order=False, fiscal_position_id=False, date_planned=False, name=False, price_unit=False, state='draft', context=None): warning = {} if not product: return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom or False}, 'domain':{'product_uom':[]}} product_obj = self.pool.get('product.product') product_info = product_obj.browse(cr, uid, product) title = False message = False if product_info.purchase_line_warn != 'no-message': title = _("Warning for %s") % product_info.name message = product_info.purchase_line_warn_msg warning['title'] = title warning['message'] = message if product_info.purchase_line_warn == 'block': return {'value': {'product_id': False}, 'warning': warning} result = super(purchase_order_line, self).onchange_product_id(cr, uid, ids, pricelist, product, qty, uom, partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned, name=name, price_unit=price_unit, state=state, context=context) if result.get('warning',False): warning['title'] = title and title +' & '+result['warning']['title'] or result['warning']['title'] warning['message'] = message and message +'\n\n'+result['warning']['message'] or result['warning']['message'] if warning: result['warning'] = warning return result # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
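Every onchange above repeats one merge rule: this model's warning title/message is prepended to whatever warning the super() call already returned, joined with ' & ' and a space respectively. The rule in isolation (merge_warning is a hypothetical helper, not part of the module):

def merge_warning(title, message, result):
    # fold (title, message) into an onchange result dict, chaining with
    # any warning the parent onchange already produced
    prev = result.get('warning')
    if prev:
        title = title and title + ' & ' + prev['title'] or prev['title']
        message = message and message + ' ' + prev['message'] or prev['message']
    if title or message:
        result['warning'] = {'title': title, 'message': message}
    return result

res = merge_warning('Warning for Foo', 'check stock',
                    {'value': {}, 'warning': {'title': 'T', 'message': 'M'}})
assert res['warning'] == {'title': 'Warning for Foo & T',
                          'message': 'check stock M'}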
anhstudios/swganh
refs/heads/develop
data/scripts/templates/object/tangible/deed/event_perk/shared_technical_chest_deed.py
2
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel): result = Tangible() result.template = "object/tangible/deed/event_perk/shared_technical_chest_deed.iff" result.attribute_template_id = 2 result.stfName("event_perk","technical_chest_deed_name") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
Masood-M/yalih
refs/heads/master
jsbeautifier/jsbeautifier/unpackers/myobfuscate.py
2
# # deobfuscator for scripts messed up with myobfuscate.com # by Einar Lielmanis <[email protected]> # # written by Stefano Sanfilippo <[email protected]> # # usage: # # if detect(some_string): # unpacked = unpack(some_string) # # CAVEAT by Einar Lielmanis # # You really don't want to obfuscate your scripts there: they're tracking # your unpackings, your script gets turned into something like this, # as of 2011-08-26: # # var _escape = 'your_script_escaped'; # var _111 = document.createElement('script'); # _111.src = 'http://api.www.myobfuscate.com/?getsrc=ok' + # '&ref=' + encodeURIComponent(document.referrer) + # '&url=' + encodeURIComponent(document.URL); # var 000 = document.getElementsByTagName('head')[0]; # 000.appendChild(_111); # document.write(unescape(_escape)); # """Deobfuscator for scripts messed up with MyObfuscate.com""" import re import base64 # Python 2 retrocompatibility # pylint: disable=F0401 # pylint: disable=E0611 try: from urllib import unquote except ImportError: from urllib.parse import unquote from jsbeautifier.unpackers import UnpackingError PRIORITY = 1 CAVEAT = """// // Unpacker warning: be careful when using myobfuscate.com for your projects: // scripts obfuscated by the free online version call back home. // """ SIGNATURE = ( r'["\x41\x42\x43\x44\x45\x46\x47\x48\x49\x4A\x4B\x4C\x4D\x4E\x4F' r'\x50\x51\x52\x53\x54\x55\x56\x57\x58\x59\x5A\x61\x62\x63\x64\x65' r'\x66\x67\x68\x69\x6A\x6B\x6C\x6D\x6E\x6F\x70\x71\x72\x73\x74\x75' r'\x76\x77\x78\x79\x7A\x30\x31\x32\x33\x34\x35\x36\x37\x38\x39\x2B' r'\x2F\x3D","","\x63\x68\x61\x72\x41\x74","\x69\x6E\x64\x65\x78' r'\x4F\x66","\x66\x72\x6F\x6D\x43\x68\x61\x72\x43\x6F\x64\x65","' r'\x6C\x65\x6E\x67\x74\x68"]') def detect(source): """Detects MyObfuscate.com packer.""" return SIGNATURE in source def unpack(source): """Unpacks js code packed with MyObfuscate.com""" if not detect(source): return source payload = unquote(_filter(source)) match = re.search(r"^var _escape\='<script>(.*)<\/script>'", payload, re.DOTALL) polished = match.group(1) if match else source return CAVEAT + polished def _filter(source): """Extracts and decode payload (original file) from `source`""" try: varname = re.search(r'eval\(\w+\(\w+\((\w+)\)\)\);', source).group(1) reverse = re.search(r"var +%s *\= *'(.*)';" % varname, source).group(1) except AttributeError: raise UnpackingError('Malformed MyObfuscate data.') try: return base64.b64decode(reverse[::-1].encode('utf8')).decode('utf8') except TypeError: raise UnpackingError('MyObfuscate payload is not base64-encoded.')
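_filter above recovers the original file by taking the quoted blob, reversing it, and base64-decoding the result. The core trick in isolation (the payload string here is made up):

import base64

payload = "document.write('hello');"
# what the obfuscator ships: base64 of the source, stored reversed
shipped = base64.b64encode(payload.encode('utf8'))[::-1].decode('utf8')
# what _filter does: reverse back, then base64-decode
recovered = base64.b64decode(shipped[::-1].encode('utf8')).decode('utf8')
assert recovered == payload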
c0defreak/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/idlelib/configHelpSourceEdit.py
67
"Dialog to specify or edit the parameters for a user configured help source." import os import sys from tkinter import * import tkinter.messagebox as tkMessageBox import tkinter.filedialog as tkFileDialog class GetHelpSourceDialog(Toplevel): def __init__(self, parent, title, menuItem='', filePath=''): """Get menu entry and url/ local file location for Additional Help User selects a name for the Help resource and provides a web url or a local file as its source. The user can enter a url or browse for the file. """ Toplevel.__init__(self, parent) self.configure(borderwidth=5) self.resizable(height=FALSE, width=FALSE) self.title(title) self.transient(parent) self.grab_set() self.protocol("WM_DELETE_WINDOW", self.Cancel) self.parent = parent self.result = None self.CreateWidgets() self.menu.set(menuItem) self.path.set(filePath) self.withdraw() #hide while setting geometry #needs to be done here so that the winfo_reqwidth is valid self.update_idletasks() #centre dialog over parent: self.geometry("+%d+%d" % ((parent.winfo_rootx() + ((parent.winfo_width()/2) -(self.winfo_reqwidth()/2)), parent.winfo_rooty() + ((parent.winfo_height()/2) -(self.winfo_reqheight()/2))))) self.deiconify() #geometry set, unhide self.bind('<Return>', self.Ok) self.wait_window() def CreateWidgets(self): self.menu = StringVar(self) self.path = StringVar(self) self.fontSize = StringVar(self) self.frameMain = Frame(self, borderwidth=2, relief=GROOVE) self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH) labelMenu = Label(self.frameMain, anchor=W, justify=LEFT, text='Menu Item:') self.entryMenu = Entry(self.frameMain, textvariable=self.menu, width=30) self.entryMenu.focus_set() labelPath = Label(self.frameMain, anchor=W, justify=LEFT, text='Help File Path: Enter URL or browse for file') self.entryPath = Entry(self.frameMain, textvariable=self.path, width=40) self.entryMenu.focus_set() labelMenu.pack(anchor=W, padx=5, pady=3) self.entryMenu.pack(anchor=W, padx=5, pady=3) labelPath.pack(anchor=W, padx=5, pady=3) self.entryPath.pack(anchor=W, padx=5, pady=3) browseButton = Button(self.frameMain, text='Browse', width=8, command=self.browseFile) browseButton.pack(pady=3) frameButtons = Frame(self) frameButtons.pack(side=BOTTOM, fill=X) self.buttonOk = Button(frameButtons, text='OK', width=8, default=ACTIVE, command=self.Ok) self.buttonOk.grid(row=0, column=0, padx=5,pady=5) self.buttonCancel = Button(frameButtons, text='Cancel', width=8, command=self.Cancel) self.buttonCancel.grid(row=0, column=1, padx=5, pady=5) def browseFile(self): filetypes = [ ("HTML Files", "*.htm *.html", "TEXT"), ("PDF Files", "*.pdf", "TEXT"), ("Windows Help Files", "*.chm"), ("Text Files", "*.txt", "TEXT"), ("All Files", "*")] path = self.path.get() if path: dir, base = os.path.split(path) else: base = None if sys.platform[:3] == 'win': dir = os.path.join(os.path.dirname(sys.executable), 'Doc') if not os.path.isdir(dir): dir = os.getcwd() else: dir = os.getcwd() opendialog = tkFileDialog.Open(parent=self, filetypes=filetypes) file = opendialog.show(initialdir=dir, initialfile=base) if file: self.path.set(file) def MenuOk(self): "Simple validity check for a sensible menu item name" menuOk = True menu = self.menu.get() menu.strip() if not menu: tkMessageBox.showerror(title='Menu Item Error', message='No menu item specified', parent=self) self.entryMenu.focus_set() menuOk = False elif len(menu) > 30: tkMessageBox.showerror(title='Menu Item Error', message='Menu item too long:' '\nLimit 30 characters.', parent=self) self.entryMenu.focus_set() menuOk = False 
return menuOk def PathOk(self): "Simple validity check for menu file path" pathOk = True path = self.path.get() path.strip() if not path: #no path specified tkMessageBox.showerror(title='File Path Error', message='No help file path specified.', parent=self) self.entryPath.focus_set() pathOk = False elif path.startswith(('www.', 'http')): pass else: if path[:5] == 'file:': path = path[5:] if not os.path.exists(path): tkMessageBox.showerror(title='File Path Error', message='Help file path does not exist.', parent=self) self.entryPath.focus_set() pathOk = False return pathOk def Ok(self, event=None): if self.MenuOk() and self.PathOk(): self.result = (self.menu.get().strip(), self.path.get().strip()) if sys.platform == 'darwin': path = self.result[1] if path.startswith(('www', 'file:', 'http:')): pass else: # Mac Safari insists on using the URI form for local files self.result = list(self.result) self.result[1] = "file://" + path self.destroy() def Cancel(self, event=None): self.result = None self.destroy() if __name__ == '__main__': #test the dialog root = Tk() def run(): keySeq = '' dlg = GetHelpSourceDialog(root, 'Get Help Source') print(dlg.result) Button(root,text='Dialog', command=run).pack() root.mainloop()
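PathOk above encodes a three-way rule: web URLs pass through unchecked, a 'file:' scheme is stripped, and anything else must exist on disk. The same rule as a standalone function (help_path_ok is hypothetical; no Tk required):

import os

def help_path_ok(path):
    path = path.strip()
    if not path:
        return False                      # nothing specified
    if path.startswith(('www.', 'http')):
        return True                       # web URLs are accepted as-is
    if path[:5] == 'file:':
        path = path[5:]                   # strip the URI scheme
    return os.path.exists(path)           # local files must exist

assert help_path_ok('http://docs.python.org/')
assert not help_path_ok('')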
jiangzhuo/kbengine
refs/heads/master
kbe/src/lib/python/Doc/includes/setup.py
138
from distutils.core import setup, Extension setup(name="noddy", version="1.0", ext_modules=[ Extension("noddy", ["noddy.c"]), Extension("noddy2", ["noddy2.c"]), Extension("noddy3", ["noddy3.c"]), Extension("noddy4", ["noddy4.c"]), ])
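The setup script above declares four C extensions that distutils compiles with, e.g., python setup.py build_ext --inplace. A minimal check of what an Extension entry carries (distutils only; nothing is actually built here):

from distutils.core import Extension

# each ext_modules entry pairs a module name with its C source files
ext = Extension("noddy", ["noddy.c"])
assert ext.name == "noddy"
assert ext.sources == ["noddy.c"]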
openhealthcare/randomise.me
refs/heads/master
rm/trials/migrations/0040_auto__add_field_trial_reporting_date.py
1
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding field 'Trial.reporting_date' db.add_column(u'trials_trial', 'reporting_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False) def backwards(self, orm): # Deleting field 'Trial.reporting_date' db.delete_column(u'trials_trial', 'reporting_date') models = { u'trials.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '1'}), 'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}) }, u'trials.invitation': { 'Meta': {'object_name': 'Invitation'}, 'email': ('django.db.models.fields.EmailField', [], {'max_length': '254'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}) }, u'trials.participant': { 'Meta': {'object_name': 'Participant'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Group']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'joined': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 5, 29, 0, 0)', 'blank': 'True'}), 'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['userprofiles.RMUser']", 'null': 'True', 'blank': 'True'}) }, u'trials.report': { 'Meta': {'object_name': 'Report'}, 'binary': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}), 'count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'date': ('django.db.models.fields.DateField', [], {}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Group']", 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'participant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Participant']", 'null': 'True', 'blank': 'True'}), 'score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}), 'variable': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Variable']"}) }, u'trials.trial': { 'Meta': {'object_name': 'Trial'}, 'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 29, 0, 0)'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'group_a': ('django.db.models.fields.TextField', [], {}), 'group_a_desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'group_a_expected': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'group_b': ('django.db.models.fields.TextField', [], {}), 'group_b_desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'group_b_impressed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'instruction_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'instruction_delivery': ('django.db.models.fields.TextField', [], {'default': "'im'", 'max_length': '2'}), 'instruction_hours_after': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'is_edited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'min_participants': ('django.db.models.fields.IntegerField', [], {}), 'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['userprofiles.RMUser']"}), 'participants': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'recruiting': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'recruitment': ('django.db.models.fields.CharField', [], {'default': "'an'", 'max_length': '2'}), 'reporting_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'reporting_freq': ('django.db.models.fields.CharField', [], {'default': "'da'", 'max_length': '2'}), 'reporting_style': ('django.db.models.fields.CharField', [], {'default': "'on'", 'max_length': '2'}), 'stopped': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'trials.variable': { 'Meta': {'object_name': 'Variable'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'style': ('django.db.models.fields.CharField', [], {'max_length': '2'}), 'trial': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['trials.Trial']"}) }, u'userprofiles.rmuser': { 'Meta': {'object_name': 'RMUser'}, 'account': ('django.db.models.fields.CharField', [], {'default': "'st'", 'max_length': '2'}), 'dob': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254'}), 'gender': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'postcode': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}), 'receive_questions': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40', 'db_index': 'True'}) } } complete_apps = ['trials']
KlubJagiellonski/Politikon
refs/heads/master
bladepolska/redis_connection.py
4
from django.conf import settings from django.core import signals import redis from threading import local class _RedisConnection(object): def __init__(self, db=0): self.connection = local() self.db = getattr(settings, 'REDIS_DB', db) def connect(self): if hasattr(settings, 'REDIS_BASE_URL') and settings.REDIS_BASE_URL is not None: self.connection.r = redis.from_url(settings.REDIS_BASE_URL) else: if hasattr(settings, 'REDIS_PATH'): self.connection.r = redis.StrictRedis(unix_socket_path=getattr(settings, 'REDIS_PATH'), db=self.db) else: self.connection.r = redis.StrictRedis(host=getattr(settings, 'REDIS_HOST', 'localhost'), port=getattr(settings, 'REDIS_PORT', 6379), password=getattr(settings, 'REDIS_PASSWORD', None), db=self.db) def is_connected(self): return hasattr(self.connection, 'r') def disconnect(self, **kwargs): if self.is_connected(): self.connection.r.connection_pool.disconnect() def redis(self): if not self.is_connected(): self.connect() return self.connection.r def __enter__(self): if not self.is_connected(): self.connect() return self.connection.r def __exit__(self, type, value, traceback): pass RedisConnection = _RedisConnection() signals.request_finished.connect(RedisConnection.disconnect)
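_RedisConnection above keeps one client per thread via threading.local and connects lazily on first use; the request_finished signal then tears the connection down. The pattern stripped of Redis and Django (LazyPerThread is a hypothetical name):

from threading import local

class LazyPerThread:
    # one lazily-created resource per thread, as in _RedisConnection
    def __init__(self, factory):
        self._slot = local()
        self._factory = factory

    def get(self):
        if not hasattr(self._slot, 'value'):
            self._slot.value = self._factory()  # "connect" on first use
        return self._slot.value

conn = LazyPerThread(lambda: object())
assert conn.get() is conn.get()  # cached within the same thread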
MostafaGazar/tensorflow
refs/heads/master
tensorflow/python/ops/rnn_cell.py
16
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module for constructing RNN Cells. ## Base interface for all RNN Cells @@RNNCell ## RNN Cells for use with TensorFlow's core RNN methods @@BasicRNNCell @@BasicLSTMCell @@GRUCell @@LSTMCell ## Classes storing split `RNNCell` state @@LSTMStateTuple ## RNN Cell wrappers (RNNCells that wrap other RNNCells) @@MultiRNNCell @@DropoutWrapper @@EmbeddingWrapper @@InputProjectionWrapper @@OutputProjectionWrapper """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import clip_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import variable_scope as vs from tensorflow.python.ops.math_ops import sigmoid from tensorflow.python.ops.math_ops import tanh from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest def _state_size_with_prefix(state_size, prefix=None): """Helper function that enables int or TensorShape shape specification. This function takes a size specification, which can be an integer or a TensorShape, and converts it into a list of integers. One may specify any additional dimensions that precede the final state size specification. Args: state_size: TensorShape or int that specifies the size of a tensor. prefix: optional additional list of dimensions to prepend. Returns: result_state_size: list of dimensions the resulting tensor size. """ result_state_size = tensor_shape.as_shape(state_size).as_list() if prefix is not None: if not isinstance(prefix, list): raise TypeError("prefix of _state_size_with_prefix should be a list.") result_state_size = prefix + result_state_size return result_state_size class RNNCell(object): """Abstract object representing an RNN cell. The definition of cell in this package differs from the definition used in the literature. In the literature, cell refers to an object with a single scalar output. The definition in this package refers to a horizontal array of such units. An RNN cell, in the most abstract setting, is anything that has a state and performs some operation that takes a matrix of inputs. This operation results in an output matrix with `self.output_size` columns. If `self.state_size` is an integer, this operation also results in a new state matrix with `self.state_size` columns. If `self.state_size` is a tuple of integers, then it results in a tuple of `len(state_size)` state matrices, each with a column size corresponding to values in `state_size`. 
This module provides a number of basic commonly used RNN cells, such as LSTM (Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of operators that allow adding dropouts, projections, or embeddings for inputs. Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by calling the `rnn` ops several times. Every `RNNCell` must have the properties below and implement `__call__` with the following signature. """ def __call__(self, inputs, state, scope=None): """Run this RNN cell on inputs, starting from the given state. Args: inputs: `2-D` tensor with shape `[batch_size x input_size]`. state: if `self.state_size` is an integer, this should be a `2-D Tensor` with shape `[batch_size x self.state_size]`. Otherwise, if `self.state_size` is a tuple of integers, this should be a tuple with shapes `[batch_size x s] for s in self.state_size`. scope: VariableScope for the created subgraph; defaults to class name. Returns: A pair containing: - Output: A `2-D` tensor with shape `[batch_size x self.output_size]`. - New state: Either a single `2-D` tensor, or a tuple of tensors matching the arity and shapes of `state`. """ raise NotImplementedError("Abstract method") @property def state_size(self): """size(s) of state(s) used by this cell. It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes. """ raise NotImplementedError("Abstract method") @property def output_size(self): """Integer or TensorShape: size of outputs produced by this cell.""" raise NotImplementedError("Abstract method") def zero_state(self, batch_size, dtype): """Return zero-filled state tensor(s). Args: batch_size: int, float, or unit Tensor representing the batch size. dtype: the data type to use for the state. Returns: If `state_size` is an int or TensorShape, then the return value is a `N-D` tensor of shape `[batch_size x state_size]` filled with zeros. If `state_size` is a nested list or tuple, then the return value is a nested list or tuple (of the same structure) of `2-D` tensors with the shapes `[batch_size x s]` for each s in `state_size`. """ state_size = self.state_size if nest.is_sequence(state_size): state_size_flat = nest.flatten(state_size) zeros_flat = [ array_ops.zeros( array_ops.pack(_state_size_with_prefix(s, prefix=[batch_size])), dtype=dtype) for s in state_size_flat] for s, z in zip(state_size_flat, zeros_flat): z.set_shape(_state_size_with_prefix(s, prefix=[None])) zeros = nest.pack_sequence_as(structure=state_size, flat_sequence=zeros_flat) else: zeros_size = _state_size_with_prefix(state_size, prefix=[batch_size]) zeros = array_ops.zeros(array_ops.pack(zeros_size), dtype=dtype) zeros.set_shape(_state_size_with_prefix(state_size, prefix=[None])) return zeros class BasicRNNCell(RNNCell): """The most basic RNN cell.""" def __init__(self, num_units, input_size=None, activation=tanh): if input_size is not None: logging.warn("%s: The input_size parameter is deprecated.", self) self._num_units = num_units self._activation = activation @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def __call__(self, inputs, state, scope=None): """Most basic RNN: output = new_state = activation(W * input + U * state + B).""" with vs.variable_scope(scope or type(self).__name__): # "BasicRNNCell" output = self._activation(_linear([inputs, state], self._num_units, True)) return output, output class GRUCell(RNNCell): """Gated Recurrent Unit cell (cf.
http://arxiv.org/abs/1406.1078).""" def __init__(self, num_units, input_size=None, activation=tanh): if input_size is not None: logging.warn("%s: The input_size parameter is deprecated.", self) self._num_units = num_units self._activation = activation @property def state_size(self): return self._num_units @property def output_size(self): return self._num_units def __call__(self, inputs, state, scope=None): """Gated recurrent unit (GRU) with nunits cells.""" with vs.variable_scope(scope or type(self).__name__): # "GRUCell" with vs.variable_scope("Gates"): # Reset gate and update gate. # We start with bias of 1.0 to not reset and not update. r, u = array_ops.split(1, 2, _linear([inputs, state], 2 * self._num_units, True, 1.0)) r, u = sigmoid(r), sigmoid(u) with vs.variable_scope("Candidate"): c = self._activation(_linear([inputs, r * state], self._num_units, True)) new_h = u * state + (1 - u) * c return new_h, new_h _LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h")) class LSTMStateTuple(_LSTMStateTuple): """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state. Stores two elements: `(c, h)`, in that order. Only used when `state_is_tuple=True`. """ __slots__ = () @property def dtype(self): (c, h) = self if not c.dtype == h.dtype: raise TypeError("Inconsistent internal state: %s vs %s" % (str(c.dtype), str(h.dtype))) return c.dtype class BasicLSTMCell(RNNCell): """Basic LSTM recurrent network cell. The implementation is based on: http://arxiv.org/abs/1409.2329. We add forget_bias (default: 1) to the biases of the forget gate in order to reduce the scale of forgetting in the beginning of the training. It does not allow cell clipping, a projection layer, and does not use peep-hole connections: it is the basic baseline. For advanced models, please use the full LSTMCell that follows. """ def __init__(self, num_units, forget_bias=1.0, input_size=None, state_is_tuple=True, activation=tanh): """Initialize the basic LSTM cell. Args: num_units: int, The number of units in the LSTM cell. forget_bias: float, The bias added to forget gates (see above). input_size: Deprecated and unused. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. If False, they are concatenated along the column axis. The latter behavior will soon be deprecated. activation: Activation function of the inner states. """ if not state_is_tuple: logging.warn("%s: Using a concatenated state is slower and will soon be " "deprecated. Use state_is_tuple=True.", self) if input_size is not None: logging.warn("%s: The input_size parameter is deprecated.", self) self._num_units = num_units self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple self._activation = activation @property def state_size(self): return (LSTMStateTuple(self._num_units, self._num_units) if self._state_is_tuple else 2 * self._num_units) @property def output_size(self): return self._num_units def __call__(self, inputs, state, scope=None): """Long short-term memory cell (LSTM).""" with vs.variable_scope(scope or type(self).__name__): # "BasicLSTMCell" # Parameters of gates are concatenated into one multiply for efficiency. 
if self._state_is_tuple: c, h = state else: c, h = array_ops.split(1, 2, state) concat = _linear([inputs, h], 4 * self._num_units, True) # i = input_gate, j = new_input, f = forget_gate, o = output_gate i, j, f, o = array_ops.split(1, 4, concat) new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j)) new_h = self._activation(new_c) * sigmoid(o) if self._state_is_tuple: new_state = LSTMStateTuple(new_c, new_h) else: new_state = array_ops.concat(1, [new_c, new_h]) return new_h, new_state def _get_concat_variable(name, shape, dtype, num_shards): """Get a sharded variable concatenated into one tensor.""" sharded_variable = _get_sharded_variable(name, shape, dtype, num_shards) if len(sharded_variable) == 1: return sharded_variable[0] concat_name = name + "/concat" concat_full_name = vs.get_variable_scope().name + "/" + concat_name + ":0" for value in ops.get_collection(ops.GraphKeys.CONCATENATED_VARIABLES): if value.name == concat_full_name: return value concat_variable = array_ops.concat(0, sharded_variable, name=concat_name) ops.add_to_collection(ops.GraphKeys.CONCATENATED_VARIABLES, concat_variable) return concat_variable def _get_sharded_variable(name, shape, dtype, num_shards): """Get a list of sharded variables with the given dtype.""" if num_shards > shape[0]: raise ValueError("Too many shards: shape=%s, num_shards=%d" % (shape, num_shards)) unit_shard_size = int(math.floor(shape[0] / num_shards)) remaining_rows = shape[0] - unit_shard_size * num_shards shards = [] for i in range(num_shards): current_size = unit_shard_size if i < remaining_rows: current_size += 1 shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:], dtype=dtype)) return shards class LSTMCell(RNNCell): """Long short-term memory unit (LSTM) recurrent network cell. The default non-peephole implementation is based on: http://deeplearning.cs.cmu.edu/pdfs/Hochreiter97_lstm.pdf S. Hochreiter and J. Schmidhuber. "Long Short-Term Memory". Neural Computation, 9(8):1735-1780, 1997. The peephole implementation is based on: https://research.google.com/pubs/archive/43905.pdf Hasim Sak, Andrew Senior, and Francoise Beaufays. "Long short-term memory recurrent neural network architectures for large scale acoustic modeling." INTERSPEECH, 2014. The class uses optional peep-hole connections, optional cell clipping, and an optional projection layer. """ def __init__(self, num_units, input_size=None, use_peepholes=False, cell_clip=None, initializer=None, num_proj=None, proj_clip=None, num_unit_shards=1, num_proj_shards=1, forget_bias=1.0, state_is_tuple=True, activation=tanh): """Initialize the parameters for an LSTM cell. Args: num_units: int, The number of units in the LSTM cell input_size: Deprecated and unused. use_peepholes: bool, set True to enable diagonal/peephole connections. cell_clip: (optional) A float value, if provided the cell state is clipped by this value prior to the cell output activation. initializer: (optional) The initializer to use for the weight and projection matrices. num_proj: (optional) int, The output dimensionality for the projection matrices. If None, no projection is performed. proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is provided, then the projected values are clipped elementwise to within `[-proj_clip, proj_clip]`. num_unit_shards: How to split the weight matrix. If >1, the weight matrix is stored across num_unit_shards. num_proj_shards: How to split the projection matrix. If >1, the projection matrix is stored across num_proj_shards. 
forget_bias: Biases of the forget gate are initialized by default to 1 in order to reduce the scale of forgetting at the beginning of the training. state_is_tuple: If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`. If False, they are concatenated along the column axis. This latter behavior will soon be deprecated. activation: Activation function of the inner states. """ if not state_is_tuple: logging.warn("%s: Using a concatenated state is slower and will soon be " "deprecated. Use state_is_tuple=True.", self) if input_size is not None: logging.warn("%s: The input_size parameter is deprecated.", self) self._num_units = num_units self._use_peepholes = use_peepholes self._cell_clip = cell_clip self._initializer = initializer self._num_proj = num_proj self._proj_clip = proj_clip self._num_unit_shards = num_unit_shards self._num_proj_shards = num_proj_shards self._forget_bias = forget_bias self._state_is_tuple = state_is_tuple self._activation = activation if num_proj: self._state_size = ( LSTMStateTuple(num_units, num_proj) if state_is_tuple else num_units + num_proj) self._output_size = num_proj else: self._state_size = ( LSTMStateTuple(num_units, num_units) if state_is_tuple else 2 * num_units) self._output_size = num_units @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def __call__(self, inputs, state, scope=None): """Run one step of LSTM. Args: inputs: input Tensor, 2D, batch x num_units. state: if `state_is_tuple` is False, this must be a state Tensor, `2-D, batch x state_size`. If `state_is_tuple` is True, this must be a tuple of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`. scope: VariableScope for the created subgraph; defaults to "LSTMCell". Returns: A tuple containing: - A `2-D, [batch x output_dim]`, Tensor representing the output of the LSTM after reading `inputs` when previous state was `state`. Here output_dim is: num_proj if num_proj was set, num_units otherwise. - Tensor(s) representing the new state of LSTM after reading `inputs` when the previous state was `state`. Same type and shape(s) as `state`. Raises: ValueError: If input size cannot be inferred from inputs via static shape inference. 
""" num_proj = self._num_units if self._num_proj is None else self._num_proj if self._state_is_tuple: (c_prev, m_prev) = state else: c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units]) m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj]) dtype = inputs.dtype input_size = inputs.get_shape().with_rank(2)[1] if input_size.value is None: raise ValueError("Could not infer input size from inputs.get_shape()[-1]") with vs.variable_scope(scope or type(self).__name__, initializer=self._initializer): # "LSTMCell" concat_w = _get_concat_variable( "W", [input_size.value + num_proj, 4 * self._num_units], dtype, self._num_unit_shards) b = vs.get_variable( "B", shape=[4 * self._num_units], initializer=init_ops.zeros_initializer, dtype=dtype) # i = input_gate, j = new_input, f = forget_gate, o = output_gate cell_inputs = array_ops.concat(1, [inputs, m_prev]) lstm_matrix = nn_ops.bias_add(math_ops.matmul(cell_inputs, concat_w), b) i, j, f, o = array_ops.split(1, 4, lstm_matrix) # Diagonal connections if self._use_peepholes: w_f_diag = vs.get_variable( "W_F_diag", shape=[self._num_units], dtype=dtype) w_i_diag = vs.get_variable( "W_I_diag", shape=[self._num_units], dtype=dtype) w_o_diag = vs.get_variable( "W_O_diag", shape=[self._num_units], dtype=dtype) if self._use_peepholes: c = (sigmoid(f + self._forget_bias + w_f_diag * c_prev) * c_prev + sigmoid(i + w_i_diag * c_prev) * self._activation(j)) else: c = (sigmoid(f + self._forget_bias) * c_prev + sigmoid(i) * self._activation(j)) if self._cell_clip is not None: # pylint: disable=invalid-unary-operand-type c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip) # pylint: enable=invalid-unary-operand-type if self._use_peepholes: m = sigmoid(o + w_o_diag * c) * self._activation(c) else: m = sigmoid(o) * self._activation(c) if self._num_proj is not None: concat_w_proj = _get_concat_variable( "W_P", [self._num_units, self._num_proj], dtype, self._num_proj_shards) m = math_ops.matmul(m, concat_w_proj) if self._proj_clip is not None: # pylint: disable=invalid-unary-operand-type m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip) # pylint: enable=invalid-unary-operand-type new_state = (LSTMStateTuple(c, m) if self._state_is_tuple else array_ops.concat(1, [c, m])) return m, new_state class OutputProjectionWrapper(RNNCell): """Operator adding an output projection to the given cell. Note: in many cases it may be more efficient to not use this wrapper, but instead concatenate the whole sequence of your outputs in time, do the projection on this batch-concatenated sequence, then split it if needed or directly feed into a softmax. """ def __init__(self, cell, output_size): """Create a cell with output projection. Args: cell: an RNNCell, a projection to output_size is added to it. output_size: integer, the size of the output after projection. Raises: TypeError: if cell is not an RNNCell. ValueError: if output_size is not positive. """ if not isinstance(cell, RNNCell): raise TypeError("The parameter cell is not RNNCell.") if output_size < 1: raise ValueError("Parameter output_size must be > 0: %d." 
% output_size) self._cell = cell self._output_size = output_size @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._output_size def __call__(self, inputs, state, scope=None): """Run the cell and output projection on inputs, starting from state.""" output, res_state = self._cell(inputs, state) # Default scope: "OutputProjectionWrapper" with vs.variable_scope(scope or type(self).__name__): projected = _linear(output, self._output_size, True) return projected, res_state class InputProjectionWrapper(RNNCell): """Operator adding an input projection to the given cell. Note: in many cases it may be more efficient to not use this wrapper, but instead concatenate the whole sequence of your inputs in time, do the projection on this batch-concatenated sequence, then split it. """ def __init__(self, cell, num_proj, input_size=None): """Create a cell with input projection. Args: cell: an RNNCell, a projection of inputs is added before it. num_proj: Python integer. The dimension to project to. input_size: Deprecated and unused. Raises: TypeError: if cell is not an RNNCell. """ if input_size is not None: logging.warn("%s: The input_size parameter is deprecated.", self) if not isinstance(cell, RNNCell): raise TypeError("The parameter cell is not RNNCell.") self._cell = cell self._num_proj = num_proj @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._cell.output_size def __call__(self, inputs, state, scope=None): """Run the input projection and then the cell.""" # Default scope: "InputProjectionWrapper" with vs.variable_scope(scope or type(self).__name__): projected = _linear(inputs, self._num_proj, True) return self._cell(projected, state) class DropoutWrapper(RNNCell): """Operator adding dropout to inputs and outputs of the given cell.""" def __init__(self, cell, input_keep_prob=1.0, output_keep_prob=1.0, seed=None): """Create a cell with added input and/or output dropout. Dropout is never used on the state. Args: cell: an RNNCell, a projection to output_size is added to it. input_keep_prob: unit Tensor or float between 0 and 1, input keep probability; if it is float and 1, no input dropout will be added. output_keep_prob: unit Tensor or float between 0 and 1, output keep probability; if it is float and 1, no output dropout will be added. seed: (optional) integer, the randomness seed. Raises: TypeError: if cell is not an RNNCell. ValueError: if keep_prob is not between 0 and 1. 
""" if not isinstance(cell, RNNCell): raise TypeError("The parameter cell is not a RNNCell.") if (isinstance(input_keep_prob, float) and not (input_keep_prob >= 0.0 and input_keep_prob <= 1.0)): raise ValueError("Parameter input_keep_prob must be between 0 and 1: %d" % input_keep_prob) if (isinstance(output_keep_prob, float) and not (output_keep_prob >= 0.0 and output_keep_prob <= 1.0)): raise ValueError("Parameter output_keep_prob must be between 0 and 1: %d" % output_keep_prob) self._cell = cell self._input_keep_prob = input_keep_prob self._output_keep_prob = output_keep_prob self._seed = seed @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._cell.output_size def __call__(self, inputs, state, scope=None): """Run the cell with the declared dropouts.""" if (not isinstance(self._input_keep_prob, float) or self._input_keep_prob < 1): inputs = nn_ops.dropout(inputs, self._input_keep_prob, seed=self._seed) output, new_state = self._cell(inputs, state, scope) if (not isinstance(self._output_keep_prob, float) or self._output_keep_prob < 1): output = nn_ops.dropout(output, self._output_keep_prob, seed=self._seed) return output, new_state class EmbeddingWrapper(RNNCell): """Operator adding input embedding to the given cell. Note: in many cases it may be more efficient to not use this wrapper, but instead concatenate the whole sequence of your inputs in time, do the embedding on this batch-concatenated sequence, then split it and feed into your RNN. """ def __init__(self, cell, embedding_classes, embedding_size, initializer=None): """Create a cell with an added input embedding. Args: cell: an RNNCell, an embedding will be put before its inputs. embedding_classes: integer, how many symbols will be embedded. embedding_size: integer, the size of the vectors we embed into. initializer: an initializer to use when creating the embedding; if None, the initializer from variable scope or a default one is used. Raises: TypeError: if cell is not an RNNCell. ValueError: if embedding_classes is not positive. """ if not isinstance(cell, RNNCell): raise TypeError("The parameter cell is not RNNCell.") if embedding_classes <= 0 or embedding_size <= 0: raise ValueError("Both embedding_classes and embedding_size must be > 0: " "%d, %d." % (embedding_classes, embedding_size)) self._cell = cell self._embedding_classes = embedding_classes self._embedding_size = embedding_size self._initializer = initializer @property def state_size(self): return self._cell.state_size @property def output_size(self): return self._cell.output_size def __call__(self, inputs, state, scope=None): """Run the cell on embedded inputs.""" with vs.variable_scope(scope or type(self).__name__): # "EmbeddingWrapper" with ops.device("/cpu:0"): if self._initializer: initializer = self._initializer elif vs.get_variable_scope().initializer: initializer = vs.get_variable_scope().initializer else: # Default initializer for embeddings should have variance=1. sqrt3 = math.sqrt(3) # Uniform(-sqrt(3), sqrt(3)) has variance=1. 
initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3) if type(state) is tuple: data_type = state[0].dtype else: data_type = state.dtype embedding = vs.get_variable( "embedding", [self._embedding_classes, self._embedding_size], initializer=initializer, dtype=data_type) embedded = embedding_ops.embedding_lookup( embedding, array_ops.reshape(inputs, [-1])) return self._cell(embedded, state) class MultiRNNCell(RNNCell): """RNN cell composed sequentially of multiple simple cells.""" def __init__(self, cells, state_is_tuple=True): """Create a RNN cell composed sequentially of a number of RNNCells. Args: cells: list of RNNCells that will be composed in this order. state_is_tuple: If True, accepted and returned states are n-tuples, where `n = len(cells)`. If False, the states are all concatenated along the column axis. This latter behavior will soon be deprecated. Raises: ValueError: if cells is empty (not allowed), or at least one of the cells returns a state tuple but the flag `state_is_tuple` is `False`. """ if not cells: raise ValueError("Must specify at least one cell for MultiRNNCell.") self._cells = cells self._state_is_tuple = state_is_tuple if not state_is_tuple: if any(nest.is_sequence(c.state_size) for c in self._cells): raise ValueError("Some cells return tuples of states, but the flag " "state_is_tuple is not set. State sizes are: %s" % str([c.state_size for c in self._cells])) @property def state_size(self): if self._state_is_tuple: return tuple(cell.state_size for cell in self._cells) else: return sum([cell.state_size for cell in self._cells]) @property def output_size(self): return self._cells[-1].output_size def __call__(self, inputs, state, scope=None): """Run this multi-layer cell on inputs, starting from state.""" with vs.variable_scope(scope or type(self).__name__): # "MultiRNNCell" cur_state_pos = 0 cur_inp = inputs new_states = [] for i, cell in enumerate(self._cells): with vs.variable_scope("Cell%d" % i): if self._state_is_tuple: if not nest.is_sequence(state): raise ValueError( "Expected state to be a tuple of length %d, but received: %s" % (len(self.state_size), state)) cur_state = state[i] else: cur_state = array_ops.slice( state, [0, cur_state_pos], [-1, cell.state_size]) cur_state_pos += cell.state_size cur_inp, new_state = cell(cur_inp, cur_state) new_states.append(new_state) new_states = (tuple(new_states) if self._state_is_tuple else array_ops.concat(1, new_states)) return cur_inp, new_states class _SlimRNNCell(RNNCell): """A simple wrapper for slim.rnn_cells.""" def __init__(self, cell_fn): """Create a SlimRNNCell from a cell_fn. Args: cell_fn: a function which takes (inputs, state, scope) and produces the outputs and the new_state. Additionally when called with inputs=None and state=None it should return (initial_outputs, initial_state). Raises: TypeError: if cell_fn is not callable ValueError: if cell_fn cannot produce a valid initial state. 
""" if not callable(cell_fn): raise TypeError("cell_fn %s needs to be callable", cell_fn) self._cell_fn = cell_fn self._cell_name = cell_fn.func.__name__ init_output, init_state = self._cell_fn(None, None) output_shape = init_output.get_shape() state_shape = init_state.get_shape() self._output_size = output_shape.with_rank(2)[1].value self._state_size = state_shape.with_rank(2)[1].value if self._output_size is None: raise ValueError("Initial output created by %s has invalid shape %s" % (self._cell_name, output_shape)) if self._state_size is None: raise ValueError("Initial state created by %s has invalid shape %s" % (self._cell_name, state_shape)) @property def state_size(self): return self._state_size @property def output_size(self): return self._output_size def __call__(self, inputs, state, scope=None): scope = scope or self._cell_name output, state = self._cell_fn(inputs, state, scope=scope) return output, state def _linear(args, output_size, bias, bias_start=0.0, scope=None): """Linear map: sum_i(args[i] * W[i]), where W[i] is a variable. Args: args: a 2D Tensor or a list of 2D, batch x n, Tensors. output_size: int, second dimension of W[i]. bias: boolean, whether to add a bias term or not. bias_start: starting value to initialize the bias; 0 by default. scope: VariableScope for the created subgraph; defaults to "Linear". Returns: A 2D Tensor with shape [batch x output_size] equal to sum_i(args[i] * W[i]), where W[i]s are newly created matrices. Raises: ValueError: if some of the arguments has unspecified or wrong shape. """ if args is None or (nest.is_sequence(args) and not args): raise ValueError("`args` must be specified") if not nest.is_sequence(args): args = [args] # Calculate the total size of arguments on dimension 1. total_arg_size = 0 shapes = [a.get_shape().as_list() for a in args] for shape in shapes: if len(shape) != 2: raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes)) if not shape[1]: raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes)) else: total_arg_size += shape[1] dtype = [a.dtype for a in args][0] # Now the computation. with vs.variable_scope(scope or "Linear"): matrix = vs.get_variable( "Matrix", [total_arg_size, output_size], dtype=dtype) if len(args) == 1: res = math_ops.matmul(args[0], matrix) else: res = math_ops.matmul(array_ops.concat(1, args), matrix) if not bias: return res bias_term = vs.get_variable( "Bias", [output_size], dtype=dtype, initializer=init_ops.constant_initializer( bias_start, dtype=dtype)) return res + bias_term
cnanders/test-cxro-tickets
refs/heads/master
locations/views.py
1
from django.http import HttpResponseRedirect from django.shortcuts import render, get_object_or_404 from django.core.urlresolvers import reverse from django.views.decorators.http import require_POST from django.contrib.auth.decorators import login_required from locations.models import * from extra.shortcuts import auth_or_403 from django.contrib import messages @login_required def get(request, id=None): if id: model_set = Location.objects.filter(parent_id__exact=id) else: model_set = Location.objects.filter(parent_id__exact=None) return render(request, 'locations/get.html', {'model_set': model_set,}) @login_required @require_POST def post(request, id=None): if id: model = get_object_or_404(Location, pk=id) url = reverse('locations.views.post', args=[id]) else: model = None url = reverse('locations.views.post') form = LocationForm(request.POST) if form.is_valid(): location = form.save(commit=False) location.parent = model location.save() messages.success(request, "A new location has been created.") return HttpResponseRedirect(reverse('locations.views.get')) else: return render(request, 'locations/new.html', {'form': form, 'model': model, 'url': url}) @login_required() def new(request, id=None): if id: model = get_object_or_404(Location, pk=id) url = reverse('locations.views.post', args=[id]) else: model = None url = reverse('locations.views.post') form = LocationForm() return render(request, 'locations/new.html', {'form': form, 'model': model, 'url': url,}) @login_required() @require_POST def put(request, id): model = get_object_or_404(Location, pk=id) #auth_or_403(request, 'locations.change_location', model) form = LocationForm(request.POST, instance=model) if form.is_valid(): form.save() #location = form.save(commit=False) #location.parent = model #location.save() messages.success(request, "The location has been updated.") return HttpResponseRedirect(reverse('locations.views.get')) else: return render(request, 'locations/edit.html', {'form': form, 'model': model, }) @login_required() def edit(request, id): model = get_object_or_404(Location, pk=id) #auth_or_403(request, 'locations.change_location', model) form = LocationForm(instance=model) return render(request, 'locations/edit.html', {'form': form, 'model': model}) @login_required() @require_POST def delete(request, id): model = get_object_or_404(Location, pk=id) #auth_or_403(request, 'locations.delete_location', model) model.delete() messages.success(request, "The location has been deleted.") return HttpResponseRedirect(reverse('locations.views.get'))
WimpyAnalytics/django-markitup
refs/heads/master
tests/project/urls.py
1
from django.conf.urls import patterns, url, include from django.views.generic.base import TemplateView from .forms import DemoForm urlpatterns = patterns( '', url( r'^$', TemplateView.as_view(template_name='demo.html'), {'form': DemoForm()}, name='demo', ), url(r'^markitup/', include('markitup.urls')), )
ucl-exoplanets/pylightcurve
refs/heads/master
pylightcurve/analysis/optimisation.py
1
__all__ = ['EmceeFitting', 'values_to_print'] import emcee import numpy as np import warnings from pylightcurve.errors import * from pylightcurve.processes.counter import Counter from pylightcurve.processes.files import save_dict from pylightcurve.plots.plots_fitting import plot_mcmc_corner, plot_mcmc_traces, plot_mcmc_fitting from pylightcurve.analysis.distributions import one_d_distribution # emcee class EmceeFitting: def __init__(self, input_data_x, input_data_y, input_data_y_unc, model, initials, limits1, limits2, walkers, iterations, burn_in, data_x_name='x', data_y_name='y', data_x_print_name='x', data_y_print_name='y', parameters_names=None, parameters_print_names=None, counter='MCMC', strech_prior=1000.0, function_to_call=None): self.input_data_x = input_data_x self.input_data_y = input_data_y self.input_data_y_unc = input_data_y_unc self.data_x_name = data_x_name self.data_y_name = data_y_name self.data_x_print_name = data_x_print_name self.data_y_print_name = data_y_print_name self.model = model self.initials = np.array(initials) self.limits1 = np.array(limits1) self.limits2 = np.array(limits2) self.walkers = int(walkers) self.iterations_per_walker = int(int(iterations) / walkers) self.iterations = self.iterations_per_walker * walkers self.burn_in = int(burn_in) self.names = ['p{0}'.format(ff) for ff in range(len(initials))] if parameters_names: self.names = parameters_names self.print_names = ['p{0}'.format(ff) for ff in range(len(initials))] if parameters_print_names: self.print_names = parameters_print_names self.counter = Counter(counter, self.iterations + self.walkers, 100) self.strech_prior = strech_prior self.results = { 'mcmc': { 'iterations': self.iterations, 'walkers': self.walkers, 'burn_in': self.burn_in, }, 'input_series': {}, 'parameters': {}, 'parameters_final': [], 'output_series': {}, 'statistics': {}} self.fitted_parameters = [] self.mcmc_run_complete = False self.function_to_call = function_to_call def run_mcmc(self): fitted_parameters_indices = np.where(~np.isnan(self.limits1 * self.limits2))[0] dimensions = len(fitted_parameters_indices) internal_limits1 = self.limits1[fitted_parameters_indices] internal_limits2 = self.limits2[fitted_parameters_indices] internal_initials = self.initials[fitted_parameters_indices] walkers_initial_positions = np.random.uniform( (internal_initials - (internal_initials - internal_limits1) / self.strech_prior)[:, None] * np.ones(self.walkers), (internal_initials + (internal_limits2 - internal_initials) / self.strech_prior)[:, None] * np.ones(self.walkers)) walkers_initial_positions = np.swapaxes(walkers_initial_positions, 0, 1) def internal_model(theta): parameters = self.initials parameters[fitted_parameters_indices] = theta return self.model(self.input_data_x, *parameters) def likelihood(theta): if ((internal_limits1 < theta) & (theta < internal_limits2)).all(): chi = (self.input_data_y - internal_model(theta)) / self.input_data_y_unc return -0.5 * (np.sum(chi * chi) + np.sum(np.log(2.0 * np.pi * (self.input_data_y_unc * self.input_data_y_unc)))) else: return -np.inf def prior(theta): if ((internal_limits1 < theta) & (theta < internal_limits2)).all(): return 0.0 return -np.inf # probability def probability_core(theta): return prior(theta) + likelihood(theta) probability_core_function = probability_core if self.function_to_call: def probability_core_function(theta): self.function_to_call() return probability_core(theta) def probability(theta): self.counter.update() return probability_core_function(theta) # run sampler sampler = 
emcee.EnsembleSampler(self.walkers, dimensions, probability) sampler.run_mcmc(walkers_initial_positions, int(self.iterations) // int(self.walkers)) mcmc_results = sampler.flatchain self.results['input_series'][self.data_x_name] = self.input_data_x self.results['input_series'][self.data_y_name] = self.input_data_y self.results['input_series']['{0}_unc'.format(self.data_y_name)] = self.input_data_y_unc self.results['input_series_x'] = self.data_x_name self.results['input_series_x_print'] = self.data_x_print_name self.results['input_series_y'] = self.data_y_name self.results['input_series_y_print'] = self.data_y_print_name self.results['input_series_y_unc'] = '{0}_unc'.format(self.data_y_name) trace_to_analyse = 0 vars_check = 0 for var in range(len(self.names)): if not np.isnan(self.limits1[var]): trace = mcmc_results[:, np.where(fitted_parameters_indices == var)[0][0]] trace = trace.reshape(int(self.walkers), int(self.iterations) // int(self.walkers)) trace = (np.swapaxes(trace, 0, 1).flatten())[self.burn_in:] median = np.median(trace) mad = np.sqrt(np.median((trace - median) ** 2)) trace_to_analyse += (trace > (median - 5 * mad)) * (trace < (median + 5 * mad)) vars_check += 1 trace_to_analyse = np.where(trace_to_analyse == vars_check) for var in range(len(self.names)): if np.isnan(self.limits1[var]): variable = {'name': self.names[var], 'print_name': self.print_names[var], 'initial': None, 'min_allowed': None, 'max_allowed': None, 'trace': None, 'trace_bins': None, 'trace_counts': None, 'value': self.initials[var], 'm_error': None, 'p_error': None, 'print_value': self.initials[var], 'print_m_error': '-', 'print_p_error': '-'} else: trace = mcmc_results[:, np.where(fitted_parameters_indices == var)[0][0]] trace = trace.reshape(int(self.walkers), int(self.iterations) // int(self.walkers)) trace = (np.swapaxes(trace, 0, 1).flatten())[self.burn_in:] trace = trace[trace_to_analyse] bins, counts, value, m_error, p_error = \ one_d_distribution(trace, confidence_interval=0.68) print_value, print_m_error, print_p_error = values_to_print(value, m_error, p_error) variable = {'name': self.names[var], 'print_name': self.print_names[var], 'initial': self.initials[var], 'min_allowed': self.limits1[var], 'max_allowed': self.limits2[var], 'trace': trace, 'trace_bins': bins, 'trace_counts': counts, 'value': value, 'm_error': m_error, 'p_error': p_error, 'print_value': print_value, 'print_m_error': print_m_error, 'print_p_error': print_p_error} self.fitted_parameters.append(self.names[var]) self.results['parameters'][self.names[var]] = variable self.results['parameters_final'].append(variable['value']) self.results['output_series']['model'] = self.model(self.input_data_x, *self.results['parameters_final']) self.results['output_series']['residuals'] = self.input_data_y - self.results['output_series']['model'] to_correlate = [] for parameter in self.fitted_parameters: to_correlate.append(self.results['parameters'][parameter]['trace']) correlation_matrix = np.corrcoef(to_correlate) res_autocorr = np.correlate(self.results['output_series']['residuals'], self.results['output_series']['residuals'], mode='full') res_autocorr = res_autocorr[res_autocorr.size // 2:] / res_autocorr[res_autocorr.size // 2:][0] self.results['statistics']['res_autocorr'] = res_autocorr self.results['statistics']['res_max_autocorr'] = np.max(res_autocorr[1:]) self.results['statistics']['res_mean'] = np.mean(self.results['output_series']['residuals']) self.results['statistics']['res_std'] = 
np.std(self.results['output_series']['residuals']) self.results['statistics']['res_rms'] = np.sqrt(np.mean(self.results['output_series']['residuals']**2)) self.results['statistics']['res_chi_sqr'] = np.sum( (self.results['output_series']['residuals'] ** 2) / (self.input_data_y_unc ** 2)) self.results['statistics']['res_red_chi_sqr'] = ( self.results['statistics']['res_chi_sqr'] / (len(self.input_data_y_unc) - len(self.fitted_parameters))) self.results['statistics']['corr_matrix'] = correlation_matrix self.results['statistics']['corr_variables'] = ','.join(self.fitted_parameters) self.mcmc_run_complete = True def save_all(self, export_file): if not self.mcmc_run_complete: raise PyLCProcessError('MCMC not completed') save_dict(self.results, export_file) def save_results(self, export_file): if not self.mcmc_run_complete: raise PyLCProcessError('MCMC not completed') w = open(export_file, 'w') w.write('# variable\tresult\tuncertainty\n') for i in self.names: w.write('{0}\t{1}\t-{2} +{3}\n'.format(self.results['parameters'][i]['name'], self.results['parameters'][i]['print_value'], self.results['parameters'][i]['print_m_error'], self.results['parameters'][i]['print_p_error'])) w.write('\n#Residuals:\n') w.write('#Mean: {0}\n'.format(self.results['statistics']['res_mean'])) w.write('#STD: {0}\n'.format(self.results['statistics']['res_std'])) w.write('#RMS: {0}\n'.format(self.results['statistics']['res_rms'])) w.write('#Max auto-correlation: {0}\n'.format(self.results['statistics']['res_max_autocorr'])) w.write('#Chi squared: {0}\n'.format(self.results['statistics']['res_chi_sqr'])) w.write('#Reduced chi squared: {0}\n'.format(self.results['statistics']['res_red_chi_sqr'])) w.close() def plot_fitting(self, export_file): plot_mcmc_fitting(self, export_file) def plot_corner(self, export_file): plot_mcmc_corner(self, export_file) def plot_traces(self, export_file): plot_mcmc_traces(self, export_file) # decimal points and rounding def values_to_print(value, error_minus, error_plus): value = float(value) error_minus = float(error_minus) error_plus = float(error_plus) with warnings.catch_warnings(): warnings.simplefilter("ignore") if error_minus >= 1.0 or error_minus == 0.0: digit1 = 1 else: str_error_minus = '{0:.{test}f}'.format(error_minus, test=10 + abs(int(np.log10(error_minus)))) digit1 = np.where([ff not in ['0', '.'] for ff in str_error_minus])[0][0] - 1 if error_plus >= 1.0 or error_plus == 0.0: digit2 = 1 else: str_error_plus = '{0:.{test}f}'.format(error_plus, test=10 + abs(int(np.log10(error_plus)))) digit2 = np.where([ff not in ['0', '.'] for ff in str_error_plus])[0][0] - 1 width = max(1, digit1, digit2) print_m_error = '{0:.{width}f}'.format(round(error_minus, width), width=width) print_p_error = '{0:.{width}f}'.format(round(error_plus, width), width=width) if print_m_error[-1] in ['1', '2'] and float(print_m_error[:-1]) == 0: width += 1 elif print_p_error[-1] in ['1', '2'] and float(print_p_error[:-1]) == 0: width += 1 if error_plus >= 1.0 and error_minus >= 1.0: width = 1 print_value = '{0:.{width}f}'.format(round(value, width), width=width) print_m_error = '{0:.{width}f}'.format(round(error_minus, width), width=width) print_p_error = '{0:.{width}f}'.format(round(error_plus, width), width=width) return print_value, print_m_error, print_p_error
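
# A hedged usage sketch, not part of the original file: fitting a straight
# line to fake data with EmceeFitting above. The data, bounds, and output
# file name are made up for illustration; a NaN in limits1/limits2 would mark
# the corresponding parameter as fixed, as handled in run_mcmc().
if __name__ == '__main__':

    def line(x_array, slope, offset):
        return slope * x_array + offset

    x_data = np.arange(0.0, 10.0, 0.1)
    y_data = line(x_data, 2.0, 1.0) + np.random.normal(0.0, 0.5, len(x_data))
    y_unc = np.ones_like(x_data) * 0.5

    fitting = EmceeFitting(
        x_data, y_data, y_unc, line,
        initials=[2.0, 1.0],
        limits1=[0.0, -10.0],    # lower bounds per parameter
        limits2=[10.0, 10.0],    # upper bounds per parameter
        walkers=50, iterations=10000, burn_in=1000,
        parameters_names=['slope', 'offset'],
        parameters_print_names=['slope', 'offset'])

    fitting.run_mcmc()
    fitting.save_results('line_fit_results.txt')  # placeholder file name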
orgito/ansible
refs/heads/devel
lib/ansible/modules/cloud/amazon/redshift_subnet_group.py
12
#!/usr/bin/python

# Copyright 2014 Jens Carl, Hothead Games Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
author:
  - "Jens Carl (@j-carl), Hothead Games Inc."
module: redshift_subnet_group
version_added: "2.2"
short_description: manage Redshift cluster subnet groups
description:
  - Creates, modifies, and deletes Redshift cluster subnet groups.
options:
  state:
    description:
      - Specifies whether the subnet group should be present or absent.
    default: 'present'
    choices: ['present', 'absent']
  group_name:
    description:
      - Cluster subnet group name.
    required: true
    aliases: ['name']
  group_description:
    description:
      - Database subnet group description.
    aliases: ['description']
  group_subnets:
    description:
      - List of subnet IDs that make up the cluster subnet group.
    aliases: ['subnets']
requirements: [ 'boto' ]
extends_documentation_fragment:
  - aws
  - ec2
'''

EXAMPLES = '''
# Create a Redshift subnet group
- local_action:
    module: redshift_subnet_group
    state: present
    group_name: redshift-subnet
    group_description: Redshift subnet
    group_subnets:
      - 'subnet-aaaaa'
      - 'subnet-bbbbb'

# Remove subnet group
- redshift_subnet_group:
    state: absent
    group_name: redshift-subnet
'''

RETURN = '''
group:
    description: dictionary containing all Redshift subnet group information
    returned: success
    type: complex
    contains:
        name:
            description: name of the Redshift subnet group
            returned: success
            type: str
            sample: "redshift_subnet_group_name"
        vpc_id:
            description: Id of the VPC where the subnet is located
            returned: success
            type: str
            sample: "vpc-aabb1122"
'''

try:
    import boto
    import boto.redshift
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, connect_to_aws, ec2_argument_spec, get_aws_connection_info


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        state=dict(required=True, choices=['present', 'absent']),
        group_name=dict(required=True, aliases=['name']),
        group_description=dict(required=False, aliases=['description']),
        group_subnets=dict(required=False, aliases=['subnets'], type='list'),
    ))
    module = AnsibleModule(argument_spec=argument_spec)

    if not HAS_BOTO:
        module.fail_json(msg='boto v2.9.0+ required for this module')

    state = module.params.get('state')
    group_name = module.params.get('group_name')
    group_description = module.params.get('group_description')
    group_subnets = module.params.get('group_subnets')

    if state == 'present':
        for required in ('group_name', 'group_description', 'group_subnets'):
            if not module.params.get(required):
                module.fail_json(msg=str("parameter %s required for state='present'" % required))
    else:
        for not_allowed in ('group_description', 'group_subnets'):
            if module.params.get(not_allowed):
                module.fail_json(msg=str("parameter %s not allowed for state='absent'" % not_allowed))

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    if not region:
        module.fail_json(msg=str("region not specified and unable to determine region from EC2_REGION."))

    # Connect to the Redshift endpoint.
try: conn = connect_to_aws(boto.redshift, region, **aws_connect_params) except boto.exception.JSONResponseError as e: module.fail_json(msg=str(e)) try: changed = False exists = False group = None try: matching_groups = conn.describe_cluster_subnet_groups(group_name, max_records=100) exists = len(matching_groups) > 0 except boto.exception.JSONResponseError as e: if e.body['Error']['Code'] != 'ClusterSubnetGroupNotFoundFault': # if e.code != 'ClusterSubnetGroupNotFoundFault': module.fail_json(msg=str(e)) if state == 'absent': if exists: conn.delete_cluster_subnet_group(group_name) changed = True else: if not exists: new_group = conn.create_cluster_subnet_group(group_name, group_description, group_subnets) group = { 'name': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult'] ['ClusterSubnetGroup']['ClusterSubnetGroupName'], 'vpc_id': new_group['CreateClusterSubnetGroupResponse']['CreateClusterSubnetGroupResult'] ['ClusterSubnetGroup']['VpcId'], } else: changed_group = conn.modify_cluster_subnet_group(group_name, group_subnets, description=group_description) group = { 'name': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult'] ['ClusterSubnetGroup']['ClusterSubnetGroupName'], 'vpc_id': changed_group['ModifyClusterSubnetGroupResponse']['ModifyClusterSubnetGroupResult'] ['ClusterSubnetGroup']['VpcId'], } changed = True except boto.exception.JSONResponseError as e: module.fail_json(msg=str(e)) module.exit_json(changed=changed, group=group) if __name__ == '__main__': main()
malayaleecoder/servo
refs/heads/master
tests/wpt/web-platform-tests/tools/html5lib/html5lib/treewalkers/_base.py
658
from __future__ import absolute_import, division, unicode_literals from six import text_type, string_types import gettext _ = gettext.gettext from xml.dom import Node DOCUMENT = Node.DOCUMENT_NODE DOCTYPE = Node.DOCUMENT_TYPE_NODE TEXT = Node.TEXT_NODE ELEMENT = Node.ELEMENT_NODE COMMENT = Node.COMMENT_NODE ENTITY = Node.ENTITY_NODE UNKNOWN = "<#UNKNOWN#>" from ..constants import voidElements, spaceCharacters spaceCharacters = "".join(spaceCharacters) def to_text(s, blank_if_none=True): """Wrapper around six.text_type to convert None to empty string""" if s is None: if blank_if_none: return "" else: return None elif isinstance(s, text_type): return s else: return text_type(s) def is_text_or_none(string): """Wrapper around isinstance(string_types) or is None""" return string is None or isinstance(string, string_types) class TreeWalker(object): def __init__(self, tree): self.tree = tree def __iter__(self): raise NotImplementedError def error(self, msg): return {"type": "SerializeError", "data": msg} def emptyTag(self, namespace, name, attrs, hasChildren=False): assert namespace is None or isinstance(namespace, string_types), type(namespace) assert isinstance(name, string_types), type(name) assert all((namespace is None or isinstance(namespace, string_types)) and isinstance(name, string_types) and isinstance(value, string_types) for (namespace, name), value in attrs.items()) yield {"type": "EmptyTag", "name": to_text(name, False), "namespace": to_text(namespace), "data": attrs} if hasChildren: yield self.error(_("Void element has children")) def startTag(self, namespace, name, attrs): assert namespace is None or isinstance(namespace, string_types), type(namespace) assert isinstance(name, string_types), type(name) assert all((namespace is None or isinstance(namespace, string_types)) and isinstance(name, string_types) and isinstance(value, string_types) for (namespace, name), value in attrs.items()) return {"type": "StartTag", "name": text_type(name), "namespace": to_text(namespace), "data": dict(((to_text(namespace, False), to_text(name)), to_text(value, False)) for (namespace, name), value in attrs.items())} def endTag(self, namespace, name): assert namespace is None or isinstance(namespace, string_types), type(namespace) assert isinstance(name, string_types), type(namespace) return {"type": "EndTag", "name": to_text(name, False), "namespace": to_text(namespace), "data": {}} def text(self, data): assert isinstance(data, string_types), type(data) data = to_text(data) middle = data.lstrip(spaceCharacters) left = data[:len(data) - len(middle)] if left: yield {"type": "SpaceCharacters", "data": left} data = middle middle = data.rstrip(spaceCharacters) right = data[len(middle):] if middle: yield {"type": "Characters", "data": middle} if right: yield {"type": "SpaceCharacters", "data": right} def comment(self, data): assert isinstance(data, string_types), type(data) return {"type": "Comment", "data": text_type(data)} def doctype(self, name, publicId=None, systemId=None, correct=True): assert is_text_or_none(name), type(name) assert is_text_or_none(publicId), type(publicId) assert is_text_or_none(systemId), type(systemId) return {"type": "Doctype", "name": to_text(name), "publicId": to_text(publicId), "systemId": to_text(systemId), "correct": to_text(correct)} def entity(self, name): assert isinstance(name, string_types), type(name) return {"type": "Entity", "name": text_type(name)} def unknown(self, nodeType): return self.error(_("Unknown node type: ") + nodeType) class 
NonRecursiveTreeWalker(TreeWalker): def getNodeDetails(self, node): raise NotImplementedError def getFirstChild(self, node): raise NotImplementedError def getNextSibling(self, node): raise NotImplementedError def getParentNode(self, node): raise NotImplementedError def __iter__(self): currentNode = self.tree while currentNode is not None: details = self.getNodeDetails(currentNode) type, details = details[0], details[1:] hasChildren = False if type == DOCTYPE: yield self.doctype(*details) elif type == TEXT: for token in self.text(*details): yield token elif type == ELEMENT: namespace, name, attributes, hasChildren = details if name in voidElements: for token in self.emptyTag(namespace, name, attributes, hasChildren): yield token hasChildren = False else: yield self.startTag(namespace, name, attributes) elif type == COMMENT: yield self.comment(details[0]) elif type == ENTITY: yield self.entity(details[0]) elif type == DOCUMENT: hasChildren = True else: yield self.unknown(details[0]) if hasChildren: firstChild = self.getFirstChild(currentNode) else: firstChild = None if firstChild is not None: currentNode = firstChild else: while currentNode is not None: details = self.getNodeDetails(currentNode) type, details = details[0], details[1:] if type == ELEMENT: namespace, name, attributes, hasChildren = details if name not in voidElements: yield self.endTag(namespace, name) if self.tree is currentNode: currentNode = None break nextSibling = self.getNextSibling(currentNode) if nextSibling is not None: currentNode = nextSibling break else: currentNode = self.getParentNode(currentNode)
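
# A hedged sketch, not part of the original file: a minimal
# NonRecursiveTreeWalker subclass over a toy node type, illustrating the four
# navigation hooks and the token dicts produced by __iter__ above.
class _ToyNode(object):
    def __init__(self, name, attrs=None, children=None):
        self.name = name
        self.attrs = attrs or {}
        self.children = children or []
        self.parent = None
        for child in self.children:
            child.parent = self


class _ToyTreeWalker(NonRecursiveTreeWalker):
    def getNodeDetails(self, node):
        if node.name == "#document":
            return (DOCUMENT,)
        # (type, namespace, name, attributes, hasChildren)
        return (ELEMENT, None, node.name, node.attrs, bool(node.children))

    def getFirstChild(self, node):
        return node.children[0] if node.children else None

    def getNextSibling(self, node):
        if node.parent is None:
            return None
        siblings = node.parent.children
        index = siblings.index(node)
        return siblings[index + 1] if index + 1 < len(siblings) else None

    def getParentNode(self, node):
        return node.parent


# Walking <div id="x"><br/></div> yields a StartTag token, then an EmptyTag
# token (br is in voidElements), then an EndTag token:
#
#   doc = _ToyNode("#document", children=[
#       _ToyNode("div", {(None, "id"): "x"}, [_ToyNode("br")])])
#   for token in _ToyTreeWalker(doc):
#       print(token)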
spitfire88/upm
refs/heads/master
examples/python/grovecircularled.py
7
#!/usr/bin/python # # Author: Jon Trulson <[email protected]> # Copyright (c) 2015 Intel Corporation. # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from __future__ import print_function import time, sys, signal, atexit from upm import pyupm_my9221 as upmGroveCircularLED def main(): # Exit handlers def SIGINTHandler(signum, frame): raise SystemExit def exitHandler(): circle.setLevel(0, True) print("Exiting") sys.exit(0) # This function lets you run code on exit atexit.register(exitHandler) # This function stops python from printing a stacktrace when you hit control-C signal.signal(signal.SIGINT, SIGINTHandler) # Instantiate a Grove Circular LED on gpio pins 9 and 8 circle = upmGroveCircularLED.GroveCircularLED(9, 8) level = 0 while(1): circle.setSpinner(level) level = (level + 1) % 24 time.sleep(.1) if __name__ == '__main__': main()
bkirui/odoo
refs/heads/8.0
addons/l10n_fr_rib/bank.py
335
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2011 Numérigraphe SARL.
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp.tools.translate import _


class res_partner_bank(osv.osv):
    """Add fields and behavior for French RIB"""
    _inherit = "res.partner.bank"

    def _check_key(self, cr, uid, ids):
        """Check the RIB key"""
        for bank_acc in self.browse(cr, uid, ids):
            # Ignore the accounts of type other than rib
            if bank_acc.state != 'rib':
                continue
            # Fail if the needed values are empty or too short
            if (not bank_acc.bank_code
                    or len(bank_acc.bank_code) != 5
                    or not bank_acc.office
                    or len(bank_acc.office) != 5
                    or not bank_acc.rib_acc_number
                    or len(bank_acc.rib_acc_number) != 11
                    or not bank_acc.key
                    or len(bank_acc.key) != 2):
                return False
            # Get the rib data (without the key)
            rib = "%s%s%s" % (bank_acc.bank_code, bank_acc.office,
                              bank_acc.rib_acc_number)
            # Translate letters into numbers according to a specific table
            # (notice how s -> 2)
            table = dict((ord(a), b) for a, b in zip(
                u'abcdefghijklmnopqrstuvwxyz',
                u'12345678912345678923456789'))
            rib = rib.lower().translate(table)
            # Compute the key (a standalone sketch of this computation is
            # given at the end of this file)
            key = 97 - (100 * int(rib)) % 97
            if int(bank_acc.key) != key:
                raise osv.except_osv(_('Error!'),
                    _("The RIB key %s does not correspond to the other "
                      "codes: %s %s %s.") %
                    (bank_acc.key, bank_acc.bank_code,
                     bank_acc.office, bank_acc.rib_acc_number))
            if bank_acc.acc_number:
                if not self.is_iban_valid(cr, uid, bank_acc.acc_number):
                    raise osv.except_osv(_('Error!'),
                        _("The IBAN %s is not valid.") % bank_acc.acc_number)
        return True

    def onchange_bank_id(self, cr, uid, ids, bank_id, context=None):
        """Change the bank code"""
        result = super(res_partner_bank, self).onchange_bank_id(
            cr, uid, ids, bank_id, context=context)
        if bank_id:
            value = result.setdefault('value', {})
            bank = self.pool.get('res.bank').browse(
                cr, uid, bank_id, context=context)
            value['bank_code'] = bank.rib_code
        return result

    _columns = {
        'acc_number': fields.char('Account Number', size=64, required=False),
        'rib_acc_number': fields.char('RIB account number', size=11,
                                      readonly=True,),
        'bank_code': fields.char('Bank Code', size=64, readonly=True,),
        'office': fields.char('Office Code', size=5, readonly=True,),
        'key': fields.char('Key', size=2, readonly=True,
                           help="The key is a number used to check the "
                                "correctness of the other codes."),
    }

    _constraints = [(_check_key,
                     'The RIB and/or IBAN is not valid',
                     ['rib_acc_number', 'bank_code', 'office', 'key'])]


class res_bank(osv.osv):
    """Add the bank code to make it easier to enter RIB data"""
    _inherit = 'res.bank'

    def name_search(self, cr, user, name, args=None, operator='ilike',
                    context=None, limit=80):
        """Search by bank code in addition to the standard search"""
        # Get the standard results
        results = super(res_bank, self).name_search(
            cr, user, name, args=args, operator=operator,
            context=context, limit=limit)
        # Get additional results using the RIB code
        ids = self.search(cr, user, [('rib_code', operator, name)],
                          limit=limit, context=context)
        # Merge the results
        results = list(set(results + self.name_get(cr, user, ids, context)))
        return results

    _columns = {
        'rib_code': fields.char('RIB Bank Code'),
    }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
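
# A standalone sketch of the RIB key computation in _check_key above
# (referenced from the comment there). Not part of the original module; the
# sample codes below are made up purely for illustration.
def _rib_key(bank_code, office, account):
    # Letters map to digits as in _check_key (a->1 ... i->9, j->1 ... z->9)
    table = dict((ord(a), b) for a, b in zip(
        u'abcdefghijklmnopqrstuvwxyz', u'12345678912345678923456789'))
    rib = (u"%s%s%s" % (bank_code, office, account)).lower().translate(table)
    return 97 - (100 * int(rib)) % 97

# Example (made-up codes): _rib_key(u'30002', u'00550', u'0000157841z')
# returns the key that int() of the account's 'key' field must equal.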
theguardian/headphones
refs/heads/master
lib/mutagen/easymp4.py
28
# -*- coding: utf-8 -*- # Copyright (C) 2009 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of version 2 of the GNU General Public License as # published by the Free Software Foundation. from mutagen import Metadata from mutagen._util import DictMixin, dict_match from mutagen.mp4 import MP4, MP4Tags, error, delete from ._compat import PY2, text_type, PY3 __all__ = ["EasyMP4Tags", "EasyMP4", "delete", "error"] class EasyMP4KeyError(error, KeyError, ValueError): pass class EasyMP4Tags(DictMixin, Metadata): """A file with MPEG-4 iTunes metadata. Like Vorbis comments, EasyMP4Tags keys are case-insensitive ASCII strings, and values are a list of Unicode strings (and these lists are always of length 0 or 1). If you need access to the full MP4 metadata feature set, you should use MP4, not EasyMP4. """ Set = {} Get = {} Delete = {} List = {} def __init__(self, *args, **kwargs): self.__mp4 = MP4Tags(*args, **kwargs) self.load = self.__mp4.load self.save = self.__mp4.save self.delete = self.__mp4.delete filename = property(lambda s: s.__mp4.filename, lambda s, fn: setattr(s.__mp4, 'filename', fn)) @classmethod def RegisterKey(cls, key, getter=None, setter=None, deleter=None, lister=None): """Register a new key mapping. A key mapping is four functions, a getter, setter, deleter, and lister. The key may be either a string or a glob pattern. The getter, deleted, and lister receive an MP4Tags instance and the requested key name. The setter also receives the desired value, which will be a list of strings. The getter, setter, and deleter are used to implement __getitem__, __setitem__, and __delitem__. The lister is used to implement keys(). It should return a list of keys that are actually in the MP4 instance, provided by its associated getter. """ key = key.lower() if getter is not None: cls.Get[key] = getter if setter is not None: cls.Set[key] = setter if deleter is not None: cls.Delete[key] = deleter if lister is not None: cls.List[key] = lister @classmethod def RegisterTextKey(cls, key, atomid): """Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 atom name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterTextKey("artist", "\xa9ART") """ def getter(tags, key): return tags[atomid] def setter(tags, key, value): tags[atomid] = value def deleter(tags, key): del(tags[atomid]) cls.RegisterKey(key, getter, setter, deleter) @classmethod def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1): """Register a scalar integer key. 
""" def getter(tags, key): return list(map(text_type, tags[atomid])) def setter(tags, key, value): clamp = lambda x: int(min(max(min_value, x), max_value)) tags[atomid] = [clamp(v) for v in map(int, value)] def deleter(tags, key): del(tags[atomid]) cls.RegisterKey(key, getter, setter, deleter) @classmethod def RegisterIntPairKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1): def getter(tags, key): ret = [] for (track, total) in tags[atomid]: if total: ret.append(u"%d/%d" % (track, total)) else: ret.append(text_type(track)) return ret def setter(tags, key, value): clamp = lambda x: int(min(max(min_value, x), max_value)) data = [] for v in value: try: tracks, total = v.split("/") tracks = clamp(int(tracks)) total = clamp(int(total)) except (ValueError, TypeError): tracks = clamp(int(v)) total = min_value data.append((tracks, total)) tags[atomid] = data def deleter(tags, key): del(tags[atomid]) cls.RegisterKey(key, getter, setter, deleter) @classmethod def RegisterFreeformKey(cls, key, name, mean="com.apple.iTunes"): """Register a text key. If the key you need to register is a simple one-to-one mapping of MP4 freeform atom (----) and name to EasyMP4Tags key, then you can use this function:: EasyMP4Tags.RegisterFreeformKey( "musicbrainz_artistid", "MusicBrainz Artist Id") """ atomid = "----:" + mean + ":" + name def getter(tags, key): return [s.decode("utf-8", "replace") for s in tags[atomid]] def setter(tags, key, value): encoded = [] for v in value: if not isinstance(v, text_type): if PY3: raise TypeError("%r not str" % v) v = v.decode("utf-8") encoded.append(v.encode("utf-8")) tags[atomid] = encoded def deleter(tags, key): del(tags[atomid]) cls.RegisterKey(key, getter, setter, deleter) def __getitem__(self, key): key = key.lower() func = dict_match(self.Get, key) if func is not None: return func(self.__mp4, key) else: raise EasyMP4KeyError("%r is not a valid key" % key) def __setitem__(self, key, value): key = key.lower() if PY2: if isinstance(value, basestring): value = [value] else: if isinstance(value, text_type): value = [value] func = dict_match(self.Set, key) if func is not None: return func(self.__mp4, key, value) else: raise EasyMP4KeyError("%r is not a valid key" % key) def __delitem__(self, key): key = key.lower() func = dict_match(self.Delete, key) if func is not None: return func(self.__mp4, key) else: raise EasyMP4KeyError("%r is not a valid key" % key) def keys(self): keys = [] for key in self.Get.keys(): if key in self.List: keys.extend(self.List[key](self.__mp4, key)) elif key in self: keys.append(key) return keys def pprint(self): """Print tag key=value pairs.""" strings = [] for key in sorted(self.keys()): values = self[key] for value in values: strings.append("%s=%s" % (key, value)) return "\n".join(strings) for atomid, key in { '\xa9nam': 'title', '\xa9alb': 'album', '\xa9ART': 'artist', 'aART': 'albumartist', '\xa9day': 'date', '\xa9cmt': 'comment', 'desc': 'description', '\xa9grp': 'grouping', '\xa9gen': 'genre', 'cprt': 'copyright', 'soal': 'albumsort', 'soaa': 'albumartistsort', 'soar': 'artistsort', 'sonm': 'titlesort', 'soco': 'composersort', }.items(): EasyMP4Tags.RegisterTextKey(key, atomid) for name, key in { 'MusicBrainz Artist Id': 'musicbrainz_artistid', 'MusicBrainz Track Id': 'musicbrainz_trackid', 'MusicBrainz Album Id': 'musicbrainz_albumid', 'MusicBrainz Album Artist Id': 'musicbrainz_albumartistid', 'MusicIP PUID': 'musicip_puid', 'MusicBrainz Album Status': 'musicbrainz_albumstatus', 'MusicBrainz Album Type': 'musicbrainz_albumtype', 
'MusicBrainz Release Country': 'releasecountry', }.items(): EasyMP4Tags.RegisterFreeformKey(key, name) for name, key in { "tmpo": "bpm", }.items(): EasyMP4Tags.RegisterIntKey(key, name) for name, key in { "trkn": "tracknumber", "disk": "discnumber", }.items(): EasyMP4Tags.RegisterIntPairKey(key, name) class EasyMP4(MP4): """Like :class:`MP4 <mutagen.mp4.MP4>`, but uses :class:`EasyMP4Tags` for tags. :ivar info: :class:`MP4Info <mutagen.mp4.MP4Info>` :ivar tags: :class:`EasyMP4Tags` """ MP4Tags = EasyMP4Tags Get = EasyMP4Tags.Get Set = EasyMP4Tags.Set Delete = EasyMP4Tags.Delete List = EasyMP4Tags.List RegisterTextKey = EasyMP4Tags.RegisterTextKey RegisterKey = EasyMP4Tags.RegisterKey
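
# A hedged usage sketch, not part of the original module: exercising the key
# registry defined above. "song.m4a" is a placeholder path, and mapping
# "encodedby" to the "\xa9too" (encoding tool) atom is an assumption for
# illustration, not an existing default key.
if __name__ == "__main__":
    EasyMP4.RegisterTextKey("encodedby", "\xa9too")

    audio = EasyMP4("song.m4a")
    audio["title"] = "An example"
    audio["tracknumber"] = "3/12"  # parsed by the int-pair key registered above
    print(audio.pprint())
    audio.save()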
djkonro/client-python
refs/heads/master
kubernetes/test/test_v1beta1_cluster_role_binding.py
2
# coding: utf-8 """ Kubernetes No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) OpenAPI spec version: v1.7.4 Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import os import sys import unittest import kubernetes.client from kubernetes.client.rest import ApiException from kubernetes.client.models.v1beta1_cluster_role_binding import V1beta1ClusterRoleBinding class TestV1beta1ClusterRoleBinding(unittest.TestCase): """ V1beta1ClusterRoleBinding unit test stubs """ def setUp(self): pass def tearDown(self): pass def testV1beta1ClusterRoleBinding(self): """ Test V1beta1ClusterRoleBinding """ model = kubernetes.client.models.v1beta1_cluster_role_binding.V1beta1ClusterRoleBinding() if __name__ == '__main__': unittest.main()
martonw/phantomjs
refs/heads/master
src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/_stream_hybi.py
628
# Copyright 2012, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """This file provides classes and helper functions for parsing/building frames of the WebSocket protocol (RFC 6455). Specification: http://tools.ietf.org/html/rfc6455 """ from collections import deque import logging import os import struct import time from mod_pywebsocket import common from mod_pywebsocket import util from mod_pywebsocket._stream_base import BadOperationException from mod_pywebsocket._stream_base import ConnectionTerminatedException from mod_pywebsocket._stream_base import InvalidFrameException from mod_pywebsocket._stream_base import InvalidUTF8Exception from mod_pywebsocket._stream_base import StreamBase from mod_pywebsocket._stream_base import UnsupportedFrameException _NOOP_MASKER = util.NoopMasker() class Frame(object): def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0, opcode=None, payload=''): self.fin = fin self.rsv1 = rsv1 self.rsv2 = rsv2 self.rsv3 = rsv3 self.opcode = opcode self.payload = payload # Helper functions made public to be used for writing unittests for WebSocket # clients. def create_length_header(length, mask): """Creates a length header. Args: length: Frame length. Must be less than 2^63. mask: Mask bit. Must be boolean. Raises: ValueError: when bad data is given. """ if mask: mask_bit = 1 << 7 else: mask_bit = 0 if length < 0: raise ValueError('length must be non negative integer') elif length <= 125: return chr(mask_bit | length) elif length < (1 << 16): return chr(mask_bit | 126) + struct.pack('!H', length) elif length < (1 << 63): return chr(mask_bit | 127) + struct.pack('!Q', length) else: raise ValueError('Payload is too big for one frame') def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask): """Creates a frame header. Raises: Exception: when bad data is given. 
""" if opcode < 0 or 0xf < opcode: raise ValueError('Opcode out of range') if payload_length < 0 or (1 << 63) <= payload_length: raise ValueError('payload_length out of range') if (fin | rsv1 | rsv2 | rsv3) & ~1: raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1') header = '' first_byte = ((fin << 7) | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4) | opcode) header += chr(first_byte) header += create_length_header(payload_length, mask) return header def _build_frame(header, body, mask): if not mask: return header + body masking_nonce = os.urandom(4) masker = util.RepeatedXorMasker(masking_nonce) return header + masking_nonce + masker.mask(body) def _filter_and_format_frame_object(frame, mask, frame_filters): for frame_filter in frame_filters: frame_filter.filter(frame) header = create_header( frame.opcode, len(frame.payload), frame.fin, frame.rsv1, frame.rsv2, frame.rsv3, mask) return _build_frame(header, frame.payload, mask) def create_binary_frame( message, opcode=common.OPCODE_BINARY, fin=1, mask=False, frame_filters=[]): """Creates a simple binary frame with no extension, reserved bit.""" frame = Frame(fin=fin, opcode=opcode, payload=message) return _filter_and_format_frame_object(frame, mask, frame_filters) def create_text_frame( message, opcode=common.OPCODE_TEXT, fin=1, mask=False, frame_filters=[]): """Creates a simple text frame with no extension, reserved bit.""" encoded_message = message.encode('utf-8') return create_binary_frame(encoded_message, opcode, fin, mask, frame_filters) def parse_frame(receive_bytes, logger=None, ws_version=common.VERSION_HYBI_LATEST, unmask_receive=True): """Parses a frame. Returns a tuple containing each header field and payload. Args: receive_bytes: a function that reads frame data from a stream or something similar. The function takes length of the bytes to be read. The function must raise ConnectionTerminatedException if there is not enough data to be read. logger: a logging object. ws_version: the version of WebSocket protocol. unmask_receive: unmask received frames. When received unmasked frame, raises InvalidFrameException. Raises: ConnectionTerminatedException: when receive_bytes raises it. InvalidFrameException: when the frame contains invalid data. """ if not logger: logger = logging.getLogger() logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame') received = receive_bytes(2) first_byte = ord(received[0]) fin = (first_byte >> 7) & 1 rsv1 = (first_byte >> 6) & 1 rsv2 = (first_byte >> 5) & 1 rsv3 = (first_byte >> 4) & 1 opcode = first_byte & 0xf second_byte = ord(received[1]) mask = (second_byte >> 7) & 1 payload_length = second_byte & 0x7f logger.log(common.LOGLEVEL_FINE, 'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, ' 'Mask=%s, Payload_length=%s', fin, rsv1, rsv2, rsv3, opcode, mask, payload_length) if (mask == 1) != unmask_receive: raise InvalidFrameException( 'Mask bit on the received frame did\'nt match masking ' 'configuration for received frames') # The HyBi and later specs disallow putting a value in 0x0-0xFFFF # into the 8-octet extended payload length field (or 0x0-0xFD in # 2-octet field). 
valid_length_encoding = True length_encoding_bytes = 1 if payload_length == 127: logger.log(common.LOGLEVEL_FINE, 'Receive 8-octet extended payload length') extended_payload_length = receive_bytes(8) payload_length = struct.unpack( '!Q', extended_payload_length)[0] if payload_length > 0x7FFFFFFFFFFFFFFF: raise InvalidFrameException( 'Extended payload length >= 2^63') if ws_version >= 13 and payload_length < 0x10000: valid_length_encoding = False length_encoding_bytes = 8 logger.log(common.LOGLEVEL_FINE, 'Decoded_payload_length=%s', payload_length) elif payload_length == 126: logger.log(common.LOGLEVEL_FINE, 'Receive 2-octet extended payload length') extended_payload_length = receive_bytes(2) payload_length = struct.unpack( '!H', extended_payload_length)[0] if ws_version >= 13 and payload_length < 126: valid_length_encoding = False length_encoding_bytes = 2 logger.log(common.LOGLEVEL_FINE, 'Decoded_payload_length=%s', payload_length) if not valid_length_encoding: logger.warning( 'Payload length is not encoded using the minimal number of ' 'bytes (%d is encoded using %d bytes)', payload_length, length_encoding_bytes) if mask == 1: logger.log(common.LOGLEVEL_FINE, 'Receive mask') masking_nonce = receive_bytes(4) masker = util.RepeatedXorMasker(masking_nonce) logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce) else: masker = _NOOP_MASKER logger.log(common.LOGLEVEL_FINE, 'Receive payload data') if logger.isEnabledFor(common.LOGLEVEL_FINE): receive_start = time.time() raw_payload_bytes = receive_bytes(payload_length) if logger.isEnabledFor(common.LOGLEVEL_FINE): logger.log( common.LOGLEVEL_FINE, 'Done receiving payload data at %s MB/s', payload_length / (time.time() - receive_start) / 1000 / 1000) logger.log(common.LOGLEVEL_FINE, 'Unmask payload data') if logger.isEnabledFor(common.LOGLEVEL_FINE): unmask_start = time.time() unmasked_bytes = masker.mask(raw_payload_bytes) if logger.isEnabledFor(common.LOGLEVEL_FINE): logger.log( common.LOGLEVEL_FINE, 'Done unmasking payload data at %s MB/s', payload_length / (time.time() - unmask_start) / 1000 / 1000) return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 class FragmentedFrameBuilder(object): """A stateful class to send a message as fragments.""" def __init__(self, mask, frame_filters=[], encode_utf8=True): """Constructs an instance.""" self._mask = mask self._frame_filters = frame_filters # This is for skipping UTF-8 encoding when building text type frames # from compressed data. self._encode_utf8 = encode_utf8 self._started = False # Hold opcode of the first frame in messages to verify types of other # frames in the message are all the same. 
self._opcode = common.OPCODE_TEXT def build(self, payload_data, end, binary): if binary: frame_type = common.OPCODE_BINARY else: frame_type = common.OPCODE_TEXT if self._started: if self._opcode != frame_type: raise ValueError('Message types are different in frames for ' 'the same message') opcode = common.OPCODE_CONTINUATION else: opcode = frame_type self._opcode = frame_type if end: self._started = False fin = 1 else: self._started = True fin = 0 if binary or not self._encode_utf8: return create_binary_frame( payload_data, opcode, fin, self._mask, self._frame_filters) else: return create_text_frame( payload_data, opcode, fin, self._mask, self._frame_filters) def _create_control_frame(opcode, body, mask, frame_filters): frame = Frame(opcode=opcode, payload=body) for frame_filter in frame_filters: frame_filter.filter(frame) if len(frame.payload) > 125: raise BadOperationException( 'Payload data size of control frames must be 125 bytes or less') header = create_header( frame.opcode, len(frame.payload), frame.fin, frame.rsv1, frame.rsv2, frame.rsv3, mask) return _build_frame(header, frame.payload, mask) def create_ping_frame(body, mask=False, frame_filters=[]): return _create_control_frame(common.OPCODE_PING, body, mask, frame_filters) def create_pong_frame(body, mask=False, frame_filters=[]): return _create_control_frame(common.OPCODE_PONG, body, mask, frame_filters) def create_close_frame(body, mask=False, frame_filters=[]): return _create_control_frame( common.OPCODE_CLOSE, body, mask, frame_filters) def create_closing_handshake_body(code, reason): body = '' if code is not None: if (code > common.STATUS_USER_PRIVATE_MAX or code < common.STATUS_NORMAL_CLOSURE): raise BadOperationException('Status code is out of range') if (code == common.STATUS_NO_STATUS_RECEIVED or code == common.STATUS_ABNORMAL_CLOSURE or code == common.STATUS_TLS_HANDSHAKE): raise BadOperationException('Status code is reserved pseudo ' 'code') encoded_reason = reason.encode('utf-8') body = struct.pack('!H', code) + encoded_reason return body class StreamOptions(object): """Holds option values to configure Stream objects.""" def __init__(self): """Constructs StreamOptions.""" # Filters applied to frames. self.outgoing_frame_filters = [] self.incoming_frame_filters = [] # Filters applied to messages. Control frames are not affected by them. self.outgoing_message_filters = [] self.incoming_message_filters = [] self.encode_text_message_to_utf8 = True self.mask_send = False self.unmask_receive = True class Stream(StreamBase): """A class for parsing/building frames of the WebSocket protocol (RFC 6455). """ def __init__(self, request, options): """Constructs an instance. Args: request: mod_python request. """ StreamBase.__init__(self, request) self._logger = util.get_class_logger(self) self._options = options self._request.client_terminated = False self._request.server_terminated = False # Holds body of received fragments. self._received_fragments = [] # Holds the opcode of the first fragment. self._original_opcode = None self._writer = FragmentedFrameBuilder( self._options.mask_send, self._options.outgoing_frame_filters, self._options.encode_text_message_to_utf8) self._ping_queue = deque() def _receive_frame(self): """Receives a frame and return data in the frame as a tuple containing each header field and payload separately. Raises: ConnectionTerminatedException: when read returns empty string. InvalidFrameException: when the frame contains invalid data. 
""" def _receive_bytes(length): return self.receive_bytes(length) return parse_frame(receive_bytes=_receive_bytes, logger=self._logger, ws_version=self._request.ws_version, unmask_receive=self._options.unmask_receive) def _receive_frame_as_frame_object(self): opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame() return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3, opcode=opcode, payload=unmasked_bytes) def receive_filtered_frame(self): """Receives a frame and applies frame filters and message filters. The frame to be received must satisfy following conditions: - The frame is not fragmented. - The opcode of the frame is TEXT or BINARY. DO NOT USE this method except for testing purpose. """ frame = self._receive_frame_as_frame_object() if not frame.fin: raise InvalidFrameException( 'Segmented frames must not be received via ' 'receive_filtered_frame()') if (frame.opcode != common.OPCODE_TEXT and frame.opcode != common.OPCODE_BINARY): raise InvalidFrameException( 'Control frames must not be received via ' 'receive_filtered_frame()') for frame_filter in self._options.incoming_frame_filters: frame_filter.filter(frame) for message_filter in self._options.incoming_message_filters: frame.payload = message_filter.filter(frame.payload) return frame def send_message(self, message, end=True, binary=False): """Send message. Args: message: text in unicode or binary in str to send. binary: send message as binary frame. Raises: BadOperationException: when called on a server-terminated connection or called with inconsistent message type or binary parameter. """ if self._request.server_terminated: raise BadOperationException( 'Requested send_message after sending out a closing handshake') if binary and isinstance(message, unicode): raise BadOperationException( 'Message for binary frame must be instance of str') for message_filter in self._options.outgoing_message_filters: message = message_filter.filter(message, end, binary) try: # Set this to any positive integer to limit maximum size of data in # payload data of each frame. MAX_PAYLOAD_DATA_SIZE = -1 if MAX_PAYLOAD_DATA_SIZE <= 0: self._write(self._writer.build(message, end, binary)) return bytes_written = 0 while True: end_for_this_frame = end bytes_to_write = len(message) - bytes_written if (MAX_PAYLOAD_DATA_SIZE > 0 and bytes_to_write > MAX_PAYLOAD_DATA_SIZE): end_for_this_frame = False bytes_to_write = MAX_PAYLOAD_DATA_SIZE frame = self._writer.build( message[bytes_written:bytes_written + bytes_to_write], end_for_this_frame, binary) self._write(frame) bytes_written += bytes_to_write # This if must be placed here (the end of while block) so that # at least one frame is sent. if len(message) <= bytes_written: break except ValueError, e: raise BadOperationException(e) def _get_message_from_frame(self, frame): """Gets a message from frame. If the message is composed of fragmented frames and the frame is not the last fragmented frame, this method returns None. The whole message will be returned when the last fragmented frame is passed to this method. Raises: InvalidFrameException: when the frame doesn't match defragmentation context, or the frame contains invalid data. 
""" if frame.opcode == common.OPCODE_CONTINUATION: if not self._received_fragments: if frame.fin: raise InvalidFrameException( 'Received a termination frame but fragmentation ' 'not started') else: raise InvalidFrameException( 'Received an intermediate frame but ' 'fragmentation not started') if frame.fin: # End of fragmentation frame self._received_fragments.append(frame.payload) message = ''.join(self._received_fragments) self._received_fragments = [] return message else: # Intermediate frame self._received_fragments.append(frame.payload) return None else: if self._received_fragments: if frame.fin: raise InvalidFrameException( 'Received an unfragmented frame without ' 'terminating existing fragmentation') else: raise InvalidFrameException( 'New fragmentation started without terminating ' 'existing fragmentation') if frame.fin: # Unfragmented frame self._original_opcode = frame.opcode return frame.payload else: # Start of fragmentation frame if common.is_control_opcode(frame.opcode): raise InvalidFrameException( 'Control frames must not be fragmented') self._original_opcode = frame.opcode self._received_fragments.append(frame.payload) return None def _process_close_message(self, message): """Processes close message. Args: message: close message. Raises: InvalidFrameException: when the message is invalid. """ self._request.client_terminated = True # Status code is optional. We can have status reason only if we # have status code. Status reason can be empty string. So, # allowed cases are # - no application data: no code no reason # - 2 octet of application data: has code but no reason # - 3 or more octet of application data: both code and reason if len(message) == 0: self._logger.debug('Received close frame (empty body)') self._request.ws_close_code = ( common.STATUS_NO_STATUS_RECEIVED) elif len(message) == 1: raise InvalidFrameException( 'If a close frame has status code, the length of ' 'status code must be 2 octet') elif len(message) >= 2: self._request.ws_close_code = struct.unpack( '!H', message[0:2])[0] self._request.ws_close_reason = message[2:].decode( 'utf-8', 'replace') self._logger.debug( 'Received close frame (code=%d, reason=%r)', self._request.ws_close_code, self._request.ws_close_reason) # As we've received a close frame, no more data is coming over the # socket. We can now safely close the socket without worrying about # RST sending. if self._request.server_terminated: self._logger.debug( 'Received ack for server-initiated closing handshake') return self._logger.debug( 'Received client-initiated closing handshake') code = common.STATUS_NORMAL_CLOSURE reason = '' if hasattr(self._request, '_dispatcher'): dispatcher = self._request._dispatcher code, reason = dispatcher.passive_closing_handshake( self._request) if code is None and reason is not None and len(reason) > 0: self._logger.warning( 'Handler specified reason despite code being None') reason = '' if reason is None: reason = '' self._send_closing_handshake(code, reason) self._logger.debug( 'Acknowledged closing handshake initiated by the peer ' '(code=%r, reason=%r)', code, reason) def _process_ping_message(self, message): """Processes ping message. Args: message: ping message. """ try: handler = self._request.on_ping_handler if handler: handler(self._request, message) return except AttributeError, e: pass self._send_pong(message) def _process_pong_message(self, message): """Processes pong message. Args: message: pong message. """ # TODO(tyoshino): Add ping timeout handling. 
        inflight_pings = deque()

        while True:
            try:
                expected_body = self._ping_queue.popleft()
                if expected_body == message:
                    # inflight_pings contains pings ignored by the
                    # other peer. Just forget them.
                    self._logger.debug(
                        'Ping %r is acked (%d pings were ignored)',
                        expected_body, len(inflight_pings))
                    break
                else:
                    inflight_pings.append(expected_body)
            except IndexError, e:
                # The received pong was an unsolicited pong. Keep the
                # ping queue as is.
                self._ping_queue = inflight_pings
                self._logger.debug('Received an unsolicited pong')
                break

        try:
            handler = self._request.on_pong_handler
            if handler:
                handler(self._request, message)
        except AttributeError, e:
            pass

    def receive_message(self):
        """Receive a WebSocket frame and return its payload as a text in
        unicode or a binary in str.

        Returns:
            payload data of the frame
            - as unicode instance if received text frame
            - as str instance if received binary frame
            or None iff received closing handshake.

        Raises:
            BadOperationException: when called on a client-terminated
                connection.
            ConnectionTerminatedException: when read returns empty
                string.
            InvalidFrameException: when the frame contains invalid data.
            UnsupportedFrameException: when the received frame has flags,
                opcode we cannot handle. You can ignore this exception and
                continue receiving the next frame.
        """

        if self._request.client_terminated:
            raise BadOperationException(
                'Requested receive_message after receiving a closing '
                'handshake')

        while True:
            # mp_conn.read will block if no bytes are available.
            # Timeout is controlled by TimeOut directive of Apache.

            frame = self._receive_frame_as_frame_object()

            # Check the constraint on the payload size for control frames
            # before extension processes the frame.
            # See also http://tools.ietf.org/html/rfc6455#section-5.5
            if (common.is_control_opcode(frame.opcode) and
                len(frame.payload) > 125):
                raise InvalidFrameException(
                    'Payload data size of control frames must be 125 bytes '
                    'or less')

            for frame_filter in self._options.incoming_frame_filters:
                frame_filter.filter(frame)

            if frame.rsv1 or frame.rsv2 or frame.rsv3:
                raise UnsupportedFrameException(
                    'Unsupported flag is set (rsv = %d%d%d)' %
                    (frame.rsv1, frame.rsv2, frame.rsv3))

            message = self._get_message_from_frame(frame)
            if message is None:
                continue

            for message_filter in self._options.incoming_message_filters:
                message = message_filter.filter(message)

            if self._original_opcode == common.OPCODE_TEXT:
                # The WebSocket protocol section 4.4 specifies that invalid
                # characters must be replaced with U+fffd REPLACEMENT
                # CHARACTER.
                try:
                    return message.decode('utf-8')
                except UnicodeDecodeError, e:
                    raise InvalidUTF8Exception(e)
            elif self._original_opcode == common.OPCODE_BINARY:
                return message
            elif self._original_opcode == common.OPCODE_CLOSE:
                self._process_close_message(message)
                return None
            elif self._original_opcode == common.OPCODE_PING:
                self._process_ping_message(message)
            elif self._original_opcode == common.OPCODE_PONG:
                self._process_pong_message(message)
            else:
                raise UnsupportedFrameException(
                    'Opcode %d is not supported' % self._original_opcode)

    def _send_closing_handshake(self, code, reason):
        body = create_closing_handshake_body(code, reason)
        frame = create_close_frame(
            body, mask=self._options.mask_send,
            frame_filters=self._options.outgoing_frame_filters)

        self._request.server_terminated = True

        self._write(frame)

    def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='',
                         wait_response=True):
        """Closes a WebSocket connection.

        Args:
            code: Status code for close frame. If code is None, a close
                frame with empty body will be sent.
            reason: string representing close reason.
            wait_response: True when the caller wants to wait for the
                response.

        Raises:
            BadOperationException: when reason is specified with code None
                or reason is not an instance of both str and unicode.
        """

        if self._request.server_terminated:
            self._logger.debug(
                'Requested close_connection but server is already terminated')
            return

        if code is None:
            if reason is not None and len(reason) > 0:
                raise BadOperationException(
                    'close reason must not be specified if code is None')
            reason = ''
        else:
            if not isinstance(reason, str) and not isinstance(reason, unicode):
                raise BadOperationException(
                    'close reason must be an instance of str or unicode')

        self._send_closing_handshake(code, reason)
        self._logger.debug(
            'Initiated closing handshake (code=%r, reason=%r)',
            code, reason)

        if (code == common.STATUS_GOING_AWAY or
            code == common.STATUS_PROTOCOL_ERROR) or not wait_response:
            # It doesn't make sense to wait for a close frame if the reason
            # is protocol error or that the server is going away. For some
            # other reasons, it might not make sense to wait for a close
            # frame, but it's not clear, yet.
            return

        # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
        # or until a server-defined timeout expires.
        #
        # For now, we expect receiving closing handshake right after sending
        # out closing handshake.
        message = self.receive_message()
        if message is not None:
            raise ConnectionTerminatedException(
                'Didn\'t receive valid ack for closing handshake')

        # TODO: 3. close the WebSocket connection.
        # note: mod_python Connection (mp_conn) doesn't have close method.

    def send_ping(self, body=''):
        frame = create_ping_frame(
            body,
            self._options.mask_send,
            self._options.outgoing_frame_filters)
        self._write(frame)

        self._ping_queue.append(body)

    def _send_pong(self, body):
        frame = create_pong_frame(
            body,
            self._options.mask_send,
            self._options.outgoing_frame_filters)
        self._write(frame)

    def get_last_received_opcode(self):
        """Returns the opcode of the WebSocket message which the last received
        frame belongs to. The return value is valid iff immediately after
        receive_message call.
        """

        return self._original_opcode


# vi:sts=4 sw=4 et
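# Appended round-trip sketch (not part of the original module): build a masked
# text frame with create_text_frame() and feed it back through parse_frame()
# via an in-memory reader. Handy as a template for unit tests. Guarded so
# importing the module stays side-effect free.
if __name__ == '__main__':
    from StringIO import StringIO

    frame = create_text_frame(u'hello', mask=True)
    reader = StringIO(frame)
    opcode, payload, fin, rsv1, rsv2, rsv3 = parse_frame(reader.read)
    assert opcode == common.OPCODE_TEXT
    assert payload == 'hello'
    assert fin == 1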
chundongwang/Guess2014
refs/heads/master
lib/werkzeug/contrib/lint.py
318
# -*- coding: utf-8 -*- """ werkzeug.contrib.lint ~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 0.5 This module provides a middleware that performs sanity checks of the WSGI application. It checks that :pep:`333` is properly implemented and warns on some common HTTP errors such as non-empty responses for 304 status codes. This module provides a middleware, the :class:`LintMiddleware`. Wrap your application with it and it will warn about common problems with WSGI and HTTP while your application is running. It's strongly recommended to use it during development. :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from urlparse import urlparse from warnings import warn from werkzeug.datastructures import Headers from werkzeug.http import is_entity_header from werkzeug.wsgi import FileWrapper from werkzeug._compat import string_types class WSGIWarning(Warning): """Warning class for WSGI warnings.""" class HTTPWarning(Warning): """Warning class for HTTP warnings.""" def check_string(context, obj, stacklevel=3): if type(obj) is not str: warn(WSGIWarning('%s requires bytestrings, got %s' % (context, obj.__class__.__name__))) class InputStream(object): def __init__(self, stream): self._stream = stream def read(self, *args): if len(args) == 0: warn(WSGIWarning('wsgi does not guarantee an EOF marker on the ' 'input stream, thus making calls to ' 'wsgi.input.read() unsafe. Conforming servers ' 'may never return from this call.'), stacklevel=2) elif len(args) != 1: warn(WSGIWarning('too many parameters passed to wsgi.input.read()'), stacklevel=2) return self._stream.read(*args) def readline(self, *args): if len(args) == 0: warn(WSGIWarning('Calls to wsgi.input.readline() without arguments' ' are unsafe. Use wsgi.input.read() instead.'), stacklevel=2) elif len(args) == 1: warn(WSGIWarning('wsgi.input.readline() was called with a size hint. 
                             ' 'WSGI does not support this, although it\'s available '
                             'on all major servers.'), stacklevel=2)
        else:
            raise TypeError('too many arguments passed to '
                            'wsgi.input.readline()')
        return self._stream.readline(*args)

    def __iter__(self):
        try:
            return iter(self._stream)
        except TypeError:
            warn(WSGIWarning('wsgi.input is not iterable.'), stacklevel=2)
            return iter(())

    def close(self):
        warn(WSGIWarning('application closed the input stream!'),
             stacklevel=2)
        self._stream.close()


class ErrorStream(object):

    def __init__(self, stream):
        self._stream = stream

    def write(self, s):
        check_string('wsgi.error.write()', s)
        self._stream.write(s)

    def flush(self):
        self._stream.flush()

    def writelines(self, seq):
        for line in seq:
            # Fixed: the original wrote `seq` itself instead of each line.
            self.write(line)

    def close(self):
        warn(WSGIWarning('application closed the error stream!'),
             stacklevel=2)
        self._stream.close()


class GuardedWrite(object):

    def __init__(self, write, chunks):
        self._write = write
        self._chunks = chunks

    def __call__(self, s):
        check_string('write()', s)
        # `write` is the callable returned by start_response, so call it
        # directly (the original called a non-existent .write attribute).
        self._write(s)
        self._chunks.append(len(s))


class GuardedIterator(object):

    def __init__(self, iterator, headers_set, chunks):
        self._iterator = iterator
        self._next = iter(iterator).next
        self.closed = False
        self.headers_set = headers_set
        self.chunks = chunks

    def __iter__(self):
        return self

    def next(self):
        if self.closed:
            warn(WSGIWarning('iterated over closed app_iter'), stacklevel=2)
        rv = self._next()
        if not self.headers_set:
            warn(WSGIWarning('Application returned before it '
                             'started the response'), stacklevel=2)
        check_string('application iterator items', rv)
        self.chunks.append(len(rv))
        return rv

    def close(self):
        self.closed = True
        if hasattr(self._iterator, 'close'):
            self._iterator.close()

        if self.headers_set:
            status_code, headers = self.headers_set
            bytes_sent = sum(self.chunks)
            content_length = headers.get('content-length', type=int)

            if status_code == 304:
                for key, value in headers:
                    key = key.lower()
                    if key not in ('expires', 'content-location') and \
                       is_entity_header(key):
                        warn(HTTPWarning('entity header %r found in 304 '
                                         'response' % key))
                if bytes_sent:
                    warn(HTTPWarning('304 responses must not have a body'))
            elif 100 <= status_code < 200 or status_code == 204:
                if content_length != 0:
                    # Fixed: the % formatting belongs inside the warning
                    # message, not applied to the return value of warn().
                    warn(HTTPWarning('%r responses must have an empty '
                                     'content length' % status_code))
                if bytes_sent:
                    warn(HTTPWarning('%r responses must not have a body' %
                                     status_code))
            elif content_length is not None and content_length != bytes_sent:
                warn(WSGIWarning('Content-Length and the number of bytes '
                                 'sent to the client do not match.'))

    def __del__(self):
        if not self.closed:
            try:
                warn(WSGIWarning('Iterator was garbage collected before '
                                 'it was closed.'))
            except Exception:
                pass


class LintMiddleware(object):
    """This middleware wraps an application and warns on common errors.
    Among other things it currently checks for the following problems:

    -   invalid status codes
    -   non-bytestrings sent to the WSGI server
    -   strings returned from the WSGI application
    -   non-empty conditional responses
    -   unquoted etags
    -   relative URLs in the Location header
    -   unsafe calls to wsgi.input
    -   unclosed iterators

    Detected errors are emitted using the standard Python :mod:`warnings`
    system and usually end up on :data:`stderr`.
    ::

        from werkzeug.contrib.lint import LintMiddleware
        app = LintMiddleware(app)

    :param app: the application to wrap
    """

    def __init__(self, app):
        self.app = app

    def check_environ(self, environ):
        if type(environ) is not dict:
            warn(WSGIWarning('WSGI environment is not a standard '
                             'python dict.'), stacklevel=4)
        for key in ('REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                    'wsgi.version', 'wsgi.input', 'wsgi.errors',
                    'wsgi.multithread', 'wsgi.multiprocess',
                    'wsgi.run_once'):
            if key not in environ:
                warn(WSGIWarning('required environment key %r not found'
                     % key), stacklevel=3)
        if environ['wsgi.version'] != (1, 0):
            warn(WSGIWarning('environ is not a WSGI 1.0 environ'),
                 stacklevel=3)

        script_name = environ.get('SCRIPT_NAME', '')
        if script_name and script_name[:1] != '/':
            warn(WSGIWarning('SCRIPT_NAME does not start with a slash: %r'
                 % script_name), stacklevel=3)

        path_info = environ.get('PATH_INFO', '')
        if path_info[:1] != '/':
            warn(WSGIWarning('PATH_INFO does not start with a slash: %r'
                 % path_info), stacklevel=3)

    def check_start_response(self, status, headers, exc_info):
        check_string('status', status)
        status_code = status.split(None, 1)[0]
        if len(status_code) != 3 or not status_code.isdigit():
            warn(WSGIWarning('Status code must be three digits'),
                 stacklevel=3)
        if len(status) < 4 or status[3] != ' ':
            # Fixed: fill in the %r placeholder with the actual status.
            warn(WSGIWarning('Invalid value for status %r. Valid '
                             'status strings are three digits, a space '
                             'and a status explanation' % status),
                 stacklevel=3)
        status_code = int(status_code)
        if status_code < 100:
            warn(WSGIWarning('status code < 100 detected'), stacklevel=3)

        if type(headers) is not list:
            warn(WSGIWarning('header list is not a list'), stacklevel=3)
        for item in headers:
            if type(item) is not tuple or len(item) != 2:
                warn(WSGIWarning('Headers must be a list of 2-item tuples'),
                     stacklevel=3)
            name, value = item
            if type(name) is not str or type(value) is not str:
                warn(WSGIWarning('header items must be strings'),
                     stacklevel=3)
            if name.lower() == 'status':
                warn(WSGIWarning('The status header is not supported due to '
                                 'conflicts with the CGI spec.'),
                     stacklevel=3)

        if exc_info is not None and not isinstance(exc_info, tuple):
            warn(WSGIWarning('invalid value for exc_info'), stacklevel=3)

        headers = Headers(headers)
        self.check_headers(headers)

        return status_code, headers

    def check_headers(self, headers):
        etag = headers.get('etag')
        if etag is not None:
            # Accept both weak-etag spellings; RFC 2616 uses an uppercase W.
            if etag.startswith(('W/', 'w/')):
                etag = etag[2:]
            if not (etag[:1] == etag[-1:] == '"'):
                warn(HTTPWarning('unquoted etag emitted.'), stacklevel=4)

        location = headers.get('location')
        if location is not None:
            if not urlparse(location).netloc:
                warn(HTTPWarning('absolute URLs required for location '
                                 'header'), stacklevel=4)

    def check_iterator(self, app_iter):
        if isinstance(app_iter, string_types):
            warn(WSGIWarning('application returned string. Response will '
                             'send character for character to the client '
                             'which will kill the performance. Return a '
                             'list or iterable instead.'), stacklevel=3)

    def __call__(self, *args, **kwargs):
        if len(args) != 2:
            warn(WSGIWarning('Two arguments to WSGI app required'),
                 stacklevel=2)
        if kwargs:
            warn(WSGIWarning('No keyword arguments to WSGI app allowed'),
                 stacklevel=2)
        environ, start_response = args

        self.check_environ(environ)
        environ['wsgi.input'] = InputStream(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorStream(environ['wsgi.errors'])

        # hook our own file wrapper in so that applications will always
        # iterate to the end and we can check the content length
        environ['wsgi.file_wrapper'] = FileWrapper

        headers_set = []
        chunks = []

        def checking_start_response(*args, **kwargs):
            if len(args) not in (2, 3):
                # Fixed: stacklevel is an argument of warn(), not of the
                # warning instance.
                warn(WSGIWarning('Invalid number of arguments: %s, expected '
                                 '2 or 3' % len(args)), stacklevel=2)
            if kwargs:
                warn(WSGIWarning('no keyword arguments allowed.'))

            status, headers = args[:2]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None

            headers_set[:] = self.check_start_response(status, headers,
                                                       exc_info)
            return GuardedWrite(start_response(status, headers, exc_info),
                                chunks)

        app_iter = self.app(environ, checking_start_response)
        self.check_iterator(app_iter)
        return GuardedIterator(app_iter, headers_set, chunks)
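# Appended usage sketch (not part of the original module): wrap any WSGI
# callable and drive one request through it with werkzeug's test helpers;
# lint findings surface as Python warnings on stderr. Guarded so importing
# the module stays side-effect free.
if __name__ == '__main__':
    from werkzeug.test import create_environ, run_wsgi_app

    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain'),
                                  ('Content-Length', '5')])
        return ['hello']

    app_iter, status, headers = run_wsgi_app(LintMiddleware(hello_app),
                                             create_environ('/'))
    body = ''.join(app_iter)
    app_iter.close()
    print(body)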
andyraib/data-storage
refs/heads/master
python_scripts/pandas_example.py
2
import pandas as pd

df = pd.DataFrame({
    'A': 1.,
    'B': pd.Timestamp('20130102'),
    'C': pd.Series(1, index=list(range(4)), dtype='float32'),
    'D': pd.Series([1, 2, 1, 2], dtype='int32'),
    'E': pd.Categorical(["test", "train", "test", "train"]),
    'F': 'foo'
})

# In a script (unlike a REPL session) bare expressions print nothing,
# so show the frame and the column explicitly.
print(df)
print(df.B)

# Compute the sum of D for each category in E
print(df.groupby('E').sum().D)
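# A slightly more direct spelling of the same aggregation (a sketch; both
# forms give the same result): select the column first, then aggregate,
# which avoids summing the unrelated numeric columns A and C.
print(df.groupby('E')['D'].sum())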
Ziqi-Li/bknqgis
refs/heads/master
bokeh/bokeh/tests/test_model.py
8
import pytest from bokeh.core.properties import Int, String, Float, Instance, List, Any from bokeh.model import Model from bokeh.models.callbacks import CustomJS def test_Model_pretty(): class Foo1(Model): pass assert Foo1(id='1').pretty() == """\ bokeh.tests.test_model.Foo1( id='1', js_event_callbacks={}, js_property_callbacks={}, name=None, subscribed_events=[], tags=[])""" class Foo2(Model): a = Int(12) b = String("hello") c = List(Int, [1, 2, 3]) assert Foo2(id='xyz').pretty() == """\ bokeh.tests.test_model.Foo2( id='xyz', a=12, b='hello', c=[1, 2, 3], js_event_callbacks={}, js_property_callbacks={}, name=None, subscribed_events=[], tags=[])""" class Foo3(Model): a = Int(12) b = String("hello") c = List(Int, [1, 2, 3]) d = Float(None) assert Foo3(id='xyz').pretty() == """\ bokeh.tests.test_model.Foo3( id='xyz', a=12, b='hello', c=[1, 2, 3], d=None, js_event_callbacks={}, js_property_callbacks={}, name=None, subscribed_events=[], tags=[])""" class Foo4(Model): a = Int(12) b = String("hello") c = List(Int, [1, 2, 3]) d = Float(None) e = Instance(Foo2, lambda: Foo2(id='xyz')) assert Foo4(id='xyz').pretty() == """\ bokeh.tests.test_model.Foo4( id='xyz', a=12, b='hello', c=[1, 2, 3], d=None, e=bokeh.tests.test_model.Foo2( id='xyz', a=12, b='hello', c=[1, 2, 3], js_event_callbacks={}, js_property_callbacks={}, name=None, subscribed_events=[], tags=[]), js_event_callbacks={}, js_property_callbacks={}, name=None, subscribed_events=[], tags=[])""" class Foo5(Model): foo6 = Any # can't use Instance(".tests.test_model.Foo6") class Foo6(Model): foo5 = Instance(Foo5) f5 = Foo5(id='xyz') f6 = Foo6(id='uvw', foo5=f5) f5.foo6 = f6 assert f5.pretty() == """\ bokeh.tests.test_model.Foo5( id='xyz', foo6=bokeh.tests.test_model.Foo6( id='uvw', foo5=bokeh.tests.test_model.Foo5(id='xyz', ...), js_event_callbacks={}, js_property_callbacks={}, name=None, subscribed_events=[], tags=[]), js_event_callbacks={}, js_property_callbacks={}, name=None, subscribed_events=[], tags=[])""" class SomeModel(Model): a = Int(12) b = String("hello") c = List(Int, [1, 2, 3]) def test_model_js_on_change_exception_for_no_callbacks(): m = SomeModel() with pytest.raises(ValueError): m.js_on_change('foo') def test_model_js_on_change_exception_for_bad_callbacks(): m = SomeModel() for val in [10, "bar", None, [1], {}, 10.2]: with pytest.raises(ValueError): m.js_on_change('foo', val) def test_model_js_on_change_with_propname(): cb = CustomJS(code="") m0 = SomeModel() for name in m0.properties(): m = SomeModel() m.js_on_change(name, cb) assert m.js_property_callbacks == {"change:%s" % name: [cb]} def test_model_js_on_change_with_non_propname(): cb = CustomJS(code="") m1 = SomeModel() m1.js_on_change('foo', cb) assert m1.js_property_callbacks == {"foo": [cb]} m2 = SomeModel() m2.js_on_change('change:b', cb) assert m2.js_property_callbacks == {"change:b": [cb]} def test_model_js_on_change_with_multple_callbacks(): cb1 = CustomJS(code="") cb2 = CustomJS(code="") m = SomeModel() m.js_on_change('foo', cb1, cb2) assert m.js_property_callbacks == {"foo": [cb1, cb2]} def test_model_js_on_change_with_multple_callbacks_separately(): cb1 = CustomJS(code="") cb2 = CustomJS(code="") m = SomeModel() m.js_on_change('foo', cb1) assert m.js_property_callbacks == {"foo": [cb1]} m.js_on_change('foo', cb2) assert m.js_property_callbacks == {"foo": [cb1, cb2]} def test_model_js_on_change_ignores_dupe_callbacks(): cb = CustomJS(code="") m = SomeModel() m.js_on_change('foo', cb, cb) assert m.js_property_callbacks == {"foo": [cb]}
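# Appended sketch (not one of the original tests): attaching one callback to
# two real properties produces two independent "change:<name>" entries.
def test_model_js_on_change_two_properties_sketch():
    cb = CustomJS(code="")
    m = SomeModel()
    m.js_on_change('a', cb)
    m.js_on_change('b', cb)
    assert m.js_property_callbacks == {"change:a": [cb], "change:b": [cb]}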
eickenberg/scikit-learn
refs/heads/master
sklearn/neighbors/graph.py
17
"""Nearest Neighbors graph functions""" # Author: Jake Vanderplas <[email protected]> # # License: BSD 3 clause (C) INRIA, University of Amsterdam from .base import KNeighborsMixin, RadiusNeighborsMixin from .unsupervised import NearestNeighbors def kneighbors_graph(X, n_neighbors, mode='connectivity'): """Computes the (weighted) graph of k-Neighbors for points in X Parameters ---------- X : array-like or BallTree, shape = [n_samples, n_features] Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. n_neighbors : int Number of neighbors for each sample. mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import kneighbors_graph >>> A = kneighbors_graph(X, 2) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 1.], [ 1., 0., 1.]]) See also -------- radius_neighbors_graph """ if not isinstance(X, KNeighborsMixin): X = NearestNeighbors(n_neighbors).fit(X) return X.kneighbors_graph(X._fit_X, n_neighbors, mode=mode) def radius_neighbors_graph(X, radius, mode='connectivity'): """Computes the (weighted) graph of Neighbors for points in X Neighborhoods are restricted the points at a distance lower than radius. Parameters ---------- X : array-like or BallTree, shape = [n_samples, n_features] Sample data, in the form of a numpy array or a precomputed :class:`BallTree`. radius : float Radius of neighborhoods. mode : {'connectivity', 'distance'}, optional Type of returned matrix: 'connectivity' will return the connectivity matrix with ones and zeros, in 'distance' the edges are Euclidean distance between points. Returns ------- A : sparse matrix in CSR format, shape = [n_samples, n_samples] A[i, j] is assigned the weight of edge that connects i to j. Examples -------- >>> X = [[0], [3], [1]] >>> from sklearn.neighbors import radius_neighbors_graph >>> A = radius_neighbors_graph(X, 1.5) >>> A.toarray() array([[ 1., 0., 1.], [ 0., 1., 0.], [ 1., 0., 1.]]) See also -------- kneighbors_graph """ if not isinstance(X, RadiusNeighborsMixin): X = NearestNeighbors(radius=radius).fit(X) return X.radius_neighbors_graph(X._fit_X, radius, mode)
TathagataChakraborti/resource-conflicts
refs/heads/master
PLANROB-2015/py2.5/lib/python2.5/test/test_dis.py
19
from test.test_support import verify, verbose, TestFailed, run_unittest import sys import dis import StringIO # Minimal tests for dis module import unittest def _f(a): print a return 1 dis_f = """\ %-4d 0 LOAD_FAST 0 (a) 3 PRINT_ITEM 4 PRINT_NEWLINE %-4d 5 LOAD_CONST 1 (1) 8 RETURN_VALUE """%(_f.func_code.co_firstlineno + 1, _f.func_code.co_firstlineno + 2) def bug708901(): for res in range(1, 10): pass dis_bug708901 = """\ %-4d 0 SETUP_LOOP 23 (to 26) 3 LOAD_GLOBAL 0 (range) 6 LOAD_CONST 1 (1) %-4d 9 LOAD_CONST 2 (10) 12 CALL_FUNCTION 2 15 GET_ITER >> 16 FOR_ITER 6 (to 25) 19 STORE_FAST 0 (res) %-4d 22 JUMP_ABSOLUTE 16 >> 25 POP_BLOCK >> 26 LOAD_CONST 0 (None) 29 RETURN_VALUE """%(bug708901.func_code.co_firstlineno + 1, bug708901.func_code.co_firstlineno + 2, bug708901.func_code.co_firstlineno + 3) def bug1333982(x=[]): assert 0, ([s for s in x] + 1) pass dis_bug1333982 = """\ %-4d 0 LOAD_CONST 1 (0) 3 JUMP_IF_TRUE 41 (to 47) 6 POP_TOP 7 LOAD_GLOBAL 0 (AssertionError) 10 BUILD_LIST 0 13 DUP_TOP 14 STORE_FAST 1 (_[1]) 17 LOAD_FAST 0 (x) 20 GET_ITER >> 21 FOR_ITER 13 (to 37) 24 STORE_FAST 2 (s) 27 LOAD_FAST 1 (_[1]) 30 LOAD_FAST 2 (s) 33 LIST_APPEND 34 JUMP_ABSOLUTE 21 >> 37 DELETE_FAST 1 (_[1]) %-4d 40 LOAD_CONST 2 (1) 43 BINARY_ADD 44 RAISE_VARARGS 2 >> 47 POP_TOP %-4d 48 LOAD_CONST 0 (None) 51 RETURN_VALUE """%(bug1333982.func_code.co_firstlineno + 1, bug1333982.func_code.co_firstlineno + 2, bug1333982.func_code.co_firstlineno + 3) _BIG_LINENO_FORMAT = """\ %3d 0 LOAD_GLOBAL 0 (spam) 3 POP_TOP 4 LOAD_CONST 0 (None) 7 RETURN_VALUE """ class DisTests(unittest.TestCase): def do_disassembly_test(self, func, expected): s = StringIO.StringIO() save_stdout = sys.stdout sys.stdout = s dis.dis(func) sys.stdout = save_stdout got = s.getvalue() # Trim trailing blanks (if any). lines = got.split('\n') lines = [line.rstrip() for line in lines] expected = expected.split("\n") import difflib if expected != lines: self.fail( "events did not match expectation:\n" + "\n".join(difflib.ndiff(expected, lines))) def test_opmap(self): self.assertEqual(dis.opmap["STOP_CODE"], 0) self.assertEqual(dis.opmap["LOAD_CONST"] in dis.hasconst, True) self.assertEqual(dis.opmap["STORE_NAME"] in dis.hasname, True) def test_opname(self): self.assertEqual(dis.opname[dis.opmap["LOAD_FAST"]], "LOAD_FAST") def test_boundaries(self): self.assertEqual(dis.opmap["EXTENDED_ARG"], dis.EXTENDED_ARG) self.assertEqual(dis.opmap["STORE_NAME"], dis.HAVE_ARGUMENT) def test_dis(self): self.do_disassembly_test(_f, dis_f) def test_bug_708901(self): self.do_disassembly_test(bug708901, dis_bug708901) def test_bug_1333982(self): # This one is checking bytecodes generated for an `assert` statement, # so fails if the tests are run with -O. Skip this test then. if __debug__: self.do_disassembly_test(bug1333982, dis_bug1333982) def test_big_linenos(self): def func(count): namespace = {} func = "def foo():\n " + "".join(["\n "] * count + ["spam\n"]) exec func in namespace return namespace['foo'] # Test all small ranges for i in xrange(1, 300): expected = _BIG_LINENO_FORMAT % (i + 2) self.do_disassembly_test(func(i), expected) # Test some larger ranges too for i in xrange(300, 5000, 10): expected = _BIG_LINENO_FORMAT % (i + 2) self.do_disassembly_test(func(i), expected) def test_main(): run_unittest(DisTests) if __name__ == "__main__": test_main()
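# Appended sketch (not one of the original tests): dis.dis also accepts a raw
# code object; this mirrors how do_disassembly_test captures the output.
def _demo_disassembly(source="x = 1"):
    s = StringIO.StringIO()
    save_stdout = sys.stdout
    sys.stdout = s
    try:
        dis.dis(compile(source, "<demo>", "exec"))
    finally:
        sys.stdout = save_stdout
    return s.getvalue()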
hilaskis/UAV_MissionPlanner
refs/heads/master
Lib/encodings/iso8859_11.py
593
""" Python Character Mapping Codec iso8859_11 generated from 'MAPPINGS/ISO8859/8859-11.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-11', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' 
# 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' # 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\x80' # 0x80 -> <control> u'\x81' # 0x81 -> <control> u'\x82' # 0x82 -> <control> u'\x83' # 0x83 -> <control> u'\x84' # 0x84 -> <control> u'\x85' # 0x85 -> <control> u'\x86' # 0x86 -> <control> u'\x87' # 0x87 -> <control> u'\x88' # 0x88 -> <control> u'\x89' # 0x89 -> <control> u'\x8a' # 0x8A -> <control> u'\x8b' # 0x8B -> <control> u'\x8c' # 0x8C -> <control> u'\x8d' # 0x8D -> <control> u'\x8e' # 0x8E -> <control> u'\x8f' # 0x8F -> <control> u'\x90' # 0x90 -> <control> u'\x91' # 0x91 -> <control> u'\x92' # 0x92 -> <control> u'\x93' # 0x93 -> <control> u'\x94' # 0x94 -> <control> u'\x95' # 0x95 -> <control> u'\x96' # 0x96 -> <control> u'\x97' # 0x97 -> <control> u'\x98' # 0x98 -> <control> u'\x99' # 0x99 -> <control> u'\x9a' # 0x9A -> <control> u'\x9b' # 0x9B -> <control> u'\x9c' # 0x9C -> <control> u'\x9d' # 0x9D -> <control> u'\x9e' # 0x9E -> <control> u'\x9f' # 0x9F -> <control> u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u0e01' # 0xA1 -> THAI CHARACTER KO KAI u'\u0e02' # 0xA2 -> THAI CHARACTER KHO KHAI u'\u0e03' # 0xA3 -> THAI CHARACTER KHO KHUAT u'\u0e04' # 0xA4 -> THAI CHARACTER KHO KHWAI u'\u0e05' # 0xA5 -> THAI CHARACTER KHO KHON u'\u0e06' # 0xA6 -> THAI CHARACTER KHO RAKHANG u'\u0e07' # 0xA7 -> THAI CHARACTER NGO NGU 
u'\u0e08' # 0xA8 -> THAI CHARACTER CHO CHAN u'\u0e09' # 0xA9 -> THAI CHARACTER CHO CHING u'\u0e0a' # 0xAA -> THAI CHARACTER CHO CHANG u'\u0e0b' # 0xAB -> THAI CHARACTER SO SO u'\u0e0c' # 0xAC -> THAI CHARACTER CHO CHOE u'\u0e0d' # 0xAD -> THAI CHARACTER YO YING u'\u0e0e' # 0xAE -> THAI CHARACTER DO CHADA u'\u0e0f' # 0xAF -> THAI CHARACTER TO PATAK u'\u0e10' # 0xB0 -> THAI CHARACTER THO THAN u'\u0e11' # 0xB1 -> THAI CHARACTER THO NANGMONTHO u'\u0e12' # 0xB2 -> THAI CHARACTER THO PHUTHAO u'\u0e13' # 0xB3 -> THAI CHARACTER NO NEN u'\u0e14' # 0xB4 -> THAI CHARACTER DO DEK u'\u0e15' # 0xB5 -> THAI CHARACTER TO TAO u'\u0e16' # 0xB6 -> THAI CHARACTER THO THUNG u'\u0e17' # 0xB7 -> THAI CHARACTER THO THAHAN u'\u0e18' # 0xB8 -> THAI CHARACTER THO THONG u'\u0e19' # 0xB9 -> THAI CHARACTER NO NU u'\u0e1a' # 0xBA -> THAI CHARACTER BO BAIMAI u'\u0e1b' # 0xBB -> THAI CHARACTER PO PLA u'\u0e1c' # 0xBC -> THAI CHARACTER PHO PHUNG u'\u0e1d' # 0xBD -> THAI CHARACTER FO FA u'\u0e1e' # 0xBE -> THAI CHARACTER PHO PHAN u'\u0e1f' # 0xBF -> THAI CHARACTER FO FAN u'\u0e20' # 0xC0 -> THAI CHARACTER PHO SAMPHAO u'\u0e21' # 0xC1 -> THAI CHARACTER MO MA u'\u0e22' # 0xC2 -> THAI CHARACTER YO YAK u'\u0e23' # 0xC3 -> THAI CHARACTER RO RUA u'\u0e24' # 0xC4 -> THAI CHARACTER RU u'\u0e25' # 0xC5 -> THAI CHARACTER LO LING u'\u0e26' # 0xC6 -> THAI CHARACTER LU u'\u0e27' # 0xC7 -> THAI CHARACTER WO WAEN u'\u0e28' # 0xC8 -> THAI CHARACTER SO SALA u'\u0e29' # 0xC9 -> THAI CHARACTER SO RUSI u'\u0e2a' # 0xCA -> THAI CHARACTER SO SUA u'\u0e2b' # 0xCB -> THAI CHARACTER HO HIP u'\u0e2c' # 0xCC -> THAI CHARACTER LO CHULA u'\u0e2d' # 0xCD -> THAI CHARACTER O ANG u'\u0e2e' # 0xCE -> THAI CHARACTER HO NOKHUK u'\u0e2f' # 0xCF -> THAI CHARACTER PAIYANNOI u'\u0e30' # 0xD0 -> THAI CHARACTER SARA A u'\u0e31' # 0xD1 -> THAI CHARACTER MAI HAN-AKAT u'\u0e32' # 0xD2 -> THAI CHARACTER SARA AA u'\u0e33' # 0xD3 -> THAI CHARACTER SARA AM u'\u0e34' # 0xD4 -> THAI CHARACTER SARA I u'\u0e35' # 0xD5 -> THAI CHARACTER SARA II u'\u0e36' # 0xD6 -> THAI CHARACTER SARA UE u'\u0e37' # 0xD7 -> THAI CHARACTER SARA UEE u'\u0e38' # 0xD8 -> THAI CHARACTER SARA U u'\u0e39' # 0xD9 -> THAI CHARACTER SARA UU u'\u0e3a' # 0xDA -> THAI CHARACTER PHINTHU u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' u'\u0e3f' # 0xDF -> THAI CURRENCY SYMBOL BAHT u'\u0e40' # 0xE0 -> THAI CHARACTER SARA E u'\u0e41' # 0xE1 -> THAI CHARACTER SARA AE u'\u0e42' # 0xE2 -> THAI CHARACTER SARA O u'\u0e43' # 0xE3 -> THAI CHARACTER SARA AI MAIMUAN u'\u0e44' # 0xE4 -> THAI CHARACTER SARA AI MAIMALAI u'\u0e45' # 0xE5 -> THAI CHARACTER LAKKHANGYAO u'\u0e46' # 0xE6 -> THAI CHARACTER MAIYAMOK u'\u0e47' # 0xE7 -> THAI CHARACTER MAITAIKHU u'\u0e48' # 0xE8 -> THAI CHARACTER MAI EK u'\u0e49' # 0xE9 -> THAI CHARACTER MAI THO u'\u0e4a' # 0xEA -> THAI CHARACTER MAI TRI u'\u0e4b' # 0xEB -> THAI CHARACTER MAI CHATTAWA u'\u0e4c' # 0xEC -> THAI CHARACTER THANTHAKHAT u'\u0e4d' # 0xED -> THAI CHARACTER NIKHAHIT u'\u0e4e' # 0xEE -> THAI CHARACTER YAMAKKAN u'\u0e4f' # 0xEF -> THAI CHARACTER FONGMAN u'\u0e50' # 0xF0 -> THAI DIGIT ZERO u'\u0e51' # 0xF1 -> THAI DIGIT ONE u'\u0e52' # 0xF2 -> THAI DIGIT TWO u'\u0e53' # 0xF3 -> THAI DIGIT THREE u'\u0e54' # 0xF4 -> THAI DIGIT FOUR u'\u0e55' # 0xF5 -> THAI DIGIT FIVE u'\u0e56' # 0xF6 -> THAI DIGIT SIX u'\u0e57' # 0xF7 -> THAI DIGIT SEVEN u'\u0e58' # 0xF8 -> THAI DIGIT EIGHT u'\u0e59' # 0xF9 -> THAI DIGIT NINE u'\u0e5a' # 0xFA -> THAI CHARACTER ANGKHANKHU u'\u0e5b' # 0xFB -> THAI CHARACTER KHOMUT u'\ufffe' u'\ufffe' u'\ufffe' u'\ufffe' ) ### Encoding table 
encoding_table=codecs.charmap_build(decoding_table)
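# Appended usage sketch (not part of the original module): the codec classes
# can be exercised directly, without registering them with the encodings
# package machinery.
if __name__ == '__main__':
    encoded, _ = Codec().encode(u'\u0e01\u0e02')  # THAI KO KAI, KHO KHAI
    assert encoded == '\xa1\xa2'
    decoded, _ = Codec().decode('\xa1\xa2')
    assert decoded == u'\u0e01\u0e02'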
michaelgallacher/intellij-community
refs/heads/master
python/testData/resolve/FStringLocalVariable.py
34
def f(): foo = 42 f"{foo}" <ref>
ShashaQin/erpnext
refs/heads/develop
erpnext/patches/v6_6/remove_fiscal_year_from_leave_allocation.py
71
from __future__ import unicode_literals import frappe def execute(): frappe.reload_doctype("Leave Allocation") if frappe.db.has_column("Leave Allocation", "fiscal_year"): for leave_allocation in frappe.db.sql("select name, fiscal_year from `tabLeave Allocation`", as_dict=True): dates = frappe.db.get_value("Fiscal Year", leave_allocation["fiscal_year"], ["year_start_date", "year_end_date"]) if dates: year_start_date, year_end_date = dates frappe.db.sql("""update `tabLeave Allocation` set from_date=%s, to_date=%s where name=%s""", (year_start_date, year_end_date, leave_allocation["name"]))
vheon/JediHTTP
refs/heads/master
jedihttp/settings.py
1
# Copyright 2017 Cedraro Andrea <[email protected]> # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import jedi import sys # For efficiency, we store the default values of the global Jedi settings. See # https://jedi.readthedocs.io/en/latest/docs/settings.html auto_import_modules = jedi.settings.auto_import_modules # The socket module uses setattr for several methods (connect, listen, etc.) on # Python 2. if sys.version_info < (3, 0): auto_import_modules.append('socket') default_settings = { 'case_insensitive_completion': jedi.settings.case_insensitive_completion, 'add_bracket_after_function': jedi.settings.add_bracket_after_function, 'no_completion_duplicates': jedi.settings.no_completion_duplicates, 'cache_directory': jedi.settings.cache_directory, 'use_filesystem_cache': jedi.settings.use_filesystem_cache, 'fast_parser': jedi.settings.fast_parser, 'dynamic_array_additions': jedi.settings.dynamic_array_additions, 'dynamic_params': jedi.settings.dynamic_params, 'dynamic_params_for_other_modules': jedi.settings.dynamic_params_for_other_modules, 'additional_dynamic_modules': jedi.settings.additional_dynamic_modules, 'auto_import_modules': auto_import_modules }
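# Appended sketch (not part of the original module): the snapshot above can be
# pushed back onto the live jedi.settings module, for example to restore the
# defaults after a request temporarily overrode them.
def apply_settings(settings=default_settings):
    for name, value in settings.items():
        setattr(jedi.settings, name, value)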
balloob/home-assistant
refs/heads/dev
homeassistant/components/wunderground/__init__.py
36
"""The wunderground component."""
omnirom/android_external_chromium-org
refs/heads/android-5.1
tools/json_schema_compiler/memoize.py
128
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

def memoize(fn):
  '''Decorates |fn| to memoize.
  '''
  memory = {}
  def impl(*args, **optargs):
    full_args = args + tuple(optargs.iteritems())
    if full_args not in memory:
      memory[full_args] = fn(*args, **optargs)
    return memory[full_args]
  return impl
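A brief usage sketch for memoize above; fib is a hypothetical example, and it is Python 2 only because the decorator relies on dict.iteritems():

# Hypothetical usage of the memoize decorator (Python 2).
@memoize
def fib(n):
  return n if n < 2 else fib(n - 1) + fib(n - 2)

print fib(30)  # each distinct argument tuple is computed only once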
devasia1000/mitmproxy
refs/heads/master
examples/stream.py
38
def responseheaders(context, flow):
    """
    Enables streaming for all responses.
    """
    flow.response.stream = True
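A hedged variant sketch: if you wanted to stream only large responses rather than all of them, something like the following could work; the 1 MB threshold and the dict-style header access are assumptions, and the exact headers API differs between mitmproxy versions:

def responseheaders(context, flow):
    """
    Hypothetical variant: stream only responses that declare a large body.
    """
    size = flow.response.headers.get("Content-Length", "0")  # API varies by version
    if int(size) > 1024 * 1024:  # assumed 1 MB cutoff
        flow.response.stream = True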
awickert/GRASSplot
refs/heads/master
Basemap/grassplot.py
1
# Plotting interface to GRASS GIS, based on Basemap
# Written by Andy Wickert, mostly April (probably) and May 2013
# Still very much a work in progress - ESPECIALLY GETTING VECTOR LINES TO WORK
# WITHOUT SUDDENLY JUMPING TO A NON-SEQUENTIAL NODE! (help appreciated!)
# (maybe an easier way via improved vector handling in pygrass?)<-- note to self
# LICENSE: GNU GPL v3

"""
grassplot.py uses Matplotlib's Basemap toolkit (by Jeff Whitaker at NOAA)
to create good-looking maps from GRASS GIS

Copyright (C) 2011-2013, Andrew D. Wickert

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

# Starting out with a static projection, but this can be defined by functions in the future

from mpl_toolkits.basemap import Basemap, cm
import numpy as np
import matplotlib.pyplot as plt
from grass import script as grass
from grass.script import array as garray
from matplotlib.colors import Normalize, LinearSegmentedColormap
import re
# The two imports below are needed by colorbar_bipolar at the bottom of this
# file; the original module used ma and cbook without importing them.
from numpy import ma
from matplotlib import cbook


def read_vector_lines(vect, area=True):
    # First the data
    # Parse the vertices from v.out.ascii
    all_lines_output = []
    vertices_raw = grass.read_command('v.out.ascii', input=vect, output='-', type='line,boundary', format='wkt')
    vector_lines = vertices_raw.split('\n')
    for vector_line in vector_lines:
        if vector_line != '':  # Last line should be empty, this will remove it safely
            vertices_output = []
            # strips parentheses and text, and then separates out coordinate pairs
            vertex_list = re.sub("[A-Z]|\(|\)", "", vector_line).split(', ')
            # Turn coordinate pairs into a numpy array and add to the output list
            all_lines_output.append( np.array([vertex.split() for vertex in vertex_list]).astype(float) )
    # And then the other attributes to go along with them
    """
    if area == True:
        attributes_raw = grass.read_command('v.out.ascii', input=vect, output='-', type='point,centroid', format='point')
        attributes_list = attributes_raw.split('\n')[:-1]
        centroids = np.array( [centroid.split('|') for centroid in attributes_list] )
        attributes = centroids[:,2:]
        categories = attributes[:,0].astype(int)
    """
    return all_lines_output


class grassplot(object):

    def __init__(self, basemap_projection):
        self.m = basemap_projection
        # self.grass_projection = grass.parse_command('g.proj', flags='j').get('+proj')
        # grass.run_command('g.gisenv', set="G_VERBOSE=-1") # Trying to make it quiet!

    def rastprep(self, raster_grid_name, resolution=90, figsize=(6,8)):#, colormap=cm.GMT_haxby, alpha=1):
        # handle the flipud and resolution (per above function)
        # also use any set transparency
        # Send input to class-wide variables and set the resolution
        self.raster_grid_name = raster_grid_name
        self.resolution = resolution
        self.figsize = figsize
        self.set_resolution()
        # Then get the grid from GRASS
        self.rast_grid = garray.array()
        self.rast_grid.read(raster_grid_name)
        self.rast_grid = np.flipud(self.rast_grid)
        self.buffer_rast_grid() # put nan's around it and extend n, s, w, e, lats, lons, nlats, nlons, to prevent streaking
        # And transform it into the coordinate system
        rast_grid_transformed = self.m.transform_scalar(self.rast_grid, self.lons, self.lats,self.nlons,self.nlats)
        return rast_grid_transformed
        # Plot
        #fig = plt.figure(figsize=figsize)
        #self.m.imshow(rast_grid_transformed, cmap=colormap, alpha=alpha)

    def buffer_rast_grid(self):
        if self.e + np.diff(self.lons)[-1] < 180:
            self.e += np.diff(self.lons)[-1]
            self.lons = np.concatenate(( self.lons, [self.lons[-1] + np.diff(self.lons)[-1]] ))
            self.rast_grid = np.hstack((self.rast_grid, np.nan*np.zeros((self.rast_grid.shape[0],1)) ))
        if self.w - np.diff(self.lons)[0] > - 180:
            self.w -= np.diff(self.lons)[0]
            self.lons = np.concatenate(( [self.lons[0] - np.diff(self.lons)[0]], self.lons ))
            self.rast_grid = np.hstack((np.nan*np.zeros((self.rast_grid.shape[0],1)), self.rast_grid ))
        if self.s + np.diff(self.lats)[0] > -90:
            self.s -= np.diff(self.lats)[0]
            self.lats = np.concatenate(( [self.lats[0] - np.diff(self.lats)[0]], self.lats ))
            self.rast_grid = np.vstack((self.rast_grid, np.nan*np.zeros((1,self.rast_grid.shape[1])) ))
        if self.n + np.diff(self.lats)[-1] < 90:
            self.n += np.diff(self.lats)[-1]
            self.lats = np.concatenate(( self.lats, [self.lats[-1] + np.diff(self.lats)[-1]] ))
            self.rast_grid = np.vstack((np.nan*np.zeros((1, self.rast_grid.shape[1])), self.rast_grid))

    def set_resolution(self):
        """
        resolution is in dpi, so is a function of figsize
        """
        # Get maximum resolution
        raster_region = grass.region()
        rast_nlats = float(raster_region['rows'])
        rast_nlons = float(raster_region['cols'])
        self.nlats = int(np.min((rast_nlats, self.figsize[0]*self.resolution)))
        self.nlons = int(np.min((rast_nlons, self.figsize[1]*self.resolution)))
        grass.run_command('g.region', rows=self.nlats, cols=self.nlons)
        self.s = grass.region()['s']
        self.n = grass.region()['n']
        self.w = grass.region()['w']
        self.e = grass.region()['e']
        # And also set the lats and lons for the Basemap grid
        # use np.mean to get the cell centers
        self.lats = self.midpoints( np.linspace(self.s, self.n, self.nlats+1) )
        self.lons = self.midpoints( np.linspace(self.w, self.e, self.nlons+1) )

    def midpoints(self, invar):
        return (invar[1:] + invar[:-1]) / 2

    def parse_region(self, grassoutput):
        prepped = re.sub(': +','\t',grassoutput)
        output = prepped.split('\n')
        for i in range(len(output)):
            if output[i] == '':
                output.pop(i)
            else:
                output[i] = output[i].split('\t')
        return dict(output)

    """
    def project(self, projection = self.grass_projection):
        # Just pass m to this function: created by script calling this class
        self.m = Basemap(projection='stere', lon_0=-98., lat_0=90., lat_ts=90.,\
            llcrnrlat=23,urcrnrlat=55,\
            llcrnrlon=-117,urcrnrlon=-45,\
            rsphere=6371200.,resolution='l',area_thresh=10000)
        nx = grass.region()['cols']
        ny = grass.region()['rows']
        # transform to nx x ny regularly spaced 5km native projection grid
        nx = int((m.xmax-m.xmin)/5000.)+1; ny = int((m.ymax-m.ymin)/5000.)+1
        topodat = m.transform_scalar(topoin,lons,lats,nx,ny)
        # plot image over map with imshow.
        im = m.imshow(topodat,cm.GMT_haxby)
    """

    def plot_figure(self, figure_width, figure_height):
        self.fig = plt.figure( figsize=(figure_width, figure_height) )

    def make_GRASS_etopo2_colormap(self):
        """
        GRASS GIS allows for color maps to be assigned to absolute values.
        Matplotlib doesn't seem to. So this will import and interpolate the
        etopo2 color map.
        """
        etopo2 = np.genfromtxt('GRASScolors/etopo2', skip_footer=1)
        z = etopo2[:,0].astype(int)
        r = etopo2[:,1].astype(float)
        g = etopo2[:,2].astype(float)
        b = etopo2[:,3].astype(float)
        from scipy.interpolate import interp1d
        ri = interp1d(z, r)
        gi = interp1d(z, g)
        bi = interp1d(z, b)
        low_elev = np.min(z)
        high_elev = np.max(z)
        znew = np.linspace(low_elev, high_elev, 512)
        znew = np.concatenate(( znew[znew<-1], [-1, 0], znew[znew>0])) # make sure key SL transition is intact!
        rnew = ri(znew)
        gnew = gi(znew)
        bnew = bi(znew)
        clscaled = np.linspace(0, 1, len(znew))
        cdr = []
        cdg = []
        cdb = []
        for i in range(len(znew)):
            cdr.append([clscaled[i], rnew[i]/255., rnew[i]/255.])
            cdg.append([clscaled[i], gnew[i]/255., gnew[i]/255.])
            cdb.append([clscaled[i], bnew[i]/255., bnew[i]/255.])
        cdict = {'red': cdr, 'green': cdg, 'blue': cdb}
        cm_etopo2 = LinearSegmentedColormap('etopo2',cdict,4096)
        return low_elev, high_elev, cm_etopo2

    def contour(self, filled=False):
        # Build grids with the x and y data and then make contour raster map
        pass

    def get_nice_colormap(self):
        pass # Per Ethan's suggestion

    def text_label(self):
        pass

    def export_to_svg(self):
        pass # or just use savefig with svg format specified?


# Colorbar is bipolar:
# http://stackoverflow.com/questions/7404116/defining-the-midpoint-of-a-colormap-in-matplotlib
class colorbar_bipolar(Normalize):

    def __init__(self,linthresh,vmin=None,vmax=None,clip=False):
        Normalize.__init__(self,vmin,vmax,clip)
        self.linthresh=linthresh
        self.vmin, self.vmax = vmin, vmax

    def __call__(self, value, clip=None):
        if clip is None:
            clip = self.clip
        result, is_scalar = self.process_value(value)
        self.autoscale_None(result)
        vmin, vmax = self.vmin, self.vmax
        if vmin > 0:
            raise ValueError("minvalue must be less than 0")
        if vmax < 0:
            raise ValueError("maxvalue must be more than 0")
        elif vmin == vmax:
            result.fill(0) # Or should it be all masked? Or 0.5?
        else:
            vmin = float(vmin)
            vmax = float(vmax)
            if clip:
                mask = ma.getmask(result)
                result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
                                  mask=mask)
            # ma division is very slow; we can take a shortcut
            resdat = result.data
            resdat[resdat>0] /= vmax
            resdat[resdat<0] /= -vmin
            resdat=resdat/2.+0.5
            result = np.ma.array(resdat, mask=result.mask, copy=False)
        if is_scalar:
            result = result[0]
        return result

    def inverse(self, value):
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        if cbook.iterable(value):
            val = ma.asarray(value)
            val=2*(val-0.5)
            val[val>0]*=vmax
            val[val<0]*=-vmin
            return val
        else:
            # Fixed: the original referenced an undefined name `val` here.
            if value < 0.5:
                return 2*value*(-vmin)
            else:
                return value*vmax

    def makeTickLabels(self, nlabels):
        #proportion_min = np.abs(self.vmin - self.linthresh) / ( np.abs(self.vmin - self.linthresh) + np.abs(self.vmax - self.linthresh) )
        #nlabels_min = np.round((nlabels - 1) * proportion_min) # save the last label for the midpoint
        #nlabels_max = nlabels - 1 - nlabels_min
        # Will always add a point at the middle
        ticks = np.concatenate(( np.linspace(0, 0.5, nlabels/2+1), np.linspace(.5, 1, nlabels/2+1)[1:]))
        tick_labels = np.concatenate(( np.linspace(self.vmin, self.linthresh, nlabels/2 + 1), np.linspace(self.linthresh, self.vmax, nlabels/2 + 1)[1:] ))
        tick_labels = list(tick_labels)
        for i in range(len(tick_labels)):
            tick_labels[i] = '%.2f' %tick_labels[i]
        return ticks, tick_labels
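A short, made-up usage sketch for the colorbar_bipolar normalizer defined above; the data range and thresholds are invented for illustration:

import numpy as np
import matplotlib.pyplot as plt

# Hypothetical elevation-like data spanning zero; linthresh/vmin/vmax are made up.
data = np.random.uniform(-500., 2000., (50, 50))
norm = colorbar_bipolar(linthresh=0., vmin=-500., vmax=2000.)
plt.imshow(data, norm=norm, cmap='terrain')
plt.colorbar()
plt.show()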
MrLoick/python-for-android
refs/heads/master
python3-alpha/python3-src/Lib/distutils/tests/test_config.py
53
"""Tests for distutils.pypirc.pypirc.""" import sys import os import unittest import tempfile from distutils.core import PyPIRCCommand from distutils.core import Distribution from distutils.log import set_threshold from distutils.log import WARN from distutils.tests import support from test.support import run_unittest PYPIRC = """\ [distutils] index-servers = server1 server2 [server1] username:me password:secret [server2] username:meagain password: secret realm:acme repository:http://another.pypi/ """ PYPIRC_OLD = """\ [server-login] username:tarek password:secret """ WANTED = """\ [distutils] index-servers = pypi [pypi] username:tarek password:xxx """ class PyPIRCCommandTestCase(support.TempdirManager, support.LoggingSilencer, support.EnvironGuard, unittest.TestCase): def setUp(self): """Patches the environment.""" super(PyPIRCCommandTestCase, self).setUp() self.tmp_dir = self.mkdtemp() os.environ['HOME'] = self.tmp_dir self.rc = os.path.join(self.tmp_dir, '.pypirc') self.dist = Distribution() class command(PyPIRCCommand): def __init__(self, dist): PyPIRCCommand.__init__(self, dist) def initialize_options(self): pass finalize_options = initialize_options self._cmd = command self.old_threshold = set_threshold(WARN) def tearDown(self): """Removes the patch.""" set_threshold(self.old_threshold) super(PyPIRCCommandTestCase, self).tearDown() def test_server_registration(self): # This test makes sure PyPIRCCommand knows how to: # 1. handle several sections in .pypirc # 2. handle the old format # new format self.write_file(self.rc, PYPIRC) cmd = self._cmd(self.dist) config = cmd._read_pypirc() config = list(sorted(config.items())) waited = [('password', 'secret'), ('realm', 'pypi'), ('repository', 'http://pypi.python.org/pypi'), ('server', 'server1'), ('username', 'me')] self.assertEqual(config, waited) # old format self.write_file(self.rc, PYPIRC_OLD) config = cmd._read_pypirc() config = list(sorted(config.items())) waited = [('password', 'secret'), ('realm', 'pypi'), ('repository', 'http://pypi.python.org/pypi'), ('server', 'server-login'), ('username', 'tarek')] self.assertEqual(config, waited) def test_server_empty_registration(self): cmd = self._cmd(self.dist) rc = cmd._get_rc_file() self.assertTrue(not os.path.exists(rc)) cmd._store_pypirc('tarek', 'xxx') self.assertTrue(os.path.exists(rc)) f = open(rc) try: content = f.read() self.assertEqual(content, WANTED) finally: f.close() def test_suite(): return unittest.makeSuite(PyPIRCCommandTestCase) if __name__ == "__main__": run_unittest(test_suite())
evandrix/zxing
refs/heads/master
cpp/scons/scons-local-2.0.0.final.0/SCons/Tool/rpcgen.py
34
"""SCons.Tool.rpcgen Tool-specific initialization for RPCGEN tools. Three normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/rpcgen.py 5023 2010/06/14 22:05:46 scons" from SCons.Builder import Builder import SCons.Util cmd = "cd ${SOURCE.dir} && $RPCGEN -%s $RPCGENFLAGS %s -o ${TARGET.abspath} ${SOURCE.file}" rpcgen_client = cmd % ('l', '$RPCGENCLIENTFLAGS') rpcgen_header = cmd % ('h', '$RPCGENHEADERFLAGS') rpcgen_service = cmd % ('m', '$RPCGENSERVICEFLAGS') rpcgen_xdr = cmd % ('c', '$RPCGENXDRFLAGS') def generate(env): "Add RPCGEN Builders and construction variables for an Environment." client = Builder(action=rpcgen_client, suffix='_clnt.c', src_suffix='.x') header = Builder(action=rpcgen_header, suffix='.h', src_suffix='.x') service = Builder(action=rpcgen_service, suffix='_svc.c', src_suffix='.x') xdr = Builder(action=rpcgen_xdr, suffix='_xdr.c', src_suffix='.x') env.Append(BUILDERS={'RPCGenClient' : client, 'RPCGenHeader' : header, 'RPCGenService' : service, 'RPCGenXDR' : xdr}) env['RPCGEN'] = 'rpcgen' env['RPCGENFLAGS'] = SCons.Util.CLVar('') env['RPCGENCLIENTFLAGS'] = SCons.Util.CLVar('') env['RPCGENHEADERFLAGS'] = SCons.Util.CLVar('') env['RPCGENSERVICEFLAGS'] = SCons.Util.CLVar('') env['RPCGENXDRFLAGS'] = SCons.Util.CLVar('') def exists(env): return env.Detect('rpcgen') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4:
justanr/flask-allows
refs/heads/master
test/test_views.py
1
import pytest
from flask.views import MethodView, View
from werkzeug.exceptions import Forbidden

from flask_allows import Allows, requires


def test_requires_allows(app, member, ismember):
    Allows(app=app, identity_loader=lambda: member)

    @requires(ismember)
    def stub():
        return True

    with app.app_context():
        assert stub()


def test_requires_fails(app, guest, ismember):
    Allows(app=app, identity_loader=lambda: guest)

    @requires(ismember)
    def stub():
        pass

    with pytest.raises(Forbidden):
        with app.app_context():
            stub()


def test_requires_works_as_cbv_decorator(app, ismember, guest):
    class IsMemberView(View):
        decorators = [requires(ismember)]

    Allows(app=app, identity_loader=lambda: guest)

    with pytest.raises(Forbidden):
        with app.app_context():
            IsMemberView.as_view("memberonly")()


def test_requires_works_as_method_decorator(app, ismember, guest):
    class MembersCanPost(MethodView):
        @requires(ismember)
        def post(self):
            return "hello"

    Allows(app=app, identity_loader=lambda: guest)
    context = app.test_request_context("/", method="POST")

    with pytest.raises(Forbidden), app.app_context(), context:
        MembersCanPost.as_view("memberonly")()


def test_requires_on_fail_local_override(app, ismember, guest):
    @requires(ismember, on_fail="I've failed")
    def stub():
        pass

    Allows(app=app, identity_loader=lambda: guest)

    with app.app_context():
        assert stub() == "I've failed"


def test_requires_defaults_to_allows_override(app, ismember, guest):
    @requires(ismember)
    def stub():
        pass

    Allows(app=app, on_fail="I've failed", identity_loader=lambda: guest)

    with app.app_context():
        assert stub() == "I've failed"


def test_requires_on_fail_returning_none_raises(app, ismember, guest):
    @requires(ismember)
    def stub():
        pass

    Allows(app=app, on_fail=lambda *a, **k: None, identity_loader=lambda: guest)

    with pytest.raises(Forbidden), app.app_context():
        stub()
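For reference, the ismember fixture used above is presumably a flask-allows requirement; a minimal sketch of one, assuming the two-argument (identity, request) requirement signature used by early flask-allows releases:

# Hypothetical requirement in the shape flask-allows expects: a callable
# receiving the current identity and the request, returning a bool.
def ismember(identity, request):
    return getattr(identity, 'role', None) == 'member'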
alu0100207385/dsi_3Django
refs/heads/master
django/contrib/gis/db/backends/postgis/creation.py
117
from django.conf import settings
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.utils.functional import cached_property


class PostGISCreation(DatabaseCreation):
    geom_index_type = 'GIST'
    geom_index_ops = 'GIST_GEOMETRY_OPS'
    geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'

    @cached_property
    def template_postgis(self):
        template_postgis = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
        cursor = self.connection.cursor()
        cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
        if cursor.fetchone():
            return template_postgis
        return None

    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField

        output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)

        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table

            if f.geography or self.connection.ops.geometry:
                # Geography and Geometry (PostGIS 2.0+) columns are
                # created normally.
                pass
            else:
                # Geometry columns are created by `AddGeometryColumn`
                # stored procedure.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('AddGeometryColumn') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ', ' +
                              style.SQL_FIELD(str(f.srid)) + ', ' +
                              style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                              style.SQL_KEYWORD(str(f.dim)) + ');')

                if not f.null:
                    # Add a NOT NULL constraint to the field
                    output.append(style.SQL_KEYWORD('ALTER TABLE ') +
                                  style.SQL_TABLE(qn(db_table)) +
                                  style.SQL_KEYWORD(' ALTER ') +
                                  style.SQL_FIELD(qn(f.column)) +
                                  style.SQL_KEYWORD(' SET NOT NULL') + ';')

            if f.spatial_index:
                # Spatial indexes created the same way for both Geometry and
                # Geography columns.
                # PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
                # we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
                # which are fast on multidimensional cases, or just plain
                # gist index for the 2d case.
                if f.geography:
                    index_ops = ''
                elif self.connection.ops.geometry:
                    if f.dim > 2:
                        index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
                    else:
                        index_ops = ''
                else:
                    index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) +
                              style.SQL_KEYWORD(' USING ') +
                              style.SQL_COLTYPE(self.geom_index_type) +
                              ' ( ' +
                              style.SQL_FIELD(qn(f.column)) + index_ops +
                              ' );')
        return output

    def sql_table_creation_suffix(self):
        if self.template_postgis is not None:
            return ' TEMPLATE %s' % (
                self.connection.ops.quote_name(self.template_postgis),)
        return ''

    def _create_test_db(self, verbosity, autoclobber):
        test_database_name = super(PostGISCreation, self)._create_test_db(verbosity, autoclobber)
        if self.template_postgis is None:
            # Connect to the test database in order to create the postgis extension
            self.connection.close()
            self.connection.settings_dict["NAME"] = test_database_name
            cursor = self.connection.cursor()
            cursor.execute("CREATE EXTENSION postgis")
            cursor.connection.commit()
        return test_database_name
UManPychron/pychron
refs/heads/develop
pychron/experiment/action_editor.py
2
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================

import os

# ============= standard library imports ========================
import yaml
# ============= enthought library imports =======================
from pyface.file_dialog import FileDialog
from traits.api import HasTraits, List, Enum, Float, Int, Button, Any, Property, Str
from traitsui.api import View, Item, Controller, UItem, HGroup, VGroup
from traitsui.editors import ListEditor

# ============= local library imports  ==========================
from pychron.core.helpers.filetools import add_extension
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.yaml import yload
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.paths import paths


class ActionItem(HasTraits):
    attr = Enum('age', 'kca', 'radiogenic_yield', 'Ar40', 'Ar39', 'Ar38', 'Ar37', 'Ar36', 'Ar41',
                '37/39')
    comp = Enum('less than', 'greater than', 'between')
    value = Float
    value1 = Float
    start = Int(10)
    frequency = Int(5)
    label = Property(depends_on='attr,comp, value+, start, frequency')

    def __init__(self, saved_state=None, *args, **kw):
        super(ActionItem, self).__init__(*args, **kw)
        if saved_state:
            # saved_state.pop('comp')
            self.trait_set(**saved_state)

    def assemble(self):
        return dict(attr=self.attr,
                    check=self.label,
                    value=self.value,
                    value1=self.value,
                    abbreviated_count_ratio=1.0,
                    frequency=self.frequency,
                    start=self.start)

    def traits_view(self):
        v = View(VGroup(HGroup(UItem('attr'),
                               UItem('comp'),
                               UItem('value'),
                               UItem('value1', visible_when='comp=="between"')),
                        HGroup(Item('start'), Item('frequency')),
                        show_border=False))
        return v

    def _get_label(self):
        if self.comp == 'less than':
            c = '{}<{}'.format(self.attr, self.value)
        elif self.comp == 'greater than':
            c = '{}>{}'.format(self.attr, self.value)
        else:
            v = min(self.value, self.value1)
            v1 = max(self.value, self.value1)
            c = '{}<{}<{}'.format(v, self.attr, v1)
        return c


class ActionModel(HasTraits):
    actions = List
    add_button = Button
    remove_button = Button
    selected = Any
    path = Str

    def _add_button_fired(self):
        if self.selected:
            idx = self.actions.index(self.selected)
            obj = self.selected.clone_traits()
            self.actions.insert(idx, obj)
        else:
            obj = self.actions[-1].clone_traits()
            self.actions.append(obj)

    def _remove_button_fired(self):
        if len(self.actions) > 1:
            idx = -1
            if self.selected:
                idx = self.actions.index(self.selected)
            self.actions.pop(idx)

    def load_yaml(self, yd):
        self.actions = [ActionItem(saved_state=yi) for yi in yd]

    def dump_yaml(self):
        yd = [d.assemble() for d in self.actions]
        return yd

    def _actions_default(self):
        return [ActionItem()]


class ActionEditor(Controller):
    title = Str

    def init(self, info):
        # print 'fdas', self.title
        # if self.model.path:
        # if self.title:
        info.ui.title = self.title

    def closed(self, info, is_ok):
        if is_ok:
            self.dump()

    def dump(self):
        p = self._get_path()
        if p:
            self._dump(p)

    def load(self, p):
        if p:
            self.title = os.path.basename(p)
            self._load(p)

    def _load(self, p):
        if not self.model:
            self.model = ActionModel()

        self.model.path = p
        yd = yload(p)
        self.model.load_yaml(yd)

    def _dump(self, p):
        d = self.model.dump_yaml()
        with open(p, 'w') as wfile:
            yaml.dump(d, wfile, default_flow_style=False)
        self.model.path = p

    def _get_path(self):
        p = self.model.path
        if not p:
            p = '/Users/ross/Sandbox/actions.yafml'
            if not os.path.isfile(p):
                p = None
                dlg = FileDialog(action='save as', default_directory=paths.conditionals_dir)
                if dlg.open():
                    p = dlg.path.strip()
            if p:
                p = add_extension(p, '.yaml')
        return p

    def traits_view(self):
        v = okcancel_view(HGroup(icon_button_editor('add_button', 'add'),
                                 icon_button_editor('remove_button', 'delete')),
                          UItem('actions', style='custom',
                                editor=ListEditor(
                                    use_notebook=True,
                                    selected='selected',
                                    page_name='.label')))
        return v


if __name__ == '__main__':
    a = ActionEditor(model=ActionModel())
    a.configure_traits()
# ============= EOF =============================================
chauhanhardik/populo
refs/heads/master
cms/djangoapps/xblock_config/models.py
172
""" Models used by Studio XBlock infrastructure. Includes: StudioConfig: A ConfigurationModel for managing Studio. """ from django.db.models import TextField from config_models.models import ConfigurationModel class StudioConfig(ConfigurationModel): """ Configuration for XBlockAsides. """ disabled_blocks = TextField( default="about course_info static_tab", help_text="Space-separated list of XBlocks on which XBlockAsides should never render in studio", ) @classmethod def asides_enabled(cls, block_type): """ Return True if asides are enabled for this type of block in studio """ studio_config = cls.current() return studio_config.enabled and block_type not in studio_config.disabled_blocks.split()
rrmartins/eventex
refs/heads/master
south/south/utils.py
32
""" Generally helpful utility functions. """ def _ask_for_it_by_name(name): "Returns an object referenced by absolute path." bits = name.split(".") ## what if there is no absolute reference? if len(bits)>1: modulename = ".".join(bits[:-1]) else: modulename=bits[0] module = __import__(modulename, {}, {}, bits[-1]) if len(bits) == 1: return module else: return getattr(module, bits[-1]) def ask_for_it_by_name(name): "Returns an object referenced by absolute path. (Memoised outer wrapper)" if name not in ask_for_it_by_name.cache: ask_for_it_by_name.cache[name] = _ask_for_it_by_name(name) return ask_for_it_by_name.cache[name] ask_for_it_by_name.cache = {} def get_attribute(item, attribute): """ Like getattr, but recursive (i.e. you can ask for 'foo.bar.yay'.) """ value = item for part in attribute.split("."): value = getattr(value, part) return value def auto_through(field): "Returns if the M2M class passed in has an autogenerated through table or not." return ( # Django 1.0/1.1 (not field.rel.through) or # Django 1.2+ getattr(getattr(field.rel.through, "_meta", None), "auto_created", False) ) def auto_model(model): "Returns if the given model was automatically generated." return getattr(model._meta, "auto_created", False) def memoize(function): "Standard memoization decorator." name = function.__name__ _name = '_' + name def method(self): if not hasattr(self, _name): value = function(self) setattr(self, _name, value) return getattr(self, _name) def invalidate(): if hasattr(method, _name): delattr(method, _name) method.__name__ = function.__name__ method.__doc__ = function.__doc__ method._invalidate = invalidate return method
bt3gl/The-Anti-Social-Network
refs/heads/master
app/auth/views.py
1
""" In the MVC paradigm, views define what is presented to the user. Here we define the blueprint routes and view functions. Templates should be stored inside auth folder (avoid name collision) """ from flask import render_template, redirect, request, url_for, flash from flask.ext.login import login_user, logout_user, login_required, \ current_user from . import auth from .. import db from ..models import User from ..email import send_email from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\ PasswordResetRequestForm, PasswordResetForm, ChangeEmailForm # filter unconfirmed accounts in before_app_request handler, this # handler will intercept a request when: # 1) user is loggerd in # 2) the account for the user is not confirmed # 3 the requested endpoint is outside auth @auth.before_app_request def before_request(): if current_user.is_authenticated(): current_user.ping() if not current_user.confirmed \ and request.endpoint[:5] != 'auth.': return redirect(url_for('auth.unconfirmed')) @auth.route('/unconfirmed') def unconfirmed(): if current_user.is_anonymous() or current_user.confirmed: return redirect(url_for('main.index')) return render_template('auth/unconfirmed.html') @auth.route('/login', methods=['GET', 'POST']) def login(): form = LoginForm() if form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() if user is not None and user.verify_password(form.password.data): login_user(user, form.remember_me.data) return redirect(request.args.get('next') or url_for('main.index')) flash('Invalid username or password.') return render_template('auth/login.html', form=form) @auth.route('/logout') @login_required def logout(): logout_user() flash('You have been logged out.') return redirect(url_for('main.index')) @auth.route('/register', methods=['GET', 'POST']) def register(): form = RegistrationForm() if form.validate_on_submit(): user = User(email=form.email.data, username=form.username.data, password=form.password.data) db.session.add(user) db.session.commit() token = user.generate_confirmation_token() send_email(user.email, 'Confirm Your Account', 'auth/email/confirm', user=user, token=token) flash('A confirmation email has been sent to you by email.') return redirect(url_for('auth.login')) return render_template('auth/register.html', form=form) # The route is protected with the decorator from Flask-login, so # that when the users click on the link from the confirmation email # they are asked to log in before they reach this view function @auth.route('/confirm/<token>') @login_required def confirm(token): if current_user.confirmed: return redirect(url_for('main.index')) if current_user.confirm(token): flash('You have confirmed your account. 
Thanks!') else: flash('The confirmation link is invalid or has expired.') return redirect(url_for('main.index')) @auth.route('/confirm') @login_required def resend_confirmation(): token = current_user.generate_confirmation_token() send_email(current_user.email, 'Confirm Your Account', 'auth/email/confirm', user=current_user, token=token) flash('A new confirmation email has been sent to you by email.') return redirect(url_for('main.index')) @auth.route('/change-password', methods=['GET', 'POST']) @login_required def change_password(): form = ChangePasswordForm() if form.validate_on_submit(): if current_user.verify_password(form.old_password.data): current_user.password = form.password.data db.session.add(current_user) flash('Your password has been updated.') return redirect(url_for('main.index')) else: flash('Invalid password.') return render_template("auth/change_password.html", form=form) @auth.route('/reset', methods=['GET', 'POST']) def password_reset_request(): if not current_user.is_anonymous(): return redirect(url_for('main.index')) form = PasswordResetRequestForm() if form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() if user: token = user.generate_reset_token() send_email(user.email, 'Reset Your Password', 'auth/email/reset_password', user=user, token=token, next=request.args.get('next')) flash('An email with instructions to reset your password has been ' 'sent to you.') return redirect(url_for('auth.login')) return render_template('auth/reset_password.html', form=form) @auth.route('/reset/<token>', methods=['GET', 'POST']) def password_reset(token): if not current_user.is_anonymous(): return redirect(url_for('main.index')) form = PasswordResetForm() if form.validate_on_submit(): user = User.query.filter_by(email=form.email.data).first() if user is None: return redirect(url_for('main.index')) if user.reset_password(token, form.password.data): flash('Your password has been updated.') return redirect(url_for('auth.login')) else: return redirect(url_for('main.index')) return render_template('auth/reset_password.html', form=form) @auth.route('/change-email', methods=['GET', 'POST']) @login_required def change_email_request(): form = ChangeEmailForm() if form.validate_on_submit(): if current_user.verify_password(form.password.data): new_email = form.email.data token = current_user.generate_email_change_token(new_email) send_email(new_email, 'Confirm your email address', 'auth/email/change_email', user=current_user, token=token) flash('An email with instructions to confirm your new email ' 'address has been sent to you.') return redirect(url_for('main.index')) else: flash('Invalid email or password.') return render_template("auth/change_email.html", form=form) @auth.route('/change-email/<token>') @login_required def change_email(token): if current_user.change_email(token): flash('Your email address has been updated.') else: flash('Invalid request.') return redirect(url_for('main.index'))
mnahm5/django-estore
refs/heads/master
Lib/site-packages/boto/fps/exception.py
239
from boto.exception import BotoServerError


class ResponseErrorFactory(BotoServerError):

    def __new__(cls, *args, **kw):
        error = BotoServerError(*args, **kw)
        newclass = globals().get(error.error_code, ResponseError)
        obj = newclass.__new__(newclass, *args, **kw)
        obj.__dict__.update(error.__dict__)
        return obj


class ResponseError(BotoServerError):
    """Undefined response error.
    """
    retry = False

    def __repr__(self):
        return '{0}({1}, {2},\n\t{3})'.format(self.__class__.__name__,
                                              self.status, self.reason,
                                              self.error_message)

    def __str__(self):
        return 'FPS Response Error: {0.status} {0.__class__.__name__} {1}\n' \
               '{2}\n' \
               '{0.error_message}'.format(self,
                                          self.retry and '(Retriable)' or '',
                                          self.__doc__.strip())


class RetriableResponseError(ResponseError):
    retry = True


class AccessFailure(RetriableResponseError):
    """Account cannot be accessed.
    """

class AccountClosed(RetriableResponseError):
    """Account is not active.
    """

class AccountLimitsExceeded(RetriableResponseError):
    """The spending or receiving limit on the account is exceeded.
    """

class AmountOutOfRange(ResponseError):
    """The transaction amount is more than the allowed range.
    """

class AuthFailure(RetriableResponseError):
    """AWS was not able to validate the provided access credentials.
    """

class ConcurrentModification(RetriableResponseError):
    """A retriable error can happen when two processes try to modify the
       same data at the same time.
    """

class DuplicateRequest(ResponseError):
    """A different request associated with this caller reference already
       exists.
    """

class InactiveInstrument(ResponseError):
    """Payment instrument is inactive.
    """

class IncompatibleTokens(ResponseError):
    """The transaction could not be completed because the tokens have
       incompatible payment instructions.
    """

class InstrumentAccessDenied(ResponseError):
    """The external calling application is not the recipient for this
       postpaid or prepaid instrument.
    """

class InstrumentExpired(ResponseError):
    """The prepaid or the postpaid instrument has expired.
    """

class InsufficientBalance(RetriableResponseError):
    """The sender, caller, or recipient's account balance has
       insufficient funds to complete the transaction.
    """

class InternalError(RetriableResponseError):
    """A retriable error that happens due to some transient problem in
       the system.
    """

class InvalidAccountState(RetriableResponseError):
    """The account is either suspended or closed.
    """

class InvalidAccountState_Caller(RetriableResponseError):
    """The developer account cannot participate in the transaction.
    """

class InvalidAccountState_Recipient(RetriableResponseError):
    """Recipient account cannot participate in the transaction.
    """

class InvalidAccountState_Sender(RetriableResponseError):
    """Sender account cannot participate in the transaction.
    """

class InvalidCallerReference(ResponseError):
    """The Caller Reference does not have a token associated with it.
    """

class InvalidClientTokenId(ResponseError):
    """The AWS Access Key Id you provided does not exist in our records.
    """

class InvalidDateRange(ResponseError):
    """The end date specified is before the start date or the start date
       is in the future.
    """

class InvalidParams(ResponseError):
    """One or more parameters in the request is invalid.
    """

class InvalidPaymentInstrument(ResponseError):
    """The payment method used in the transaction is invalid.
    """

class InvalidPaymentMethod(ResponseError):
    """Specify correct payment method.
    """

class InvalidRecipientForCCTransaction(ResponseError):
    """This account cannot receive credit card payments.
    """

class InvalidSenderRoleForAccountType(ResponseError):
    """This token cannot be used for this operation.
    """

class InvalidTokenId(ResponseError):
    """You did not install the token that you are trying to cancel.
    """

class InvalidTokenId_Recipient(ResponseError):
    """The recipient token specified is either invalid or canceled.
    """

class InvalidTokenId_Sender(ResponseError):
    """The sender token specified is either invalid or canceled or the
       token is not active.
    """

class InvalidTokenType(ResponseError):
    """An invalid operation was performed on the token, for example,
       getting the token usage information on a single use token.
    """

class InvalidTransactionId(ResponseError):
    """The specified transaction could not be found or the caller did not
       execute the transaction or this is not a Pay or Reserve call.
    """

class InvalidTransactionState(ResponseError):
    """The transaction is not complete, or it has temporarily failed.
    """

class NotMarketplaceApp(RetriableResponseError):
    """This is not a marketplace application or the caller does not
       match either the sender or the recipient.
    """

class OriginalTransactionFailed(ResponseError):
    """The original transaction has failed.
    """

class OriginalTransactionIncomplete(RetriableResponseError):
    """The original transaction is still in progress.
    """

class PaymentInstrumentNotCC(ResponseError):
    """The payment method specified in the transaction is not a credit
       card. You can only use a credit card for this transaction.
    """

class PaymentMethodNotDefined(ResponseError):
    """Payment method is not defined in the transaction.
    """

class PrepaidFundingLimitExceeded(RetriableResponseError):
    """An attempt has been made to fund the prepaid instrument at a
       level greater than its recharge limit.
    """

class RefundAmountExceeded(ResponseError):
    """The refund amount is more than the refundable amount.
    """

class SameSenderAndRecipient(ResponseError):
    """The sender and receiver are identical, which is not allowed.
    """

class SameTokenIdUsedMultipleTimes(ResponseError):
    """This token is already used in earlier transactions.
    """

class SenderNotOriginalRecipient(ResponseError):
    """The sender in the refund transaction is not the recipient of the
       original transaction.
    """

class SettleAmountGreaterThanDebt(ResponseError):
    """The amount being settled or written off is greater than the current
       debt.
    """

class SettleAmountGreaterThanReserveAmount(ResponseError):
    """The amount being settled is greater than the reserved amount.
    """

class SignatureDoesNotMatch(ResponseError):
    """The request signature calculated by Amazon does not match the
       signature you provided.
    """

class TokenAccessDenied(ResponseError):
    """Permission to cancel the token is denied.
    """

class TokenNotActive(ResponseError):
    """The token is canceled.
    """

class TokenNotActive_Recipient(ResponseError):
    """The recipient token is canceled.
    """

class TokenNotActive_Sender(ResponseError):
    """The sender token is canceled.
    """

class TokenUsageError(ResponseError):
    """The token usage limit is exceeded.
    """

class TransactionDenied(ResponseError):
    """The transaction is not allowed.
    """

class TransactionFullyRefundedAlready(ResponseError):
    """The transaction has already been completely refunded.
    """

class TransactionTypeNotRefundable(ResponseError):
    """You cannot refund this transaction.
    """

class UnverifiedAccount_Recipient(ResponseError):
    """The recipient's account must have a verified bank account or a
       credit card before this transaction can be initiated.
    """

class UnverifiedAccount_Sender(ResponseError):
    """The sender's account must have a verified U.S. credit card or
       a verified U.S bank account before this transaction can be
       initiated.
    """

class UnverifiedBankAccount(ResponseError):
    """A verified bank account should be used for this transaction.
    """

class UnverifiedEmailAddress_Caller(ResponseError):
    """The caller account must have a verified email address.
    """

class UnverifiedEmailAddress_Recipient(ResponseError):
    """The recipient account must have a verified email address for
       receiving payments.
    """

class UnverifiedEmailAddress_Sender(ResponseError):
    """The sender account must have a verified email address for this
       payment.
    """
diagramsoftware/odoomrp-wip
refs/heads/8.0
mrp_bom_catch_product_code/__init__.py
129
# -*- encoding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
rombie/contrail-controller
refs/heads/master
src/config/vnc_openstack/vnc_openstack/tests/test_strict_compliance.py
1
import sys
import json
import uuid
import logging

from testtools.matchers import Equals, Contains, Not
from testtools import content, content_type, ExpectedException
import webtest.app

from vnc_api.vnc_api import *

sys.path.append('../common/tests')
from test_utils import *
import test_common

import test_case

logger = logging.getLogger(__name__)


class TestStrictCompOn(test_case.NeutronBackendTestCase):
    @classmethod
    def setUpClass(cls):
        super(TestStrictCompOn, cls).setUpClass(
            extra_config_knobs=[('NEUTRON', 'strict_compliance', True)])
    #end setUpClass

    def _create_floatingip_and_associate_port_without_ext_gw(self, proj_id):
        #external network
        net_q = self.create_resource('network', proj_id,
            extra_res_fields={'router:external':True})
        subnet_q = self.create_resource('subnet', proj_id,
            extra_res_fields={'network_id': net_q['id'],
                              'cidr': '10.2.0.0/24',
                              'ip_version': 4})

        #private network
        pvt_net_q = self.create_resource('network', proj_id)
        pvt_subnet_q = self.create_resource('subnet', proj_id,
            extra_res_fields={'network_id': pvt_net_q['id'],
                              'cidr': '10.1.0.0/24',
                              'ip_version': 4})
        port_q = self.create_resource('port', proj_id,
            extra_res_fields={'network_id': pvt_subnet_q['network_id']})

        return self.create_resource('floatingip', proj_id,
            extra_res_fields={'floating_network_id': net_q['id'],
                              'port_id':port_q['id']})

    def _create_floatingip_and_associate_port_with_ext_gw(self, proj_id):
        #external network
        net_q = self.create_resource('network', proj_id,
            extra_res_fields={'router:external':True})
        subnet_q = self.create_resource('subnet', proj_id,
            extra_res_fields={'network_id': net_q['id'],
                              'cidr': '10.2.0.0/24',
                              'ip_version': 4})
        router_q = self.create_resource('router',proj_id)

        #private network
        pvt_net_q = self.create_resource('network', proj_id)
        pvt_subnet_q = self.create_resource('subnet', proj_id,
            extra_res_fields={'network_id': pvt_net_q['id'],
                              'cidr': '10.1.0.0/24',
                              'ip_version': 4})
        port_q = self.create_resource('port', proj_id,
            extra_res_fields={'network_id': pvt_subnet_q['network_id']})
        port2_q = self.create_resource('port', proj_id,
            extra_res_fields={'network_id': pvt_subnet_q['network_id']})

        #External gateway
        router_q = self.update_resource('router', router_q['id'], proj_id,
            extra_res_fields={'external_gateway_info': {'network_id':net_q['id']}})
        router_q = self.add_router_interface(router_q['id'], proj_id,
            extra_res_fields={'port_id': port2_q['id']})

        return self.create_resource('floatingip', proj_id,
            extra_res_fields={'floating_network_id': net_q['id'],
                              'port_id':port_q['id']})

    #test when strict_compliance is ON
    def test_create_fip_and_associate_port_without_ext_gw(self):
        proj_obj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
        res_q = self.create_resource('security_group', proj_obj.uuid)
        self.list_resource('security_group', proj_uuid=proj_obj.uuid,
            req_filters={'name': res_q['name']})
        with ExpectedException(webtest.app.AppError):
            self._create_floatingip_and_associate_port_without_ext_gw(proj_obj.uuid)

    #test when strict_compliance is ON
    def test_create_fip_and_associate_port_with_ext_gw(self):
        proj_obj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
        res_q = self.create_resource('security_group', proj_obj.uuid)
        self.list_resource('security_group', proj_uuid=proj_obj.uuid,
            req_filters={'name': res_q['name']})
        self._create_floatingip_and_associate_port_with_ext_gw(proj_obj.uuid)
# end class TestStrictCompON


class TestStrictCompOff(test_case.NeutronBackendTestCase):
    @classmethod
    def setUpClass(cls):
        super(TestStrictCompOff, cls).setUpClass(
            extra_config_knobs=[('NEUTRON', 'strict_compliance', False)])
    #end setUpClass

    def _create_floatingip_and_associate_port_without_ext_gw(self, proj_id):
        #external network
        net_q = self.create_resource('network', proj_id,
            extra_res_fields={'router:external':True})
        self.create_resource('subnet', proj_id,
            extra_res_fields={'network_id': net_q['id'],
                              'cidr': '10.2.0.0/24',
                              'ip_version': 4})

        #private network
        pvt_net_q = self.create_resource('network', proj_id)
        pvt_subnet_q = self.create_resource('subnet', proj_id,
            extra_res_fields={'network_id': pvt_net_q['id'],
                              'cidr': '10.1.0.0/24',
                              'ip_version': 4})
        port_q = self.create_resource('port', proj_id,
            extra_res_fields={'network_id': pvt_subnet_q['network_id']})

        return self.create_resource('floatingip', proj_id,
            extra_res_fields={'floating_network_id': net_q['id'],
                              'port_id':port_q['id']})

    #test when strict_compliance is OFF
    def test_create_fip_and_associate_port_without_ext_gw(self):
        proj_obj = self._vnc_lib.project_read(fq_name=['default-domain', 'default-project'])
        res_q = self.create_resource('security_group', proj_obj.uuid)
        self.list_resource('security_group', proj_uuid=proj_obj.uuid,
            req_filters={'name': res_q['name']})
        self._create_floatingip_and_associate_port_without_ext_gw(proj_obj.uuid)
# end class TestStrictCompOFF
Domatix/stock-logistics-workflow
refs/heads/11.0
stock_batch_picking/models/stock_batch_picking.py
2
# Copyright 2012-2014 Alexandre Fayolle, Camptocamp SA
# Copyright 2018 Tecnativa - Carlos Dauden
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).

from odoo import _, api, fields, models
from odoo.exceptions import UserError


class StockBatchPicking(models.Model):
    """ This object allows managing multiple stock.picking records at the
    same time.
    """
    _name = 'stock.batch.picking'

    name = fields.Char(
        'Name',
        required=True, index=True, copy=False, unique=True,
        states={'draft': [('readonly', False)]},
        default=lambda self: self.env['ir.sequence'].next_by_code(
            'stock.batch.picking'
        ),
    )
    state = fields.Selection([
        ('draft', 'Draft'),
        ('assigned', 'Available'),
        ('done', 'Done'),
        ('cancel', 'Cancelled')], string='State',
        readonly=True, index=True, copy=False, default='draft',
        help='the state of the batch picking. '
             'Workflow is draft -> assigned -> done or cancel'
    )
    date = fields.Date(
        'Date',
        required=True, readonly=True, index=True,
        states={
            'draft': [('readonly', False)],
            'assigned': [('readonly', False)]
        },
        default=fields.Date.context_today,
        help='date on which the batch picking is to be processed'
    )
    picker_id = fields.Many2one(
        'res.users', 'Picker',
        readonly=True, index=True,
        states={
            'draft': [('readonly', False)],
            'assigned': [('readonly', False)]
        },
        help='the user to which the pickings are assigned'
    )
    picking_ids = fields.One2many(
        'stock.picking', 'batch_picking_id', 'Pickings',
        readonly=True,
        states={'draft': [('readonly', False)]},
        help='List of picking managed by this batch.'
    )
    active_picking_ids = fields.One2many(
        'stock.picking', 'batch_picking_id', 'Pickings',
        readonly=True,
        domain=[('state', 'not in', ('cancel', 'done'))],
    )
    notes = fields.Text('Notes', help='free form remarks')
    move_lines = fields.Many2many(
        'stock.move',
        readonly=True,
        string='Related stock moves',
        compute='_compute_move_lines'
    )
    move_line_ids = fields.Many2many(
        'stock.move.line',
        string='Related pack operations',
        compute='_compute_move_line_ids',
        # HACK: Allow to write sml fields from this model
        inverse=lambda self: self,
    )
    entire_package_ids = fields.Many2many(
        comodel_name='stock.quant.package',
        compute='_compute_entire_package_ids',
        help='Those are the entire packages of a picking shown in the view of '
             'operations',
    )
    entire_package_detail_ids = fields.Many2many(
        comodel_name='stock.quant.package',
        compute='_compute_entire_package_ids',
        help='Those are the entire packages of a picking shown in the view of '
             'detailed operations',
    )

    @api.depends('picking_ids')
    def _compute_move_lines(self):
        for batch in self:
            batch.move_lines = batch.picking_ids.mapped("move_lines")

    @api.depends('picking_ids')
    def _compute_move_line_ids(self):
        for batch in self:
            batch.move_line_ids = batch.picking_ids.mapped(
                'move_line_ids'
            )

    @api.depends('picking_ids')
    def _compute_entire_package_ids(self):
        for batch in self:
            batch.update({
                'entire_package_ids': batch.picking_ids.mapped(
                    'entire_package_ids'),
                'entire_package_detail_ids': batch.picking_ids.mapped(
                    'entire_package_detail_ids'),
            })

    def get_not_empties(self):
        """ Return all batches in this recordset for which picking_ids is
        not empty.

        :raise UserError: If all batches are empty.
        """
        if not self.mapped('picking_ids'):
            if len(self) == 1:
                message = _('This Batch has no pickings')
            else:
                message = _('These Batches have no pickings')

            raise UserError(message)

        return self.filtered(lambda b: len(b.picking_ids) != 0)

    def verify_state(self, expected_state=None):
        """ Check if batches states must be changed based on pickings states.

        If all pickings are canceled, batch must be canceled.
        If all pickings are canceled or done, batch must be done.
        If all pickings are canceled or done or *expected_state*,
        batch must be *expected_state*.

        :return: True if batches states have been changed.
        """
        expected_states = {'done', 'cancel'}
        if expected_state is not None:
            expected_states.add(expected_state)

        all_good = True
        for batch in self.filtered(lambda b: b.state not in expected_states):
            states = set(batch.mapped('picking_ids.state'))

            if not states or states == {'cancel'}:
                batch.state = 'cancel'
            elif states == {'done'} or states == {'done', 'cancel'}:
                batch.state = 'done'
            elif states.issubset(expected_states):
                batch.state = expected_state
            else:
                all_good = False

        return all_good

    @api.multi
    def action_cancel(self):
        """ Call action_cancel for all batches pickings
        and set batches states to cancel too.
        """
        for batch in self:
            if not batch.picking_ids:
                batch.write({'state': 'cancel'})
            else:
                if not batch.verify_state():
                    batch.picking_ids.action_cancel()

    @api.multi
    def action_assign(self):
        """ Check if batches pickings are available. """
        batches = self.get_not_empties()
        if not batches.verify_state('assigned'):
            batches.mapped('active_picking_ids').action_assign()

    @api.multi
    def action_transfer(self):
        """ Make the transfer for all active pickings in these batches
        and set state to done when all pickings are done.
        """
        batches = self.get_not_empties()
        for batch in batches:
            if not batch.verify_state():
                batch.active_picking_ids.force_transfer(
                    force_qty=all(
                        operation.qty_done == 0
                        for operation in batch.move_line_ids
                    )
                )

    @api.multi
    def remove_undone_pickings(self):
        """ Remove from this batch all pickings whose state is not
        done / cancel.
        """
        self.mapped('active_picking_ids').write({'batch_picking_id': False})
        self.verify_state()

    @api.multi
    def action_view_stock_picking(self):
        """This function returns an action that displays existing pickings
        of the given batch picking.
        """
        self.ensure_one()
        pickings = self.mapped('picking_ids')
        action = self.env.ref('stock.action_picking_tree_all').read([])[0]
        action['domain'] = [('id', 'in', pickings.ids)]
        return action
jonyroda97/redbot-amigosprovaveis
refs/heads/develop
lib/youtube_dl/extractor/xminus.py
87
# coding: utf-8
from __future__ import unicode_literals

import re
import time

from .common import InfoExtractor
from ..compat import (
    compat_ord,
)
from ..utils import (
    int_or_none,
    parse_duration,
)


class XMinusIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?x-minus\.org/track/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://x-minus.org/track/4542/%D0%BF%D0%B5%D1%81%D0%B5%D0%BD%D0%BA%D0%B0-%D1%88%D0%BE%D1%84%D0%B5%D1%80%D0%B0.html',
        'md5': '401a15f2d2dcf6d592cb95528d72a2a8',
        'info_dict': {
            'id': '4542',
            'ext': 'mp3',
            'title': 'Леонид Агутин-Песенка шофёра',
            'duration': 156,
            'tbr': 320,
            'filesize_approx': 5900000,
            'view_count': int,
            'description': 'md5:03238c5b663810bc79cf42ef3c03e371',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        artist = self._html_search_regex(
            r'<a[^>]+href="/artist/\d+">([^<]+)</a>', webpage, 'artist')
        title = artist + '-' + self._html_search_regex(
            r'<span[^>]+class="minustrack-full-title(?:\s+[^"]+)?"[^>]*>([^<]+)', webpage, 'title')
        duration = parse_duration(self._html_search_regex(
            r'<span[^>]+class="player-duration(?:\s+[^"]+)?"[^>]*>([^<]+)',
            webpage, 'duration', fatal=False))
        mobj = re.search(
            r'<div[^>]+class="dw-info(?:\s+[^"]+)?"[^>]*>(?P<tbr>\d+)\s*кбит/c\s+(?P<filesize>[0-9.]+)\s*мб</div>',
            webpage)
        tbr = filesize_approx = None
        if mobj:
            filesize_approx = float(mobj.group('filesize')) * 1000000
            tbr = float(mobj.group('tbr'))
        view_count = int_or_none(self._html_search_regex(
            r'<span><[^>]+class="icon-chart-bar".*?>(\d+)</span>',
            webpage, 'view count', fatal=False))
        description = self._html_search_regex(
            r'(?s)<pre[^>]+id="lyrics-original"[^>]*>(.*?)</pre>',
            webpage, 'song lyrics', fatal=False)
        if description:
            description = re.sub(' *\r *', '\n', description)

        k = self._search_regex(
            r'<div[^>]+id="player-bottom"[^>]+data-k="([^"]+)">', webpage,
            'encoded data')
        h = time.time() / 3600
        a = sum(map(int, [compat_ord(c) for c in k])) + int(video_id) + h
        video_url = 'http://x-minus.me/dl/minus?id=%s&tkn2=%df%d' % (video_id, a, h)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            # The extension is unknown until actual downloading
            'ext': 'mp3',
            'duration': duration,
            'filesize_approx': filesize_approx,
            'tbr': tbr,
            'view_count': view_count,
            'description': description,
        }
cnbeining/you-get
refs/heads/develop
src/you_get/extractors/soundcloud.py
7
#!/usr/bin/env python

__all__ = ['soundcloud_download', 'soundcloud_download_by_id']

from ..common import *

def soundcloud_download_by_id(id, title = None, output_dir = '.', merge = True, info_only = False):
    assert title

    #if info["downloadable"]:
    #    url = 'https://api.soundcloud.com/tracks/' + id + '/download?client_id=b45b1aa10f1ac2941910a7f0d10f8e28'
    url = 'https://api.soundcloud.com/tracks/' + id + '/stream?client_id=02gUJC0hH2ct1EGOcYXQIzRFU91c72Ea'
    assert url

    type, ext, size = url_info(url)

    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([url], title, ext, size, output_dir, merge = merge)

def soundcloud_download(url, output_dir = '.', merge = True, info_only = False, **kwargs):
    metadata = get_html('https://api.soundcloud.com/resolve.json?url=' + url + '&client_id=02gUJC0hH2ct1EGOcYXQIzRFU91c72Ea')
    import json
    info = json.loads(metadata)
    title = info["title"]
    id = str(info["id"])
    soundcloud_download_by_id(id, title, output_dir, merge = merge, info_only = info_only)

site_info = "SoundCloud.com"
download = soundcloud_download
download_playlist = playlist_not_supported('soundcloud')
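A hedged sketch of driving this extractor directly; the track URL is made up, and in practice you-get's CLI resolves the extractor from the hostname:

# Hypothetical direct invocation (only works inside the you_get package,
# since the module uses relative imports).
soundcloud_download('https://soundcloud.com/some-artist/some-track',
                    output_dir='.', info_only=True)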
lino-framework/welfare
refs/heads/master
lino_welfare/modlib/cbss/mixins.py
1
# -*- coding: UTF-8 -*-
# Copyright 2011-2019 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)

"""Model mixins for `lino_welfare.modlib.cbss`.

"""

from builtins import str

import os
import traceback
import datetime
import logging
import six

logger = logging.getLogger(__name__)

from django.conf import settings
from django.db import models
from django.utils.translation import gettext_lazy as _

# from appy.shared.xml_parser import XmlUnmarshaller

from lino import mixins
from lino.api import dd
from lino.utils.ssin import ssin_validator
from lino.modlib.users.mixins import UserAuthored

from .utils import nodetext, xsdpath, CBSS_ENVS
from .choicelists import *

#~ try:
# import suds
from suds.client import Client
from suds.transport.http import HttpAuthenticated
from suds.transport.http import HttpTransport
from suds.sax.element import Element as E
from suds.sax.parser import Parser
PARSER = Parser()

from lino_xl.lib.excerpts.mixins import Certifiable

#~ except ImportError, e:
#~     pass

_clients_dict = dict()


def get_client(obj):
    c = _clients_dict.get(obj.__class__, None)
    if c is not None:
        return c
    c = obj.create_client()
    _clients_dict[obj.__class__] = c
    return c


# class CBSSRequest(UserAuthored, mixins.Printable, mixins.Duplicable):
class CBSSRequest(UserAuthored, mixins.Duplicable, Certifiable):
    """
    Common Abstract Base Class for :class:`SSDNRequest`
    and :class:`NewStyleRequest`
    """

    workflow_state_field = 'status'

    wsdl_parts = NotImplementedError

    class Meta:
        abstract = True

    person = dd.ForeignKey(
        'pcsw.Client',
        verbose_name=_("Client"))

    sent = models.DateTimeField(
        verbose_name=_("Sent"),
        blank=True, null=True,
        editable=False,
        help_text="""\
The date and time when this request has been executed.
This is empty for requests that haven't been sent.
Read-only.""")

    status = RequestStates.field(editable=False, blank=True)

    environment = models.CharField(
        max_length=4, editable=False, verbose_name=_("T/A/B"))

    ticket = models.CharField(
        max_length=36, editable=False, verbose_name=_("Ticket"))

    #~ environment = Environment.field(blank=True,null=True)

    # will probably go away soon
    request_xml = models.TextField(
        verbose_name=_("Request"),
        editable=False, blank=True,
        help_text="""The raw XML string that has (or would have) been sent.""")

    response_xml = models.TextField(
        verbose_name=_("Response"),
        editable=False, blank=True,
        help_text="""\
The raw XML response received.
""")

    #~ logged_messages = models.TextField(
    #~     verbose_name=_("Logged messages"),
    #~     editable=False,blank=True,
    #~     help_text="""Logged messages about this request.""")

    debug_messages = models.TextField(
        verbose_name=_("Debug messages"), editable=False, blank=True)
    info_messages = models.TextField(
        verbose_name=_("Info messages"), editable=False, blank=True)

    #~ send_action = ExecuteRequest()
    #~ print_action = mixins.DirectPrintAction(required=dict(states=['ok','warnings']))

    if False:  # removed 20151021
        do_print = mixins.DirectPrintAction()

    def on_duplicate(self, ar, master):
        """When duplicating a CBSS request, we want to re-execute it. So
        please duplicate only the parameters, not the execution data
        like `ticket`, `sent` and `status`. Note that also the `user`
        will be set to the user who asked to duplicate (because this
        is a subclass of `UserAuthored`).

        """
        self.user = ar.get_user()
        self.debug_messages = ''
        self.info_messages = ''
        self.ticket = ''
        self.response_xml = ''
        self.request_xml = ''
        self.sent = None
        #~ self.status = RequestStates.new
        self.status = ''  # RequestStates.blank_item
        self.environment = ''
        super(CBSSRequest, self).on_duplicate(ar, master)

    def get_row_permission(self, user, state, ba):
        """
        CBSS requests that have a `ticket` may never be modified.
        """
        #~ logger.info("20120622 CBSSRequest.get_row_permission %s %s", self.ticket, action.readonly)
        if self.ticket and not ba.action.readonly:
            return False
        return super(CBSSRequest, self).get_row_permission(user, state, ba)

    def on_cbss_ok(self, reply):
        """
        Called when a successful reply has been received.
        """
        pass

    #~ @classmethod
    #~ def setup_report(cls,rpt):
    # ~ # call_optional_super(CBSSRequest,cls,'setup_report',rpt)
    #~     rpt.add_action(ExecuteRequest())

    #~ def logmsg(self,s,*args):
    #~     if args:
    #~         s = s % args
    #~     self.logged_messages += ("[%s] " % datetime.datetime.now()) + s + '\n'

    def logmsg_debug(self, s, *args):
        if args:
            s = s % args
        self.debug_messages += ("[%s] " % datetime.datetime.now()) + s + '\n'

    def logmsg_info(self, s, *args):
        if args:
            s = s % args
        self.info_messages += s + '\n'

    def logmsg_warning(self, s, *args):
        if args:
            s = s % args
        self.info_messages += s + '\n'

    def __str__(self):
        return u"%s #%s" % (self._meta.verbose_name, self.pk)

    def after_ui_save(self, ar, cw):
        self.execute_request(ar)
        if self.status == RequestStates.failed:
            ar.set_response(message=self.debug_messages)
            ar.set_response(alert=True)
        elif self.status == RequestStates.warnings:
            ar.set_response(message=self.info_messages)
            #~ kw.update(message=_("Got valid response, but it contains warnings."))
            ar.set_response(alert=True)
            #~ kw.update(refresh=True)
            #~ return ar.success(**kw)
        #~ return kw

    def execute_request(self, ar=None, now=None,
                        simulate_response=None, environment=None):
        """This is the common part of a request for both classic and
        new-style.

        """
        if self.ticket:
            raise Warning("Cannot re-execute %s with non-empty ticket."
                          % self)
        if ar is not None:
            logger.info("%s executes CBSS request %s", ar.get_user(), self)
        if now is None:
            now = datetime.datetime.now()
        if environment is None:
            environment = settings.SITE.plugins.cbss.cbss_environment or ''

        self.environment = environment
        self.sent = now
        #~ self.logged_messages = ''
        self.debug_messages = ''
        self.info_messages = ''

        if not settings.SITE.plugins.cbss.cbss_live_requests:
            if simulate_response is None:  # and environment:
                self.validate_request()
                self.status = RequestStates.validated
                self.save()
                return

        self.status = RequestStates.sent
        self.save()

        retval = None
        try:
            retval = self.execute_request_(now, simulate_response)
        # except (IOError, Warning) as e:
        #     if self.ticket:
        #         self.status = RequestStates.errors
        #     else:
        #         self.status = RequestStates.failed
        #     # self.logmsg_debug(unicode(e))
        #     if six.PY2:
        #         self.logmsg_debug(traceback.format_exc(e))
        #     else:
        #         self.logmsg_debug(traceback.format_exc())
        except Exception as e:
            if self.ticket:
                self.status = RequestStates.errors
            else:
                self.status = RequestStates.failed
            #~ self.response_xml = traceback.format_exc(e)
            # self.logmsg_debug(traceback.format_exc(e))
            if six.PY2:
                self.logmsg_debug(traceback.format_exc(e))
            else:
                self.logmsg_debug(traceback.format_exc())
        self.save()
        return retval

    def validate_request(self):
        pass

    def get_wsdl_uri(self):
        url = os.path.join(settings.MEDIA_ROOT, *self.wsdl_parts)
        if not url.startswith('/'):
            # on a windows machine we need to prepend an additional "/"
            url = '/' + url
        if os.path.sep != '/':
            url = url.replace(os.path.sep, '/')
        url = 'file://' + url
        return url

    def check_environment(self, req):
        # if not self.environment:
        #     raise Warning("""\
        # Not actually sending because environment is empty. Request would be:
        # """ + unicode(req))
        assert self.environment in CBSS_ENVS

    @dd.virtualfield(dd.HtmlBox(_("Result")))
    def result(self, ar):
        return self.response_xml

    def get_excerpt_options(self, ar, **kw):
        """When we print a request, the resulting excerpt should go to the
        client's history.

        """
        kw.update(project=self.person)
        return super(CBSSRequest, self).get_excerpt_options(ar, **kw)

#~ dd.update_field(CBSSRequest,'project',blank=False,null=False)
dd.update_field(CBSSRequest, 'user', blank=False, null=False)


class SSDNRequest(CBSSRequest):
    """Abstract Base Class for Models that represent SSDN ("classic")
    requests.

    """

    wsdl_parts = ('cache', 'wsdl', 'WebServiceConnector.wsdl')

    xsd_filename = None

    class Meta:
        abstract = True

    def validate_against_xsd(self, srvreq, xsd_filename):
        #~ logger.info("20120524 Validate against %s", xsd_filename)
        from lxml import etree
        xml = str(srvreq)
        #~ print xml
        doc = etree.fromstring(xml)
        schema_doc = etree.parse(xsd_filename)
        schema = etree.XMLSchema(schema_doc)
        #~ if not schema.validate(doc):
        #~     print xml
        schema.assertValid(doc)
        #~ self.logmsg("Validated %s against %s", xml,xsd_filename)
        self.logmsg_debug("Validated %s against %s", self, xsd_filename)

    def validate_wrapped(self, srvreq):
        self.validate_against_xsd(
            srvreq, xsdpath('SSDN', 'Service', 'SSDNRequest.xsd'))

    def validate_inner(self, srvreq):
        if not self.xsd_filename:
            return
        self.validate_against_xsd(srvreq, self.xsd_filename)

    def validate_request(self):
        """
        Validates the generated XML against the XSD files.
        Used by the test suite.
        It is not necessary to validate each real request before
        actually sending it.

        """
        srvreq = self.build_request()
        self.validate_inner(srvreq)
        wrapped_srvreq = self.wrap_ssdn_request(
            srvreq, datetime.datetime.now())
        self.validate_wrapped(wrapped_srvreq)
        self.logmsg_info(_("Request has been validated against XSD files"))

    def create_client(self):
        url = self.get_wsdl_uri()
        #~ logger.info("Instantiate Client at %s", url)
        t = HttpTransport()
        client = Client(url, transport=t, timeout=10)
        #~ print 20120507, client
        return client

    def execute_request_(self, now, simulate_response):
        """
        SSDN specific part of a request.
        """
        srvreq = self.build_request()
        wrapped_srvreq = self.wrap_ssdn_request(srvreq, now)
        xmlString = str(wrapped_srvreq)
        self.request_xml = xmlString
        if simulate_response is not None:
            self.environment = 'demo'
            self.response_xml = str(simulate_response)
            return self.fill_from_string(simulate_response)

        # the normal case
        self.check_environment(srvreq)
        client = get_client(self)
        #~ logger.info("20120521 Gonna sendXML(<xmlString>):\n%s",xmlString)
        if not settings.SITE.plugins.cbss.cbss_live_requests:
            raise Warning(
                "NOT sending because `cbss_live_requests` is False:\n"
                + xmlString)
        #~ xmlString.append(wrapped_srvreq)
        self.logmsg_debug("client.service.sendXML(\n%s\n)", xmlString)
        res = client.service.sendXML(xmlString)
        #~ print 20120522, res
        self.response_xml = str(res)
        return self.fill_from_string(res.encode('utf-8'))

    def fill_from_string(self, s, sent_xmlString=None):
        #~ self.response_xml = unicode(res)
        reply = PARSER.parse(string=s).root()
        self.ticket = nodetext(
            reply.childAtPath('/ReplyContext/Message/Ticket'))
        rs = reply.childAtPath('/ServiceReply/ResultSummary')
        if rs is None:
            raise Warning("Missing ResultSummary in :\n%s" % reply)
        for dtl in rs.getChildren('Detail'):
            #~ for detail in rs.getChildren():
            # WARNING, INFO, ERROR...
            msg = nodetext(dtl.childAtPath('/Severity'))
            msg += " " + nodetext(dtl.childAtPath('/ReasonCode'))
            msg += " (%s) : " % nodetext(dtl.childAtPath('/AuthorCodeList'))
            msg += nodetext(dtl.childAtPath('/Diagnostic'))
            #~ print '========'
            #~ print msg
            #~ raise Warning(msg)
            self.logmsg_info(msg)

        rc = nodetext(rs.childAtPath('/ReturnCode'))
        #~ print reply.__class__, dir(reply)
        #~ print reply
        #~ rc = reply.root().SSDNReply.ServiceReply.ResultSummary.ReturnCode
        if rc == '0':
            self.status = RequestStates.ok
        elif rc == '1':
            self.status = RequestStates.warnings
            #~ self.logmsg_debug("Warnings:==============\n%s\n===============" % s)
        #~ elif rc == '10000':
        #~     self.status = RequestStates.errors
        else:
            self.status = RequestStates.errors
            #~ self.response_xml = unicode(reply)
            #~ dtl = rs.childAtPath('/Detail')
            #~ msg = CBSS_ERROR_MESSAGE % rc
            #~ keys = ('Severity', 'ReasonCode', 'Diagnostic', 'AuthorCodeList')
            #~ msg += '\n'.join([
            #~     k+' : '+nodetext(dtl.childAtPath('/'+k))
            #~     for k in keys])
            #~ raise Warning(msg)
            #~ return None
            #~ raise Exception("Got invalid response status")

        #~ self.on_cbss_ok(reply)
        service_reply = self.get_service_reply(reply)
        if service_reply is None:
            raise Warning("Got response without service reply.")
            #~ raise Exception(
            #~     "Return code is %r, but there's no service reply." % rc)
            #~     "Return code is %r but there's no service reply in:\n%s\n" % (rc,reply))
        #~ reply.childAtPath('/ServiceReply/IdentifyPersonReply')
        self.response_xml = str(service_reply)
        return service_reply

    def get_service_reply(self, full_reply=None):
        raise NotImplementedError()

    def wrap_ssdn_request(self, srvreq, dt):
        """
        Wrap the given service request into the SSDN envelope by adding
        AuthorizedUser and other information common to all SSDN requests.

        """
        #~ up = settings.SITE.ssdn_user_params
        #~ user_params = settings.SITE.cbss_user_params
        sc = settings.SITE.site_config
        #~ au = E('common:AuthorizedUser',ns=NSCOMMON)
        #~ au.append(E('common:UserID').setText(up['UserID']))
        #~ au.append(E('common:Email').setText(up['Email']))
        #~ au.append(E('common:OrgUnit').setText(up['OrgUnit']))
        #~ au.append(E('common:MatrixID').setText(up['MatrixID']))
        #~ au.append(E('common:MatrixSubID').setText(up['MatrixSubID']))
        au = E('ssdn:AuthorizedUser')
        #~ au.append(E('ssdn:UserID').setText(user_params['UserID']))
        au.append(E('ssdn:UserID').setText(sc.ssdn_user_id))
        #~ au.append(E('ssdn:Email').setText(user_params['Email']))
        #~ if not sc.site_company:
        #~     raise Exception("")
        #~ au.append(E('ssdn:Email').setText(sc.site_company.email))
        au.append(E('ssdn:Email').setText(sc.ssdn_email))
        #~ au.append(E('ssdn:OrgUnit').setText(user_params['OrgUnit']))
        #~ au.append(E('ssdn:OrgUnit').setText(sc.site_company.vat_id))
        au.append(E('ssdn:OrgUnit').setText(sc.cbss_org_unit))
        #~ au.append(E('ssdn:MatrixID').setText(user_params['MatrixID']))
        au.append(E('ssdn:MatrixID').setText(sc.sector.code))
        #~ au.append(E('ssdn:MatrixSubID').setText(user_params['MatrixSubID']))
        au.append(E('ssdn:MatrixSubID').setText(sc.sector.subcode))

        ref = "%s # %s" % (self.__class__.__name__, self.id)
        msg = E('ssdn:Message')
        msg.append(E('ssdn:Reference').setText(ref))
        msg.append(E('ssdn:TimeRequest').setText(dt.strftime("%Y%m%dT%H%M%S")))

        context = E('ssdn:RequestContext')
        context.append(au)
        context.append(msg)

        sr = E('ssdn:ServiceRequest')
        sr.append(E('ssdn:ServiceId').setText(self.ssdn_service_id))
        sr.append(E('ssdn:Version').setText(self.ssdn_service_version))
        sr.append(srvreq)

        #~ xg.set_default_namespace(SSDN)
        e = E('ssdn:SSDNRequest',
              ns=('ssdn', 'http://www.ksz-bcss.fgov.be/XSD/SSDN/Service'))
        e.append(context)
        e.append(sr)
        #~ if srvreq.prefix != e.prefix:
        #~     e.addPrefix(srvreq.prefix,srvreq.nsprefixes[srvreq.prefix])

        return e


class NewStyleRequest(CBSSRequest):
    """
    Abstract Base Class for Models that represent "new style"
    requests to the :term:`CBSS` (and responses).
    """

    class Meta:
        abstract = True

    def create_client(self):
        url = self.get_wsdl_uri()
        logger.debug("Instantiate CBSS client at %s", url)
        sc = settings.SITE.site_config
        #~ t = HttpAuthenticated(
        #~     username=settings.SITE.cbss_username,
        #~     password=settings.SITE.cbss_password)
        t = HttpAuthenticated(
            username=sc.cbss_http_username,
            password=sc.cbss_http_password)
        client = Client(url, transport=t, retxml=True)
        #~ print 20120613, client
        return client

    def execute_request_(self, now, simulate_response):
        """
        NewStyle specific part of a request.
        """
        client = get_client(self)
        client.add_prefix("common", "http://kszbcss.fgov.be/types/common/v3")
        # info = client.factory.create('ns0:InformationCustomerType')
        info = client.factory.create('common:InformationCustomerType')
        info.ticket = str(self.id)
        info.timestampSent = now
        # ci = client.factory.create('ns0:CustomerIdentificationType')
        ci = client.factory.create('common:OrganizationIdentificationType')
        #~ cbeNumber = client.factory.create('ns0:CbeNumberType')
        #~ ci.cbeNumber = settings.SITE.cbss_cbe_number
        #~ ci.cbeNumber = settings.SITE.site_config.site_company.vat_id
        ci.cbeNumber = settings.SITE.site_config.cbss_org_unit
        info.customerIdentification = ci
        return self.execute_newstyle(client, info, simulate_response)

    def on_cbss_ok(self, reply):
        """
        Called when a successful reply has been received.

        """
        pass

    #~ def __unicode__(self):
    # ~     return u"%s#%s" % (self.__class__.__name__,self.pk)

    def get_service_reply(self):
        #~ """
        #~ Example of a reply::
        #~
        #~  (reply){
        #~     informationCustomer =
        #~        (InformationCustomerType){
        #~           ticket = "1"
        #~           timestampSent = 2012-05-23 09:24:55.316312
        #~           customerIdentification =
        #~              (CustomerIdentificationType){
        #~                 cbeNumber = "0123456789"
        #~              }
        #~        }
        #~     informationCBSS =
        #~        (InformationCBSSType){
        #~           ticketCBSS = "f11736b3-97bc-452a-a75c-16fcc2a2f6ae"
        #~           timestampReceive = 2012-05-23 08:24:37.000385
        #~           timestampReply = 2012-05-23 08:24:37.000516
        #~        }
        #~     status =
        #~        (StatusType){
        #~           value = "NO_RESULT"
        #~           code = "MSG00008"
        #~           description = "A validation error occurred."
        #~           information[] =
        #~              (InformationType){
        #~                 fieldName = "ssin"
        #~                 fieldValue = "12345678901"
        #~              },
        #~        }
        #~     searchInformation =
        #~        (SearchInformationType){
        #~           ssin = "12345678901"
        #~           language = "de"
        #~           history = False
        #~        }
        #~  }
        #~ """
        if not self.response_xml:
            return None
        client = get_client(self).service
        #~ print '20120613b', dir(client)
        return client.succeeded(client.method.binding.input, self.response_xml)

    def execute_newstyle(self, client, infoCustomer, simulate_response):
        raise NotImplementedError()


class SSIN(dd.Model):
    """
    Abstract base for Requests that have a field `national_id` and a
    method :meth:`get_ssin`.
    """
    class Meta:
        abstract = True

    national_id = models.CharField(
        max_length=200,
        blank=True, verbose_name=_("National ID"),
        validators=[ssin_validator])

    def get_ssin(self):
        national_id = self.national_id.replace('=', '')
        national_id = national_id.replace(' ', '')
        national_id = national_id.replace('-', '')
        return national_id

    #~ def save(self,*args,**kw):
    #~     if self.person_id and not self.last_name:
    #~         self.fill_from_person(self.person)
    #~     super(SSIN,self).save(*args,**kw)

    def on_create(self, ar):
        #~ print '20120629 SSIN.on_create', dd.obj2str(self), ar
        #~ super(ContractBase,self).on_create(request)
        self.person_changed(ar)
        super(SSIN, self).on_create(ar)

    def person_changed(self, ar):
        #~ raise Exception("20120704")
        #~ print '20120704 person_changed'
        if self.person_id:
            self.fill_from_person(self.person)

    def fill_from_person(self, person):
        self.national_id = person.national_id


class WithPerson(SSIN):
    """
    Mixin for models that have certain fields
    """
    class Meta:
        abstract = True

    birth_date = dd.IncompleteDateField(
        blank=True,
        verbose_name=_("Birth date"))
    sis_card_no = models.CharField(
        verbose_name=_('SIS card number'),
        max_length=10,
        blank=True, help_text="""\
The number of the SIS card used to authenticate the person.""")
    id_card_no = models.CharField(
        verbose_name=_('ID card number'),
        max_length=20,
        blank=True, help_text="""\
The number of the ID card used to authenticate the person.""")
    first_name = models.CharField(
        max_length=200,
        blank=True,
        verbose_name=_('First name'))
    "Space-separated list of all first names."
    last_name = models.CharField(
        max_length=200,
        blank=True,
        verbose_name=_('Last name'))
    """Last name (family name)."""

    def fill_from_person(self, person):
        self.national_id = person.national_id
        self.id_card_no = person.card_number
        self.last_name = person.last_name
        self.first_name = person.first_name
        self.birth_date = person.birth_date
        #~ print '20120603 fill_from_person', self.national_id
gonboy/sl4a
refs/heads/master
python/gdata/src/gdata/photos/service.py
162
#!/usr/bin/env python
# -*-*- encoding: utf-8 -*-*-
#
# This is the service file for the Google Photo python client.
# It is used for higher level operations.
#
# $Id: service.py 144 2007-10-25 21:03:34Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Google PhotoService provides a human-friendly interface to
Google Photo (a.k.a Picasa Web) services[1].

It extends gdata.service.GDataService and as such hides all the
nasty details about authenticating, parsing and communicating with
Google Photos.

[1]: http://code.google.com/apis/picasaweb/gdata.html

Example:
  import gdata.photos, gdata.photos.service
  pws = gdata.photos.service.PhotosService()
  pws.ClientLogin(username, password)
  #Get all albums
  albums = pws.GetUserFeed().entry
  # Get all photos in second album
  photos = pws.GetFeed(albums[1].GetPhotosUri()).entry
  # Get all tags for photos in second album and print them
  tags = pws.GetFeed(albums[1].GetTagsUri()).entry
  print [ tag.summary.text for tag in tags ]
  # Get all comments for the first photos in list and print them
  comments = pws.GetCommentFeed(photos[0].GetCommentsUri()).entry
  print [ c.summary.text for c in comments ]

  # Get a photo to work with
  photo = photos[0]
  # Update metadata

  # Attributes from the <gphoto:*> namespace
  photo.summary.text = u'A nice view from my veranda'
  photo.title.text = u'Verandaview.jpg'

  # Attributes from the <media:*> namespace
  photo.media.keywords.text = u'Home, Long-exposure, Sunset' # Comma-separated

  # Adding attributes to media object

  # Rotate 90 degrees clockwise
  photo.rotation = gdata.photos.Rotation(text='90')

  # Submit modified photo object
  photo = pws.UpdatePhotoMetadata(photo)

  # Make sure you only modify the newly returned object, else you'll get
  # versioning errors. See Optimistic-concurrency

  # Add comment to a picture
  comment = pws.InsertComment(photo, u'I wish the water always was this warm')

  # Remove comment because it was silly
  print "*blush*"
  pws.Delete(comment.GetEditLink().href)

"""

__author__ = u'[email protected]'# (Håvard Gulldahl)' #BUG: pydoc chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
__version__ = '$Revision: 176 $'[11:-2]

import sys, os.path, StringIO
import time
import gdata.service
import gdata
import atom.service
import atom
import gdata.photos

SUPPORTED_UPLOAD_TYPES = ('bmp', 'jpeg', 'jpg', 'gif', 'png')

UNKOWN_ERROR=1000
GPHOTOS_BAD_REQUEST=400
GPHOTOS_CONFLICT=409
GPHOTOS_INTERNAL_SERVER_ERROR=500
GPHOTOS_INVALID_ARGUMENT=601
GPHOTOS_INVALID_CONTENT_TYPE=602
GPHOTOS_NOT_AN_IMAGE=603
GPHOTOS_INVALID_KIND=604

class GooglePhotosException(Exception):
  def __init__(self, response):

    self.error_code = response['status']
    self.reason = response['reason'].strip()
    if '<html>' in str(response['body']): #general html message, discard it
      response['body'] = ""
    self.body = response['body'].strip()
    self.message = "(%(status)s) %(body)s -- %(reason)s" % response

    #return explicit error codes
    error_map = { '(12) Not an image':GPHOTOS_NOT_AN_IMAGE,
                  'kind: That is not one of the acceptable values':
                      GPHOTOS_INVALID_KIND,
                  }
    for msg, code in error_map.iteritems():
      if self.body == msg:
        self.error_code = code
        break
    self.args = [self.error_code, self.reason, self.body]

class PhotosService(gdata.service.GDataService):
  userUri = '/data/feed/api/user/%s'

  def __init__(self, email=None, password=None, source=None,
               server='picasaweb.google.com', additional_headers=None,
               **kwargs):
    """Creates a client for the Google Photos service.

    Args:
      email: string (optional) The user's email address, used for
          authentication.
      password: string (optional) The user's password.
      source: string (optional) The name of the user's application.
      server: string (optional) The name of the server to which a connection
          will be opened. Default value: 'picasaweb.google.com'.
      **kwargs: The other parameters to pass to gdata.service.GDataService
          constructor.
    """
    self.email = email
    self.client = source
    gdata.service.GDataService.__init__(
        self, email=email, password=password, service='lh2', source=source,
        server=server, additional_headers=additional_headers, **kwargs)

  def GetFeed(self, uri, limit=None, start_index=None):
    """Get a feed.

    The results are ordered by the values of their `updated' elements,
    with the most recently updated entry appearing first in the feed.

    Arguments:
    uri: the uri to fetch
    limit (optional): the maximum number of entries to return. Defaults to
      what the server returns.

    Returns:
    one of gdata.photos.AlbumFeed,
           gdata.photos.UserFeed,
           gdata.photos.PhotoFeed,
           gdata.photos.CommentFeed,
           gdata.photos.TagFeed,
      depending on the results of the query.

    Raises:
    GooglePhotosException

    See:
    http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
    """
    if limit is not None:
      uri += '&max-results=%s' % limit
    if start_index is not None:
      uri += '&start-index=%s' % start_index
    try:
      return self.Get(uri, converter=gdata.photos.AnyFeedFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

  def GetEntry(self, uri, limit=None, start_index=None):
    """Get an Entry.

    Arguments:
    uri: the uri to the entry
    limit (optional): the maximum number of entries to return. Defaults to
      what the server returns.

    Returns:
    one of gdata.photos.AlbumEntry,
           gdata.photos.UserEntry,
           gdata.photos.PhotoEntry,
           gdata.photos.CommentEntry,
           gdata.photos.TagEntry,
      depending on the results of the query.

    Raises:
    GooglePhotosException
    """
    if limit is not None:
      uri += '&max-results=%s' % limit
    if start_index is not None:
      uri += '&start-index=%s' % start_index
    try:
      return self.Get(uri, converter=gdata.photos.AnyEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

  def GetUserFeed(self, kind='album', user='default', limit=None):
    """Get user-based feed, containing albums, photos, comments or tags;
      defaults to albums.

    The entries are ordered by the values of their `updated' elements,
    with the most recently updated entry appearing first in the feed.

    Arguments:
    kind: the kind of entries to get, either `album', `photo',
      `comment' or `tag', or a python list of these. Defaults to `album'.
    user (optional): whose albums we're querying. Defaults to current user.
    limit (optional): the maximum number of entries to return.
      Defaults to everything the server returns.

    Returns:
    gdata.photos.UserFeed, containing appropriate Entry elements

    See:
    http://code.google.com/apis/picasaweb/gdata.html#Get_Album_Feed_Manual
    http://googledataapis.blogspot.com/2007/07/picasa-web-albums-adds-new-api-features.html
    """
    if isinstance(kind, (list, tuple) ):
      kind = ",".join(kind)

    uri = '/data/feed/api/user/%s?kind=%s' % (user, kind)
    return self.GetFeed(uri, limit=limit)

  def GetTaggedPhotos(self, tag, user='default', limit=None):
    """Get all photos belonging to a specific user, tagged by the given
    keyword

    Arguments:
    tag: The tag you're looking for, e.g. `dog'
    user (optional): Whose images/videos you want to search, defaults
      to current user
    limit (optional): the maximum number of entries to return.
      Defaults to everything the server returns.

    Returns:
    gdata.photos.UserFeed containing PhotoEntry elements
    """
    # Lower-casing because of
    # http://code.google.com/p/gdata-issues/issues/detail?id=194
    uri = '/data/feed/api/user/%s?kind=photo&tag=%s' % (user, tag.lower())
    return self.GetFeed(uri, limit)

  def SearchUserPhotos(self, query, user='default', limit=100):
    """Search through all photos for a specific user and return a feed.
    This will look for matches in file names and image tags (a.k.a. keywords)

    Arguments:
    query: The string you're looking for, e.g. `vacation'
    user (optional): The username of whose photos you want to search,
      defaults to current user.
    limit (optional): Don't return more than `limit' hits, defaults to 100

    Only public photos are searched, unless you are authenticated and
    searching through your own photos.

    Returns:
    gdata.photos.UserFeed with PhotoEntry elements
    """
    uri = '/data/feed/api/user/%s?kind=photo&q=%s' % (user, query)
    return self.GetFeed(uri, limit=limit)

  def SearchCommunityPhotos(self, query, limit=100):
    """Search through all public photos and return a feed.
    This will look for matches in file names and image tags (a.k.a. keywords)

    Arguments:
    query: The string you're looking for, e.g. `vacation'
    limit (optional): Don't return more than `limit' hits, defaults to 100

    Returns:
    gdata.GDataFeed with PhotoEntry elements
    """
    uri='/data/feed/api/all?q=%s' % query
    return self.GetFeed(uri, limit=limit)

  def GetContacts(self, user='default', limit=None):
    """Retrieve a feed that contains a list of your contacts

    Arguments:
    user: Username of the user whose contacts you want

    Returns
    gdata.photos.UserFeed, with UserEntry entries

    See:
    http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
    """
    uri = '/data/feed/api/user/%s/contacts?kind=user' % user
    return self.GetFeed(uri, limit=limit)

  def SearchContactsPhotos(self, user='default', search=None, limit=None):
    """Search over your contacts' photos and return a feed

    Arguments:
    user: Username of the user whose contacts you want
    search (optional): What to search for (photo title, description and
      keywords)

    Returns
    gdata.photos.UserFeed, with PhotoEntry elements

    See:
    http://groups.google.com/group/Google-Picasa-Data-API/msg/819b0025b5ff5e38
    """
    uri = '/data/feed/api/user/%s/contacts?kind=photo&q=%s' % (user, search)
    return self.GetFeed(uri, limit=limit)

  def InsertAlbum(self, title, summary, location=None, access='public',
      commenting_enabled='true', timestamp=None):
    """Add an album.

    Needs authentication, see self.ClientLogin()

    Arguments:
    title: Album title
    summary: Album summary / description
    access (optional): `private' or `public'. Public albums are searchable
      by everyone on the internet. Defaults to `public'
    commenting_enabled (optional): `true' or `false'. Defaults to `true'.
    timestamp (optional): A date and time for the album, in milliseconds
      since Unix epoch[1] UTC. Defaults to now.

    Returns:
    The newly created gdata.photos.AlbumEntry

    See:
    http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed

    [1]: http://en.wikipedia.org/wiki/Unix_epoch
    """
    album = gdata.photos.AlbumEntry()
    album.title = atom.Title(text=title, title_type='text')
    album.summary = atom.Summary(text=summary, summary_type='text')
    if location is not None:
      album.location = gdata.photos.Location(text=location)
    album.access = gdata.photos.Access(text=access)
    if commenting_enabled in ('true', 'false'):
      album.commentingEnabled = gdata.photos.CommentingEnabled(text=commenting_enabled)
    if timestamp is None:
      timestamp = '%i' % int(time.time() * 1000)
    album.timestamp = gdata.photos.Timestamp(text=timestamp)
    try:
      return self.Post(album, uri=self.userUri % self.email,
          converter=gdata.photos.AlbumEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

  def InsertPhoto(self, album_or_uri, photo, filename_or_handle,
      content_type='image/jpeg'):
    """Add a PhotoEntry

    Needs authentication, see self.ClientLogin()

    Arguments:
    album_or_uri: AlbumFeed or uri of the album where the photo should go
    photo: PhotoEntry to add
    filename_or_handle: A file-like object or file name where the image/video
      will be read from
    content_type (optional): Internet media type (a.k.a. mime type) of
      media object. Currently Google Photos supports these types:
       o image/bmp
       o image/gif
       o image/jpeg
       o image/png

      Images will be converted to jpeg on upload. Defaults to `image/jpeg'
    """
    try:
      assert(isinstance(photo, gdata.photos.PhotoEntry))
    except AssertionError:
      raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
        'body':'`photo` must be a gdata.photos.PhotoEntry instance',
        'reason':'Found %s, not PhotoEntry' % type(photo)
        })
    try:
      majtype, mintype = content_type.split('/')
      assert(mintype in SUPPORTED_UPLOAD_TYPES)
    except (ValueError, AssertionError):
      raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
        'body':'This is not a valid content type: %s' % content_type,
        'reason':'Accepted content types: %s' % \
            ['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
        })
    if isinstance(filename_or_handle, (str, unicode)) and \
        os.path.exists(filename_or_handle): # it's a file name
      mediasource = gdata.MediaSource()
      mediasource.setFile(filename_or_handle, content_type)
    elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
      if hasattr(filename_or_handle, 'seek'):
        filename_or_handle.seek(0) # rewind pointer to the start of the file
      # gdata.MediaSource needs the content length, so read the whole image
      file_handle = StringIO.StringIO(filename_or_handle.read())
      name = 'image'
      if hasattr(filename_or_handle, 'name'):
        name = filename_or_handle.name
      mediasource = gdata.MediaSource(file_handle, content_type,
          content_length=file_handle.len, file_name=name)
    else: #filename_or_handle is not valid
      raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
        'body':'`filename_or_handle` must be a path name or a file-like object',
        'reason':'Found %s, not path name or object with a .read() method' % \
            type(filename_or_handle)
        })

    if isinstance(album_or_uri, (str, unicode)): # it's a uri
      feed_uri = album_or_uri
    elif hasattr(album_or_uri, 'GetFeedLink'): # it's an AlbumFeed object
      feed_uri = album_or_uri.GetFeedLink().href

    try:
      return self.Post(photo, uri=feed_uri, media_source=mediasource,
          converter=gdata.photos.PhotoEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

  def InsertPhotoSimple(self, album_or_uri, title, summary,
      filename_or_handle, content_type='image/jpeg', keywords=None):
    """Add a photo without constructing a PhotoEntry.

    Needs authentication, see self.ClientLogin()

    Arguments:
    album_or_uri: AlbumFeed or uri of the album where the photo should go
    title: Photo title
    summary: Photo summary / description
    filename_or_handle: A file-like object or file name where the image/video
      will be read from
    content_type (optional): Internet media type (a.k.a. mime type) of
      media object. Currently Google Photos supports these types:
       o image/bmp
       o image/gif
       o image/jpeg
       o image/png

      Images will be converted to jpeg on upload. Defaults to `image/jpeg'
    keywords (optional): a 1) comma separated string or 2) a python list() of
      keywords (a.k.a. tags) to add to the image.
      E.g. 1) `dog, vacation, happy' 2) ['dog', 'happy', 'vacation']

    Returns:
    The newly created gdata.photos.PhotoEntry or GooglePhotosException on
    errors

    See:
    http://code.google.com/apis/picasaweb/gdata.html#Add_Album_Manual_Installed
    [1]: http://en.wikipedia.org/wiki/Unix_epoch
    """
    metadata = gdata.photos.PhotoEntry()
    metadata.title=atom.Title(text=title)
    metadata.summary = atom.Summary(text=summary, summary_type='text')
    if keywords is not None:
      if isinstance(keywords, list):
        keywords = ','.join(keywords)
      metadata.media.keywords = gdata.media.Keywords(text=keywords)
    return self.InsertPhoto(album_or_uri, metadata, filename_or_handle,
        content_type)

  def UpdatePhotoMetadata(self, photo):
    """Update a photo's metadata.

    Needs authentication, see self.ClientLogin()

    You can update any or all of the following metadata properties:
     * <title>
     * <media:description>
     * <gphoto:checksum>
     * <gphoto:client>
     * <gphoto:rotation>
     * <gphoto:timestamp>
     * <gphoto:commentingEnabled>

    Arguments:
    photo: a gdata.photos.PhotoEntry object with updated elements

    Returns:
    The modified gdata.photos.PhotoEntry

    Example:
    p = GetFeed(uri).entry[0]
    p.title.text = u'My new text'
    p.commentingEnabled.text = 'false'
    p = UpdatePhotoMetadata(p)

    It is important that you don't keep the old object around, once
    it has been updated. See
    http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
    """
    try:
      return self.Put(data=photo, uri=photo.GetEditLink().href,
          converter=gdata.photos.PhotoEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

  def UpdatePhotoBlob(self, photo_or_uri, filename_or_handle,
      content_type = 'image/jpeg'):
    """Update a photo's binary data.

    Needs authentication, see self.ClientLogin()

    Arguments:
    photo_or_uri: a gdata.photos.PhotoEntry that will be updated, or a
      `edit-media' uri pointing to it
    filename_or_handle: A file-like object or file name where the image/video
      will be read from
    content_type (optional): Internet media type (a.k.a. mime type) of
      media object. Currently Google Photos supports these types:
       o image/bmp
       o image/gif
       o image/jpeg
       o image/png

      Images will be converted to jpeg on upload. Defaults to `image/jpeg'

    Returns:
    The modified gdata.photos.PhotoEntry

    Example:
    p = GetFeed(PhotoUri)
    p = UpdatePhotoBlob(p, '/tmp/newPic.jpg')

    It is important that you don't keep the old object around, once
    it has been updated. See
    http://code.google.com/apis/gdata/reference.html#Optimistic-concurrency
    """
    try:
      majtype, mintype = content_type.split('/')
      assert(mintype in SUPPORTED_UPLOAD_TYPES)
    except (ValueError, AssertionError):
      raise GooglePhotosException({'status':GPHOTOS_INVALID_CONTENT_TYPE,
        'body':'This is not a valid content type: %s' % content_type,
        'reason':'Accepted content types: %s' % \
            ['image/'+t for t in SUPPORTED_UPLOAD_TYPES]
        })
    if isinstance(filename_or_handle, (str, unicode)) and \
        os.path.exists(filename_or_handle): # it's a file name
      photoblob = gdata.MediaSource()
      photoblob.setFile(filename_or_handle, content_type)
    elif hasattr(filename_or_handle, 'read'):# it's a file-like resource
      if hasattr(filename_or_handle, 'seek'):
        filename_or_handle.seek(0) # rewind pointer to the start of the file
      # gdata.MediaSource needs the content length, so read the whole image
      file_handle = StringIO.StringIO(filename_or_handle.read())
      name = 'image'
      if hasattr(filename_or_handle, 'name'):
        name = filename_or_handle.name
      # note: assigned to `photoblob` (the copied-in source bound this to a
      # stray `mediasource` name, which left photoblob undefined on this
      # branch and the Put() below failing with a NameError)
      photoblob = gdata.MediaSource(file_handle, content_type,
          content_length=file_handle.len, file_name=name)
    else: #filename_or_handle is not valid
      raise GooglePhotosException({'status':GPHOTOS_INVALID_ARGUMENT,
        'body':'`filename_or_handle` must be a path name or a file-like object',
        'reason':'Found %s, not path name or an object with .read() method' % \
            type(filename_or_handle)
        })

    if isinstance(photo_or_uri, (str, unicode)):
      entry_uri = photo_or_uri # it's a uri
    elif hasattr(photo_or_uri, 'GetEditMediaLink'):
      entry_uri = photo_or_uri.GetEditMediaLink().href
    try:
      return self.Put(photoblob, entry_uri,
          converter=gdata.photos.PhotoEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

  def InsertTag(self, photo_or_uri, tag):
    """Add a tag (a.k.a. keyword) to a photo.

    Needs authentication, see self.ClientLogin()

    Arguments:
    photo_or_uri: a gdata.photos.PhotoEntry that will be tagged, or a
      `post' uri pointing to it (string)
    tag: The tag/keyword

    Returns:
    The new gdata.photos.TagEntry

    Example:
    p = GetFeed(PhotoUri)
    tag = InsertTag(p, 'Beautiful sunsets')

    """
    tag = gdata.photos.TagEntry(title=atom.Title(text=tag))
    if isinstance(photo_or_uri, (str, unicode)):
      post_uri = photo_or_uri # it's a uri
    elif hasattr(photo_or_uri, 'GetEditMediaLink'):
      post_uri = photo_or_uri.GetPostLink().href
    try:
      return self.Post(data=tag, uri=post_uri,
          converter=gdata.photos.TagEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

  def InsertComment(self, photo_or_uri, comment):
    """Add a comment to a photo.

    Needs authentication, see self.ClientLogin()

    Arguments:
    photo_or_uri: a gdata.photos.PhotoEntry that is about to be commented,
      or a `post' uri pointing to it (string)
    comment: The actual comment

    Returns:
    The new gdata.photos.CommentEntry

    Example:
    p = GetFeed(PhotoUri)
    tag = InsertComment(p, 'OOOH! I would have loved to be there.
      Who's that in the back?')

    """
    comment = gdata.photos.CommentEntry(content=atom.Content(text=comment))
    if isinstance(photo_or_uri, (str, unicode)):
      post_uri = photo_or_uri # it's a uri
    elif hasattr(photo_or_uri, 'GetEditMediaLink'):
      post_uri = photo_or_uri.GetPostLink().href
    try:
      return self.Post(data=comment, uri=post_uri,
          converter=gdata.photos.CommentEntryFromString)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

  def Delete(self, object_or_uri, *args, **kwargs):
    """Delete an object.

    Re-implementing the GDataService.Delete method, to add some
    convenience.

    Arguments:
    object_or_uri: Any object that has a GetEditLink() method that
      returns a link, or a uri to that object.

    Returns:
    ? or GooglePhotosException on errors
    """
    try:
      uri = object_or_uri.GetEditLink().href
    except AttributeError:
      uri = object_or_uri
    try:
      return gdata.service.GDataService.Delete(self, uri, *args, **kwargs)
    except gdata.service.RequestError, e:
      raise GooglePhotosException(e.args[0])

def GetSmallestThumbnail(media_thumbnail_list):
  """Helper function to get the smallest thumbnail of a list of
  gdata.media.Thumbnail.
  Returns gdata.media.Thumbnail """
  r = {}
  for thumb in media_thumbnail_list:
    r[int(thumb.width)*int(thumb.height)] = thumb
  keys = r.keys()
  keys.sort()
  return r[keys[0]]

def ConvertAtomTimestampToEpoch(timestamp):
  """Helper function to convert a timestamp string, for instance
  from atom:updated or atom:published, to milliseconds since Unix epoch
  (a.k.a. POSIX time).

  `2007-07-22T00:45:10.000Z' -> """
  return time.mktime(time.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.000Z'))
  ## TODO: Timezone aware
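
# Usage sketch (added; hypothetical timestamp, and note that time.mktime()
# above actually returns *seconds* since the epoch, despite the docstring's
# mention of milliseconds):
#
#     ConvertAtomTimestampToEpoch('2007-07-22T00:45:10.000Z')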
MalloyPower/parsing-python
refs/heads/master
front-end/testsuite-python-lib/Python-2.7.2/Lib/test/threaded_import_hangers.py
204
# This is a helper module for test_threaded_import.  The test imports this
# module, and this module tries to run various Python library functions in
# their own thread, as a side effect of being imported.  If the spawned
# thread doesn't complete in TIMEOUT seconds, an "appeared to hang" message
# is appended to the module-global `errors` list.  That list remains empty
# if (and only if) all functions tested complete.

TIMEOUT = 10

import threading
import tempfile
import os.path

errors = []

# This class merely runs a function in its own thread T.  The thread
# importing this module holds the import lock, so if the function called
# by T tries to do its own imports it will block waiting for this module's
# import to complete.
class Worker(threading.Thread):
    def __init__(self, function, args):
        threading.Thread.__init__(self)
        self.function = function
        self.args = args

    def run(self):
        self.function(*self.args)

for name, func, args in [
        # Bug 147376:  TemporaryFile hung on Windows, starting in Python 2.4.
        ("tempfile.TemporaryFile", tempfile.TemporaryFile, ()),

        # The real cause for bug 147376:  ntpath.abspath() caused the hang.
        ("os.path.abspath", os.path.abspath, ('.',)),
        ]:
    t = Worker(func, args)
    t.start()
    t.join(TIMEOUT)
    if t.is_alive():
        errors.append("%s appeared to hang" % name)
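
# Note (added for clarity): t.join(TIMEOUT) only bounds how long the importing
# thread waits; a worker that is still alive afterwards is assumed to have
# deadlocked on the import lock rather than merely being slow, which is why
# the recorded message says "appeared to hang".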
sauloal/cnidaria
refs/heads/master
scripts/venv/lib/python2.7/site-packages/cogent/motif/util.py
1
#!/usr/bin/env python
"""Utility classes for general motif and module API."""

from __future__ import division
from cogent.core.alignment import Alignment
from cogent.core.location import Span

__author__ = "Jeremy Widmann"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["Jeremy Widmann", "Rob Knight"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "Jeremy Widmann"
__email__ = "[email protected]"
__status__ = "Prototype"

class Location(Span):
    """Object that stores location information for a module

    -Sequence refers to the original sequence the module came from
    -SeqId is the key of the sequence in the alignment
    -Start is the position in the sequence
    """

    def __init__(self, SeqId, Start, End=None):
        """Initializes location object"""
        self.SeqId = SeqId
        Span.__init__(self, Start, End)

    def __cmp__(self, other):
        """Overwriting __cmp__ for sorting purposes"""
        return cmp(self.SeqId, other.SeqId)

class ModuleInstanceI(object):
    """Object that stores individual module instance information.

    Contains sequence, location, Pvalue and Evalue of a module instance
    as well as some basic instance functions.
    """

    def __init__(self, Sequence, Location, Pvalue=None, Evalue=None):
        """Initializes ModuleInstance object"""
        self.Sequence = Sequence
        self.Location = Location    #Location Object
        self.Pvalue = Pvalue
        self.Evalue = Evalue

    def distance(self, other):
        """Calculates the distance between two ModuleInstances"""
        raise NotImplementedError

    def __cmp__(self, other):
        """Overwriting __cmp__ function to compare ModuleInstance objects"""
        if self is other:
            return 0
        return cmp(self.Pvalue, other.Pvalue) \
               or cmp(self.Evalue, other.Evalue) \
               or cmp(self.Location, other.Location) \
               or cmp(str(self), str(other))

    def __lt__(self, other):
        return cmp(self, other) == -1

    def __le__(self, other):
        return cmp(self, other) <= 0

    def __gt__(self, other):
        return cmp(self, other) == 1

    def __ge__(self, other):
        return cmp(self, other) >= 0

    def __eq__(self, other):
        return self.__cmp__(other) == 0

    def __ne__(self, other):
        return cmp(self, other) != 0

class ModuleInstanceStr(ModuleInstanceI, str):
    """Constructor for ModuleInstance inheriting from string."""

    def __new__(cls, data='', *args, **kwargs):
        return str.__new__(cls, data)

    def __init__(self, *args, **kwargs):
        return ModuleInstanceI.__init__(self, *args, **kwargs)

def ModuleInstance(data, Location, Pvalue=None, Evalue=None, constructor=None):
    """Creates ModuleInstance given a constructor."""
    if constructor is None:
        #maybe add code to try to figure out what to do from the data later
        constructor = ModuleInstanceStr
    return constructor(data, Location, Pvalue, Evalue)

def seqs_from_empty(obj, *args, **kwargs):
    """Allows empty initialization of Module, useful when data must be added."""
    return [], []

class Module(Alignment):
    """Object that stores module information.

    Module is an Alignment of ModuleInstances.  Constructed as a dict keyed
    by location with ModuleInstance sequence as the value:
        - {(SeqId, Start): ModuleInstance}
    """
    InputHandlers = Alignment.InputHandlers.copy()
    InputHandlers['empty'] = seqs_from_empty

    def __init__(self, data=None, Template=None, MolType=None,\
        Locations=None, Pvalue=None, Evalue=None, Llr=None,\
        ID=None, ConsensusSequence=None):
        """Initializes Module object"""
        self.Template = Template
        if MolType is not None:
            self.MolType = MolType
        self.Pvalue = Pvalue
        self.Evalue = Evalue
        self.Llr = Llr  #Log likelihood ratio
        self.ID = ID
        self.ConsensusSequence = ConsensusSequence
        if isinstance(data, dict):
            data = sorted(data.items())
        else:
            try:
                data = sorted(data)
            except TypeError:
                pass
        super(Module, self).__init__(data, MolType=MolType)

    def update(self, other):
        """Updates self with info in other, in-place. WARNING: No validation!"""
        self.Names += other.Names
        self.NamedSeqs.update(other.NamedSeqs)

    def __setitem__(self, item, val):
        """Replaces item in self.NamedSeqs. WARNING: No validation!"""
        if item not in self.NamedSeqs:
            self.Names.append(item)
        self.NamedSeqs[item] = val

    def __repr__(self):
        return str(self.NamedSeqs)

    def __str__(self):
        """Returns string representation of IUPAC consensus sequence"""
        if len(self.MolType.Alphabet) < 20:
            return str(self.IUPACConsensus(self.MolType))
        return str(''.join(self.majorityConsensus()))

    def distance(self, other):
        """Calculates the distance between two Modules"""
        raise NotImplementedError

    def __cmp__(self, other):
        """Overwriting __cmp__ function to compare Module objects"""
        return cmp(self.Pvalue, other.Pvalue) \
               or cmp(self.Evalue, other.Evalue)

    def __hash__(self):
        """overwriting __hash__ function to hash Module object"""
        return id(self)

    def _get_location_dict(self):
        """Returns a dict of module locations.

        Represented as a dict with SeqId as key and [indices] as values:
            {SeqId:[indices]}
        """
        location_dict = {}
        for key in self.Names:
            try:
                location_dict[key[0]].append(key[1])
            except:
                location_dict[key[0]] = [key[1]]
        return location_dict

    LocationDict = property(_get_location_dict)

    def _get_loose(self):
        """Returns a list of all ModuleInstances not in self.Strict.
        """
        loose_list = []
        strict = self.Strict[0].Sequence
        for instance in self.values():
            if instance.Sequence != strict:
                loose_list.append(instance)
        return loose_list

    Loose = property(_get_loose)

    def _get_strict(self):
        """Returns a list of ModuleInstances with the most common sequence.
        """
        strict_dict = {}    #Dictionary to hold counts of instance strings.
        #For each ModuleInstance in self.
        for instance in self.values():
            #If instance already in strict_dict then increment and append.
            if instance.Sequence in strict_dict:
                strict_dict[instance.Sequence][0] += 1
                strict_dict[instance.Sequence][1].append(instance)
            #Else, add count and instance to dict.
            else:
                strict_dict[instance.Sequence] = [1, [instance]]
        #List with all counts and instances
        count_list = strict_dict.values()
        count_list.sort()
        count_list.reverse()
        #Set self.Template as the Strict ModuleInstance sequence.
        self.Template = count_list[0][1][0].Sequence
        #Return list of ModuleInstances with the most common sequence.
        return count_list[0][1]

    Strict = property(_get_strict)

    def basePossibilityCount(self, degenerate_dict=None):
        """Returns number of possible combinations to form a degenerate string.
        """
        if degenerate_dict is None:
            degenerate_dict = self.MolType.Degenerates
        #Get degenerate string representation of module
        degenerate_string = self.__str__()
        #Get length of first degenerate character
        combinations = len(degenerate_dict.get(degenerate_string[0], '-'))
        #Multiply number of possibilities for each degenerate character together
        for i in range(1, len(degenerate_string)):
            combinations *= len(degenerate_dict.get(degenerate_string[i], '-'))
        #Return total possible ways to make module
        return combinations

    def _coerce_seqs(self, seqs, is_array):
        """Override _coerce_seqs so we keep the orig objects."""
        return seqs

    def _seq_to_aligned(self, seq, key):
        """Override _seq_to_aligned so we keep the orig objects."""
        return seq

class ModuleFinder(object):
    """Object that constructs a dict of modules given an alignment"""

    def __call__(self, *args):
        """Call method for ModuleFinder"""
        raise NotImplementedError

class ModuleConsolidator(object):
    """Object that takes in a list of modules and returns a consolidated list.

    Modules that are very similar are considered the same module.
    """

    def __call__(self, *args):
        """Call method for ModuleConsolidator"""
        raise NotImplementedError

class Motif(object):
    """Object that stores modules that are considered the same motif
    """

    def __init__(self, Modules=None, Info=None):
        """Initializes Motif object"""
        self.Modules = []
        try:
            #only one module in motif
            self.Modules.append(Modules)
        except:
            #list of modules
            self.Modules.extend(Modules)
        self.Info = Info

class MotifFinder(object):
    """Object that takes modules and constructs motifs

    - Takes in a list of modules and constructs a list of Motifs"""

    def __call__(self, *args):
        """Call method for MotifFinder"""
        raise NotImplementedError

class MotifFormatter(object):
    """Object that takes a list of Motifs and formats them for output to
    browser

    - Takes in a list of motifs and generates specified output format.
    """
    COLORS = [ "#00FF00", "#0000FF", "#FFFF00", "#00FFFF", "#FF00FF",
               "#FAEBD7", "#8A2BE2", "#A52A2A", "#00CC00", "#FF6600",
               "#FF33CC", "#CC33CC", "#9933FF", "#FFCCCC", "#00CCCC",
               "#CC6666", "#CCCC33", "#66CCFF", "#6633CC", "#FF6633" ]

    STYLES = ["", "font-weight: bold", "font-style: italic"]

    def getColorMapS0(self, module_ids):
        """ Standalone version - needed b/c of pickle problem """
        color_map = {}
        mod = len(MotifFormatter.COLORS)
        smod = len(MotifFormatter.STYLES)
        for module_id in module_ids:
            ix = int(module_id)
            cur_color = ix % mod
            cur_style = int(round((ix / mod))) % smod
            style_str = """background-color: %s; %s; font-family: 'Courier New', Courier"""
            color_map[module_id] = style_str % (
                MotifFormatter.COLORS[cur_color],
                MotifFormatter.STYLES[cur_style])
        return color_map

    def getColorMap(self, motif_results):
        """ Return color mapping for motif_results """
        module_ids = []
        for motif in motif_results.Motifs:
            for module in motif.Modules:
                module_ids.append(module.ID)
        return self.getColorMapS0(sorted(module_ids))

    def getColorMapRgb(self, motif_results):
        """ Return color mapping for motif_results using RGB rather than hex.
        """
        module_ids = []
        for motif in motif_results.Motifs:
            for module in motif.Modules:
                module_ids.append(module.ID)
        color_map = {}
        mod = len(MotifFormatter.COLORS)
        for module_id in module_ids:
            ix = int(module_id)
            cur_color = ix % mod
            color_map["color_" + str(module_id)] = \
                html_color_to_rgb(MotifFormatter.COLORS[cur_color])
        return color_map

    def __init__(self, *args):
        """Init method for MotifFormatter"""
        self.ConsCache = {}
        self.ConservationThresh = None

    def __call__(self, *args):
        """Call method for MotifFormatter"""
        raise NotImplementedError

    def _make_conservation_consensus(self, module):
        """ Return conservation consensus string """
        mod_id = module.ID
        if mod_id in self.ConsCache:
            return self.ConsCache[mod_id]

        cons_thresh = self.ConservationThresh

        cons_seq = ''.join(module.majorityConsensus())
        col_freqs = module.columnFreqs()
        cons_con_seq = []
        for ix, col in enumerate(col_freqs):
            col_sum = sum(col.values())
            keep = False
            for b, v in col.items():
                cur_cons = v / col_sum
                if cur_cons >= cons_thresh:
                    keep = True
            if keep:
                cons_con_seq.append(cons_seq[ix])
            else:
                cons_con_seq.append(" ")

        self.ConsCache[mod_id] = (cons_seq, ''.join(cons_con_seq))
        return self.ConsCache[mod_id]

    def _flag_conserved_consensus(self, cons_con_seq, cons_seq, cur_seq):
        """ Annotate consensus """
        color_style = """background-color: %s; font-family: 'Courier New', Courier"""
        span_fmt = """<span style="%s">%s</span>"""
        h_str = []
        for ix in range(len(cur_seq)):
            cur_c = cur_seq[ix]
            if cur_c == cons_con_seq[ix]:
                h_str.append(span_fmt % (color_style % "#eeeeee", "+"))
            elif cons_con_seq[ix] != " ":
                #h_str.append("<font color=red>-</font>")
                h_str.append(span_fmt % (color_style % "#ff0000", "-"))
            elif cons_seq[ix] == cur_c:
                #h_str.append("<font color=orange>*</font>")
                h_str.append(span_fmt % (color_style % "white", "*"))
            else:
                h_str.append("&nbsp;")
        return h_str
        #return """<font face="Courier New, Courier, monospace">%s</font>""" % ''.join(h_str)

class MotifResults(object):
    """Object that holds a list of Modules, Motifs and a dict of Results.
    """

    def __init__(self, Modules=None, Motifs=None, Results=None,
                 Parameters=None, Alignment=None, MolType=None):
        """Initializes MotifResults object."""
        self.Modules = Modules or []
        self.Motifs = Motifs or []
        self.Results = Results or {}  #Results not belonging to other categories.
        if Parameters:
            self.__dict__.update(Parameters)
        self.Alignment = Alignment
        self.MolType = MolType

    def makeModuleMap(self):
        """Returns dict of sequence ID keyed to modules.

        - result = {sequence_id:(index_in_sequence, module_id, module_len)}
        """
        module_map = {}  #Dict with locations of every motif keyed by module
        if self:
            for motif in self.Motifs:
                for module in motif.Modules:
                    mod_len = len(module)
                    mod_id = str(module.ID)
                    for skey, indexes in module.LocationDict.items():
                        if skey not in module_map:
                            module_map[skey] = []
                        for ix in indexes:
                            module_map[skey].append((ix, mod_id, mod_len))
        return module_map

def html_color_to_rgb(colorstring):
    """ convert #RRGGBB to an (R, G, B) tuple

        - From Python Cookbook.
    """
    colorstring = colorstring.strip()
    if colorstring[0] == '#':
        colorstring = colorstring[1:]
    if len(colorstring) != 6:
        raise ValueError, "input #%s is not in #RRGGBB format" % colorstring
    r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:]
    r, g, b = [int(n, 16) for n in (r, g, b)]
    #Divide each rgb value by 255.0 to get a float from 0.0-1.0 so colors
    # work in PyMOL
    r = r/255.0
    g = g/255.0
    b = b/255.0
    return (r, g, b)

def make_remap_dict(results_ids, allowed_ids):
    """Returns a dict mapping results_ids to allowed_ids.
    """
    remap_dict = {}
    warning = None
    if sorted(results_ids) == sorted(allowed_ids):
        remap_dict = dict(zip(results_ids, results_ids))
    else:
        warning = 'Sequence IDs do not match allowed IDs. IDs were remapped.'
        for ri in results_ids:
            curr_match = []
            for ai in allowed_ids:
                if ai.startswith(ri):
                    curr_match.append(ai)
            if not curr_match:
                raise ValueError, \
                    'Sequence ID "%s" was not found in allowed IDs'%(ri)
            #if current results id was prefix of more than one allowed ID
            elif len(curr_match) > 1:
                #Check if any allowed ID matches map to other results IDs
                for cm in curr_match:
                    #Remove any matches that map to other results IDs
                    for ri2 in results_ids:
                        if ri2 != ri and cm.startswith(ri2):
                            curr_match.remove(cm)
                #Raise error if still more than one match
                if len(curr_match) > 1:
                    raise ValueError, \
                        'Sequence ID "%s" had more than one match in allowed IDs: "%s"'%(ri, str(curr_match))
            remap_dict[ri] = curr_match[0]
    return remap_dict, warning
CopeX/odoo
refs/heads/8.0
addons/base_setup/base_setup.py
382
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import simplejson import cgi from openerp import tools from openerp.osv import fields, osv from openerp.tools.translate import _ from lxml import etree # Specify Your Terminology will move to 'partner' module class specify_partner_terminology(osv.osv_memory): _name = 'base.setup.terminology' _inherit = 'res.config' _columns = { 'partner': fields.selection([ ('Customer','Customer'), ('Client','Client'), ('Member','Member'), ('Patient','Patient'), ('Partner','Partner'), ('Donor','Donor'), ('Guest','Guest'), ('Tenant','Tenant') ], 'How do you call a Customer', required=True ), } _defaults={ 'partner' :'Customer', } def make_translations(self, cr, uid, ids, name, type, src, value, res_id=0, context=None): trans_obj = self.pool.get('ir.translation') user_obj = self.pool.get('res.users') context_lang = user_obj.browse(cr, uid, uid, context=context).lang existing_trans_ids = trans_obj.search(cr, uid, [('name','=',name), ('lang','=',context_lang), ('type','=',type), ('src','=',src), ('res_id','=',res_id)]) if existing_trans_ids: trans_obj.write(cr, uid, existing_trans_ids, {'value': value}, context=context) else: create_id = trans_obj.create(cr, uid, {'name': name,'lang': context_lang, 'type': type, 'src': src, 'value': value , 'res_id': res_id}, context=context) return {} def execute(self, cr, uid, ids, context=None): def _case_insensitive_replace(ref_string, src, value): import re pattern = re.compile(src, re.IGNORECASE) return pattern.sub(_(value), _(ref_string)) trans_obj = self.pool.get('ir.translation') fields_obj = self.pool.get('ir.model.fields') menu_obj = self.pool.get('ir.ui.menu') act_window_obj = self.pool.get('ir.actions.act_window') for o in self.browse(cr, uid, ids, context=context): #translate label of field field_ids = fields_obj.search(cr, uid, [('field_description','ilike','Customer')]) for f_id in fields_obj.browse(cr ,uid, field_ids, context=context): field_ref = f_id.model_id.model + ',' + f_id.name self.make_translations(cr, uid, ids, field_ref, 'field', f_id.field_description, _case_insensitive_replace(f_id.field_description,'Customer',o.partner), context=context) #translate help tooltip of field for obj in self.pool.models.values(): for field_name, field_rec in obj._columns.items(): if field_rec.help.lower().count('customer'): field_ref = obj._name + ',' + field_name self.make_translations(cr, uid, ids, field_ref, 'help', field_rec.help, _case_insensitive_replace(field_rec.help,'Customer',o.partner), context=context) #translate menuitems menu_ids = menu_obj.search(cr,uid, [('name','ilike','Customer')]) for m_id in menu_obj.browse(cr, uid, menu_ids, 
context=context): menu_name = m_id.name menu_ref = 'ir.ui.menu' + ',' + 'name' self.make_translations(cr, uid, ids, menu_ref, 'model', menu_name, _case_insensitive_replace(menu_name,'Customer',o.partner), res_id=m_id.id, context=context) #translate act window name act_window_ids = act_window_obj.search(cr, uid, [('name','ilike','Customer')]) for act_id in act_window_obj.browse(cr ,uid, act_window_ids, context=context): act_ref = 'ir.actions.act_window' + ',' + 'name' self.make_translations(cr, uid, ids, act_ref, 'model', act_id.name, _case_insensitive_replace(act_id.name,'Customer',o.partner), res_id=act_id.id, context=context) #translate act window tooltips act_window_ids = act_window_obj.search(cr, uid, [('help','ilike','Customer')]) for act_id in act_window_obj.browse(cr ,uid, act_window_ids, context=context): act_ref = 'ir.actions.act_window' + ',' + 'help' self.make_translations(cr, uid, ids, act_ref, 'model', act_id.help, _case_insensitive_replace(act_id.help,'Customer',o.partner), res_id=act_id.id, context=context) return {} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
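The wizard above rewrites every label, tooltip, menu and action name containing "Customer" via a case-insensitive regex substitution. A minimal standalone sketch of that helper, outside any OpenERP registry (the gettext ``_`` wrapper is omitted, and the ``re.escape`` call is a defensive addition not present in the original):

import re

def case_insensitive_replace(ref_string, src, value):
    # Substitute every occurrence of ``src``, whatever its casing.
    pattern = re.compile(re.escape(src), re.IGNORECASE)
    return pattern.sub(value, ref_string)

print(case_insensitive_replace('Invoice this CUSTOMER', 'Customer', 'Patient'))
# -> Invoice this Patient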
lukemarsden/flocker
refs/heads/master
admin/packaging.py
2
# -*- test-case-name: admin.test.test_packaging -*- # Copyright Hybrid Logic Ltd. See LICENSE file for details. """ Helper utilities for Flocker packaging. """ from functools import partial import platform import sys import os from subprocess import check_output, check_call, CalledProcessError, call from tempfile import mkdtemp from textwrap import dedent, fill from eliot import Logger, start_action, to_file from twisted.python.constants import ValueConstant, Values from twisted.python.filepath import FilePath from twisted.python import usage, log from characteristic import attributes, Attribute import virtualenv from flocker.common.version import make_rpm_version class PackageTypes(Values): """ Constants representing supported target packaging formats. """ RPM = ValueConstant('rpm') DEB = ValueConstant('deb') # Associate package formats with platform operating systems. PACKAGE_TYPE_MAP = { PackageTypes.RPM: ('centos',), PackageTypes.DEB: ('ubuntu',), } PACKAGE_NAME_FORMAT = { PackageTypes.RPM: '{}-{}-{}.{}.rpm', PackageTypes.DEB: '{}_{}-{}_{}.deb', } ARCH = { 'all': { PackageTypes.RPM: 'noarch', PackageTypes.DEB: 'all', }, 'native': { # HACK PackageTypes.RPM: 'x86_64', PackageTypes.DEB: 'amd64', }, } # Path from the root of the source tree to the directory holding possible build # targets. A build target is a directory containing a Dockerfile. BUILD_TARGETS_SEGMENTS = [b"admin", b"build_targets"] PACKAGE_ARCHITECTURE = { 'clusterhq-flocker-cli': 'all', 'clusterhq-flocker-node': 'all', 'clusterhq-python-flocker': 'native', } def package_filename(package_type, package, architecture, rpm_version): package_name_format = PACKAGE_NAME_FORMAT[package_type] return package_name_format.format( package, rpm_version.version, rpm_version.release, ARCH[architecture][package_type]) @attributes(['name', 'version']) class Distribution(object): """ A linux distribution. :ivar bytes name: The name of the distribution. :ivar bytes version: The version of the distribution. """ @classmethod def _get_current_distribution(klass): """ :return: A ``Distribution`` representing the current platform. """ name, version, id = ( platform.linux_distribution(full_distribution_name=False)) return klass(name=name.lower(), version=version) def package_type(self): distribution_name = self.name.lower() for package_type, distribution_names in PACKAGE_TYPE_MAP.items(): if distribution_name.lower() in distribution_names: return package_type else: raise ValueError("Unknown distribution.", distribution_name) def native_package_architecture(self): """ :return: The ``bytes`` representing the native package architecture for this distribution. """ return ARCH['native'][self.package_type()] DISTRIBUTION_NAME_MAP = { 'centos-7': Distribution(name="centos", version="7"), 'ubuntu-14.04': Distribution(name="ubuntu", version="14.04"), 'ubuntu-15.04': Distribution(name="ubuntu", version="15.04"), } CURRENT_DISTRIBUTION = Distribution._get_current_distribution() def _native_package_type(): """ :return: The ``bytes`` name of the native package format for this platform. """ distribution_name = CURRENT_DISTRIBUTION.name.lower() for package_type, distribution_names in PACKAGE_TYPE_MAP.items(): if distribution_name.lower() in distribution_names: return package_type else: raise ValueError("Unknown distribution.", distribution_name) @attributes(['steps']) class BuildSequence(object): """ Run the supplied ``steps`` consecutively. :ivar tuple steps: A sequence of steps. 
""" logger = Logger() _system = u"packaging:buildsequence:run" def run(self): for step in self.steps: with start_action(self.logger, self._system, step=repr(step)): step.run() def run_command(args, added_env=None, cwd=None): """ Run a subprocess and return its output. The command line and its environment are logged for debugging purposes. :param dict env: Addtional environment variables to pass. :return: The output of the command. """ log.msg( format="Running %(args)r with environment %(env)r " "and working directory %(cwd)s", args=args, env=added_env, cwd=cwd) if added_env: env = os.environ.copy() env.update(env) else: env = None try: return check_output(args=args, env=env, cwd=cwd,) except CalledProcessError as e: print e.output @attributes([ Attribute('package'), Attribute('compare', default_value=None), Attribute('version', default_value=None)]) class Dependency(object): """ A package dependency. :ivar bytes package: The name of the dependency package. :ivar bytes compare: The operator to use when comparing required and available versions of the dependency package. :ivar bytes version: The version of the dependency package. """ def __init__(self): """ :raises ValueError: If ``compare`` and ``version`` values are not compatible. """ if (self.compare is None) != (self.version is None): raise ValueError( "Must specify both or neither compare and version.") def format(self, package_type): """ :return: A ``bytes`` representation of the desired version comparison which can be parsed by the package management tools associated with ``package_type``. :raises: ``ValueError`` if supplied with an unrecognised ``package_type``. """ if package_type == PackageTypes.DEB: if self.version: return "%s (%s %s)" % ( self.package, self.compare, self.version) else: return self.package elif package_type == PackageTypes.RPM: if self.version: return "%s %s %s" % (self.package, self.compare, self.version) else: return self.package else: raise ValueError("Unknown package type.") # The minimum required version of Docker. The package names vary between # operating systems and are supplied later. DockerDependency = partial(Dependency, compare='>=', version='1.3.0') # We generate three packages. ``clusterhq-python-flocker`` contains the entire # code base. ``clusterhq-flocker-cli`` and ``clusterhq-flocker-node`` are meta # packages which symlink only the cli or node specific scripts and load only # the dependencies required to satisfy those scripts. This map represents the # dependencies for each of those three packages and accounts for differing # dependency package names and versions on various platforms. DEPENDENCIES = { 'python': { 'centos': ( Dependency(package='python'), ), 'ubuntu': ( Dependency(package='python2.7'), ), }, 'node': { 'centos': ( DockerDependency(package='docker'), Dependency(package='/usr/sbin/iptables'), Dependency(package='openssh-clients'), ), 'ubuntu': ( # trust-updates version DockerDependency(package='docker.io'), Dependency(package='iptables'), Dependency(package='openssh-client'), ), }, 'cli': { 'centos': ( Dependency(package='openssh-clients'), ), 'ubuntu': ( Dependency(package='openssh-client'), ), }, } def make_dependencies(package_name, package_version, distribution): """ Add the supplied version of ``python-flocker`` to the base dependency lists defined in ``DEPENDENCIES``. :param bytes package_name: The name of the flocker package to generate dependencies for. :param bytes package_version: The flocker version. 
:param Distribution distribution: The distribution for which to generate dependencies. :return: A list of ``Dependency`` instances. """ dependencies = DEPENDENCIES[package_name][distribution.name] if package_name in ('node', 'cli'): dependencies += ( Dependency( package='clusterhq-python-flocker', compare='=', version=package_version),) return dependencies def create_virtualenv(root): """ Create a virtualenv in ``root``. :param FilePath root: The directory in which to install a virtualenv. :returns: A ``VirtualEnv`` instance. """ # We call ``virtualenv`` as a subprocess rather than as a library, so that # we can turn off Python byte code compilation. run_command( ['virtualenv', '--python=/usr/bin/python2.7', '--quiet', root.path], added_env=dict(PYTHONDONTWRITEBYTECODE='1') ) # XXX: Virtualenv doesn't link to pyc files when copying its bootstrap # modules. See https://github.com/pypa/virtualenv/issues/659 for module_name in virtualenv.REQUIRED_MODULES: py_base = root.descendant( ['lib', 'python2.7', module_name]) py = py_base.siblingExtension('.py') if py.exists() and py.islink(): pyc = py_base.siblingExtension('.pyc') py_target = py.realpath() pyc_target = FilePath( py_target.splitext()[0]).siblingExtension('.pyc') if pyc.exists(): pyc.remove() if pyc_target.exists(): pyc_target.linkTo(pyc) return VirtualEnv(root=root) @attributes(['virtualenv']) class InstallVirtualEnv(object): """ Install a virtualenv in the supplied ``target_path``. :ivar FilePath target_path: The path to a directory in which to create the virtualenv. """ _create_virtualenv = staticmethod(create_virtualenv) def run(self): self._create_virtualenv(root=self.virtualenv.root) @attributes(['name', 'version']) class PythonPackage(object): """ A model representing a single pip installable Python package. :ivar bytes name: The name of the package. :ivar bytes version: The version of the package. """ @attributes(['root']) class VirtualEnv(object): """ A model representing a virtualenv directory. """ def install(self, package_uri): """ Install package and its dependencies into this virtualenv. """ # We can't just call pip directly, because in the virtualenvs created # in tests, the shebang line becomes too long and triggers an # error. See http://www.in-ulm.de/~mascheck/various/shebang/#errors python_path = self.root.child('bin').child('python').path run_command( [python_path, '-m', 'pip', '--quiet', 'install', package_uri], ) @attributes(['virtualenv', 'package_uri']) class InstallApplication(object): """ Install the supplied ``package_uri`` using the supplied ``virtualenv``. :ivar VirtualEnv virtualenv: The virtual environment in which to install ``package``. :ivar bytes package_uri: A pip compatible URI. """ def run(self): self.virtualenv.install(self.package_uri) @attributes(['links']) class CreateLinks(object): """ Create symlinks to the files in ``links``. """ def run(self): """ If link is a directory, the target filename will be used as the link name within that directory. """ for target, link in self.links: if link.isdir(): name = link.child(target.basename()) else: name = link target.linkTo(name) @attributes(['virtualenv', 'package_name']) class GetPackageVersion(object): """ Record the version of ``package_name`` installed in ``virtualenv_path`` by examining ``<package_name>.__version__``. :ivar VirtualEnv virtualenv: The ``virtualenv`` containing the package. :ivar bytes package_name: The name of the package whose version will be recorded. :ivar version: The version string of the supplied package. 
        Default is ``None`` until the step has been run.

    :raises: If the supplied ``package_name`` is not found.
    """
    version = None

    def run(self):
        python_path = self.virtualenv.root.child('bin').child('python').path
        output = check_output(
            [python_path,
             '-c',
             '; '.join([
                 'from sys import stdout',
                 'stdout.write(__import__(%r).__version__)'
                 % self.package_name
             ])])

        self.version = output


@attributes([
    'package_type',
    'destination_path',
    'source_paths',
    'name',
    'prefix',
    'epoch',
    'rpm_version',
    'license',
    'url',
    'vendor',
    'maintainer',
    'architecture',
    'description',
    'dependencies',
    'category',
    Attribute('directories', default_factory=list),
    Attribute('after_install', default_value=None),
])
class BuildPackage(object):
    """
    Use ``fpm`` to build a package file (RPM or DEB) from the supplied
    ``source_paths``.

    :ivar package_type: A package type constant from ``PackageTypes``.
    :ivar FilePath destination_path: The path in which to save the resulting
        package file.
    :ivar dict source_paths: A dictionary mapping paths in the filesystem to
        the path in the package.
    :ivar bytes name: The name of the package.
    :ivar FilePath prefix: The path beneath which the packaged files will be
        installed.
    :ivar bytes epoch: An integer string tag used to help RPM determine
        version number ordering.
    :ivar rpm_version rpm_version: An object representing an RPM style version
        containing a release and a version attribute.
    :ivar bytes license: The name of the license under which this package is
        released.
    :ivar bytes url: The URL of the source of this package.
    :ivar unicode vendor: The name of the package vendor.
    :ivar bytes maintainer: The email address of the package maintainer.
    :ivar bytes architecture: The OS architecture for which this package is
        targeted. Default ``None`` means architecture independent.
    :ivar unicode description: A description of the package.
    :ivar unicode category: The category of the package.
    :ivar list dependencies: The list of dependencies of the package.
    :ivar list directories: List of directories the package should own.
    :ivar FilePath after_install: A script to run after installation of the
        package, or ``None``.
    """
    def run(self):
        architecture = self.architecture

        command = [
            'fpm',
            '--force',
            '-s', 'dir',
            '-t', self.package_type.value,
            '--package', self.destination_path.path,
            '--name', self.name,
            '--prefix', self.prefix.path,
            '--version', self.rpm_version.version,
            '--iteration', self.rpm_version.release,
            '--license', self.license,
            '--url', self.url,
            '--vendor', self.vendor,
            '--maintainer', self.maintainer,
            '--architecture', architecture,
            '--description', self.description,
            '--category', self.category,
        ]

        if not (self.package_type is PackageTypes.DEB and self.epoch == '0'):
            # Leave epoch unset for debs with epoch 0
            command.extend(['--epoch', self.epoch])

        for requirement in self.dependencies:
            command.extend(
                ['--depends', requirement.format(self.package_type)])

        for directory in self.directories:
            command.extend(
                ['--directories', directory.path])

        if self.after_install is not None:
            command.extend(
                ['--after-install', self.after_install.path])

        for source_path, package_path in self.source_paths.items():
            # Think of /= as a separate operator. It causes fpm to copy the
            # content of the directory rather than the directory itself.
            command.append(
                "%s/=%s" % (source_path.path, package_path.path))

        run_command(command)


@attributes(['package_version_step'])
class DelayedRpmVersion(object):
    """
    Pretend to be an ``rpm_version`` instance providing a ``version`` and
    ``release`` attribute.

    The values of these attributes will be calculated from the Python version
    string read from a previous ``GetPackageVersion`` build step.
    :ivar GetPackageVersion package_version_step: An instance of
        ``GetPackageVersion`` whose ``run`` method will have been called and
        from which the version string will be read.
    """
    _rpm_version = None

    @property
    def rpm_version(self):
        """
        :return: The ``rpm_version``, computed on first access and cached
            thereafter.
        """
        if self._rpm_version is None:
            self._rpm_version = make_rpm_version(
                self.package_version_step.version
            )
        return self._rpm_version

    @property
    def version(self):
        """
        :return: The ``version`` string.
        """
        return self.rpm_version.version

    @property
    def release(self):
        """
        :return: The ``release`` string.
        """
        return self.rpm_version.release

    def __str__(self):
        return self.rpm_version.version + '-' + self.rpm_version.release


IGNORED_WARNINGS = {
    PackageTypes.RPM: (
        # Ignore the summary line rpmlint prints.
        # We always check a single package, so we can hardcode the numbers.
        '1 packages and 0 specfiles checked;',

        # This isn't a distribution package so we deliberately install in /opt
        'dir-or-file-in-opt',

        # We don't care enough to fix this
        'python-bytecode-inconsistent-mtime',

        # /opt/flocker/lib/python2.7/no-global-site-packages.txt will be
        # empty.
        'zero-length',

        # cli/node packages have symlink to base package
        'dangling-symlink',

        # Should be fixed
        'no-documentation',
        'no-manual-page-for-binary',

        # changelogs are elsewhere
        'no-changelogname-tag',

        # virtualenv's interpreter is correct.
        'wrong-script-interpreter',
        # rpmlint on CentOS 7 doesn't see python in the virtualenv.
        'no-binary',

        # These are in our dependencies.
        'incorrect-fsf-address',
        'pem-certificate',
        'non-executable-script',
        'devel-file-in-non-devel-package',
        'unstripped-binary-or-object',

        # Firewall and systemd configuration live in /usr/lib
        'only-non-binary-in-usr-lib',

        # We don't allow configuring ufw firewall applications.
        'non-conffile-in-etc /etc/ufw/applications.d/flocker-control',

        # Upstart control files are not installed as conffiles.
        'non-conffile-in-etc /etc/init/flocker-dataset-agent.conf',
        'non-conffile-in-etc /etc/init/flocker-container-agent.conf',
        'non-conffile-in-etc /etc/init/flocker-control.conf',

        # Cryptography hazmat bindings
        'package-installs-python-pycache-dir opt/flocker/lib/python2.7/site-packages/cryptography/hazmat/bindings/__pycache__/',  # noqa

        # We require an old version of setuptools
        # XXX This should not be necessary after
        # https://clusterhq.atlassian.net/browse/FLOC-1373
        'backup-file-in-package /opt/flocker/lib/python2.7/site-packages/setuptools-3.6.dist-info/requires.txt.orig',  # noqa
    ),

    # See https://www.debian.org/doc/manuals/developers-reference/tools.html#lintian  # noqa
    PackageTypes.DEB: (
        # This isn't a distribution package so we deliberately install in /opt
        'dir-or-file-in-opt',

        # This isn't a distribution package, so the precise details of the
        # distro portion of the version don't need to be followed.
        'debian-revision-not-well-formed',

        # virtualenv's interpreter is correct.
        'wrong-path-for-interpreter',
        # Virtualenv creates symlinks for local/{bin,include,lib}. Ignore
        # them.
        'symlink-should-be-relative',

        # We depend on python2.7 which depends on libc
        'missing-dependency-on-libc',

        # We are installing in a virtualenv, so we can't easily use debian's
        # bytecompiling infrastructure. It doesn't provide any benefit,
        # either.
        'package-installs-python-bytecode',

        # https://github.com/jordansissel/fpm/issues/833
        ('file-missing-in-md5sums '
         'usr/share/doc/'),

        # lintian expects python dep for .../python shebang lines.
        # We are in a virtualenv that points at python2.7 explicitly and has
        # that dependency.
        'python-script-but-no-python-dep',

        # Should be fixed
        'binary-without-manpage',
        'no-copyright-file',

        # These are in our dependencies.
        'script-not-executable',
        'embedded-javascript-library',
        'extra-license-file',
        'unstripped-binary-or-object',

        # Werkzeug installs various images with executable permissions.
        # https://github.com/mitsuhiko/werkzeug/issues/629
        # Fixed upstream, but not released.
        'executable-not-elf-or-script',

        # Our omnibus packages are never going to be used by upstream so
        # there's no bug to close.
        # https://lintian.debian.org/tags/new-package-should-close-itp-bug.html
        'new-package-should-close-itp-bug',

        # We don't allow configuring ufw firewall applications.
        ('file-in-etc-not-marked-as-conffile '
         'etc/ufw/applications.d/flocker-control'),

        # Upstart control files are not installed as conffiles.
        'file-in-etc-not-marked-as-conffile etc/init/flocker-dataset-agent.conf',  # noqa
        'file-in-etc-not-marked-as-conffile etc/init/flocker-container-agent.conf',  # noqa
        'file-in-etc-not-marked-as-conffile etc/init/flocker-control.conf',

        # Cryptography hazmat bindings
        'package-installs-python-pycache-dir opt/flocker/lib/python2.7/site-packages/cryptography/hazmat/bindings/__pycache__/',  # noqa
    ),
}


@attributes([
    'package_type',
    'destination_path',
    'epoch',
    'rpm_version',
    'package',
    'architecture',
])
class LintPackage(object):
    """
    Run a package linting tool against a package and fail if there are any
    errors or warnings that aren't whitelisted.
    """
    output = sys.stdout

    @staticmethod
    def check_lint_output(warnings, ignored_warnings):
        """
        Filter the output of a linting tool against a list of ignored
        warnings.

        :param list warnings: List of warnings produced.
        :param list ignored_warnings: List of warnings to ignore. A warning is
            ignored if it has a substring matching something in this list.
        """
        unacceptable = []
        for warning in warnings:
            # Ignore certain warning lines
            for ignored in ignored_warnings:
                if ignored in warning:
                    break
            else:
                unacceptable.append(warning)
        return unacceptable

    def run(self):
        filename = package_filename(
            package_type=self.package_type,
            package=self.package, rpm_version=self.rpm_version,
            architecture=self.architecture)

        output_file = self.destination_path.child(filename)

        try:
            check_output([
                {
                    PackageTypes.RPM: 'rpmlint',
                    PackageTypes.DEB: 'lintian',
                }[self.package_type],
                output_file.path,
            ])
        except CalledProcessError as e:
            results = self.check_lint_output(
                warnings=e.output.splitlines(),
                ignored_warnings=IGNORED_WARNINGS[self.package_type],
            )

            if results:
                self.output.write("Package errors (%s):\n" % (self.package))
                self.output.write('\n'.join(results) + "\n")
                raise SystemExit(1)


class PACKAGE(Values):
    """
    Constants for ClusterHQ-specific metadata that we add to all three
    packages.
""" EPOCH = ValueConstant(b'0') LICENSE = ValueConstant(b'ASL 2.0') URL = ValueConstant(b'https://clusterhq.com') VENDOR = ValueConstant(b'ClusterHQ') MAINTAINER = ValueConstant(b'ClusterHQ <[email protected]>') class PACKAGE_PYTHON(PACKAGE): DESCRIPTION = ValueConstant( 'Docker orchestration and volume management tool\n' + fill('This is the base package of scripts and libraries.', 79) ) class PACKAGE_CLI(PACKAGE): DESCRIPTION = ValueConstant( 'Docker orchestration and volume management tool\n' + fill('This meta-package contains links to the Flocker client ' 'utilities, and has only the dependencies required to run ' 'those tools', 79) ) class PACKAGE_NODE(PACKAGE): DESCRIPTION = ValueConstant( 'Docker orchestration and volume management tool\n' + fill('This meta-package contains links to the Flocker node ' 'utilities, and has only the dependencies required to run ' 'those tools', 79) ) def omnibus_package_builder( distribution, destination_path, package_uri, package_files, target_dir=None): """ Build a sequence of build steps which when run will generate a package in ``destination_path``, containing the package installed from ``package_uri`` and all its dependencies. The steps are: * Create a virtualenv with ``--system-site-packages`` which allows certain python libraries to be supplied by the operating system. * Install Flocker and all its dependencies in the virtualenv. * Find the version of the installed Flocker package, as reported by ``pip``. * Build an RPM from the virtualenv directory using ``fpm``. :param package_type: A package type constant from ``PackageTypes``. :param FilePath destination_path: The path to a directory in which to save the resulting RPM file. :param Package package: A ``Package`` instance with a ``pip install`` compatible package URI. :param FilePath package_files: Directory containg system-level files to be installed with packages. :param FilePath target_dir: An optional path in which to create the virtualenv from which the package will be generated. Default is a temporary directory created using ``mkdtemp``. :return: A ``BuildSequence`` instance containing all the required build steps. """ if target_dir is None: target_dir = FilePath(mkdtemp()) flocker_cli_path = target_dir.child('flocker-cli') flocker_cli_path.makedirs() flocker_node_path = target_dir.child('flocker-node') flocker_node_path.makedirs() empty_path = target_dir.child('empty') empty_path.makedirs() # Flocker is installed in /opt. 
# See http://fedoraproject.org/wiki/Packaging:Guidelines#Limited_usage_of_.2Fopt.2C_.2Fetc.2Fopt.2C_and_.2Fvar.2Fopt # noqa virtualenv_dir = FilePath('/opt/flocker') virtualenv = VirtualEnv(root=virtualenv_dir) get_package_version_step = GetPackageVersion( virtualenv=virtualenv, package_name='flocker') rpm_version = DelayedRpmVersion( package_version_step=get_package_version_step) category = { PackageTypes.RPM: 'Applications/System', PackageTypes.DEB: 'admin', }[distribution.package_type()] return BuildSequence( steps=( InstallVirtualEnv(virtualenv=virtualenv), InstallApplication(virtualenv=virtualenv, package_uri=package_uri), # get_package_version_step must be run before steps that reference # rpm_version get_package_version_step, BuildPackage( package_type=distribution.package_type(), destination_path=destination_path, source_paths={virtualenv_dir: virtualenv_dir}, name='clusterhq-python-flocker', prefix=FilePath('/'), epoch=PACKAGE.EPOCH.value, rpm_version=rpm_version, license=PACKAGE.LICENSE.value, url=PACKAGE.URL.value, vendor=PACKAGE.VENDOR.value, maintainer=PACKAGE.MAINTAINER.value, architecture=PACKAGE_ARCHITECTURE['clusterhq-python-flocker'], description=PACKAGE_PYTHON.DESCRIPTION.value, category=category, dependencies=make_dependencies( 'python', rpm_version, distribution), directories=[virtualenv_dir], ), LintPackage( package_type=distribution.package_type(), destination_path=destination_path, epoch=PACKAGE.EPOCH.value, rpm_version=rpm_version, package='clusterhq-python-flocker', architecture=PACKAGE_ARCHITECTURE['clusterhq-python-flocker'], ), # flocker-cli steps # First, link command-line tools that should be available. If you # change this you may also want to change entry_points in setup.py. CreateLinks( links=[ (FilePath('/opt/flocker/bin/flocker-deploy'), flocker_cli_path), (FilePath('/opt/flocker/bin/flocker'), flocker_cli_path), (FilePath('/opt/flocker/bin/flocker-ca'), flocker_cli_path), ] ), BuildPackage( package_type=distribution.package_type(), destination_path=destination_path, source_paths={flocker_cli_path: FilePath("/usr/bin")}, name='clusterhq-flocker-cli', prefix=FilePath('/'), epoch=PACKAGE.EPOCH.value, rpm_version=rpm_version, license=PACKAGE.LICENSE.value, url=PACKAGE.URL.value, vendor=PACKAGE.VENDOR.value, maintainer=PACKAGE.MAINTAINER.value, architecture=PACKAGE_ARCHITECTURE['clusterhq-flocker-cli'], description=PACKAGE_CLI.DESCRIPTION.value, category=category, dependencies=make_dependencies( 'cli', rpm_version, distribution), ), LintPackage( package_type=distribution.package_type(), destination_path=destination_path, epoch=PACKAGE.EPOCH.value, rpm_version=rpm_version, package='clusterhq-flocker-cli', architecture=PACKAGE_ARCHITECTURE['clusterhq-flocker-cli'], ), # flocker-node steps # First, link command-line tools that should be available. If you # change this you may also want to change entry_points in setup.py. 
CreateLinks( links=[ (FilePath('/opt/flocker/bin/flocker-volume'), flocker_node_path), (FilePath('/opt/flocker/bin/flocker-control'), flocker_node_path), (FilePath('/opt/flocker/bin/flocker-container-agent'), flocker_node_path), (FilePath('/opt/flocker/bin/flocker-dataset-agent'), flocker_node_path), ] ), BuildPackage( package_type=distribution.package_type(), destination_path=destination_path, source_paths={ flocker_node_path: FilePath("/usr/sbin"), # CentOS firewall configuration package_files.child('firewalld-services'): FilePath("/usr/lib/firewalld/services/"), # Ubuntu firewall configuration package_files.child('ufw-applications.d'): FilePath("/etc/ufw/applications.d/"), # SystemD configuration package_files.child('systemd'): FilePath('/usr/lib/systemd/system'), # Upstart configuration package_files.child('upstart'): FilePath('/etc/init'), # Flocker Control State dir empty_path: FilePath('/var/lib/flocker/'), }, name='clusterhq-flocker-node', prefix=FilePath('/'), epoch=PACKAGE.EPOCH.value, rpm_version=rpm_version, license=PACKAGE.LICENSE.value, url=PACKAGE.URL.value, vendor=PACKAGE.VENDOR.value, maintainer=PACKAGE.MAINTAINER.value, architecture=PACKAGE_ARCHITECTURE['clusterhq-flocker-node'], description=PACKAGE_NODE.DESCRIPTION.value, category=category, dependencies=make_dependencies( 'node', rpm_version, distribution), after_install=package_files.child('after-install.sh'), directories=[FilePath('/var/lib/flocker/')], ), LintPackage( package_type=distribution.package_type(), destination_path=destination_path, epoch=PACKAGE.EPOCH.value, rpm_version=rpm_version, package='clusterhq-flocker-node', architecture=PACKAGE_ARCHITECTURE['clusterhq-flocker-node'], ), ) ) @attributes(['tag', 'build_directory']) class DockerBuild(object): """ Build a docker image and tag it. :ivar bytes tag: The tag name which will be assigned to the generated docker image. :ivar FilePath build_directory: The directory containing the ``Dockerfile`` to build. """ def run(self): check_call( ['docker', 'build', '--tag', self.tag, self.build_directory.path]) @attributes(['tag', 'volumes', 'command']) class DockerRun(object): """ Run a docker image with the supplied volumes and command line arguments. :ivar bytes tag: The tag name of the image to run. :ivar dict volumes: A dict mapping ``FilePath`` container path to ``FilePath`` host path for each docker volume. :ivar list command: The command line arguments which will be supplied to the docker image entry point. """ def run(self): volume_options = [] for container, host in self.volumes.iteritems(): volume_options.extend( ['--volume', '%s:%s' % (host.path, container.path)]) result = call( ['docker', 'run', '--rm'] + volume_options + [self.tag] + self.command) if result: raise SystemExit(result) def available_distributions(flocker_source_path): """ Determine the distributions for which packages can be built. :param FilePath flocker_source_path: The top-level directory of a Flocker source checkout. Distributions will be inferred from the build targets available in this checkout. :return: A ``set`` of ``bytes`` giving distribution names which can be used with ``build_in_docker`` (and therefore with the ``--distribution`` command line option of ``build-package``). 
""" return set( path.basename() for path in flocker_source_path.descendant(BUILD_TARGETS_SEGMENTS).children() if path.isdir() and path.child(b"Dockerfile").exists() ) def build_in_docker(destination_path, distribution, top_level, package_uri): """ Build a flocker package for a given ``distribution`` inside a clean docker container of that ``distribution``. :param FilePath destination_path: The directory to which the generated packages will be copied. :param bytes distribution: The distribution name for which to build a package. :param FilePath top_level: The Flocker source code directory. :param bytes package_uri: The ``pip`` style python package URI to install. """ if destination_path.exists() and not destination_path.isdir(): raise ValueError("go away") volumes = { FilePath('/output'): destination_path, FilePath('/flocker'): top_level, } # Special case to allow building the currently checked out Flocker code. if package_uri == top_level.path: package_uri = '/flocker' tag = "clusterhq/build-%s" % (distribution,) build_targets_directory = top_level.descendant(BUILD_TARGETS_SEGMENTS) build_directory = build_targets_directory.child(distribution) # The <src> path must be inside the context of the build; you cannot COPY # ../something /something, because the first step of a docker build is to # send the context directory (and subdirectories) to the docker daemon. # To work around this, we copy a shared requirements file into the build # directory. requirements_file = build_targets_directory.child('requirements.txt') tmp_requirements = build_directory.child('requirements.txt') requirements_file.copyTo(tmp_requirements) return BuildSequence( steps=[ DockerBuild( tag=tag, build_directory=build_directory ), DockerRun( tag=tag, volumes=volumes, command=[package_uri] ), ]) class DockerBuildOptions(usage.Options): """ Command line options for the ``build-package-entrypoint`` tool. """ synopsis = 'build-package-entrypoint [options] <package-uri>' optParameters = [ ['destination-path', 'd', '.', 'The path to a directory in which to create package files and ' 'artifacts.'], ] longdesc = dedent("""\ Arguments: <package-uri>: The Python package url or path to install using ``pip``. """) def parseArgs(self, package_uri): """ The Python package to install. """ self['package-uri'] = package_uri def postOptions(self): """ Coerce paths to ``FilePath``. """ self['destination-path'] = FilePath(self['destination-path']) class DockerBuildScript(object): """ Check supplied command line arguments, print command line argument errors to ``stderr`` otherwise build the RPM package. :ivar build_command: The function responsible for building the package. Allows the command to be overridden in tests. """ build_command = staticmethod(omnibus_package_builder) def __init__(self, sys_module=None): """ :param sys_module: A ``sys`` like object whose ``argv``, ``stdout`` and ``stderr`` will be used in the script. Can be overridden in tests to make assertions about the script argument parsing and output printing. Default is ``sys``. """ if sys_module is None: sys_module = sys self.sys_module = sys_module def main(self, top_level=None, base_path=None): """ Check command line arguments and run the build steps. :param FilePath top_level: The top-level of the flocker repository. :param base_path: ignored. 
""" to_file(self.sys_module.stderr) options = DockerBuildOptions() try: options.parseOptions(self.sys_module.argv[1:]) except usage.UsageError as e: self.sys_module.stderr.write("%s\n" % (options,)) self.sys_module.stderr.write("%s\n" % (e,)) raise SystemExit(1) # Currently we add system control files for both EL and Debian-based # systems. We should probably be more specific. See FLOC-1736. self.build_command( distribution=CURRENT_DISTRIBUTION, destination_path=options['destination-path'], package_uri=options['package-uri'], package_files=top_level.descendant(['admin', 'package-files']), ).run() docker_main = DockerBuildScript().main class BuildOptions(usage.Options): """ Command line options for the ``build-package`` tool. """ synopsis = 'build-package [options] <package-uri>' optParameters = [ ['destination-path', 'd', '.', 'The path to a directory in which to create package files and ' 'artifacts.'], ['distribution', None, None, # {} is formatted in __init__ 'The target distribution. One of {}'], ] longdesc = dedent("""\ Arguments: <package-uri>: The Python package url or path to install using ``pip``. """) def __init__(self, distributions): """ :param distributions: An iterable of the names of distributions which are acceptable as values for the ``--distribution`` parameter. """ usage.Options.__init__(self) self.docs["distribution"] = self.docs["distribution"].format( ', '.join(sorted(distributions)) ) def parseArgs(self, package_uri): """ The Python package to install. """ self['package-uri'] = package_uri def postOptions(self): """ Coerce paths to ``FilePath`` and select a suitable ``native`` ``package-type``. """ self['destination-path'] = FilePath(self['destination-path']) if self['distribution'] is None: raise usage.UsageError('Must specify --distribution.') class BuildScript(object): """ Check supplied command line arguments, print command line argument errors to ``stderr`` otherwise build the RPM package. :ivar build_command: The function responsible for building the package. Allows the command to be overridden in tests. """ build_command = staticmethod(build_in_docker) def __init__(self, sys_module=None): """ :param sys_module: A ``sys`` like object whose ``argv``, ``stdout`` and ``stderr`` will be used in the script. Can be overridden in tests to make assertions about the script argument parsing and output printing. Default is ``sys``. """ if sys_module is None: sys_module = sys self.sys_module = sys_module def main(self, top_level=None, base_path=None): """ Check command line arguments and run the build steps. :param top_level: The path to the root of the checked out flocker directory. :param base_path: ignored. """ to_file(self.sys_module.stderr) distributions = available_distributions(top_level) options = BuildOptions(distributions) try: options.parseOptions(self.sys_module.argv[1:]) except usage.UsageError as e: self.sys_module.stderr.write("%s\n" % (options,)) self.sys_module.stderr.write("%s\n" % (e,)) raise SystemExit(1) self.build_command( destination_path=options['destination-path'], package_uri=options['package-uri'], top_level=top_level, distribution=options['distribution'], ).run() main = BuildScript().main
HyperBaton/ansible
refs/heads/devel
test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py
25
"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import sys # set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATHS'], 'ansible_collections') def collection_pypkgpath(self): """Configure the Python package path so that pytest can find our collections.""" for parent in self.parts(reverse=True): if str(parent) == ANSIBLE_COLLECTIONS_PATH: return parent raise Exception('File "%s" not found in collection path "%s".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH)) def pytest_configure(): """Configure this pytest plugin.""" try: if pytest_configure.executed: return except AttributeError: pytest_configure.executed = True from ansible.utils.collection_loader import AnsibleCollectionLoader # allow unit tests to import code from collections sys.meta_path.insert(0, AnsibleCollectionLoader()) # noinspection PyProtectedMember import py._path.local # force collections unit tests to be loaded with the ansible_collections namespace # original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552 # noinspection PyProtectedMember py._path.local.LocalPath.pypkgpath = collection_pypkgpath # pylint: disable=protected-access pytest_configure()
tempbottle/kbengine
refs/heads/master
kbe/res/scripts/common/Lib/calendar.py
828
"""Calendar printing functions Note when comparing these calendars to the ones printed by cal(1): By default, these calendars have Monday as the first day of the week, and Sunday as the last (the European convention). Use setfirstweekday() to set the first day of the week (0=Monday, 6=Sunday).""" import sys import datetime import locale as _locale __all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday", "firstweekday", "isleap", "leapdays", "weekday", "monthrange", "monthcalendar", "prmonth", "month", "prcal", "calendar", "timegm", "month_name", "month_abbr", "day_name", "day_abbr"] # Exception raised for bad input (with string parameter for details) error = ValueError # Exceptions raised for bad input class IllegalMonthError(ValueError): def __init__(self, month): self.month = month def __str__(self): return "bad month number %r; must be 1-12" % self.month class IllegalWeekdayError(ValueError): def __init__(self, weekday): self.weekday = weekday def __str__(self): return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday # Constants for months referenced later January = 1 February = 2 # Number of days per month (except for February in leap years) mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] # This module used to have hard-coded lists of day and month names, as # English strings. The classes following emulate a read-only version of # that, but supply localized names. Note that the values are computed # fresh on each call, in case the user changes locale between calls. class _localized_month: _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)] _months.insert(0, lambda x: "") def __init__(self, format): self.format = format def __getitem__(self, i): funcs = self._months[i] if isinstance(i, slice): return [f(self.format) for f in funcs] else: return funcs(self.format) def __len__(self): return 13 class _localized_day: # January 1, 2001, was a Monday. _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)] def __init__(self, format): self.format = format def __getitem__(self, i): funcs = self._days[i] if isinstance(i, slice): return [f(self.format) for f in funcs] else: return funcs(self.format) def __len__(self): return 7 # Full and abbreviated names of weekdays day_name = _localized_day('%A') day_abbr = _localized_day('%a') # Full and abbreviated names of months (1-based arrays!!!) month_name = _localized_month('%B') month_abbr = _localized_month('%b') # Constants for weekdays (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7) def isleap(year): """Return True for leap years, False for non-leap years.""" return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) def leapdays(y1, y2): """Return number of leap years in range [y1, y2). Assume y1 <= y2.""" y1 -= 1 y2 -= 1 return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400) def weekday(year, month, day): """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12), day (1-31).""" return datetime.date(year, month, day).weekday() def monthrange(year, month): """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for year, month.""" if not 1 <= month <= 12: raise IllegalMonthError(month) day1 = weekday(year, month, 1) ndays = mdays[month] + (month == February and isleap(year)) return day1, ndays class Calendar(object): """ Base calendar class. This class doesn't do any formatting. It simply provides data to subclasses. 
""" def __init__(self, firstweekday=0): self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday def getfirstweekday(self): return self._firstweekday % 7 def setfirstweekday(self, firstweekday): self._firstweekday = firstweekday firstweekday = property(getfirstweekday, setfirstweekday) def iterweekdays(self): """ Return a iterator for one week of weekday numbers starting with the configured first one. """ for i in range(self.firstweekday, self.firstweekday + 7): yield i%7 def itermonthdates(self, year, month): """ Return an iterator for one month. The iterator will yield datetime.date values and will always iterate through complete weeks, so it will yield dates outside the specified month. """ date = datetime.date(year, month, 1) # Go back to the beginning of the week days = (date.weekday() - self.firstweekday) % 7 date -= datetime.timedelta(days=days) oneday = datetime.timedelta(days=1) while True: yield date try: date += oneday except OverflowError: # Adding one day could fail after datetime.MAXYEAR break if date.month != month and date.weekday() == self.firstweekday: break def itermonthdays2(self, year, month): """ Like itermonthdates(), but will yield (day number, weekday number) tuples. For days outside the specified month the day number is 0. """ for date in self.itermonthdates(year, month): if date.month != month: yield (0, date.weekday()) else: yield (date.day, date.weekday()) def itermonthdays(self, year, month): """ Like itermonthdates(), but will yield day numbers. For days outside the specified month the day number is 0. """ for date in self.itermonthdates(year, month): if date.month != month: yield 0 else: yield date.day def monthdatescalendar(self, year, month): """ Return a matrix (list of lists) representing a month's calendar. Each row represents a week; week entries are datetime.date values. """ dates = list(self.itermonthdates(year, month)) return [ dates[i:i+7] for i in range(0, len(dates), 7) ] def monthdays2calendar(self, year, month): """ Return a matrix representing a month's calendar. Each row represents a week; week entries are (day number, weekday number) tuples. Day numbers outside this month are zero. """ days = list(self.itermonthdays2(year, month)) return [ days[i:i+7] for i in range(0, len(days), 7) ] def monthdayscalendar(self, year, month): """ Return a matrix representing a month's calendar. Each row represents a week; days outside this month are zero. """ days = list(self.itermonthdays(year, month)) return [ days[i:i+7] for i in range(0, len(days), 7) ] def yeardatescalendar(self, year, width=3): """ Return the data for the specified year ready for formatting. The return value is a list of month rows. Each month row contains up to width months. Each month contains between 4 and 6 weeks and each week contains 1-7 days. Days are datetime.date objects. """ months = [ self.monthdatescalendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ] def yeardays2calendar(self, year, width=3): """ Return the data for the specified year ready for formatting (similar to yeardatescalendar()). Entries in the week lists are (day number, weekday number) tuples. Day numbers outside this month are zero. """ months = [ self.monthdays2calendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ] def yeardayscalendar(self, year, width=3): """ Return the data for the specified year ready for formatting (similar to yeardatescalendar()). 
Entries in the week lists are day numbers. Day numbers outside this month are zero. """ months = [ self.monthdayscalendar(year, i) for i in range(January, January+12) ] return [months[i:i+width] for i in range(0, len(months), width) ] class TextCalendar(Calendar): """ Subclass of Calendar that outputs a calendar as a simple plain text similar to the UNIX program cal. """ def prweek(self, theweek, width): """ Print a single week (no newline). """ print(self.formatweek(theweek, width), end=' ') def formatday(self, day, weekday, width): """ Returns a formatted day. """ if day == 0: s = '' else: s = '%2i' % day # right-align single-digit days return s.center(width) def formatweek(self, theweek, width): """ Returns a single week in a string (no newline). """ return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek) def formatweekday(self, day, width): """ Returns a formatted week day name. """ if width >= 9: names = day_name else: names = day_abbr return names[day][:width].center(width) def formatweekheader(self, width): """ Return a header for a week. """ return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays()) def formatmonthname(self, theyear, themonth, width, withyear=True): """ Return a formatted month name. """ s = month_name[themonth] if withyear: s = "%s %r" % (s, theyear) return s.center(width) def prmonth(self, theyear, themonth, w=0, l=0): """ Print a month's calendar. """ print(self.formatmonth(theyear, themonth, w, l), end=' ') def formatmonth(self, theyear, themonth, w=0, l=0): """ Return a month's calendar string (multi-line). """ w = max(2, w) l = max(1, l) s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1) s = s.rstrip() s += '\n' * l s += self.formatweekheader(w).rstrip() s += '\n' * l for week in self.monthdays2calendar(theyear, themonth): s += self.formatweek(week, w).rstrip() s += '\n' * l return s def formatyear(self, theyear, w=2, l=1, c=6, m=3): """ Returns a year's calendar as a multi-line string. """ w = max(2, w) l = max(1, l) c = max(2, c) colwidth = (w + 1) * 7 - 1 v = [] a = v.append a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip()) a('\n'*l) header = self.formatweekheader(w) for (i, row) in enumerate(self.yeardays2calendar(theyear, m)): # months in this row months = range(m*i+1, min(m*(i+1)+1, 13)) a('\n'*l) names = (self.formatmonthname(theyear, k, colwidth, False) for k in months) a(formatstring(names, colwidth, c).rstrip()) a('\n'*l) headers = (header for k in months) a(formatstring(headers, colwidth, c).rstrip()) a('\n'*l) # max number of weeks for this row height = max(len(cal) for cal in row) for j in range(height): weeks = [] for cal in row: if j >= len(cal): weeks.append('') else: weeks.append(self.formatweek(cal[j], w)) a(formatstring(weeks, colwidth, c).rstrip()) a('\n' * l) return ''.join(v) def pryear(self, theyear, w=0, l=0, c=6, m=3): """Print a year's calendar.""" print(self.formatyear(theyear, w, l, c, m)) class HTMLCalendar(Calendar): """ This calendar returns complete HTML pages. """ # CSS classes for the day <td>s cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"] def formatday(self, day, weekday): """ Return a day as a table cell. """ if day == 0: return '<td class="noday">&nbsp;</td>' # day outside month else: return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day) def formatweek(self, theweek): """ Return a complete week as a table row. 
""" s = ''.join(self.formatday(d, wd) for (d, wd) in theweek) return '<tr>%s</tr>' % s def formatweekday(self, day): """ Return a weekday name as a table header. """ return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day]) def formatweekheader(self): """ Return a header for a week as a table row. """ s = ''.join(self.formatweekday(i) for i in self.iterweekdays()) return '<tr>%s</tr>' % s def formatmonthname(self, theyear, themonth, withyear=True): """ Return a month name as a table row. """ if withyear: s = '%s %s' % (month_name[themonth], theyear) else: s = '%s' % month_name[themonth] return '<tr><th colspan="7" class="month">%s</th></tr>' % s def formatmonth(self, theyear, themonth, withyear=True): """ Return a formatted month as a table. """ v = [] a = v.append a('<table border="0" cellpadding="0" cellspacing="0" class="month">') a('\n') a(self.formatmonthname(theyear, themonth, withyear=withyear)) a('\n') a(self.formatweekheader()) a('\n') for week in self.monthdays2calendar(theyear, themonth): a(self.formatweek(week)) a('\n') a('</table>') a('\n') return ''.join(v) def formatyear(self, theyear, width=3): """ Return a formatted year as a table of tables. """ v = [] a = v.append width = max(width, 1) a('<table border="0" cellpadding="0" cellspacing="0" class="year">') a('\n') a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear)) for i in range(January, January+12, width): # months in this row months = range(i, min(i+width, 13)) a('<tr>') for m in months: a('<td>') a(self.formatmonth(theyear, m, withyear=False)) a('</td>') a('</tr>') a('</table>') return ''.join(v) def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None): """ Return a formatted year as a complete HTML page. """ if encoding is None: encoding = sys.getdefaultencoding() v = [] a = v.append a('<?xml version="1.0" encoding="%s"?>\n' % encoding) a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n') a('<html>\n') a('<head>\n') a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding) if css is not None: a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css) a('<title>Calendar for %d</title>\n' % theyear) a('</head>\n') a('<body>\n') a(self.formatyear(theyear, width)) a('</body>\n') a('</html>\n') return ''.join(v).encode(encoding, "xmlcharrefreplace") class different_locale: def __init__(self, locale): self.locale = locale def __enter__(self): self.oldlocale = _locale.getlocale(_locale.LC_TIME) _locale.setlocale(_locale.LC_TIME, self.locale) def __exit__(self, *args): _locale.setlocale(_locale.LC_TIME, self.oldlocale) class LocaleTextCalendar(TextCalendar): """ This class can be passed a locale name in the constructor and will return month and weekday names in the specified locale. If this locale includes an encoding all strings containing month and weekday names will be returned as unicode. 
""" def __init__(self, firstweekday=0, locale=None): TextCalendar.__init__(self, firstweekday) if locale is None: locale = _locale.getdefaultlocale() self.locale = locale def formatweekday(self, day, width): with different_locale(self.locale): if width >= 9: names = day_name else: names = day_abbr name = names[day] return name[:width].center(width) def formatmonthname(self, theyear, themonth, width, withyear=True): with different_locale(self.locale): s = month_name[themonth] if withyear: s = "%s %r" % (s, theyear) return s.center(width) class LocaleHTMLCalendar(HTMLCalendar): """ This class can be passed a locale name in the constructor and will return month and weekday names in the specified locale. If this locale includes an encoding all strings containing month and weekday names will be returned as unicode. """ def __init__(self, firstweekday=0, locale=None): HTMLCalendar.__init__(self, firstweekday) if locale is None: locale = _locale.getdefaultlocale() self.locale = locale def formatweekday(self, day): with different_locale(self.locale): s = day_abbr[day] return '<th class="%s">%s</th>' % (self.cssclasses[day], s) def formatmonthname(self, theyear, themonth, withyear=True): with different_locale(self.locale): s = month_name[themonth] if withyear: s = '%s %s' % (s, theyear) return '<tr><th colspan="7" class="month">%s</th></tr>' % s # Support for old module level interface c = TextCalendar() firstweekday = c.getfirstweekday def setfirstweekday(firstweekday): if not MONDAY <= firstweekday <= SUNDAY: raise IllegalWeekdayError(firstweekday) c.firstweekday = firstweekday monthcalendar = c.monthdayscalendar prweek = c.prweek week = c.formatweek weekheader = c.formatweekheader prmonth = c.prmonth month = c.formatmonth calendar = c.formatyear prcal = c.pryear # Spacing of month columns for multi-column year calendar _colwidth = 7*3 - 1 # Amount printed by prweek() _spacing = 6 # Number of spaces between columns def format(cols, colwidth=_colwidth, spacing=_spacing): """Prints multi-column formatting for year calendars""" print(formatstring(cols, colwidth, spacing)) def formatstring(cols, colwidth=_colwidth, spacing=_spacing): """Returns a string formatted from n strings, centered within n columns.""" spacing *= ' ' return spacing.join(c.center(colwidth) for c in cols) EPOCH = 1970 _EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal() def timegm(tuple): """Unrelated but handy function to calculate Unix timestamp from GMT.""" year, month, day, hour, minute, second = tuple[:6] days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1 hours = days*24 + hour minutes = hours*60 + minute seconds = minutes*60 + second return seconds def main(args): import optparse parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]") parser.add_option( "-w", "--width", dest="width", type="int", default=2, help="width of date column (default 2, text only)" ) parser.add_option( "-l", "--lines", dest="lines", type="int", default=1, help="number of lines for each week (default 1, text only)" ) parser.add_option( "-s", "--spacing", dest="spacing", type="int", default=6, help="spacing between months (default 6, text only)" ) parser.add_option( "-m", "--months", dest="months", type="int", default=3, help="months per row (default 3, text only)" ) parser.add_option( "-c", "--css", dest="css", default="calendar.css", help="CSS to use for page (html only)" ) parser.add_option( "-L", "--locale", dest="locale", default=None, help="locale to be used from month and weekday names" ) 
parser.add_option( "-e", "--encoding", dest="encoding", default=None, help="Encoding to use for output." ) parser.add_option( "-t", "--type", dest="type", default="text", choices=("text", "html"), help="output type (text or html)" ) (options, args) = parser.parse_args(args) if options.locale and not options.encoding: parser.error("if --locale is specified --encoding is required") sys.exit(1) locale = options.locale, options.encoding if options.type == "html": if options.locale: cal = LocaleHTMLCalendar(locale=locale) else: cal = HTMLCalendar() encoding = options.encoding if encoding is None: encoding = sys.getdefaultencoding() optdict = dict(encoding=encoding, css=options.css) write = sys.stdout.buffer.write if len(args) == 1: write(cal.formatyearpage(datetime.date.today().year, **optdict)) elif len(args) == 2: write(cal.formatyearpage(int(args[1]), **optdict)) else: parser.error("incorrect number of arguments") sys.exit(1) else: if options.locale: cal = LocaleTextCalendar(locale=locale) else: cal = TextCalendar() optdict = dict(w=options.width, l=options.lines) if len(args) != 3: optdict["c"] = options.spacing optdict["m"] = options.months if len(args) == 1: result = cal.formatyear(datetime.date.today().year, **optdict) elif len(args) == 2: result = cal.formatyear(int(args[1]), **optdict) elif len(args) == 3: result = cal.formatmonth(int(args[1]), int(args[2]), **optdict) else: parser.error("incorrect number of arguments") sys.exit(1) write = sys.stdout.write if options.encoding: result = result.encode(options.encoding) write = sys.stdout.buffer.write write(result) if __name__ == "__main__": main(sys.argv)
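The module-level helpers near the bottom wrap a shared ``TextCalendar`` instance, so simple scripts never need to instantiate a class. A short usage sketch of the API defined above:

import calendar

# Weekday of the 1st (0=Monday) and number of days in the month.
print(calendar.monthrange(2015, 2))            # (6, 28)

# Module-level functions delegate to a shared TextCalendar.
calendar.setfirstweekday(calendar.SUNDAY)
print(calendar.month(2015, 2))                 # plain-text month grid

# The HTML variant returns markup instead of printing.
html_cal = calendar.HTMLCalendar(firstweekday=calendar.SUNDAY)
print(html_cal.formatmonth(2015, 2))           # <table ...>...</table>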
mm112287/2015cd_midterm
refs/heads/master
static/Brython3.1.0-20150301-090019/Lib/xml/dom/minicompat.py
781
"""Python version compatibility support for minidom.""" # This module should only be imported using "import *". # # The following names are defined: # # NodeList -- lightest possible NodeList implementation # # EmptyNodeList -- lightest possible NodeList that is guaranteed to # remain empty (immutable) # # StringTypes -- tuple of defined string types # # defproperty -- function used in conjunction with GetattrMagic; # using these together is needed to make them work # as efficiently as possible in both Python 2.2+ # and older versions. For example: # # class MyClass(GetattrMagic): # def _get_myattr(self): # return something # # defproperty(MyClass, "myattr", # "return some value") # # For Python 2.2 and newer, this will construct a # property object on the class, which avoids # needing to override __getattr__(). It will only # work for read-only attributes. # # For older versions of Python, inheriting from # GetattrMagic will use the traditional # __getattr__() hackery to achieve the same effect, # but less efficiently. # # defproperty() should be used for each version of # the relevant _get_<property>() function. __all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"] import xml.dom StringTypes = (str,) class NodeList(list): __slots__ = () def item(self, index): if 0 <= index < len(self): return self[index] def _get_length(self): return len(self) def _set_length(self, value): raise xml.dom.NoModificationAllowedErr( "attempt to modify read-only attribute 'length'") length = property(_get_length, _set_length, doc="The number of nodes in the NodeList.") def __getstate__(self): return list(self) def __setstate__(self, state): self[:] = state class EmptyNodeList(tuple): __slots__ = () def __add__(self, other): NL = NodeList() NL.extend(other) return NL def __radd__(self, other): NL = NodeList() NL.extend(other) return NL def item(self, index): return None def _get_length(self): return 0 def _set_length(self, value): raise xml.dom.NoModificationAllowedErr( "attempt to modify read-only attribute 'length'") length = property(_get_length, _set_length, doc="The number of nodes in the NodeList.") def defproperty(klass, name, doc): get = getattr(klass, ("_get_" + name)) def set(self, value, name=name): raise xml.dom.NoModificationAllowedErr( "attempt to modify read-only attribute " + repr(name)) assert not hasattr(klass, "_set_" + name), \ "expected not to find _set_" + name prop = property(get, set, doc=doc) setattr(klass, name, prop)
edespino/gpdb
refs/heads/master
src/test/tinc/tincrepo/resource_management/memory_accounting/scenario/__init__.py
12133432
robertmattmueller/sdac-compiler
refs/heads/master
sympy/integrals/benchmarks/__init__.py
12133432
rvs/gpdb
refs/heads/master
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/pg_twophase/commit_create_tests/post_sql/__init__.py
12133432
translate/pootle
refs/heads/master
tests/pootle_language/__init__.py
12133432
forzi/phantomjs_stradivari_fork
refs/heads/master
src/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/__init__.py
12133432
slevenhagen/odoo-npg
refs/heads/8.0
addons/account_sequence/__init__.py
433
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import account_sequence import account_sequence_installer # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
912/M-new
refs/heads/master
src/librehatti/prints/views.py
2
#from django.http import HttpResponse #from useraccounts.models import * #from helper import * from django import forms from django.shortcuts import * from librehatti.catalog.models import * from django.db.models import Sum def add_lab(request): """ It displays the form where the user selects the Lab. """ lab_info = Category.objects.all() return render(request,'prints/add_lab.html',{'lab_info':lab_info}) def add_material(request): """ Depending on the Lab selected, this function displays the form where the user selects Material. """ lab = request.GET['lab'] material_info = Category.objects.filter(parent__name=lab) return render( request, 'prints/add_material.html', {'lab':lab, 'material_info' : material_info}) def add_test(request): """ Depending on the Lab and Material selected, this function displays the form where the user selects a test and enters the time span. """ if 'Submit' in request.GET: material = request.GET['material'] test_info = Product.objects.filter(category__name=material) return render( request, 'prints/add_test.html', { 'material':material,'test_info' : test_info}) def lab_report(request): """ It generates the report which lists all the orders for the selected test within the entered time span. """ test = request.POST['test'] start_date = request.POST['From'] end_date = request.POST['To'] purchase_item = PurchasedItem.objects.filter(purchase_order__date_time__range =(start_date,end_date),item__name=test).values( 'purchase_order_id','purchase_order__date_time', 'purchase_order__buyer_id__username', 'purchase_order__buyer_id__customer__title', 'purchase_order__buyer_id__customer__company','price', 'purchase_order__buyer_id__customer__is_org') total = PurchasedItem.objects.filter(purchase_order__date_time__range =(start_date,end_date)).aggregate(Sum('price')).get('price__sum', 0.00) return render(request, 'prints/lab_reports.html', { 'purchase_item': purchase_item,'start_date':start_date,'end_date':end_date, 'total_cost':total}) def bill(request): """ It generates a Bill for the user which lists all the items, their quantity, subtotal, and then adds the surcharges to produce the grand total. """ purchase_order = PurchaseOrder.objects.all() purchased_item = PurchasedItem.objects.filter().values('item__name', 'qty', 'item__price_per_unit','price') total = PurchasedItem.objects.filter().aggregate(Sum('price')).get( 'price__sum', 0.00) surcharge = Surcharge.objects.filter().values('tax_name', 'value') surcharge_total = 0 tax_list = [] tax_data = [] for tax in surcharge: tax_list.append(float((tax['value']*total)/100)) for tax in tax_list: surcharge_total = surcharge_total + tax tax_data = zip(surcharge, tax_list) grand_total = surcharge_total + total return render(request, 'bill.html', { 'STC_No' :'1','PAN_No' :'12', 'L_No': '123', 'purchase_order':purchase_order, 'purchased_item' : purchased_item, 'total_cost': total,'surcharge_total': surcharge_total, 'tax_data' : tax_data, 'grand_total': grand_total})
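def _surcharge_demo():
    """A sketch of the surcharge arithmetic from bill() above, using plain
    numbers instead of ORM rows; the rates and total are made-up examples."""
    total = 1000.0
    surcharge = [{'tax_name': 'service tax', 'value': 12},
                 {'tax_name': 'cess', 'value': 2}]
    tax_list = [float(tax['value'] * total) / 100 for tax in surcharge]  # [120.0, 20.0]
    surcharge_total = sum(tax_list)                                      # 140.0
    grand_total = surcharge_total + total                                # 1140.0
    return list(zip(surcharge, tax_list)), surcharge_total, grand_total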
andresriancho/boto
refs/heads/develop
boto/sdb/db/query.py
40
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. class Query(object): __local_iter__ = None def __init__(self, model_class, limit=None, next_token=None, manager=None): self.model_class = model_class self.limit = limit self.offset = 0 if manager: self.manager = manager else: self.manager = self.model_class._manager self.filters = [] self.select = None self.sort_by = None self.rs = None self.next_token = next_token def __iter__(self): return iter(self.manager.query(self)) def next(self): if self.__local_iter__ == None: self.__local_iter__ = self.__iter__() return self.__local_iter__.next() def filter(self, property_operator, value): self.filters.append((property_operator, value)) return self def fetch(self, limit, offset=0): """Not currently fully supported, but we can use this to allow them to set a limit in a chainable method""" self.limit = limit self.offset = offset return self def count(self, quick=True): return self.manager.count(self.model_class, self.filters, quick, self.sort_by, self.select) def get_query(self): return self.manager._build_filter_part(self.model_class, self.filters, self.sort_by, self.select) def order(self, key): self.sort_by = key return self def to_xml(self, doc=None): if not doc: xmlmanager = self.model_class.get_xmlmanager() doc = xmlmanager.new_doc() for obj in self: obj.to_xml(doc) return doc def get_next_token(self): if self.rs: return self.rs.next_token if self._next_token: return self._next_token return None def set_next_token(self, token): self._next_token = token next_token = property(get_next_token, set_next_token)
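# A usage sketch of the chainable interface above; MyModel stands in for a
# hypothetical boto.sdb.db.model.Model subclass and is not defined here:
#
#     query = (MyModel.all()          # returns a Query over MyModel
#              .filter('age >', 21)   # appended to query.filters
#              .order('-age')         # sets query.sort_by (descending)
#              .fetch(10))            # sets query.limit
#     for obj in query:               # __iter__ delegates to manager.query(self)
#         print(obj.id)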
iogf/candocabot
refs/heads/master
plugins/jumble/wordList.py
1
from contextlib import closing import shelve import re # Return an iterator over the words in the given character sequence. def fromString(string): return (m.group() for m in re.finditer(r'[a-zA-Z]+', string)) # Return an iterator over the words in the given text file. def fromFile(fileName): with open(fileName, 'r') as file: return (w.upper() for w in fromString(file.read())) # Return an iterator over the alphabetic nicks occurring in a log kept by plugins.log. def nicksFromLog(logName, fileName): with closing(shelve.open(fileName, 'r')) as shelf: return fromUArrays(m.group(1) for m in re.finditer(r'^<[~^@%+]?([a-z]+)>', shelf[logName], re.I | re.M)) # Return an iterator over the words in a log kept by plugins.log. def fromLog(logName, fileName): with closing(shelve.open(fileName, 'r')) as shelf: return fromUArrays(fromString(shelf[logName])) # Return an iterator converting unicode arrays to plain uppercase strings. def fromUArrays(arrays): return (a.tounicode().encode().upper() for a in arrays)
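# A quick sketch of the tokenizer above (not part of the plugin):
#     >>> list(fromString("jumble: word1, word2!"))
#     ['jumble', 'word', 'word']
# fromFile() applies the same split and upper-cases each word, so the same
# text read from a file would yield ['JUMBLE', 'WORD', 'WORD'].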
CYBAI/servo
refs/heads/master
python/tidy/servo_tidy_tests/lints/proper_file.py
105
from servo_tidy.tidy import LintRunner class Lint(LintRunner): def run(self): for _ in [None]: yield ('path', 0, 'foobar')
zzzeek/sqlalchemy
refs/heads/master
test/ext/test_automap.py
2
import random import threading import time from sqlalchemy import create_engine from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import MetaData from sqlalchemy import select from sqlalchemy import String from sqlalchemy import testing from sqlalchemy.ext.automap import automap_base from sqlalchemy.ext.automap import generate_relationship from sqlalchemy.orm import configure_mappers from sqlalchemy.orm import exc as orm_exc from sqlalchemy.orm import interfaces from sqlalchemy.orm import relationship from sqlalchemy.orm import Session from sqlalchemy.testing import assert_raises_message from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing.mock import Mock from sqlalchemy.testing.mock import patch from sqlalchemy.testing.schema import Column from sqlalchemy.testing.schema import Table from ..orm._fixtures import FixtureTest class AutomapTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): FixtureTest.define_tables(metadata) def test_relationship_o2m_default(self): Base = automap_base(metadata=self.tables_test_metadata) Base.prepare() User = Base.classes.users Address = Base.classes.addresses a1 = Address(email_address="e1") u1 = User(name="u1", addresses_collection=[a1]) assert a1.users is u1 def test_relationship_explicit_override_o2m(self): Base = automap_base(metadata=self.tables_test_metadata) prop = relationship("addresses", collection_class=set) class User(Base): __tablename__ = "users" addresses_collection = prop Base.prepare() assert User.addresses_collection.property is prop Address = Base.classes.addresses a1 = Address(email_address="e1") u1 = User(name="u1", addresses_collection=set([a1])) assert a1.user is u1 def test_prepare_w_only(self): Base = automap_base() Base.prepare( testing.db, reflection_options={"only": ["users"], "resolve_fks": False}, ) assert hasattr(Base.classes, "users") assert not hasattr(Base.classes, "addresses") def test_exception_prepare_not_called(self): Base = automap_base(metadata=self.tables_test_metadata) class User(Base): __tablename__ = "users" s = Session() assert_raises_message( orm_exc.UnmappedClassError, "Class test.ext.test_automap.User is a subclass of AutomapBase. " r"Mappings are not produced until the .prepare\(\) method is " "called on the class hierarchy.", s.query, User, ) def test_relationship_explicit_override_m2o(self): Base = automap_base(metadata=self.tables_test_metadata) prop = relationship("users") class Address(Base): __tablename__ = "addresses" users = prop Base.prepare() User = Base.classes.users assert Address.users.property is prop a1 = Address(email_address="e1") u1 = User(name="u1", address_collection=[a1]) assert a1.users is u1 def test_relationship_self_referential(self): Base = automap_base(metadata=self.tables_test_metadata) Base.prepare() Node = Base.classes.nodes n1 = Node() n2 = Node() n1.nodes_collection.append(n2) assert n2.nodes is n1 def test_prepare_accepts_optional_schema_arg(self): """ The underlying reflect call accepts an optional schema argument. This is for determining which database schema to load. This test verifies that prepare can accept an optional schema argument and pass it to reflect. 
""" Base = automap_base(metadata=self.tables_test_metadata) engine_mock = Mock() with patch.object(Base.metadata, "reflect") as reflect_mock: Base.prepare(autoload_with=engine_mock, schema="some_schema") reflect_mock.assert_called_once_with( engine_mock, schema="some_schema", extend_existing=True, autoload_replace=False, ) def test_prepare_defaults_to_no_schema(self): """ The underlying reflect call accepts an optional schema argument. This is for determining which database schema to load. This test verifies that prepare passes a default None if no schema is provided. """ Base = automap_base(metadata=self.tables_test_metadata) engine_mock = Mock() with patch.object(Base.metadata, "reflect") as reflect_mock: Base.prepare(autoload_with=engine_mock) reflect_mock.assert_called_once_with( engine_mock, schema=None, extend_existing=True, autoload_replace=False, ) def test_prepare_w_dialect_kwargs(self): Base = automap_base(metadata=self.tables_test_metadata) engine_mock = Mock() with patch.object(Base.metadata, "reflect") as reflect_mock: Base.prepare( autoload_with=engine_mock, reflection_options={"oracle_resolve_synonyms": True}, ) reflect_mock.assert_called_once_with( engine_mock, schema=None, extend_existing=True, autoload_replace=False, oracle_resolve_synonyms=True, ) def test_naming_schemes(self): Base = automap_base(metadata=self.tables_test_metadata) def classname_for_table(base, tablename, table): return str("cls_" + tablename) def name_for_scalar_relationship( base, local_cls, referred_cls, constraint ): return "scalar_" + referred_cls.__name__ def name_for_collection_relationship( base, local_cls, referred_cls, constraint ): return "coll_" + referred_cls.__name__ Base.prepare( classname_for_table=classname_for_table, name_for_scalar_relationship=name_for_scalar_relationship, name_for_collection_relationship=name_for_collection_relationship, ) User = Base.classes.cls_users Address = Base.classes.cls_addresses u1 = User() a1 = Address() u1.coll_cls_addresses.append(a1) assert a1.scalar_cls_users is u1 def test_relationship_m2m(self): Base = automap_base(metadata=self.tables_test_metadata) Base.prepare() Order, Item = Base.classes.orders, Base.classes["items"] o1 = Order() i1 = Item() o1.items_collection.append(i1) assert o1 in i1.orders_collection def test_relationship_explicit_override_forwards_m2m(self): Base = automap_base(metadata=self.tables_test_metadata) class Order(Base): __tablename__ = "orders" items_collection = relationship( "items", secondary="order_items", collection_class=set ) Base.prepare() Item = Base.classes["items"] o1 = Order() i1 = Item() o1.items_collection.add(i1) # it is 'order_collection' because the class name is # "Order" ! 
assert isinstance(i1.order_collection, list) assert o1 in i1.order_collection def test_m2m_relationship_also_map_the_secondary(self): """test #6679""" Base = automap_base(metadata=self.tables_test_metadata) # extend the table to have pk cols Table( "order_items", self.tables_test_metadata, Column("item_id", None, ForeignKey("items.id"), primary_key=True), Column( "order_id", None, ForeignKey("orders.id"), primary_key=True ), extend_existing=True, ) # then also map to it class OrderItem(Base): __tablename__ = "order_items" Base.prepare() Order = Base.classes["orders"] Item = Base.classes["items"] o1 = Order() i1 = Item(description="x") o1.items_collection.append(i1) s = fixtures.fixture_session() s.add(o1) s.flush() oi = s.execute(select(OrderItem)).scalars().one() is_(oi.items, i1) is_(oi.orders, o1) def test_relationship_pass_params(self): Base = automap_base(metadata=self.tables_test_metadata) mock = Mock() def _gen_relationship( base, direction, return_fn, attrname, local_cls, referred_cls, **kw ): mock(base, direction, attrname) return generate_relationship( base, direction, return_fn, attrname, local_cls, referred_cls, **kw ) Base.prepare(generate_relationship=_gen_relationship) assert set(tuple(c[1]) for c in mock.mock_calls).issuperset( [ (Base, interfaces.MANYTOONE, "nodes"), (Base, interfaces.MANYTOMANY, "keywords_collection"), (Base, interfaces.MANYTOMANY, "items_collection"), (Base, interfaces.MANYTOONE, "users"), (Base, interfaces.ONETOMANY, "addresses_collection"), ] ) class CascadeTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table("a", metadata, Column("id", Integer, primary_key=True)) Table( "b", metadata, Column("id", Integer, primary_key=True), Column("aid", ForeignKey("a.id"), nullable=True), ) Table( "c", metadata, Column("id", Integer, primary_key=True), Column("aid", ForeignKey("a.id"), nullable=False), ) Table( "d", metadata, Column("id", Integer, primary_key=True), Column( "aid", ForeignKey("a.id", ondelete="cascade"), nullable=False ), ) Table( "e", metadata, Column("id", Integer, primary_key=True), Column( "aid", ForeignKey("a.id", ondelete="set null"), nullable=True ), ) def test_o2m_relationship_cascade(self): Base = automap_base(metadata=self.tables_test_metadata) Base.prepare() configure_mappers() b_rel = Base.classes.a.b_collection assert not b_rel.property.cascade.delete assert not b_rel.property.cascade.delete_orphan assert not b_rel.property.passive_deletes assert b_rel.property.cascade.save_update c_rel = Base.classes.a.c_collection assert c_rel.property.cascade.delete assert c_rel.property.cascade.delete_orphan assert not c_rel.property.passive_deletes assert c_rel.property.cascade.save_update d_rel = Base.classes.a.d_collection assert d_rel.property.cascade.delete assert d_rel.property.cascade.delete_orphan assert d_rel.property.passive_deletes assert d_rel.property.cascade.save_update e_rel = Base.classes.a.e_collection assert not e_rel.property.cascade.delete assert not e_rel.property.cascade.delete_orphan assert e_rel.property.passive_deletes assert e_rel.property.cascade.save_update class AutomapInhTest(fixtures.MappedTest): @classmethod def define_tables(cls, metadata): Table( "single", metadata, Column("id", Integer, primary_key=True), Column("type", String(10)), test_needs_fk=True, ) Table( "joined_base", metadata, Column("id", Integer, primary_key=True), Column("type", String(10)), test_needs_fk=True, ) Table( "joined_inh", metadata, Column( "id", Integer, ForeignKey("joined_base.id"), primary_key=True ), 
test_needs_fk=True, ) FixtureTest.define_tables(metadata) def test_single_inheritance_reflect(self): Base = automap_base() class Single(Base): __tablename__ = "single" type = Column(String) __mapper_args__ = { "polymorphic_identity": "u0", "polymorphic_on": type, } class SubUser1(Single): __mapper_args__ = {"polymorphic_identity": "u1"} class SubUser2(Single): __mapper_args__ = {"polymorphic_identity": "u2"} Base.prepare(autoload_with=testing.db) assert SubUser2.__mapper__.inherits is Single.__mapper__ def test_joined_inheritance_reflect(self): Base = automap_base() class Joined(Base): __tablename__ = "joined_base" type = Column(String) __mapper_args__ = { "polymorphic_identity": "u0", "polymorphic_on": type, } class SubJoined(Joined): __tablename__ = "joined_inh" __mapper_args__ = {"polymorphic_identity": "u1"} Base.prepare(autoload_with=testing.db) assert SubJoined.__mapper__.inherits is Joined.__mapper__ assert not Joined.__mapper__.relationships assert not SubJoined.__mapper__.relationships def test_conditional_relationship(self): Base = automap_base() def _gen_relationship(*arg, **kw): return None Base.prepare( autoload_with=testing.db, generate_relationship=_gen_relationship, ) class ConcurrentAutomapTest(fixtures.TestBase): __only_on__ = "sqlite" def _make_tables(self, e): m = MetaData() for i in range(15): Table( "table_%d" % i, m, Column("id", Integer, primary_key=True), Column("data", String(50)), Column( "t_%d_id" % (i - 1), ForeignKey("table_%d.id" % (i - 1)) ) if i > 4 else None, ) m.drop_all(e) m.create_all(e) def _automap(self, e): Base = automap_base() Base.prepare(autoload_with=e) time.sleep(0.01) configure_mappers() def _chaos(self): e = create_engine("sqlite://") try: self._make_tables(e) for i in range(2): try: self._automap(e) except: self._success = False raise time.sleep(random.random()) finally: e.dispose() def test_concurrent_automaps_w_configure(self): self._success = True threads = [threading.Thread(target=self._chaos) for i in range(30)] for t in threads: t.start() for t in threads: t.join() assert self._success, "One or more threads failed"
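# A self-contained sketch of the core pattern these tests exercise, reduced
# to an in-memory SQLite database; table and column names are examples only.
def _automap_demo():
    from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String, ForeignKey
    from sqlalchemy.ext.automap import automap_base

    engine = create_engine("sqlite://")
    metadata = MetaData()
    Table("users", metadata,
          Column("id", Integer, primary_key=True),
          Column("name", String(50)))
    Table("addresses", metadata,
          Column("id", Integer, primary_key=True),
          Column("user_id", ForeignKey("users.id")),
          Column("email_address", String(100)))
    metadata.create_all(engine)

    Base = automap_base()
    Base.prepare(autoload_with=engine)   # reflect tables, generate classes
    User, Address = Base.classes.users, Base.classes.addresses

    # Default naming: o2m collection "<referred>_collection", m2o scalar "<referred>".
    u1 = User(name="u1", addresses_collection=[Address(email_address="e1")])
    assert u1.addresses_collection[0].users is u1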
alexchandel/political-compass
refs/heads/master
compass.py
1
#!/usr/bin/env python3 import urllib import urllib.request import lxml.html import matplotlib.pyplot as pyplot url = "http://www.politicalcompass.org/test" def_opts = {"pageno": 1, "carried_x": 0, "carried_y": 0, "submit": "Next Page"} def_answers = { 1: 3, 2: 0, 3: 3, 4: 0, 5: 0, 6: 0, 7: 0, 8: 2, 9: 2, 10: 3, 11: 2, 12: 0, 13: 1, 14: 1, 15: 0, 16: 2, 17: 1, 18: 2, 19: 3, 20: 2, 21: 2, 22: 0, 23: 3, 24: 0, 25: 0, 26: 0, 27: 0, 28: 0, 29: 3, 30: 3, 31: 0, 32: 1, 33: 0, 34: 1, 35: 1, 36: 0, 37: 0, 38: 0, 39: 0, 40: 3, 41: 0, 42: 0, 43: 0, 44: 0, 45: 0, 46: 0, 47: 0, 48: 0, 49: 0, 50: 1, 51: 0, 52: 0, 53: 0, 54: 1, 55: 0, 56: 0, 57: 0, 58: 3, 59: 3, 60: 3, 61: 0, 62: 0 } values = {0: "Strongly Disagree", 1: "Disagree", 2: "Agree", 3: "Strongly Agree"} def_questions = { 1: "If economic globalisation is inevitable, it should primarily serve humanity rather than the interests of trans-national corporations.", 2: "I'd always support my country, whether it was right or wrong.", 3: "No one chooses his or her country of birth, so it's foolish to be proud of it.", 4: "Our race has many superior qualities, compared with other races.", 5: "The enemy of my enemy is my friend.", 6: "Military action that defies international law is sometimes justified.", 7: "There is now a worrying fusion of information and entertainment.", 8: "People are ultimately divided more by class than by nationality.", 9: "Controlling inflation is more important than controlling unemployment.", 10: "Because corporations cannot be trusted to voluntarily protect the environment, they require regulation.", 11: "\"from each according to his ability, to each according to his need\" is a fundamentally good idea.", 12: "It's a sad reflection on our society that something as basic as drinking water is now a bottled, branded consumer product.", 13: "Land shouldn't be a commodity to be bought and sold.", 14: "It is regrettable that many personal fortunes are made by people who simply manipulate money and contribute nothing to their society.", 15: "Protectionism is sometimes necessary in trade.", 16: "The only social responsibility of a company should be to deliver a profit to its shareholders.", 17: "The rich are too highly taxed.", 18: "Those with the ability to pay should have the right to higher standards of medical care .", 19: "Governments should penalise businesses that mislead the public.", 20: "A genuine free market requires restrictions on the ability of predator multinationals to create monopolies.", 21: "The freer the market, the freer the people.", 22: "Abortion, when the woman's life is not threatened, should always be illegal.", 23: "All authority should be questioned.", 24: "An eye for an eye and a tooth for a tooth.", 25: "Taxpayers should not be expected to prop up any theatres or museums that cannot survive on a commercial basis.", 26: "Schools should not make classroom attendance compulsory.", 27: "All people have their rights, but it is better for all of us that different sorts of people should keep to their own kind.", 28: "Good parents sometimes have to spank their children.", 29: "It's natural for children to keep some secrets from their parents.", 30: "Possessing marijuana for personal use should not be a criminal offence.", 31: "The prime function of schooling should be to equip the future generation to find jobs.", 32: "People with serious inheritable disabilities should not be allowed to reproduce.", 33: "The most important thing for children to learn is to accept discipline.", 34: "There are no savage and 
civilised peoples; there are only different cultures.", 35: "Those who are able to work, and refuse the opportunity, should not expect society's support.", 36: "When you are troubled, it's better not to think about it, but to keep busy with more cheerful things.", 37: "First-generation immigrants can never be fully integrated within their new country.", 38: "What's good for the most successful corporations is always, ultimately, good for all of us.", 39: "No broadcasting institution, however independent its content, should receive public funding.", 40: "Our civil liberties are being excessively curbed in the name of counter-terrorism.", 41: "A significant advantage of a one-party state is that it avoids all the arguments that delay progress in a democratic political system.", 42: "Although the electronic age makes official surveillance easier, only wrongdoers need to be worried.", 43: "The death penalty should be an option for the most serious crimes.", 44: "In a civilised society, one must always have people above to be obeyed and people below to be commanded.", 45: "Abstract art that doesn't represent anything shouldn't be considered art at all.", 46: "In criminal justice, punishment should be more important than rehabilitation.", 47: "It is a waste of time to try to rehabilitate some criminals.", 48: "The businessperson and the manufacturer are more important than the writer and the artist.", 49: "Mothers may have careers, but their first duty is to be homemakers.", 50: "Multinational companies are unethically exploiting the plant genetic resources of developing countries.", 51: "Making peace with the establishment is an important aspect of maturity.", 52: "Astrology accurately explains many things.", 53: "You cannot be moral without being religious.", 54: "Charity is better than social security as a means of helping the genuinely disadvantaged.", 55: "Some people are naturally unlucky.", 56: "It is important that my child's school instills religious values.", 57: "Sex outside marriage is usually immoral.", 58: "A same sex couple in a stable, loving relationship, should not be excluded from the possibility of child adoption.", 59: "Pornography, depicting consenting adults, should be legal for the adult population.", 60: "What goes on in a private bedroom between consenting adults is no business of the state.", 61: "No one can feel naturally homosexual.", 62: "These days openness about sex has gone too far." 
} def submit_page(post_args=None): ''' Return the response body, as a UTF-8-decoded <str> ''' req = urllib.request.Request(url) if isinstance(post_args, dict) and "submit" in post_args: post = urllib.parse.urlencode(post_args) # print(post_args) # print(post.encode('ascii')) req.data = post.encode('ascii') response = urllib.request.urlopen(req) data = response.read() return data.decode() def reap_questions(html): questions = {} for tag in html.findall(".//label[1]/input[@type='radio']"): num = int(tag.name.split('_')[-1]) questions[num] = tag.find("../../../td[1]").text_content() return questions def compass(): answers = def_answers.copy() questions = {} post_args = {} while post_args is not None: # Post previous responses, get new questions (first post is empty, gets page 1) html_text = submit_page(post_args) html = lxml.html.fromstring(html_text) curr_questions = reap_questions(html) # If the test isn't done, prepare [post_args] for next page if len(curr_questions): # Verify test integrity if not all(item in def_questions.items() for item in curr_questions.items()): raise RuntimeError("Questions have changed. Answer cache is bad!") questions.update(curr_questions) # Assemble responses post_args = {'answer_' + str(key): answers[key] for key in curr_questions} # Print responses for num in sorted(curr_questions): print(str(num) + ":\t" + curr_questions[num] + "\n\t" + values[int(answers[num])] + '\n') submit_tag = html.find(".//input[@type='submit']") post_args["submit"] = submit_tag.value # submit_tag.type == "submit" for tag in html.findall(".//input[@type='hidden']"): post_args[tag.name] = tag.value pageno = post_args["pageno"] else: post_args = None pageno = 'f' # with open('/Users/alex/Desktop/page' + pageno + ".html", "a+") as f: # f.write(html_text) h2 = html.find(".//h2") print(h2.text_content()) lines = h2.text_content().split('\n') x = float(lines[0][-6:]) y = float(lines[1][-6:]) pyplot.scatter(x, y) pyplot.xlim(-10, 10) pyplot.ylim(-10, 10) pyplot.title("Political coordinates") pyplot.xlabel("Economic Left/Right") pyplot.ylabel("Social Libertarian/Authoritarian") pyplot.grid() pyplot.show() return questions def main(): compass() if __name__ == '__main__': main()
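# A sketch of how one page of responses is encoded for submit_page(); the
# page shown is hypothetical, the keys mirror def_opts/def_answers above:
#     page_questions = {1: "...", 2: "..."}          # from reap_questions()
#     post_args = {'answer_%d' % k: def_answers[k] for k in page_questions}
#     post_args.update(pageno=1, carried_x=0, carried_y=0, submit="Next Page")
#     body = submit_page(post_args)                  # urlencoded POST to `url`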
Xilinx/PYNQ
refs/heads/master
pynq/lib/arduino/arduino_grove_dlight.py
4
# Copyright (c) 2016, NECST Laboratory, Politecnico di Milano # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from . import Arduino from . import ARDUINO_GROVE_I2C __author__ = "Marco Rabozzi, Luca Cerina, Giuseppe Natale" __copyright__ = "Copyright 2016, NECST Laboratory, Politecnico di Milano" ARDUINO_GROVE_DLIGHT_PROGRAM = "arduino_grove_dlight.bin" CONFIG_IOP_SWITCH = 0x1 GET_LIGHT_VALUE = 0x3 GET_LUX_VALUE = 0x5 class Grove_Dlight(object): """This class controls the Grove I2C digital light sensor. Grove Digital Light sensor based on the TSL2561. Attributes ---------- microblaze : Arduino Microblaze processor instance used by this module. """ def __init__(self, mb_info, gr_pin): """Return a new instance of a Grove_Dlight object. Parameters ---------- mb_info : dict A dictionary storing Microblaze information, such as the IP name and the reset name. gr_pin: list A group of pins on arduino-grove shield. """ if gr_pin not in [ARDUINO_GROVE_I2C]: raise ValueError("Group number can only be I2C.") self.microblaze = Arduino(mb_info, ARDUINO_GROVE_DLIGHT_PROGRAM) self.microblaze.write_blocking_command(CONFIG_IOP_SWITCH) def read_raw_light(self): """Read the visible and IR channel values. Read the values from the grove digital light peripheral. Returns ------- tuple A tuple containing 2 integer values ch0 (visible) and ch1 (IR). """ self.microblaze.write_blocking_command(GET_LIGHT_VALUE) ch0, ch1 = self.microblaze.read_mailbox(0, 2) return ch0, ch1 def read_lux(self): """Read the computed lux value of the sensor. Returns ------- int The lux value from the sensor """ self.microblaze.write_blocking_command(GET_LUX_VALUE) lux = self.microblaze.read_mailbox(0x8) return lux
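# A usage sketch for this driver (not part of the module); it assumes the
# PYNQ base overlay, whose `base.ARDUINO` dict supplies mb_info:
#
#     from pynq.overlays.base import BaseOverlay
#     from pynq.lib.arduino import Grove_Dlight, ARDUINO_GROVE_I2C
#
#     base = BaseOverlay("base.bit")
#     dlight = Grove_Dlight(base.ARDUINO, ARDUINO_GROVE_I2C)
#     visible, ir = dlight.read_raw_light()   # raw ch0/ch1 counts
#     lux = dlight.read_lux()                 # computed illuminance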
350dotorg/Django
refs/heads/master
django/conf/locale/he/formats.py
81
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # DATE_FORMAT = 'j בF Y' TIME_FORMAT = 'H:i:s' DATETIME_FORMAT = 'j בF Y H:i:s' YEAR_MONTH_FORMAT = 'F Y' MONTH_DAY_FORMAT = 'j בF' SHORT_DATE_FORMAT = 'd/m/Y' SHORT_DATETIME_FORMAT = 'd/m/Y H:i:s' # FIRST_DAY_OF_WEEK = # DATE_INPUT_FORMATS = # TIME_INPUT_FORMATS = # DATETIME_INPUT_FORMATS = DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = ',' # NUMBER_GROUPING =
samratashok87/Rammbock
refs/heads/master
utest/test_templates/__init__.py
6
from unittest import TestCase
shreyasp/erpnext
refs/heads/develop
erpnext/stock/doctype/landed_cost_item/landed_cost_item.py
121
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class LandedCostItem(Document): pass
JohnS-01/coala
refs/heads/master
coalib/collecting/Collectors.py
9
import functools import os import pkg_resources import itertools from pyprint.NullPrinter import NullPrinter from coalib.bears.BEAR_KIND import BEAR_KIND from coalib.collecting.Importers import iimport_objects from coala_utils.decorators import yield_once from coalib.output.printers.LOG_LEVEL import LOG_LEVEL from coalib.parsing.Globbing import fnmatch, iglob, glob_escape from coalib.output.printers.LogPrinter import LogPrinter def _get_kind(bear_class): try: return bear_class.kind() except NotImplementedError: return None def _import_bears(file_path, kinds): # recursive imports: for bear_list in iimport_objects(file_path, names='__additional_bears__', types=list): for bear_class in bear_list: if _get_kind(bear_class) in kinds: yield bear_class # normal import for bear_class in iimport_objects(file_path, attributes='kind', local=True): if _get_kind(bear_class) in kinds: yield bear_class @yield_once def icollect(file_paths, ignored_globs=None, match_cache={}): """ Evaluate globs in file paths and return all matching files. :param file_paths: File path or list of such that can include globs :param ignored_globs: List of globs to ignore when matching files :param match_cache: Dictionary to use for caching results :return: Iterator that yields tuple of path of a matching file, the glob where it was found """ if isinstance(file_paths, str): file_paths = [file_paths] for file_path in file_paths: if file_path not in match_cache: match_cache[file_path] = list(iglob(file_path)) for match in match_cache[file_path]: if not ignored_globs or not fnmatch(match, ignored_globs): yield match, file_path def collect_files(file_paths, log_printer, ignored_file_paths=None, limit_file_paths=None): """ Evaluate globs in file paths and return all matching files :param file_paths: File path or list of such that can include globs :param ignored_file_paths: List of globs that match to-be-ignored files :param limit_file_paths: List of globs that the files are limited to :return: List of paths of all matching files """ limit_fnmatch = (functools.partial(fnmatch, globs=limit_file_paths) if limit_file_paths else lambda fname: True) valid_files = list(filter(lambda fname: os.path.isfile(fname[0]), icollect(file_paths, ignored_file_paths))) # Find globs that gave no files and warn the user if valid_files: collected_files, file_globs_with_files = zip(*valid_files) else: collected_files, file_globs_with_files = [], [] _warn_if_unused_glob(log_printer, file_paths, file_globs_with_files, "No files matching '{}' were found.") limited_files = list(filter(limit_fnmatch, collected_files)) return limited_files def collect_dirs(dir_paths, ignored_dir_paths=None): """ Evaluate globs in directory paths and return all matching directories :param dir_paths: File path or list of such that can include globs :param ignored_dir_paths: List of globs that match to-be-ignored dirs :return: List of paths of all matching directories """ valid_dirs = list(filter(lambda fname: os.path.isdir(fname[0]), icollect(dir_paths, ignored_dir_paths))) if valid_dirs: collected_dirs, _ = zip(*valid_dirs) return list(collected_dirs) else: return [] @yield_once def icollect_bears(bear_dir_glob, bear_globs, kinds, log_printer): """ Collect all bears from bear directories that have a matching kind. 
:param bear_dir_glob: Directory globs or list of such that can contain bears :param bear_globs: Globs of bears to collect :param kinds: List of bear kinds to be collected :param log_printer: Log_printer to handle logging :return: Iterator that yields a tuple with bear class and which bear_glob was used to find that bear class. """ for bear_dir, dir_glob in filter(lambda x: os.path.isdir(x[0]), icollect(bear_dir_glob)): # Since we get a real directory here and since we # pass this later to iglob, we need to escape this. bear_dir = glob_escape(bear_dir) for bear_glob in bear_globs: for matching_file in iglob( os.path.join(bear_dir, bear_glob + '.py')): try: for bear in _import_bears(matching_file, kinds): yield bear, bear_glob except pkg_resources.VersionConflict as exception: log_printer.log_exception( ('Unable to collect bears from {file} because there ' 'is a conflict with the version of a dependency ' 'you have installed. This may be resolved by ' 'creating a separate virtual environment for coala ' 'or running `pip install \"{pkg}\"`. Be aware that ' 'the latter solution might break other python ' 'packages that depend on the currently installed ' 'version.').format(file=matching_file, pkg=exception.req), exception, log_level=LOG_LEVEL.WARNING) except BaseException as exception: log_printer.log_exception( 'Unable to collect bears from {file}. Probably the ' 'file is malformed or the module code raises an ' 'exception.'.format(file=matching_file), exception, log_level=LOG_LEVEL.WARNING) def collect_bears(bear_dirs, bear_globs, kinds, log_printer, warn_if_unused_glob=True): """ Collect all bears from bear directories that have a matching kind matching the given globs. :param bear_dirs: Directory name or list of such that can contain bears. :param bear_globs: Globs of bears to collect. :param kinds: List of bear kinds to be collected. :param log_printer: log_printer to handle logging. :param warn_if_unused_glob: True if warning message should be shown if a glob didn't give any bears. :return: Tuple of list of matching bear classes based on kind. The lists are in the same order as kinds. """ bears_found = tuple([] for i in range(len(kinds))) bear_globs_with_bears = set() for bear, glob in icollect_bears(bear_dirs, bear_globs, kinds, log_printer): index = kinds.index(_get_kind(bear)) bears_found[index].append(bear) bear_globs_with_bears.add(glob) if warn_if_unused_glob: _warn_if_unused_glob(log_printer, bear_globs, bear_globs_with_bears, "No bears matching '{}' were found. Make sure you " 'have coala-bears installed or you have typed the ' 'name correctly.') return bears_found def filter_section_bears_by_languages(bears, languages): """ Filters the bears by languages. :param bears: The dictionary of the sections as keys and list of bears as values. :param languages: Languages that bears are being filtered on. :return: New dictionary with filtered out bears that don't match any language from languages. """ new_bears = {} # All bears with "all" languages supported shall be shown languages = set(language.lower() for language in languages) | {'all'} for section in bears.keys(): new_bears[section] = tuple( bear for bear in bears[section] if {language.lower() for language in bear.LANGUAGES} & languages) return new_bears def filter_capabilities_by_languages(bears, languages): """ Filters the bears capabilities by languages. :param bears: Dictionary with sections as keys and list of bears as values. :param languages: Languages that bears are being filtered on. 
:return: New dictionary with languages as keys and their bears capabilities as values. The capabilities are stored in a tuple of two elements where the first one represents what the bears can detect, and the second one what they can fix. """ languages = set(language.lower() for language in languages) language_bears_capabilities = {language: ( set(), set()) for language in languages} for section_bears in bears.values(): for bear in section_bears: bear_language = ( ({language.lower() for language in bear.LANGUAGES} | {'all'}) & languages) language = bear_language.pop() if bear_language else '' capabilities = (language_bears_capabilities[language] if language else tuple()) language_bears_capabilities.update( {language: (capabilities[0] | bear.can_detect, capabilities[1] | bear.CAN_FIX)} if language else {}) return language_bears_capabilities def get_all_bears(): """ Get a ``list`` of all available bears. """ from coalib.settings.Section import Section printer = LogPrinter(NullPrinter()) local_bears, global_bears = collect_bears( Section('').bear_dirs(), ['**'], [BEAR_KIND.LOCAL, BEAR_KIND.GLOBAL], printer, warn_if_unused_glob=False) return list(itertools.chain(local_bears, global_bears)) def get_all_bears_names(): """ Get a ``list`` of names of all available bears. """ return [bear.name for bear in get_all_bears()] def collect_all_bears_from_sections(sections, log_printer): """ Collect all kinds of bears from bear directories given in the sections. :param sections: List of sections so bear_dirs are taken into account :param log_printer: Log_printer to handle logging :return: Tuple of dictionaries of local and global bears. The dictionary key is section class and dictionary value is a list of Bear classes """ local_bears = {} global_bears = {} for section in sections: bear_dirs = sections[section].bear_dirs() local_bears[section], global_bears[section] = collect_bears( bear_dirs, ['**'], [BEAR_KIND.LOCAL, BEAR_KIND.GLOBAL], log_printer, warn_if_unused_glob=False) return local_bears, global_bears def _warn_if_unused_glob(log_printer, globs, used_globs, message): """ Warn if a glob has not been used. :param log_printer: The log_printer to handle logging. :param globs: List of globs that were expected to be used. :param used_globs: List of globs that were actually used. :param message: Warning message to display if a glob is unused. The glob which was unused will be added using .format() """ unused_globs = set(globs) - set(used_globs) for glob in unused_globs: log_printer.warn(message.format(glob)) def collect_registered_bears_dirs(entrypoint): """ Searches setuptools for the entrypoint and returns the bear directories given by the module. :param entrypoint: The entrypoint to find packages with. :return: List of bear directories. """ collected_dirs = [] for ep in pkg_resources.iter_entry_points(entrypoint): registered_package = None try: registered_package = ep.load() except pkg_resources.DistributionNotFound: continue collected_dirs.append(os.path.abspath( os.path.dirname(registered_package.__file__))) return collected_dirs
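# A minimal sketch driving the two most common entry points above; the glob
# and ignore patterns are illustrative, and the printer setup mirrors what
# get_all_bears() does internally.
def _collectors_demo():
    log_printer = LogPrinter(NullPrinter())
    # Expand globs into concrete files, skipping anything under .git/:
    files = collect_files(['src/**/*.py'], log_printer,
                          ignored_file_paths=['**/.git/**'])
    # Names of every installed bear reachable from the default bear dirs:
    names = get_all_bears_names()
    return files, names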
wrouesnel/ansible
refs/heads/devel
lib/ansible/modules/network/netscaler/netscaler_cs_vserver.py
52
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (c) 2017 Citrix Systems # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: netscaler_cs_vserver short_description: Manage content switching vserver description: - Manage content switching vserver - This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance version_added: "2.4" author: George Nikolopoulos (@giorgos-nikolopoulos) options: name: description: - >- Name for the content switching virtual server. Must begin with an ASCII alphanumeric or underscore C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space, colon C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. - "Cannot be changed after the CS virtual server is created." - "Minimum length = 1" td: description: - >- Integer value that uniquely identifies the traffic domain in which you want to configure the entity. If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID of 0. - "Minimum value = 0" - "Maximum value = 4094" servicetype: choices: - 'HTTP' - 'SSL' - 'TCP' - 'FTP' - 'RTSP' - 'SSL_TCP' - 'UDP' - 'DNS' - 'SIP_UDP' - 'SIP_TCP' - 'SIP_SSL' - 'ANY' - 'RADIUS' - 'RDP' - 'MYSQL' - 'MSSQL' - 'DIAMETER' - 'SSL_DIAMETER' - 'DNS_TCP' - 'ORACLE' - 'SMPP' description: - "Protocol used by the virtual server." ipv46: description: - "IP address of the content switching virtual server." - "Minimum length = 1" targettype: choices: - 'GSLB' description: - "Virtual server target type." ippattern: description: - >- IP address pattern, in dotted decimal notation, for identifying packets to be accepted by the virtual server. The IP Mask parameter specifies which part of the destination IP address is matched against the pattern. Mutually exclusive with the IP Address parameter. - >- For example, if the IP pattern assigned to the virtual server is C(198.51.100.0) and the IP mask is C(255.255.240.0) (a forward mask), the first 20 bits in the destination IP addresses are matched with the first 20 bits in the pattern. The virtual server accepts requests with IP addresses that range from 198.51.96.1 to 198.51.111.254. You can also use a pattern such as C(0.0.2.2) and a mask such as C(0.0.255.255) (a reverse mask). - >- If a destination IP address matches more than one IP pattern, the pattern with the longest match is selected, and the associated virtual server processes the request. For example, if the virtual servers, C(vs1) and C(vs2), have the same IP pattern, C(0.0.100.128), but different IP masks of C(0.0.255.255) and C(0.0.224.255), a destination IP address of 198.51.100.128 has the longest match with the IP pattern of C(vs1). If a destination IP address matches two or more virtual servers to the same extent, the request is processed by the virtual server whose port number matches the port number in the request. ipmask: description: - >- IP mask, in dotted decimal notation, for the IP Pattern parameter. Can have leading or trailing non-zero octets (for example, C(255.255.240.0) or C(0.0.255.255)). 
Accordingly, the mask specifies whether the first n bits or the last n bits of the destination IP address in a client request are to be matched with the corresponding bits in the IP pattern. The former is called a forward mask. The latter is called a reverse mask. range: description: - >- Number of consecutive IP addresses, starting with the address specified by the IP Address parameter, to include in a range of addresses assigned to this virtual server. - "Minimum value = C(1)" - "Maximum value = C(254)" port: description: - "Port number for content switching virtual server." - "Minimum value = 1" - "Range C(1) - C(65535)" - "* in CLI is represented as 65535 in NITRO API" stateupdate: choices: - 'enabled' - 'disabled' description: - >- Enable state updates for a specific content switching virtual server. By default, the Content Switching virtual server is always UP, regardless of the state of the Load Balancing virtual servers bound to it. This parameter interacts with the global setting as follows: - "Global Level | Vserver Level | Result" - "enabled enabled enabled" - "enabled disabled enabled" - "disabled enabled enabled" - "disabled disabled disabled" - >- If you want to enable state updates for only some content switching virtual servers, be sure to disable the state update parameter. cacheable: description: - >- Use this option to specify whether a virtual server, used for load balancing or content switching, routes requests to the cache redirection virtual server before sending it to the configured servers. type: bool redirecturl: description: - >- URL to which traffic is redirected if the virtual server becomes unavailable. The service type of the virtual server should be either C(HTTP) or C(SSL). - >- Caution: Make sure that the domain in the URL does not match the domain specified for a content switching policy. If it does, requests are continuously redirected to the unavailable virtual server. - "Minimum length = 1" clttimeout: description: - "Idle time, in seconds, after which the client connection is terminated. The default values are:" - "Minimum value = C(0)" - "Maximum value = C(31536000)" precedence: choices: - 'RULE' - 'URL' description: - >- Type of precedence to use for both RULE-based and URL-based policies on the content switching virtual server. With the default C(RULE) setting, incoming requests are evaluated against the rule-based content switching policies. If none of the rules match, the URL in the request is evaluated against the URL-based content switching policies. casesensitive: description: - >- Consider case in URLs (for policies that use URLs instead of RULES). For example, with the C(on) setting, the URLs /a/1.html and /A/1.HTML are treated differently and can have different targets (set by content switching policies). With the C(off) setting, /a/1.html and /A/1.HTML are switched to the same target. type: bool somethod: choices: - 'CONNECTION' - 'DYNAMICCONNECTION' - 'BANDWIDTH' - 'HEALTH' - 'NONE' description: - >- Type of spillover used to divert traffic to the backup virtual server when the primary virtual server reaches the spillover threshold. Connection spillover is based on the number of connections. Bandwidth spillover is based on the total Kbps of incoming and outgoing traffic. sopersistence: choices: - 'enabled' - 'disabled' description: - "Maintain source-IP based persistence on primary and backup virtual servers." sopersistencetimeout: description: - "Time-out value, in minutes, for spillover persistence." 
- "Minimum value = C(2)" - "Maximum value = C(1440)" sothreshold: description: - >- Depending on the spillover method, the maximum number of connections or the maximum total bandwidth (Kbps) that a virtual server can handle before spillover occurs. - "Minimum value = C(1)" - "Maximum value = C(4294967287)" sobackupaction: choices: - 'DROP' - 'ACCEPT' - 'REDIRECT' description: - >- Action to be performed if spillover is to take effect, but no backup chain to spillover is usable or exists. redirectportrewrite: choices: - 'enabled' - 'disabled' description: - "State of port rewrite while performing HTTP redirect." downstateflush: choices: - 'enabled' - 'disabled' description: - >- Flush all active transactions associated with a virtual server whose state transitions from UP to DOWN. Do not enable this option for applications that must complete their transactions. backupvserver: description: - >- Name of the backup virtual server that you are configuring. Must begin with an ASCII alphanumeric or underscore C(_) character, and must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at sign C(@), equal sign C(=), and hyphen C(-) characters. Can be changed after the backup virtual server is created. You can assign a different backup virtual server or rename the existing virtual server. - "Minimum length = 1" disableprimaryondown: choices: - 'enabled' - 'disabled' description: - >- Continue forwarding the traffic to backup virtual server even after the primary server comes UP from the DOWN state. insertvserveripport: choices: - 'OFF' - 'VIPADDR' - 'V6TOV4MAPPING' description: - >- Insert the virtual server's VIP address and port number in the request header. Available values function as follows: - "C(VIPADDR) - Header contains the vserver's IP address and port number without any translation." - "C(OFF) - The virtual IP and port header insertion option is disabled." - >- C(V6TOV4MAPPING) - Header contains the mapped IPv4 address corresponding to the IPv6 address of the vserver and the port number. An IPv6 address can be mapped to a user-specified IPv4 address using the set ns ip6 command. vipheader: description: - "Name of virtual server IP and port header, for use with the VServer IP Port Insertion parameter." - "Minimum length = 1" rtspnat: description: - "Enable network address translation (NAT) for real-time streaming protocol (RTSP) connections." type: bool authenticationhost: description: - >- FQDN of the authentication virtual server. The service type of the virtual server should be either C(HTTP) or C(SSL). - "Minimum length = 3" - "Maximum length = 252" authentication: description: - "Authenticate users who request a connection to the content switching virtual server." type: bool listenpolicy: description: - >- String specifying the listen policy for the content switching virtual server. Can be either the name of an existing expression or an in-line expression. authn401: description: - "Enable HTTP 401-response based authentication." type: bool authnvsname: description: - >- Name of authentication virtual server that authenticates the incoming user requests to this content switching virtual server. . - "Minimum length = 1" - "Maximum length = 252" push: choices: - 'enabled' - 'disabled' description: - >- Process traffic with the push virtual server that is bound to this content switching virtual server (specified by the Push VServer parameter). The service type of the push virtual server should be either C(HTTP) or C(SSL). 
pushvserver: description: - >- Name of the load balancing virtual server, of type C(PUSH) or C(SSL_PUSH), to which the server pushes updates received on the client-facing load balancing virtual server. - "Minimum length = 1" pushlabel: description: - >- Expression for extracting the label from the response received from server. This string can be either an existing rule name or an inline expression. The service type of the virtual server should be either C(HTTP) or C(SSL). pushmulticlients: description: - >- Allow multiple Web 2.0 connections from the same client to connect to the virtual server and expect updates. type: bool tcpprofilename: description: - "Name of the TCP profile containing TCP configuration settings for the virtual server." - "Minimum length = 1" - "Maximum length = 127" httpprofilename: description: - >- Name of the HTTP profile containing HTTP configuration settings for the virtual server. The service type of the virtual server should be either C(HTTP) or C(SSL). - "Minimum length = 1" - "Maximum length = 127" dbprofilename: description: - "Name of the DB profile." - "Minimum length = 1" - "Maximum length = 127" oracleserverversion: choices: - '10G' - '11G' description: - "Oracle server version." comment: description: - "Information about this virtual server." mssqlserverversion: choices: - '70' - '2000' - '2000SP1' - '2005' - '2008' - '2008R2' - '2012' - '2014' description: - "The version of the MSSQL server." l2conn: description: - "Use L2 Parameters to identify a connection." mysqlprotocolversion: description: - "The protocol version returned by the mysql vserver." mysqlserverversion: description: - "The server version string returned by the mysql vserver." - "Minimum length = 1" - "Maximum length = 31" mysqlcharacterset: description: - "The character set returned by the mysql vserver." mysqlservercapabilities: description: - "The server capabilities returned by the mysql vserver." appflowlog: choices: - 'enabled' - 'disabled' description: - "Enable logging appflow flow information." netprofile: description: - "The name of the network profile." - "Minimum length = 1" - "Maximum length = 127" icmpvsrresponse: choices: - 'PASSIVE' - 'ACTIVE' description: - "Can be active or passive." rhistate: choices: - 'PASSIVE' - 'ACTIVE' description: - "A host route is injected according to the setting on the virtual servers" - >- * If set to C(PASSIVE) on all the virtual servers that share the IP address, the appliance always injects the hostroute. - >- * If set to C(ACTIVE) on all the virtual servers that share the IP address, the appliance injects even if one virtual server is UP. - >- * If set to C(ACTIVE) on some virtual servers and C(PASSIVE) on the others, the appliance, injects even if one virtual server set to C(ACTIVE) is UP. authnprofile: description: - "Name of the authentication profile to be used when authentication is turned on." dnsprofilename: description: - >- Name of the DNS profile to be associated with the VServer. DNS profile properties will applied to the transactions processed by a VServer. This parameter is valid only for DNS and DNS-TCP VServers. - "Minimum length = 1" - "Maximum length = 127" domainname: description: - "Domain name for which to change the time to live (TTL) and/or backup service IP address." - "Minimum length = 1" ttl: description: - "." - "Minimum value = C(1)" backupip: description: - "." - "Minimum length = 1" cookiedomain: description: - "." - "Minimum length = 1" cookietimeout: description: - "." 
- "Minimum value = C(0)" - "Maximum value = C(1440)" sitedomainttl: description: - "." - "Minimum value = C(1)" lbvserver: description: - The default Load Balancing virtual server. version_added: "2.5" ssl_certkey: description: - The name of the ssl certificate that is bound to this service. - The ssl certificate must already exist. - Creating the certificate can be done with the M(netscaler_ssl_certkey) module. - This option is only applicable only when C(servicetype) is C(SSL). version_added: "2.5" disabled: description: - When set to C(yes) the cs vserver will be disabled. - When set to C(no) the cs vserver will be enabled. - >- Note that due to limitations of the underlying NITRO API a C(disabled) state change alone does not cause the module result to report a changed status. type: bool default: 'no' extends_documentation_fragment: netscaler requirements: - nitro python sdk ''' EXAMPLES = ''' # policy_1 must have been already created with the netscaler_cs_policy module # lbvserver_1 must have been already created with the netscaler_lb_vserver module - name: Setup content switching vserver delegate_to: localhost netscaler_cs_vserver: nsip: 172.18.0.2 nitro_user: nsroot nitro_pass: nsroot state: present name: cs_vserver_1 ipv46: 192.168.1.1 port: 80 servicetype: HTTP policybindings: - policyname: policy_1 targetlbvserver: lbvserver_1 ''' RETURN = ''' loglines: description: list of logged messages by the module returned: always type: list sample: ['message 1', 'message 2'] msg: description: Message detailing the failure reason returned: failure type: str sample: "Action does not exist" diff: description: List of differences between the actual configured object and the configuration specified in the module returned: failure type: dict sample: { 'clttimeout': 'difference. 
ours: (float) 100.0 other: (float) 60.0' } ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.netscaler.netscaler import ( ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, ensure_feature_is_enabled, get_immutables_intersection ) try: from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver import csvserver from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver_lbvserver_binding import csvserver_lbvserver_binding from nssrc.com.citrix.netscaler.nitro.resource.config.cs.csvserver_cspolicy_binding import csvserver_cspolicy_binding from nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding import sslvserver_sslcertkey_binding from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception PYTHON_SDK_IMPORTED = True except ImportError as e: PYTHON_SDK_IMPORTED = False def cs_vserver_exists(client, module): if csvserver.count_filtered(client, 'name:%s' % module.params['name']) > 0: return True else: return False def cs_vserver_identical(client, module, csvserver_proxy): csvserver_list = csvserver.get_filtered(client, 'name:%s' % module.params['name']) diff_dict = csvserver_proxy.diff_object(csvserver_list[0]) if len(diff_dict) == 0: return True else: return False def get_configured_policybindings(client, module): log('Getting configured policy bindigs') bindings = {} if module.params['policybindings'] is None: return bindings for binding in module.params['policybindings']: binding['name'] = module.params['name'] key = binding['policyname'] binding_proxy = ConfigProxy( actual=csvserver_cspolicy_binding(), client=client, readwrite_attrs=[ 'priority', 'bindpoint', 'policyname', 'labelname', 'gotopriorityexpression', 'targetlbvserver', 'name', 'invoke', 'labeltype', ], readonly_attrs=[], attribute_values_dict=binding ) bindings[key] = binding_proxy return bindings def get_default_lb_vserver(client, module): try: default_lb_vserver = csvserver_lbvserver_binding.get(client, module.params['name']) return default_lb_vserver[0] except nitro_exception as e: if e.errorcode == 258: return csvserver_lbvserver_binding() else: raise def default_lb_vserver_identical(client, module): d = get_default_lb_vserver(client, module) configured = ConfigProxy( actual=csvserver_lbvserver_binding(), client=client, readwrite_attrs=[ 'name', 'lbvserver', ], attribute_values_dict={ 'name': module.params['name'], 'lbvserver': module.params['lbvserver'], } ) log('default lb vserver %s' % ((d.name, d.lbvserver),)) if d.name is None and module.params['lbvserver'] is None: log('Default lb vserver identical missing') return True elif d.name is not None and module.params['lbvserver'] is None: log('Default lb vserver needs removing') return False elif configured.has_equal_attributes(d): log('Default lb vserver identical') return True else: log('Default lb vserver not identical') return False def sync_default_lb_vserver(client, module): d = get_default_lb_vserver(client, module) if module.params['lbvserver'] is not None: configured = ConfigProxy( actual=csvserver_lbvserver_binding(), client=client, readwrite_attrs=[ 'name', 'lbvserver', ], attribute_values_dict={ 'name': module.params['name'], 'lbvserver': module.params['lbvserver'], } ) if not configured.has_equal_attributes(d): if d.name is not None: log('Deleting default lb vserver %s' % d.lbvserver) csvserver_lbvserver_binding.delete(client, d) log('Adding default lb vserver %s' % configured.lbvserver) configured.add() else: if d.name is not 
None: log('Deleting default lb vserver %s' % d.lbvserver) csvserver_lbvserver_binding.delete(client, d) def get_actual_policybindings(client, module): log('Getting actual policy bindigs') bindings = {} try: count = csvserver_cspolicy_binding.count(client, name=module.params['name']) if count == 0: return bindings except nitro_exception as e: if e.errorcode == 258: return bindings else: raise for binding in csvserver_cspolicy_binding.get(client, name=module.params['name']): key = binding.policyname bindings[key] = binding return bindings def cs_policybindings_identical(client, module): log('Checking policy bindings identical') actual_bindings = get_actual_policybindings(client, module) configured_bindings = get_configured_policybindings(client, module) actual_keyset = set(actual_bindings.keys()) configured_keyset = set(configured_bindings.keys()) if len(actual_keyset ^ configured_keyset) > 0: return False # Compare item to item for key in actual_bindings.keys(): configured_binding_proxy = configured_bindings[key] actual_binding_object = actual_bindings[key] if not configured_binding_proxy.has_equal_attributes(actual_binding_object): return False # Fallthrough to success return True def sync_cs_policybindings(client, module): log('Syncing cs policybindings') actual_bindings = get_actual_policybindings(client, module) configured_bindings = get_configured_policybindings(client, module) # Delete actual bindings not in configured delete_keys = list(set(actual_bindings.keys()) - set(configured_bindings.keys())) for key in delete_keys: log('Deleting binding for policy %s' % key) csvserver_cspolicy_binding.delete(client, actual_bindings[key]) # Add configured bindings not in actual add_keys = list(set(configured_bindings.keys()) - set(actual_bindings.keys())) for key in add_keys: log('Adding binding for policy %s' % key) configured_bindings[key].add() # Update existing if changed modify_keys = list(set(configured_bindings.keys()) & set(actual_bindings.keys())) for key in modify_keys: if not configured_bindings[key].has_equal_attributes(actual_bindings[key]): log('Updating binding for policy %s' % key) csvserver_cspolicy_binding.delete(client, actual_bindings[key]) configured_bindings[key].add() def ssl_certkey_bindings_identical(client, module): log('Checking if ssl cert key bindings are identical') vservername = module.params['name'] if sslvserver_sslcertkey_binding.count(client, vservername) == 0: bindings = [] else: bindings = sslvserver_sslcertkey_binding.get(client, vservername) if module.params['ssl_certkey'] is None: if len(bindings) == 0: return True else: return False else: certificate_list = [item.certkeyname for item in bindings] if certificate_list == [module.params['ssl_certkey']]: return True else: return False def ssl_certkey_bindings_sync(client, module): log('Syncing certkey bindings') vservername = module.params['name'] if sslvserver_sslcertkey_binding.count(client, vservername) == 0: bindings = [] else: bindings = sslvserver_sslcertkey_binding.get(client, vservername) # Delete existing bindings for binding in bindings: log('Deleting existing binding for certkey %s' % binding.certkeyname) sslvserver_sslcertkey_binding.delete(client, binding) # Add binding if appropriate if module.params['ssl_certkey'] is not None: log('Adding binding for certkey %s' % module.params['ssl_certkey']) binding = sslvserver_sslcertkey_binding() binding.vservername = module.params['name'] binding.certkeyname = module.params['ssl_certkey'] sslvserver_sslcertkey_binding.add(client, binding) def 
diff_list(client, module, csvserver_proxy): csvserver_list = csvserver.get_filtered(client, 'name:%s' % module.params['name']) return csvserver_proxy.diff_object(csvserver_list[0]) def do_state_change(client, module, csvserver_proxy): if module.params['disabled']: log('Disabling cs vserver') result = csvserver.disable(client, csvserver_proxy.actual) else: log('Enabling cs vserver') result = csvserver.enable(client, csvserver_proxy.actual) return result def main(): module_specific_arguments = dict( name=dict(type='str'), td=dict(type='float'), servicetype=dict( type='str', choices=[ 'HTTP', 'SSL', 'TCP', 'FTP', 'RTSP', 'SSL_TCP', 'UDP', 'DNS', 'SIP_UDP', 'SIP_TCP', 'SIP_SSL', 'ANY', 'RADIUS', 'RDP', 'MYSQL', 'MSSQL', 'DIAMETER', 'SSL_DIAMETER', 'DNS_TCP', 'ORACLE', 'SMPP' ] ), ipv46=dict(type='str'), dnsrecordtype=dict( type='str', choices=[ 'A', 'AAAA', 'CNAME', 'NAPTR', ] ), ippattern=dict(type='str'), ipmask=dict(type='str'), range=dict(type='float'), port=dict(type='int'), stateupdate=dict( type='str', choices=[ 'enabled', 'disabled', ] ), cacheable=dict(type='bool'), redirecturl=dict(type='str'), clttimeout=dict(type='float'), precedence=dict( type='str', choices=[ 'RULE', 'URL', ] ), casesensitive=dict(type='bool'), somethod=dict( type='str', choices=[ 'CONNECTION', 'DYNAMICCONNECTION', 'BANDWIDTH', 'HEALTH', 'NONE', ] ), sopersistence=dict( type='str', choices=[ 'enabled', 'disabled', ] ), sopersistencetimeout=dict(type='float'), sothreshold=dict(type='float'), sobackupaction=dict( type='str', choices=[ 'DROP', 'ACCEPT', 'REDIRECT', ] ), redirectportrewrite=dict( type='str', choices=[ 'enabled', 'disabled', ] ), downstateflush=dict( type='str', choices=[ 'enabled', 'disabled', ] ), disableprimaryondown=dict( type='str', choices=[ 'enabled', 'disabled', ] ), insertvserveripport=dict( type='str', choices=[ 'OFF', 'VIPADDR', 'V6TOV4MAPPING', ] ), vipheader=dict(type='str'), rtspnat=dict(type='bool'), authenticationhost=dict(type='str'), authentication=dict(type='bool'), listenpolicy=dict(type='str'), authn401=dict(type='bool'), authnvsname=dict(type='str'), push=dict( type='str', choices=[ 'enabled', 'disabled', ] ), pushvserver=dict(type='str'), pushlabel=dict(type='str'), pushmulticlients=dict(type='bool'), tcpprofilename=dict(type='str'), httpprofilename=dict(type='str'), dbprofilename=dict(type='str'), oracleserverversion=dict( type='str', choices=[ '10G', '11G', ] ), comment=dict(type='str'), mssqlserverversion=dict( type='str', choices=[ '70', '2000', '2000SP1', '2005', '2008', '2008R2', '2012', '2014', ] ), l2conn=dict(type='bool'), mysqlprotocolversion=dict(type='float'), mysqlserverversion=dict(type='str'), mysqlcharacterset=dict(type='float'), mysqlservercapabilities=dict(type='float'), appflowlog=dict( type='str', choices=[ 'enabled', 'disabled', ] ), netprofile=dict(type='str'), icmpvsrresponse=dict( type='str', choices=[ 'PASSIVE', 'ACTIVE', ] ), rhistate=dict( type='str', choices=[ 'PASSIVE', 'ACTIVE', ] ), authnprofile=dict(type='str'), dnsprofilename=dict(type='str'), ) hand_inserted_arguments = dict( policybindings=dict(type='list'), ssl_certkey=dict(type='str'), disabled=dict( type='bool', default=False ), lbvserver=dict(type='str'), ) argument_spec = dict() argument_spec.update(netscaler_common_arguments) argument_spec.update(module_specific_arguments) argument_spec.update(hand_inserted_arguments) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) module_result = dict( changed=False, failed=False, loglines=loglines, ) # Fail the module if 
imports failed if not PYTHON_SDK_IMPORTED: module.fail_json(msg='Could not load nitro python sdk') # Fallthrough to rest of execution client = get_nitro_client(module) try: client.login() except nitro_exception as e: msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message) module.fail_json(msg=msg) except Exception as e: if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>": module.fail_json(msg='Connection error %s' % str(e)) elif str(type(e)) == "<class 'requests.exceptions.SSLError'>": module.fail_json(msg='SSL Error %s' % str(e)) else: module.fail_json(msg='Unexpected error during login %s' % str(e)) readwrite_attrs = [ 'name', 'td', 'servicetype', 'ipv46', 'dnsrecordtype', 'ippattern', 'ipmask', 'range', 'port', 'stateupdate', 'cacheable', 'redirecturl', 'clttimeout', 'precedence', 'casesensitive', 'somethod', 'sopersistence', 'sopersistencetimeout', 'sothreshold', 'sobackupaction', 'redirectportrewrite', 'downstateflush', 'disableprimaryondown', 'insertvserveripport', 'vipheader', 'rtspnat', 'authenticationhost', 'authentication', 'listenpolicy', 'authn401', 'authnvsname', 'push', 'pushvserver', 'pushlabel', 'pushmulticlients', 'tcpprofilename', 'httpprofilename', 'dbprofilename', 'oracleserverversion', 'comment', 'mssqlserverversion', 'l2conn', 'mysqlprotocolversion', 'mysqlserverversion', 'mysqlcharacterset', 'mysqlservercapabilities', 'appflowlog', 'netprofile', 'icmpvsrresponse', 'rhistate', 'authnprofile', 'dnsprofilename', ] readonly_attrs = [ 'ip', 'value', 'ngname', 'type', 'curstate', 'sc', 'status', 'cachetype', 'redirect', 'homepage', 'dnsvservername', 'domain', 'policyname', 'servicename', 'weight', 'cachevserver', 'targetvserver', 'priority', 'url', 'gotopriorityexpression', 'bindpoint', 'invoke', 'labeltype', 'labelname', 'gt2gb', 'statechangetimesec', 'statechangetimemsec', 'tickssincelaststatechange', 'ruletype', 'lbvserver', 'targetlbvserver', ] immutable_attrs = [ 'name', 'td', 'servicetype', 'ipv46', 'targettype', 'range', 'port', 'state', 'vipheader', 'newname', ] transforms = { 'cacheable': ['bool_yes_no'], 'rtspnat': ['bool_on_off'], 'authn401': ['bool_on_off'], 'casesensitive': ['bool_on_off'], 'authentication': ['bool_on_off'], 'l2conn': ['bool_on_off'], 'pushmulticlients': ['bool_yes_no'], 'stateupdate': [lambda v: v.upper()], 'sopersistence': [lambda v: v.upper()], 'redirectportrewrite': [lambda v: v.upper()], 'downstateflush': [lambda v: v.upper()], 'disableprimaryondown': [lambda v: v.upper()], 'push': [lambda v: v.upper()], 'appflowlog': [lambda v: v.upper()], } # Instantiate config proxy csvserver_proxy = ConfigProxy( actual=csvserver(), client=client, attribute_values_dict=module.params, readwrite_attrs=readwrite_attrs, readonly_attrs=readonly_attrs, immutable_attrs=immutable_attrs, transforms=transforms, ) try: ensure_feature_is_enabled(client, 'CS') # Apply appropriate state if module.params['state'] == 'present': log('Applying actions for state present') if not cs_vserver_exists(client, module): if not module.check_mode: csvserver_proxy.add() if module.params['save_config']: client.save_config() module_result['changed'] = True elif not cs_vserver_identical(client, module, csvserver_proxy): # Check if we try to change value of immutable attributes immutables_changed = get_immutables_intersection(csvserver_proxy, diff_list(client, module, csvserver_proxy).keys()) if immutables_changed != []: module.fail_json( msg='Cannot update immutable attributes %s' % (immutables_changed,), diff=diff_list(client, 
module, csvserver_proxy), **module_result ) if not module.check_mode: csvserver_proxy.update() if module.params['save_config']: client.save_config() module_result['changed'] = True else: module_result['changed'] = False # Check policybindings if not cs_policybindings_identical(client, module): if not module.check_mode: sync_cs_policybindings(client, module) if module.params['save_config']: client.save_config() module_result['changed'] = True if module.params['servicetype'] != 'SSL' and module.params['ssl_certkey'] is not None: module.fail_json(msg='ssl_certkey is applicable only to SSL vservers', **module_result) # Check ssl certkey bindings if module.params['servicetype'] == 'SSL': if not ssl_certkey_bindings_identical(client, module): if not module.check_mode: ssl_certkey_bindings_sync(client, module) module_result['changed'] = True # Check default lb vserver if not default_lb_vserver_identical(client, module): if not module.check_mode: sync_default_lb_vserver(client, module) module_result['changed'] = True if not module.check_mode: res = do_state_change(client, module, csvserver_proxy) if res.errorcode != 0: msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message) module.fail_json(msg=msg, **module_result) # Sanity check for state if not module.check_mode: log('Sanity checks for state present') if not cs_vserver_exists(client, module): module.fail_json(msg='CS vserver does not exist', **module_result) if not cs_vserver_identical(client, module, csvserver_proxy): module.fail_json(msg='CS vserver differs from configured', diff=diff_list(client, module, csvserver_proxy), **module_result) if not cs_policybindings_identical(client, module): module.fail_json(msg='Policy bindings differ') if module.params['servicetype'] == 'SSL': if not ssl_certkey_bindings_identical(client, module): module.fail_json(msg='sll certkey bindings not identical', **module_result) elif module.params['state'] == 'absent': log('Applying actions for state absent') if cs_vserver_exists(client, module): if not module.check_mode: csvserver_proxy.delete() if module.params['save_config']: client.save_config() module_result['changed'] = True else: module_result['changed'] = False # Sanity check for state if not module.check_mode: log('Sanity checks for state absent') if cs_vserver_exists(client, module): module.fail_json(msg='CS vserver still exists', **module_result) except nitro_exception as e: msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message) module.fail_json(msg=msg, **module_result) client.logout() module.exit_json(**module_result) if __name__ == "__main__": main()
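The three set-difference passes in sync_cs_policybindings generalize to any reconcile-by-key job. A minimal, generic sketch of that pattern (the names here are hypothetical, not NITRO API):

def sync_by_key(actual, configured, add, delete):
    """Reconcile two {key: item} dicts: delete stale, add missing, recreate drifted."""
    for key in set(actual) - set(configured):
        delete(actual[key])             # on the device but no longer desired
    for key in set(configured) - set(actual):
        add(configured[key])            # desired but missing on the device
    for key in set(configured) & set(actual):
        if configured[key] != actual[key]:
            delete(actual[key])         # drifted: recreate with desired values
            add(configured[key])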
wpjesus/codematch
refs/heads/dev
ietf/ipr/sitemaps.py
2
# Copyright The IETF Trust 2007, All Rights Reserved
#
from django.contrib.sitemaps import GenericSitemap
from ietf.ipr.models import IprDisclosureBase

# changefreq is "never except when it gets updated or withdrawn",
# so skip giving one
queryset = IprDisclosureBase.objects.filter(state__in=('posted', 'removed'))
archive = {'queryset': queryset, 'date_field': 'time', 'allow_empty': True}
IPRMap = GenericSitemap(archive)
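A minimal sketch of exposing IPRMap through Django's sitemap view; the URL pattern and the 'ipr' section key are illustrative assumptions, not taken from the IETF URL configuration:

from django.conf.urls import url
from django.contrib.sitemaps import views as sitemap_views

urlpatterns = [
    # Serve /sitemap.xml from the GenericSitemap defined above.
    url(r'^sitemap\.xml$', sitemap_views.sitemap, {'sitemaps': {'ipr': IPRMap}}),
]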
KurtDeGreeff/infernal-twin
refs/heads/master
build/pip/build/lib.linux-i686-2.7/pip/_vendor/requests/packages/urllib3/util/response.py
928
def is_fp_closed(obj):
    """
    Checks whether a given file-like object is closed.

    :param obj:
        The file-like object to check.
    """
    try:
        # Check via the official file-like-object way.
        return obj.closed
    except AttributeError:
        pass

    try:
        # Check if the object is a container for another file-like object that
        # gets released on exhaustion (e.g. HTTPResponse).
        return obj.fp is None
    except AttributeError:
        pass

    raise ValueError("Unable to determine whether fp is closed.")
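A quick sanity check of is_fp_closed against an in-memory file; BytesIO exposes the .closed attribute that the first branch inspects:

from io import BytesIO

fp = BytesIO(b"data")
assert not is_fp_closed(fp)  # open object: obj.closed is False
fp.close()
assert is_fp_closed(fp)      # closed object: obj.closed is True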
Alberto-Beralix/Beralix
refs/heads/master
i386-squashfs-root/usr/lib/python2.7/dist-packages/butterfly/capabilities.py
2
../../../../share/pyshared/butterfly/capabilities.py
chrisdearman/micropython
refs/heads/master
tests/basics/array_construct2.py
24
try:
    from array import array
except ImportError:
    print("SKIP")
    raise SystemExit

# construct from something with unknown length (requires generators)
print(array('i', (i for i in range(10))))
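For comparison, a sketch of the same construction on CPython, where array() likewise consumes an iterator of unknown length by growing as it reads:

from array import array

squares = array('i', (i * i for i in range(5)))
print(squares)  # array('i', [0, 1, 4, 9, 16])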
40223131/W17test
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/_functools.py
727
def partial(func, *args, **keywords):
    def newfunc(*fargs, **fkeywords):
        newkeywords = keywords.copy()
        newkeywords.update(fkeywords)
        return func(*(args + fargs), **newkeywords)
    newfunc.func = func
    newfunc.args = args
    newfunc.keywords = keywords
    return newfunc


def reduce(func, iterable, initializer=None):
    args = iter(iterable)
    if initializer is not None:
        res = initializer
    else:
        res = next(args)
    while True:
        try:
            res = func(res, next(args))
        except StopIteration:
            return res
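A short usage sketch of the pure-Python partial() and reduce() defined above:

def add(a, b):
    return a + b

add3 = partial(add, 3)
print(add3(4))                    # 7: positional args (3,) + call args (4,)
print(reduce(add, [1, 2, 3, 4]))  # 10: ((1 + 2) + 3) + 4
print(reduce(add, [], 0))         # 0: empty iterable returns the initializer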
xfournet/intellij-community
refs/heads/master
python/testData/intentions/PyConvertToFStringIntentionTest/formatMethodIndexContainsQuoteOfMultilineHost_after.py
31
f'''{d["'"]}'''
maxive/erp
refs/heads/master
addons/website_mail/models/mail_message.py
17
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models, _
from odoo.osv import expression
from odoo.tools import html2plaintext
from odoo.exceptions import AccessError


class MailMessage(models.Model):
    _inherit = 'mail.message'

    @api.model
    def default_get(self, fields_list):
        defaults = super(MailMessage, self).default_get(fields_list)

        # Note: explicitly implemented in default_get() instead of field default,
        # to avoid setting to True for all existing messages during upgrades.
        # TODO: this default should probably be dynamic according to the model
        # on which the messages are attached, thus moved to create().
        if 'website_published' in fields_list:
            defaults.setdefault('website_published', True)

        return defaults

    description = fields.Char(compute="_compute_description",
                              help='Message description: either the subject, or the beginning of the body')
    website_published = fields.Boolean(string='Published', help="Visible on the website as a comment", copy=False)

    @api.multi
    def _compute_description(self):
        for message in self:
            if message.subject:
                message.description = message.subject
            else:
                plaintext_ct = '' if not message.body else html2plaintext(message.body)
                message.description = plaintext_ct[:30] + '%s' % (' [...]' if len(plaintext_ct) >= 30 else '')

    @api.model
    def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):
        """ Override that adds specific access rights of mail.message, to restrict
        messages to published messages for public users. """
        if self.user_has_groups('base.group_public'):
            args = expression.AND([[('website_published', '=', True)], list(args)])

        return super(MailMessage, self)._search(args, offset=offset, limit=limit, order=order,
                                                count=count, access_rights_uid=access_rights_uid)

    @api.multi
    def check_access_rule(self, operation):
        """ Add Access rules of mail.message for non-employee user:
            - read:
                - raise if the type is comment and subtype NULL (internal note)
        """
        if self.user_has_groups('base.group_public'):
            self.env.cr.execute('SELECT id FROM "%s" WHERE website_published IS FALSE AND id = ANY (%%s)' % (self._table), (self.ids,))
            if self.env.cr.fetchall():
                raise AccessError(
                    _('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') %
                    (self._description, operation))
        return super(MailMessage, self).check_access_rule(operation=operation)

    @api.multi
    def _portal_message_format(self, fields_list):
        fields_list += ['website_published']
        return super(MailMessage, self)._portal_message_format(fields_list)
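A small sketch of how expression.AND combines domains, as used in the _search override above; it requires an Odoo environment, and the second domain is an illustrative placeholder:

from odoo.osv import expression

combined = expression.AND([[('website_published', '=', True)],
                           [('model', '=', 'res.partner')]])
# Prefix notation: ['&', ('website_published', '=', True), ('model', '=', 'res.partner')]
print(combined)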
frank-tancf/scikit-learn
refs/heads/master
examples/applications/wikipedia_principal_eigenvector.py
16
""" =============================== Wikipedia principal eigenvector =============================== A classical way to assert the relative importance of vertices in a graph is to compute the principal eigenvector of the adjacency matrix so as to assign to each vertex the values of the components of the first eigenvector as a centrality score: http://en.wikipedia.org/wiki/Eigenvector_centrality On the graph of webpages and links those values are called the PageRank scores by Google. The goal of this example is to analyze the graph of links inside wikipedia articles to rank articles by relative importance according to this eigenvector centrality. The traditional way to compute the principal eigenvector is to use the power iteration method: http://en.wikipedia.org/wiki/Power_iteration Here the computation is achieved thanks to Martinsson's Randomized SVD algorithm implemented in the scikit. The graph data is fetched from the DBpedia dumps. DBpedia is an extraction of the latent structured data of the Wikipedia content. """ # Author: Olivier Grisel <[email protected]> # License: BSD 3 clause from __future__ import print_function from bz2 import BZ2File import os from datetime import datetime from pprint import pprint from time import time import numpy as np from scipy import sparse from sklearn.decomposition import randomized_svd from sklearn.externals.joblib import Memory from sklearn.externals.six.moves.urllib.request import urlopen from sklearn.externals.six import iteritems print(__doc__) ############################################################################### # Where to download the data, if not already on disk redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2" redirects_filename = redirects_url.rsplit("/", 1)[1] page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2" page_links_filename = page_links_url.rsplit("/", 1)[1] resources = [ (redirects_url, redirects_filename), (page_links_url, page_links_filename), ] for url, filename in resources: if not os.path.exists(filename): print("Downloading data from '%s', please wait..." 
% url) opener = urlopen(url) open(filename, 'wb').write(opener.read()) print() ############################################################################### # Loading the redirect files memory = Memory(cachedir=".") def index(redirects, index_map, k): """Find the index of an article name after redirect resolution""" k = redirects.get(k, k) return index_map.setdefault(k, len(index_map)) DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/") SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1) def short_name(nt_uri): """Remove the < and > URI markers and the common URI prefix""" return nt_uri[SHORTNAME_SLICE] def get_redirects(redirects_filename): """Parse the redirections and build a transitively closed map out of it""" redirects = {} print("Parsing the NT redirect file") for l, line in enumerate(BZ2File(redirects_filename)): split = line.split() if len(split) != 4: print("ignoring malformed line: " + line) continue redirects[short_name(split[0])] = short_name(split[2]) if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) # compute the transitive closure print("Computing the transitive closure of the redirect relation") for l, source in enumerate(redirects.keys()): transitive_target = None target = redirects[source] seen = set([source]) while True: transitive_target = target target = redirects.get(target) if target is None or target in seen: break seen.add(target) redirects[source] = transitive_target if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) return redirects # disabling joblib as the pickling of large dicts seems much too slow #@memory.cache def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None): """Extract the adjacency graph as a scipy sparse matrix Redirects are resolved first. Returns X, the scipy sparse adjacency matrix, redirects as python dict from article names to article names and index_map a python dict from article names to python int (article indexes). 
""" print("Computing the redirect map") redirects = get_redirects(redirects_filename) print("Computing the integer index map") index_map = dict() links = list() for l, line in enumerate(BZ2File(page_links_filename)): split = line.split() if len(split) != 4: print("ignoring malformed line: " + line) continue i = index(redirects, index_map, short_name(split[0])) j = index(redirects, index_map, short_name(split[2])) links.append((i, j)) if l % 1000000 == 0: print("[%s] line: %08d" % (datetime.now().isoformat(), l)) if limit is not None and l >= limit - 1: break print("Computing the adjacency matrix") X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32) for i, j in links: X[i, j] = 1.0 del links print("Converting to CSR representation") X = X.tocsr() print("CSR conversion done") return X, redirects, index_map # stop after 5M links to make it possible to work in RAM X, redirects, index_map = get_adjacency_matrix( redirects_filename, page_links_filename, limit=5000000) names = dict((i, name) for name, i in iteritems(index_map)) print("Computing the principal singular vectors using randomized_svd") t0 = time() U, s, V = randomized_svd(X, 5, n_iter=3) print("done in %0.3fs" % (time() - t0)) # print the names of the wikipedia related strongest components of the the # principal singular vector which should be similar to the highest eigenvector print("Top wikipedia pages according to principal singular vectors") pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]]) pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]]) def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10): """Power iteration computation of the principal eigenvector This method is also known as Google PageRank and the implementation is based on the one from the NetworkX project (BSD licensed too) with copyrights by: Aric Hagberg <[email protected]> Dan Schult <[email protected]> Pieter Swart <[email protected]> """ n = X.shape[0] X = X.copy() incoming_counts = np.asarray(X.sum(axis=1)).ravel() print("Normalizing the graph") for i in incoming_counts.nonzero()[0]: X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i] dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel() scores = np.ones(n, dtype=np.float32) / n # initial guess for i in range(max_iter): print("power iteration #%d" % i) prev_scores = scores scores = (alpha * (scores * X + np.dot(dangle, prev_scores)) + (1 - alpha) * prev_scores.sum() / n) # check convergence: normalized l_inf norm scores_max = np.abs(scores).max() if scores_max == 0.0: scores_max = 1.0 err = np.abs(scores - prev_scores).max() / scores_max print("error: %0.6f" % err) if err < n * tol: return scores return scores print("Computing principal eigenvector score using a power iteration method") t0 = time() scores = centrality_scores(X, max_iter=100, tol=1e-10) print("done in %0.3fs" % (time() - t0)) pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
sugarlabs/sugar-toolkit-gtk3
refs/heads/master
src/sugar3/speech.py
1
# Copyright (C) 2011 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

import os
import logging
from gettext import gettext as _

import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gio
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
from gi.repository import GObject

_HAS_GST = True
try:
    gi.require_version('Gst', '1.0')
    from gi.repository import Gst
    Gst.init(None)
    Gst.parse_launch('espeak')
except BaseException:
    logging.error('Gst or the espeak plugin is not installed in the system.')
    _HAS_GST = False

from sugar3 import power

DEFAULT_PITCH = 0
DEFAULT_RATE = 0

_SAVE_TIMEOUT = 500

SPEECH_SCHEMA = 'org.sugarlabs.speech'

# These voice names are used to allow the translation of the voice names.
# If espeak adds new voices, we need to update this list.
translated_espeak_voices = {
    # Translators: http://en.wikipedia.org/wiki/Afrikaans_language
    'af': _('Afrikaans'),
    # Translators: http://en.wikipedia.org/wiki/Aragonese_language
    'an': _('Aragonese'),
    # Translators: http://en.wikipedia.org/wiki/Bulgarian_language
    'bg': _('Bulgarian'),
    # Translators: http://en.wikipedia.org/wiki/Bosnian_language
    'bs': _('Bosnian'),
    # Translators: http://en.wikipedia.org/wiki/Catalan_language
    'ca': _('Catalan'),
    # Translators: http://en.wikipedia.org/wiki/Czech_language
    'cs': _('Czech'),
    # Translators: http://en.wikipedia.org/wiki/Welsh_language
    'cy': _('Welsh'),
    # Translators: http://en.wikipedia.org/wiki/Danish_language
    'da': _('Danish'),
    # Translators: http://en.wikipedia.org/wiki/German_language
    'de': _('German'),
    # Translators: http://en.wikipedia.org/wiki/Modern_Greek
    'el': _('Greek'),
    'en': _('English'),
    # Translators: http://en.wikipedia.org/wiki/British_English
    'en_gb': _('English Britain'),
    # Translators: http://en.wikipedia.org/wiki/Scottish_English
    'en_sc': _('English scottish'),
    'en_uk-north': _('English-north'),
    # Translators: http://en.wikipedia.org/wiki/Received_Pronunciation
    'en_uk-rp': _('English_rp'),
    # Translators: http://en.wikipedia.org/wiki/West_Midlands_English
    'en_uk-wmids': _('English_wmids'),
    # Translators: http://en.wikipedia.org/wiki/American_English
    'en_us': _('English USA'),
    # Translators: http://en.wikipedia.org/wiki/Caribbean_English
    'en_wi': _('English West Indies'),
    # Translators: http://en.wikipedia.org/wiki/Esperanto
    'eo': _('Esperanto'),
    # Translators: http://en.wikipedia.org/wiki/Spanish_language
    'es': _('Spanish'),
    'es_la': _('Spanish latin american'),
    # Translators: http://en.wikipedia.org/wiki/Estonian_language
    'et': _('Estonian'),
    # Translators: http://en.wikipedia.org/wiki/Persian_language
    'fa': _('Farsi'),
    # Translators: http://en.wikipedia.org/wiki/Fingilish
    'fa_pin': _('Farsi-pinglish'),
    # Translators: http://en.wikipedia.org/wiki/Finnish_language
    'fi': _('Finnish'),
    # Translators: http://en.wikipedia.org/wiki/Belgian_French
    'fr_be': _('French belgium'),
    # Translators: http://en.wikipedia.org/wiki/French_language
    'fr_fr': _('French'),
    # Translators: http://en.wikipedia.org/wiki/Irish_language
    'ga': _('Irish-gaeilge'),
    # Translators: http://en.wikipedia.org/wiki/Ancient_Greek
    'grc': _('Greek-ancient'),
    # Translators: http://en.wikipedia.org/wiki/Hindi
    'hi': _('Hindi'),
    # Translators: http://en.wikipedia.org/wiki/Croatian_language
    'hr': _('Croatian'),
    # Translators: http://en.wikipedia.org/wiki/Hungarian_language
    'hu': _('Hungarian'),
    # Translators: http://en.wikipedia.org/wiki/Armenian_language
    'hy': _('Armenian'),
    # Translators: http://en.wikipedia.org/wiki/Western_Armenian
    'hy_west': _('Armenian (west)'),
    # Translators: http://en.wikipedia.org/wiki/Indonesian_language
    'id': _('Indonesian'),
    # Translators: http://en.wikipedia.org/wiki/Icelandic_language
    'is': _('Icelandic'),
    # Translators: http://en.wikipedia.org/wiki/Italian_language
    'it': _('Italian'),
    # Translators: http://en.wikipedia.org/wiki/Lojban
    'jbo': _('Lojban'),
    # Translators: http://en.wikipedia.org/wiki/Georgian_language
    'ka': _('Georgian'),
    # Translators: http://en.wikipedia.org/wiki/Kannada_language
    'kn': _('Kannada'),
    # Translators: http://en.wikipedia.org/wiki/Kurdish_language
    'ku': _('Kurdish'),
    # Translators: http://en.wikipedia.org/wiki/Latin
    'la': _('Latin'),
    # Translators: http://en.wikipedia.org/wiki/Lithuanian_language
    'lt': _('Lithuanian'),
    # Translators: http://en.wikipedia.org/wiki/Latvian_language
    'lv': _('Latvian'),
    # Translators: http://en.wikipedia.org/wiki/Macedonian_language
    'mk': _('Macedonian'),
    # Translators: http://en.wikipedia.org/wiki/Malayalam
    'ml': _('Malayalam'),
    # Translators: http://en.wikipedia.org/wiki/Malay_language
    'ms': _('Malay'),
    # Translators: http://en.wikipedia.org/wiki/Nepali_language
    'ne': _('Nepali'),
    # Translators: http://en.wikipedia.org/wiki/Dutch_language
    'nl': _('Dutch'),
    # Translators: http://en.wikipedia.org/wiki/Norwegian_language
    'no': _('Norwegian'),
    # Translators: http://en.wikipedia.org/wiki/Punjabi_language
    'pa': _('Punjabi'),
    # Translators: http://en.wikipedia.org/wiki/Polish_language
    'pl': _('Polish'),
    # Translators: http://en.wikipedia.org/wiki/Brazilian_Portuguese
    'pt_br': _('Portuguese (Brazil)'),
    # Translators: http://en.wikipedia.org/wiki/Portuguese_language
    'pt_pt': _('Portuguese (Portugal)'),
    # Translators: http://en.wikipedia.org/wiki/Romanian_language
    'ro': _('Romanian'),
    # Translators: http://en.wikipedia.org/wiki/Russian_language
    'ru': _('Russian'),
    # Translators: http://en.wikipedia.org/wiki/Slovak_language
    'sk': _('Slovak'),
    # Translators: http://en.wikipedia.org/wiki/Albanian_language
    'sq': _('Albanian'),
    # Translators: http://en.wikipedia.org/wiki/Serbian_language
    'sr': _('Serbian'),
    # Translators: http://en.wikipedia.org/wiki/Swedish_language
    'sv': _('Swedish'),
    # Translators: http://en.wikipedia.org/wiki/Swahili_language
    'sw': _('Swahili'),
    # Translators: http://en.wikipedia.org/wiki/Tamil_language
    'ta': _('Tamil'),
    # Translators: http://en.wikipedia.org/wiki/Turkish_language
    'tr': _('Turkish'),
    # Translators: http://en.wikipedia.org/wiki/Vietnamese_language
    'vi': _('Vietnam'),
    'vi_hue': _('Vietnam_hue'),
    'vi_sgn': _('Vietnam_sgn'),
    # Translators: http://en.wikipedia.org/wiki/Mandarin_Chinese
    'zh': _('Mandarin'),
    # Translators: http://en.wikipedia.org/wiki/Cantonese
    'zh_yue': _('Cantonese')
}


class SpeechManager(GObject.GObject):

    __gtype_name__ = 'SpeechManager'

    __gsignals__ = {
        'play': (GObject.SignalFlags.RUN_FIRST, None, []),
        'pause': (GObject.SignalFlags.RUN_FIRST, None, []),
        'stop': (GObject.SignalFlags.RUN_FIRST, None, []),
        'mark': (GObject.SignalFlags.RUN_FIRST, None, [str])
    }

    MIN_PITCH = -100
    MAX_PITCH = 100

    MIN_RATE = -100
    MAX_RATE = 100

    def __init__(self, **kwargs):
        GObject.GObject.__init__(self, **kwargs)
        self.player = None
        if not self.enabled():
            return
        self.player = GstSpeechPlayer()
        self.player.connect('play', self._update_state, 'play')
        self.player.connect('stop', self._update_state, 'stop')
        self.player.connect('pause', self._update_state, 'pause')
        self.player.connect('mark', self._mark_cb)
        self._default_voice_name = self.player.get_default_voice()
        self._pitch = DEFAULT_PITCH
        self._rate = DEFAULT_RATE
        self._is_playing = False
        self._is_paused = False
        self._save_timeout_id = -1
        self.restore()

    def enabled(self):
        return _HAS_GST

    def _update_state(self, player, signal):
        self._is_playing = (signal == 'play')
        self._is_paused = (signal == 'pause')
        self.emit(signal)

    def _mark_cb(self, player, value):
        self.emit('mark', value)

    def get_is_playing(self):
        return self._is_playing

    is_playing = GObject.Property(type=bool, getter=get_is_playing,
                                  setter=None, default=False)

    def get_is_paused(self):
        return self._is_paused

    is_paused = GObject.Property(type=bool, getter=get_is_paused,
                                 setter=None, default=False)

    def get_pitch(self):
        return self._pitch

    def get_rate(self):
        return self._rate

    def set_pitch(self, pitch):
        self._pitch = pitch
        if self._save_timeout_id != -1:
            GLib.source_remove(self._save_timeout_id)
        self._save_timeout_id = GLib.timeout_add(_SAVE_TIMEOUT, self.save)

    def set_rate(self, rate):
        self._rate = rate
        if self._save_timeout_id != -1:
            GLib.source_remove(self._save_timeout_id)
        self._save_timeout_id = GLib.timeout_add(_SAVE_TIMEOUT, self.save)

    def say_text(self, text, pitch=None, rate=None, lang_code=None):
        if pitch is None:
            pitch = self._pitch
        if rate is None:
            rate = self._rate
        if lang_code is None:
            voice_name = self._default_voice_name
        else:
            voice_name = self.player.get_all_voices()[lang_code]
        if text:
            logging.debug('PLAYING %r lang %r pitch %r rate %r',
                          text, voice_name, pitch, rate)
            self.player.speak(pitch, rate, voice_name, text)

    def say_selected_text(self):
        clipboard = Gtk.Clipboard.get(Gdk.SELECTION_PRIMARY)
        clipboard.request_text(self.__primary_selection_cb, None)

    def pause(self):
        self.player.pause_sound_device()

    def restart(self):
        self.player.restart_sound_device()

    def stop(self):
        self.player.stop_sound_device()

    def __primary_selection_cb(self, clipboard, text, user_data):
        self.say_text(text)

    def save(self):
        self._save_timeout_id = -1
        schema_source = Gio.SettingsSchemaSource.get_default()
        if schema_source.lookup(SPEECH_SCHEMA, True) is None:
            return False
        settings = Gio.Settings(SPEECH_SCHEMA)
        settings.set_int('pitch', self._pitch)
        settings.set_int('rate', self._rate)
        logging.debug('saving speech configuration pitch %s rate %s',
                      self._pitch, self._rate)
        return False

    def restore(self):
        schema_source = Gio.SettingsSchemaSource.get_default()
        if schema_source.lookup(SPEECH_SCHEMA, True) is None:
            return
        settings = Gio.Settings(SPEECH_SCHEMA)
        self._pitch = settings.get_int('pitch')
        self._rate = settings.get_int('rate')
        logging.debug('loading speech configuration pitch %s rate %s',
                      self._pitch, self._rate)

    def get_all_voices(self):
        if self.player:
            return self.player.get_all_voices()
        return None

    def get_all_traslated_voices(self):
        """ deprecated after 0.112, due to method name spelling error """
        if self.player:
            return self.player.get_all_translated_voices()
        return None

    def get_all_translated_voices(self):
        if self.player:
            return self.player.get_all_translated_voices()
        return None


class GstSpeechPlayer(GObject.GObject):

    __gsignals__ = {
        'play': (GObject.SignalFlags.RUN_FIRST, None, []),
        'pause': (GObject.SignalFlags.RUN_FIRST, None, []),
        'stop': (GObject.SignalFlags.RUN_FIRST, None, []),
        'mark': (GObject.SignalFlags.RUN_FIRST, None, [str])
    }

    def __init__(self):
        GObject.GObject.__init__(self)
        self.pipeline = None
        self._all_voices = None
        self._all_translated_voices = None

    def restart_sound_device(self):
        if self.pipeline is None:
            logging.debug('Trying to restart not initialized sound device')
            return

        power.get_power_manager().inhibit_suspend()
        self.pipeline.set_state(Gst.State.PLAYING)
        self.emit('play')

    def pause_sound_device(self):
        if self.pipeline is None:
            return

        self.pipeline.set_state(Gst.State.PAUSED)
        power.get_power_manager().restore_suspend()
        self.emit('pause')

    def stop_sound_device(self):
        if self.pipeline is None:
            return

        self.pipeline.set_state(Gst.State.NULL)
        power.get_power_manager().restore_suspend()
        self.emit('stop')

    def make_pipeline(self, command):
        if self.pipeline is not None:
            self.stop_sound_device()
            del self.pipeline

        self.pipeline = Gst.parse_launch(command)

        bus = self.pipeline.get_bus()
        bus.add_signal_watch()
        bus.connect('message', self.__pipe_message_cb)

    def __pipe_message_cb(self, bus, message):
        if message.type in (Gst.MessageType.EOS, Gst.MessageType.ERROR):
            self.pipeline.set_state(Gst.State.NULL)
            self.pipeline = None
            power.get_power_manager().restore_suspend()
            self.emit('stop')
        elif message.type is Gst.MessageType.ELEMENT and \
                message.get_structure().get_name() == 'espeak-mark':
            mark_value = message.get_structure().get_value('mark')
            self.emit('mark', mark_value)

    def speak(self, pitch, rate, voice_name, text):
        # TODO workaround for http://bugs.sugarlabs.org/ticket/1801
        if not [i for i in text if i.isalnum()]:
            return

        self.make_pipeline('espeak name=espeak ! autoaudiosink')
        src = self.pipeline.get_by_name('espeak')
        src.props.text = text
        src.props.pitch = pitch
        src.props.rate = rate
        src.props.voice = voice_name
        src.props.track = 2  # track for marks

        self.restart_sound_device()

    def get_all_voices(self):
        if self._all_voices is not None:
            return self._all_voices
        self._init_voices()
        return self._all_voices

    def get_all_translated_voices(self):
        if self._all_translated_voices is not None:
            return self._all_translated_voices
        self._init_voices()
        return self._all_translated_voices

    def _init_voices(self):
        self._all_voices = {}
        self._all_translated_voices = {}

        for voice in Gst.ElementFactory.make('espeak', None).props.voices:
            name, language, dialect = voice
            if dialect != 'none':
                lang_code = language + '_' + dialect
            else:
                lang_code = language

            self._all_voices[lang_code] = name
            if lang_code in translated_espeak_voices:
                self._all_translated_voices[lang_code] = \
                    translated_espeak_voices[lang_code]
            else:
                self._all_translated_voices[lang_code] = name

    def get_default_voice(self):
        """Try to figure out the default voice, from the current locale ($LANG)
           Fall back to espeak's voice called Default."""
        voices = self.get_all_voices()

        locale = os.environ.get('LANG', '')
        language_location = locale.split('.', 1)[0].lower()
        language = language_location.split('_')[0]
        # if the language is es but not es_es default to es_la (latin voice)
        if language == 'es' and language_location != 'es_es':
            language_location = 'es_la'

        best = voices.get(language_location) or voices.get(language) \
            or 'english'
        return best
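A minimal usage sketch of the classes above; it assumes a running GTK main loop and the GStreamer espeak element, the same prerequisites checked by _HAS_GST:

manager = SpeechManager()
if manager.enabled():
    manager.connect('stop', lambda m: logging.debug('finished speaking'))
    manager.say_text('hello world')  # default voice, pitch and rate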
mstuttgart/qdarkgray-stylesheet
refs/heads/develop
example/ui/example_pyqt5_ui.py
5
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'example.ui' # # Created by: PyQt5 UI code generator 5.7 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(1068, 824) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.centralwidget) self.verticalLayout_5.setObjectName("verticalLayout_5") self.tabWidget = QtWidgets.QTabWidget(self.centralwidget) self.tabWidget.setTabPosition(QtWidgets.QTabWidget.East) self.tabWidget.setTabsClosable(True) self.tabWidget.setObjectName("tabWidget") self.tab = QtWidgets.QWidget() self.tab.setObjectName("tab") self.gridLayout = QtWidgets.QGridLayout(self.tab) self.gridLayout.setContentsMargins(0, 0, 0, 0) self.gridLayout.setObjectName("gridLayout") self.groupBox = QtWidgets.QGroupBox(self.tab) self.groupBox.setObjectName("groupBox") self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.groupBox) self.verticalLayout_3.setObjectName("verticalLayout_3") self.toolBox = QtWidgets.QToolBox(self.groupBox) self.toolBox.setObjectName("toolBox") self.page = QtWidgets.QWidget() self.page.setGeometry(QtCore.QRect(0, 0, 98, 44)) self.page.setObjectName("page") self.gridLayout_4 = QtWidgets.QGridLayout(self.page) self.gridLayout_4.setContentsMargins(0, 0, 0, 0) self.gridLayout_4.setObjectName("gridLayout_4") self.lineEdit = QtWidgets.QLineEdit(self.page) self.lineEdit.setObjectName("lineEdit") self.gridLayout_4.addWidget(self.lineEdit, 0, 0, 1, 1) self.toolBox.addItem(self.page, "") self.page_2 = QtWidgets.QWidget() self.page_2.setGeometry(QtCore.QRect(0, 0, 697, 210)) self.page_2.setObjectName("page_2") self.gridLayout_5 = QtWidgets.QGridLayout(self.page_2) self.gridLayout_5.setContentsMargins(0, 0, 0, 0) self.gridLayout_5.setObjectName("gridLayout_5") self.listWidget = QtWidgets.QListWidget(self.page_2) self.listWidget.setObjectName("listWidget") item = QtWidgets.QListWidgetItem() self.listWidget.addItem(item) item = QtWidgets.QListWidgetItem() self.listWidget.addItem(item) item = QtWidgets.QListWidgetItem() self.listWidget.addItem(item) item = QtWidgets.QListWidgetItem() self.listWidget.addItem(item) item = QtWidgets.QListWidgetItem() self.listWidget.addItem(item) item = QtWidgets.QListWidgetItem() self.listWidget.addItem(item) item = QtWidgets.QListWidgetItem() self.listWidget.addItem(item) item = QtWidgets.QListWidgetItem() self.listWidget.addItem(item) self.gridLayout_5.addWidget(self.listWidget, 0, 0, 1, 1) self.toolBox.addItem(self.page_2, "") self.verticalLayout_3.addWidget(self.toolBox) self.gridLayout.addWidget(self.groupBox, 1, 0, 1, 1) self.tabWidget_2 = QtWidgets.QTabWidget(self.tab) self.tabWidget_2.setObjectName("tabWidget_2") self.tab_3 = QtWidgets.QWidget() self.tab_3.setObjectName("tab_3") self.gridLayout_6 = QtWidgets.QGridLayout(self.tab_3) self.gridLayout_6.setContentsMargins(0, 0, 0, 0) self.gridLayout_6.setObjectName("gridLayout_6") self.checkableButton = QtWidgets.QPushButton(self.tab_3) self.checkableButton.setCheckable(True) self.checkableButton.setChecked(True) self.checkableButton.setObjectName("checkableButton") self.gridLayout_6.addWidget(self.checkableButton, 1, 0, 1, 1) self.pushButton = QtWidgets.QPushButton(self.tab_3) self.pushButton.setObjectName("pushButton") self.gridLayout_6.addWidget(self.pushButton, 0, 0, 1, 1) self.pushButton_5 = 
QtWidgets.QPushButton(self.tab_3) self.pushButton_5.setObjectName("pushButton_5") self.gridLayout_6.addWidget(self.pushButton_5, 2, 0, 1, 1) self.tabWidget_2.addTab(self.tab_3, "") self.tab_5 = QtWidgets.QWidget() self.tab_5.setObjectName("tab_5") self.gridLayout_7 = QtWidgets.QGridLayout(self.tab_5) self.gridLayout_7.setContentsMargins(0, 0, 0, 0) self.gridLayout_7.setObjectName("gridLayout_7") self.tableWidget = QtWidgets.QTableWidget(self.tab_5) self.tableWidget.setObjectName("tableWidget") self.tableWidget.setColumnCount(2) self.tableWidget.setRowCount(4) item = QtWidgets.QTableWidgetItem() self.tableWidget.setVerticalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setVerticalHeaderItem(1, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setVerticalHeaderItem(2, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setVerticalHeaderItem(3, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(0, item) item = QtWidgets.QTableWidgetItem() self.tableWidget.setHorizontalHeaderItem(1, item) self.gridLayout_7.addWidget(self.tableWidget, 0, 0, 1, 1) self.tabWidget_2.addTab(self.tab_5, "") self.tab_4 = QtWidgets.QWidget() self.tab_4.setObjectName("tab_4") self.tabWidget_2.addTab(self.tab_4, "") self.gridLayout.addWidget(self.tabWidget_2, 0, 0, 1, 1) self.dateEdit = QtWidgets.QDateEdit(self.tab) self.dateEdit.setObjectName("dateEdit") self.gridLayout.addWidget(self.dateEdit, 2, 0, 1, 1) self.tabWidget.addTab(self.tab, "") self.tab_2 = QtWidgets.QWidget() self.tab_2.setObjectName("tab_2") self.gridLayout_2 = QtWidgets.QGridLayout(self.tab_2) self.gridLayout_2.setContentsMargins(0, 0, 0, 0) self.gridLayout_2.setObjectName("gridLayout_2") self.groupBox_2 = QtWidgets.QGroupBox(self.tab_2) self.groupBox_2.setObjectName("groupBox_2") self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox_2) self.verticalLayout_4.setObjectName("verticalLayout_4") self.label = QtWidgets.QLabel(self.groupBox_2) self.label.setObjectName("label") self.verticalLayout_4.addWidget(self.label) self.radioButton = QtWidgets.QRadioButton(self.groupBox_2) self.radioButton.setObjectName("radioButton") self.verticalLayout_4.addWidget(self.radioButton) self.checkBox = QtWidgets.QCheckBox(self.groupBox_2) self.checkBox.setObjectName("checkBox") self.verticalLayout_4.addWidget(self.checkBox) self.checkBox_2 = QtWidgets.QCheckBox(self.groupBox_2) self.checkBox_2.setTristate(True) self.checkBox_2.setObjectName("checkBox_2") self.verticalLayout_4.addWidget(self.checkBox_2) self.treeWidget = QtWidgets.QTreeWidget(self.groupBox_2) self.treeWidget.setObjectName("treeWidget") item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget) item_0 = QtWidgets.QTreeWidgetItem(self.treeWidget) self.verticalLayout_4.addWidget(self.treeWidget) self.gridLayout_2.addWidget(self.groupBox_2, 0, 0, 1, 1) self.tabWidget.addTab(self.tab_2, "") self.verticalLayout_5.addWidget(self.tabWidget) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_2.setObjectName("pushButton_2") self.horizontalLayout.addWidget(self.pushButton_2) self.bt_delay_popup = QtWidgets.QToolButton(self.centralwidget) self.bt_delay_popup.setObjectName("bt_delay_popup") self.horizontalLayout.addWidget(self.bt_delay_popup) self.bt_instant_popup = QtWidgets.QToolButton(self.centralwidget) self.bt_instant_popup.setPopupMode(QtWidgets.QToolButton.InstantPopup) 
self.bt_instant_popup.setObjectName("bt_instant_popup") self.horizontalLayout.addWidget(self.bt_instant_popup) self.bt_menu_button_popup = QtWidgets.QToolButton(self.centralwidget) self.bt_menu_button_popup.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup) self.bt_menu_button_popup.setObjectName("bt_menu_button_popup") self.horizontalLayout.addWidget(self.bt_menu_button_popup) self.line_2 = QtWidgets.QFrame(self.centralwidget) self.line_2.setFrameShape(QtWidgets.QFrame.VLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.horizontalLayout.addWidget(self.line_2) self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget) self.pushButton_3.setEnabled(False) self.pushButton_3.setObjectName("pushButton_3") self.horizontalLayout.addWidget(self.pushButton_3) self.doubleSpinBox = QtWidgets.QDoubleSpinBox(self.centralwidget) self.doubleSpinBox.setObjectName("doubleSpinBox") self.horizontalLayout.addWidget(self.doubleSpinBox) self.toolButton = QtWidgets.QToolButton(self.centralwidget) self.toolButton.setPopupMode(QtWidgets.QToolButton.InstantPopup) self.toolButton.setObjectName("toolButton") self.horizontalLayout.addWidget(self.toolButton) self.verticalLayout_5.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 1068, 23)) self.menubar.setObjectName("menubar") self.menuMenu = QtWidgets.QMenu(self.menubar) self.menuMenu.setObjectName("menuMenu") self.menuSubmenu_2 = QtWidgets.QMenu(self.menuMenu) self.menuSubmenu_2.setObjectName("menuSubmenu_2") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName("statusbar") MainWindow.setStatusBar(self.statusbar) self.dockWidget1 = QtWidgets.QDockWidget(MainWindow) self.dockWidget1.setObjectName("dockWidget1") self.dockWidgetContents = QtWidgets.QWidget() self.dockWidgetContents.setObjectName("dockWidgetContents") self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.dockWidgetContents) self.verticalLayout_2.setContentsMargins(0, 0, 0, 0) self.verticalLayout_2.setObjectName("verticalLayout_2") self.verticalLayout = QtWidgets.QVBoxLayout() self.verticalLayout.setObjectName("verticalLayout") self.comboBox = QtWidgets.QComboBox(self.dockWidgetContents) self.comboBox.setObjectName("comboBox") self.comboBox.addItem("") self.comboBox.addItem("") self.verticalLayout.addWidget(self.comboBox) self.horizontalSlider = QtWidgets.QSlider(self.dockWidgetContents) self.horizontalSlider.setOrientation(QtCore.Qt.Horizontal) self.horizontalSlider.setObjectName("horizontalSlider") self.verticalLayout.addWidget(self.horizontalSlider) self.textEdit = QtWidgets.QTextEdit(self.dockWidgetContents) self.textEdit.setObjectName("textEdit") self.verticalLayout.addWidget(self.textEdit) self.line = QtWidgets.QFrame(self.dockWidgetContents) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.verticalLayout.addWidget(self.line) self.progressBar = QtWidgets.QProgressBar(self.dockWidgetContents) self.progressBar.setProperty("value", 24) self.progressBar.setObjectName("progressBar") self.verticalLayout.addWidget(self.progressBar) self.verticalLayout_2.addLayout(self.verticalLayout) self.frame = QtWidgets.QFrame(self.dockWidgetContents) self.frame.setMinimumSize(QtCore.QSize(0, 100)) self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel) self.frame.setFrameShadow(QtWidgets.QFrame.Raised) 
self.frame.setLineWidth(3) self.frame.setObjectName("frame") self.verticalLayout_2.addWidget(self.frame) self.dockWidget1.setWidget(self.dockWidgetContents) MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget1) self.toolBar = QtWidgets.QToolBar(MainWindow) self.toolBar.setObjectName("toolBar") MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar) self.dockWidget2 = QtWidgets.QDockWidget(MainWindow) self.dockWidget2.setObjectName("dockWidget2") self.dockWidgetContents_2 = QtWidgets.QWidget() self.dockWidgetContents_2.setObjectName("dockWidgetContents_2") self.gridLayout_3 = QtWidgets.QGridLayout(self.dockWidgetContents_2) self.gridLayout_3.setContentsMargins(0, 0, 0, 0) self.gridLayout_3.setObjectName("gridLayout_3") self.verticalSlider = QtWidgets.QSlider(self.dockWidgetContents_2) self.verticalSlider.setOrientation(QtCore.Qt.Vertical) self.verticalSlider.setObjectName("verticalSlider") self.gridLayout_3.addWidget(self.verticalSlider, 0, 0, 1, 1) self.dockWidget2.setWidget(self.dockWidgetContents_2) MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(1), self.dockWidget2) self.actionAction = QtWidgets.QAction(MainWindow) self.actionAction.setObjectName("actionAction") self.actionSub_menu = QtWidgets.QAction(MainWindow) self.actionSub_menu.setObjectName("actionSub_menu") self.actionAction_C = QtWidgets.QAction(MainWindow) self.actionAction_C.setObjectName("actionAction_C") self.menuSubmenu_2.addAction(self.actionSub_menu) self.menuSubmenu_2.addAction(self.actionAction_C) self.menuMenu.addAction(self.actionAction) self.menuMenu.addAction(self.menuSubmenu_2.menuAction()) self.menubar.addAction(self.menuMenu.menuAction()) self.toolBar.addAction(self.actionAction) self.toolBar.addAction(self.actionSub_menu) self.retranslateUi(MainWindow) self.tabWidget.setCurrentIndex(0) self.toolBox.setCurrentIndex(1) self.tabWidget_2.setCurrentIndex(0) QtCore.QMetaObject.connectSlotsByName(MainWindow) MainWindow.setTabOrder(self.pushButton, self.checkableButton) MainWindow.setTabOrder(self.checkableButton, self.pushButton_5) MainWindow.setTabOrder(self.pushButton_5, self.tabWidget_2) MainWindow.setTabOrder(self.tabWidget_2, self.tableWidget) MainWindow.setTabOrder(self.tableWidget, self.radioButton) MainWindow.setTabOrder(self.radioButton, self.checkBox) MainWindow.setTabOrder(self.checkBox, self.checkBox_2) MainWindow.setTabOrder(self.checkBox_2, self.treeWidget) MainWindow.setTabOrder(self.treeWidget, self.pushButton_2) MainWindow.setTabOrder(self.pushButton_2, self.bt_delay_popup) MainWindow.setTabOrder(self.bt_delay_popup, self.bt_instant_popup) MainWindow.setTabOrder(self.bt_instant_popup, self.bt_menu_button_popup) MainWindow.setTabOrder(self.bt_menu_button_popup, self.pushButton_3) MainWindow.setTabOrder(self.pushButton_3, self.doubleSpinBox) MainWindow.setTabOrder(self.doubleSpinBox, self.toolButton) MainWindow.setTabOrder(self.toolButton, self.comboBox) MainWindow.setTabOrder(self.comboBox, self.horizontalSlider) MainWindow.setTabOrder(self.horizontalSlider, self.textEdit) MainWindow.setTabOrder(self.textEdit, self.verticalSlider) MainWindow.setTabOrder(self.verticalSlider, self.tabWidget) MainWindow.setTabOrder(self.tabWidget, self.lineEdit) MainWindow.setTabOrder(self.lineEdit, self.listWidget) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) self.groupBox.setTitle(_translate("MainWindow", "ToolBox")) self.toolBox.setItemText(self.toolBox.indexOf(self.page), 
_translate("MainWindow", "Page 1")) __sortingEnabled = self.listWidget.isSortingEnabled() self.listWidget.setSortingEnabled(False) item = self.listWidget.item(0) item.setText(_translate("MainWindow", "New Item")) item = self.listWidget.item(1) item.setText(_translate("MainWindow", "New Item")) item = self.listWidget.item(2) item.setText(_translate("MainWindow", "New Item")) item = self.listWidget.item(3) item.setText(_translate("MainWindow", "New Item")) item = self.listWidget.item(4) item.setText(_translate("MainWindow", "New Item")) item = self.listWidget.item(5) item.setText(_translate("MainWindow", "New Item")) item = self.listWidget.item(6) item.setText(_translate("MainWindow", "New Item")) item = self.listWidget.item(7) item.setText(_translate("MainWindow", "New Item")) self.listWidget.setSortingEnabled(__sortingEnabled) self.toolBox.setItemText(self.toolBox.indexOf(self.page_2), _translate("MainWindow", "Page 2")) self.checkableButton.setText(_translate("MainWindow", "Checkable button")) self.pushButton.setText(_translate("MainWindow", "PushButton")) self.pushButton_5.setText(_translate("MainWindow", "PushButton")) self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_3), _translate("MainWindow", "Tab 1")) item = self.tableWidget.verticalHeaderItem(0) item.setText(_translate("MainWindow", "New Row")) item = self.tableWidget.verticalHeaderItem(1) item.setText(_translate("MainWindow", "New Row")) item = self.tableWidget.verticalHeaderItem(2) item.setText(_translate("MainWindow", "New Row")) item = self.tableWidget.verticalHeaderItem(3) item.setText(_translate("MainWindow", "New Row")) item = self.tableWidget.horizontalHeaderItem(0) item.setText(_translate("MainWindow", "New Column")) item = self.tableWidget.horizontalHeaderItem(1) item.setText(_translate("MainWindow", "New Column 2")) self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_5), _translate("MainWindow", "Page")) self.tabWidget_2.setTabText(self.tabWidget_2.indexOf(self.tab_4), _translate("MainWindow", "Tab 2")) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Tab 1")) self.groupBox_2.setTitle(_translate("MainWindow", "GroupBox")) self.label.setText(_translate("MainWindow", "TextLabel")) self.radioButton.setText(_translate("MainWindow", "RadioB&utton")) self.checkBox.setText(_translate("MainWindow", "CheckBox")) self.checkBox_2.setText(_translate("MainWindow", "CheckBox Tristate")) self.treeWidget.headerItem().setText(0, _translate("MainWindow", "qdz")) __sortingEnabled = self.treeWidget.isSortingEnabled() self.treeWidget.setSortingEnabled(False) self.treeWidget.topLevelItem(0).setText(0, _translate("MainWindow", "qzd")) self.treeWidget.topLevelItem(1).setText(0, _translate("MainWindow", "effefe")) self.treeWidget.setSortingEnabled(__sortingEnabled) self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "Tab 2")) self.pushButton_2.setText(_translate("MainWindow", "PushButton")) self.bt_delay_popup.setText(_translate("MainWindow", "Delayed popup ")) self.bt_instant_popup.setText(_translate("MainWindow", "Instant popup")) self.bt_menu_button_popup.setText(_translate("MainWindow", "MenuButtonPopup")) self.pushButton_3.setText(_translate("MainWindow", "Disabled")) self.toolButton.setText(_translate("MainWindow", "...")) self.menuMenu.setTitle(_translate("MainWindow", "&Menu")) self.menuSubmenu_2.setTitle(_translate("MainWindow", "&Submenu 2")) self.dockWidget1.setWindowTitle(_translate("MainWindow", "&Dock widget 1")) 
self.comboBox.setItemText(0, _translate("MainWindow", "Item 0")) self.comboBox.setItemText(1, _translate("MainWindow", "Item 2")) self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar")) self.dockWidget2.setWindowTitle(_translate("MainWindow", "Dock widget &2")) self.actionAction.setText(_translate("MainWindow", "&Action")) self.actionSub_menu.setText(_translate("MainWindow", "&Action B")) self.actionSub_menu.setToolTip(_translate("MainWindow", "submenu")) self.actionAction_C.setText(_translate("MainWindow", "Action &C"))
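The two methods above are standard pyuic5 output: setupUi builds the widget tree, menus, docks and tab order, while retranslateUi installs every user-visible string so they can be re-applied on a locale change. A minimal sketch of how such a generated class is normally hosted, assuming the usual pyuic5 naming (a class Ui_MainWindow in a module called, say, main_window_ui; both names are hypothetical here):

# Minimal host application for a pyuic5-generated form class.
import sys
from PyQt5 import QtWidgets
from main_window_ui import Ui_MainWindow  # hypothetical module/class names

class MainWindow(QtWidgets.QMainWindow):
    def __init__(self):
        super().__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)  # builds widgets, menus, docks, tab order

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())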
tbinjiayou/Odoo
refs/heads/master
addons/mrp/__openerp__.py
52
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'MRP', 'version': '1.1', 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/manufacturing', 'category': 'Manufacturing', 'sequence': 18, 'summary': 'Manufacturing Orders, Bill of Materials, Routing', 'images': ['images/bill_of_materials.jpeg', 'images/manufacturing_order.jpeg', 'images/planning_manufacturing_order.jpeg', 'images/manufacturing_analysis.jpeg', 'images/routings.jpeg','images/work_centers.jpeg'], 'depends': ['product', 'procurement', 'stock_account', 'resource', 'report'], 'description': """ Manage the Manufacturing process in OpenERP =========================================== The manufacturing module allows you to cover planning, ordering, stocks and the manufacturing or assembly of products from raw materials and components. It handles the consumption and production of products according to a bill of materials and the necessary operations on machinery, tools or human resources according to routings. It supports complete integration and planification of stockable goods, consumables or services. Services are completely integrated with the rest of the software. For instance, you can set up a sub-contracting service in a bill of materials to automatically purchase on order the assembly of your production. Key Features ------------ * Make to Stock/Make to Order * Multi-level bill of materials, no limit * Multi-level routing, no limit * Routing and work center integrated with analytic accounting * Periodical scheduler computation * Allows to browse bills of materials in a complete structure that includes child and phantom bills of materials Dashboard / Reports for MRP will include: ----------------------------------------- * Procurements in Exception (Graph) * Stock Value Variation (Graph) * Work Order Analysis """, 'data': [ 'security/mrp_security.xml', 'security/ir.model.access.csv', 'mrp_workflow.xml', 'mrp_data.xml', 'wizard/mrp_product_produce_view.xml', 'wizard/change_production_qty_view.xml', 'wizard/mrp_price_view.xml', 'wizard/mrp_workcenter_load_view.xml', 'wizard/stock_move_view.xml', 'mrp_view.xml', 'mrp_report.xml', 'company_view.xml', 'report/mrp_report_view.xml', 'res_config_view.xml', 'views/report_mrporder.xml', 'views/report_mrpbomstructure.xml', ], 'demo': ['mrp_demo.xml'], 'test': [ 'test/bom_with_service_type_product.yml', 'test/mrp_users.yml', 'test/order_demo.yml', 'test/order_process.yml', 'test/cancel_order.yml', ], 'installable': True, 'application': True, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
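An __openerp__.py manifest like the one above is a single Python dict literal, so it can be inspected without importing the addon as a module. A stdlib-only sketch (the path is illustrative; the surrounding comment lines are ignored by the parser):

# Read an OpenERP/Odoo module manifest without executing it.
import ast

def read_manifest(path):
    """Parse a manifest file consisting of one dict literal."""
    with open(path) as f:
        return ast.literal_eval(f.read())

if __name__ == "__main__":
    manifest = read_manifest("addons/mrp/__openerp__.py")  # illustrative path
    print(manifest["name"], manifest["version"])
    print("depends:", ", ".join(manifest["depends"]))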
tstirrat15/exercism-python-responses
refs/heads/master
meetup/meetup_test.py
2
from datetime import date import unittest from meetup import meetup_day class MeetupTest(unittest.TestCase): def test_monteenth_of_may_2013(self): self.assertEqual(date(2013, 5, 13), meetup_day(2013, 5, 'Monday', 'teenth')) def test_saturteenth_of_february_2013(self): self.assertEqual(date(2013, 2, 16), meetup_day(2013, 2, 'Saturday', 'teenth')) def test_first_tuesday_of_may_2013(self): self.assertEqual(date(2013, 5, 7), meetup_day(2013, 5, 'Tuesday', '1st')) def test_second_monday_of_april_2013(self): self.assertEqual(date(2013, 4, 8), meetup_day(2013, 4, 'Monday', '2nd')) def test_third_thursday_of_september_2013(self): self.assertEqual(date(2013, 9, 19), meetup_day(2013, 9, 'Thursday', '3rd')) def test_fourth_sunday_of_march_2013(self): self.assertEqual(date(2013, 3, 24), meetup_day(2013, 3, 'Sunday', '4th')) def test_last_thursday_of_october_2013(self): self.assertEqual(date(2013, 10, 31), meetup_day(2013, 10, 'Thursday', 'last')) def test_last_wednesday_of_february_2012(self): self.assertEqual(date(2012, 2, 29), meetup_day(2012, 2, 'Wednesday', 'last')) def test_first_friday_of_december_2012(self): self.assertEqual(date(2012, 12, 7), meetup_day(2012, 12, 'Friday', '1st')) if __name__ == '__main__': unittest.main()
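The tests above fully pin down the meetup_day contract: 'teenth' picks the matching weekday whose day-of-month falls in 13..19, '1st' through '4th' are ordinals, and 'last' must handle leap years (February 2012). One possible implementation that satisfies them, sketched with the stdlib calendar module:

# meetup.py -- one implementation satisfying the tests above.
from calendar import Calendar, day_name

def meetup_day(year, month, weekday, spec):
    # Every date in this month that falls on the requested weekday.
    days = [d for d in Calendar().itermonthdates(year, month)
            if d.month == month and day_name[d.weekday()] == weekday]
    if spec == 'teenth':
        return next(d for d in days if 13 <= d.day <= 19)
    if spec == 'last':
        return days[-1]
    return days[int(spec[0]) - 1]  # '1st'..'4th' -> index 0..3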
poiesisconsulting/openerp-restaurant
refs/heads/master
website_mail/__init__.py
1577
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import controllers import models
ezequielpereira/Time-Line
refs/heads/master
libs64/wx/tools/helpviewer.py
5
#---------------------------------------------------------------------- # Name: wx.tools.helpviewer # Purpose: HTML Help viewer # # Author: Robin Dunn # # Created: 11-Dec-2002 # RCS-ID: $Id: helpviewer.py 45966 2007-05-11 18:54:09Z RD $ # Copyright: (c) 2002 by Total Control Software # Licence: wxWindows license #---------------------------------------------------------------------- """ helpviewer.py -- Displays HTML Help in a wxHtmlHelpController window. Usage: helpviewer [--cache=path] helpfile [helpfile(s)...] Where helpfile is the path to either a .hhp file or a .zip file which contains a .hhp file. The .hhp files are the same as those used by Microsoft's HTML Help Workshop for creating CHM files. """ import sys, os #--------------------------------------------------------------------------- def makeOtherFrame(helpctrl): import wx parent = helpctrl.GetFrame() otherFrame = wx.Frame(parent) def main(args=sys.argv): if len(args) < 2: print __doc__ return args = args[1:] cachedir = None if args[0][:7] == '--cache': cachedir = os.path.expanduser(args[0].split('=')[1]) args = args[1:] if len(args) == 0: print __doc__ return import wx import wx.html app = wx.PySimpleApp() #wx.Log.SetActiveTarget(wx.LogStderr()) wx.Log.SetLogLevel(wx.LOG_Error) # Set up the default config so the htmlhelp frame can save its preferences app.SetVendorName('wxWidgets') app.SetAppName('helpviewer') cfg = wx.ConfigBase.Get() # Add the Zip filesystem wx.FileSystem.AddHandler(wx.ZipFSHandler()) # Create the viewer helpctrl = wx.html.HtmlHelpController() if cachedir: helpctrl.SetTempDir(cachedir) # and add the books for helpfile in args: print "Adding %s..." % helpfile helpctrl.AddBook(helpfile, 1) # The frame used by the HtmlHelpController is set to not prevent # app exit, so in the case of a standalone helpviewer like this # when the about box or search box is closed the help frame will # be the only one left and the app will close unexpectedly. To # work around this we'll create another frame that is never shown, # but which will be closed when the helpviewer frame is closed. wx.CallAfter(makeOtherFrame, helpctrl) # start it up! helpctrl.DisplayContents() app.MainLoop() if __name__ == '__main__': main()
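main() above parses --cache by slicing sys.argv manually (the script predates argparse). A behavior-equivalent sketch of the same command-line interface using argparse, with option names taken from the docstring:

# Equivalent option handling with argparse instead of manual slicing.
import argparse
import os

def parse_args(argv=None):
    parser = argparse.ArgumentParser(
        description="Display HTML Help in a wxHtmlHelpController window.")
    parser.add_argument("--cache", metavar="path",
                        help="directory to cache help data in")
    parser.add_argument("helpfile", nargs="+",
                        help="a .hhp file, or a .zip file containing one")
    args = parser.parse_args(argv)
    if args.cache:
        args.cache = os.path.expanduser(args.cache)
    return args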
aayush2911/Fibonaccio
refs/heads/master
web2py/gluon/contrib/minify/jsmin.py
44
#!/usr/bin/env python # -*- coding: ascii -*- # # Copyright 2011 # Andr\xe9 Malo or his licensors, as applicable # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" ===================== Javascript Minifier ===================== Javascript Minifier based on `jsmin.c by Douglas Crockford`_\. This module is a re-implementation based on the semantics of jsmin.c. Usually it produces the same results. It differs in the following ways: - there is no error detection: unterminated string, regex and comment literals are treated as regular javascript code and minified as such. - Control characters inside string and regex literals are left untouched; they are not converted to spaces (nor to \n) - Newline characters are not allowed inside string and regex literals, except for line continuations in string literals (ECMA-5). - "return /regex/" is recognized correctly. - rjsmin does not handle streams, but only complete strings. (However, the module provides a "streamy" interface). Besides the list above it differs from direct python ports of jsmin.c in speed. Since most parts of the logic are handled by the regex engine it's way faster than the original python port by Baruch Even. The speed factor varies between about 6 and 55 depending on input and python version (it gets faster the more compressed the input already is). Compared to the speed-refactored python port by Dave St.Germain the performance gain is less dramatic but still between 1.2 and 7. See the docs/BENCHMARKS file for details. rjsmin.c is a reimplementation of rjsmin.py in C and speeds it up even more. Both python 2 and python 3 are supported. .. _jsmin.c by Douglas Crockford: http://www.crockford.com/javascript/jsmin.c Original author of Python version: Andr\xe9 Malo Home page: http://opensource.perlig.de/rjsmin/ Modified by Ross Peoples <[email protected]> for inclusion into web2py. """ __author__ = "Andr\xe9 Malo" __author__ = getattr(__author__, 'decode', lambda x: __author__)('latin-1') __docformat__ = "restructuredtext en" __license__ = "Apache License, Version 2.0" __version__ = '1.0.2' __all__ = ['jsmin', 'jsmin_for_posers'] import re as _re def _make_jsmin(extended=True, python_only=True): """ Generate JS minifier based on `jsmin.c by Douglas Crockford`_ .. _jsmin.c by Douglas Crockford: http://www.crockford.com/javascript/jsmin.c :Parameters: `extended` : ``bool`` Extended Regexps? (using lookahead and lookbehind). This is faster, because it can be optimized way more. The regexps used with `extended` being false are only left here to allow easier porting to platforms without extended regex features (and for my own reference...) `python_only` : ``bool`` Use only the python variant. If true, the c extension is not even tried to be loaded. 
:Return: Minifier :Rtype: ``callable`` """ # pylint: disable = R0912, R0914, W0612 if not python_only: try: import _rjsmin except ImportError: pass else: return _rjsmin.jsmin try: xrange except NameError: xrange = range # pylint: disable = W0622 space_chars = r'[\000-\011\013\014\016-\040]' line_comment = r'(?://[^\r\n]*)' space_comment = r'(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)' string1 = \ r'(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^\047\\\r\n]*)*\047)' string2 = r'(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|\r)[^"\\\r\n]*)*")' strings = r'(?:%s|%s)' % (string1, string2) charclass = r'(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]*)*\])' nospecial = r'[^/\\\[\r\n]' if extended: regex = r'(?:/(?![\r\n/*])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' % ( nospecial, charclass, nospecial ) else: regex = ( r'(?:/(?:[^*/\\\r\n\[]|%s|\\[^\r\n])%s*(?:(?:\\[^\r\n]|%s)%s*)*/)' ) regex = regex % (charclass, nospecial, charclass, nospecial) space = r'(?:%s|%s)' % (space_chars, space_comment) newline = r'(?:%s?[\r\n])' % line_comment def fix_charclass(result): """ Fixup string of chars to fit into a regex char class """ pos = result.find('-') if pos >= 0: result = r'%s%s-' % (result[:pos], result[pos + 1:]) def sequentize(string): """ Notate consecutive characters as sequence (1-4 instead of 1234) """ first, last, result = None, None, [] for char in map(ord, string): if last is None: first = last = char elif last + 1 == char: last = char else: result.append((first, last)) first = last = char if last is not None: result.append((first, last)) return ''.join(['%s%s%s' % ( chr(first), last > first + 1 and '-' or '', last != first and chr(last) or '' ) for first, last in result]) return _re.sub(r'([\000-\040\047])', # for better portability lambda m: '\\%03o' % ord(m.group(1)), (sequentize(result) .replace('\\', '\\\\') .replace('[', '\\[') .replace(']', '\\]') ) ) def id_literal_(what): """ Make id_literal like char class """ match = _re.compile(what).match result = ''.join([ chr(c) for c in xrange(127) if not match(chr(c)) ]) return '[^%s]' % fix_charclass(result) def not_id_literal_(keep): """ Make negated id_literal like char class """ match = _re.compile(id_literal_(keep)).match result = ''.join([ chr(c) for c in xrange(127) if not match(chr(c)) ]) return r'[%s]' % fix_charclass(result) not_id_literal = not_id_literal_(r'[a-zA-Z0-9_$]') preregex1 = r'[(,=:\[!&|?{};\r\n]' preregex2 = r'%(not_id_literal)sreturn' % locals() if extended: id_literal = id_literal_(r'[a-zA-Z0-9_$]') id_literal_open = id_literal_(r'[a-zA-Z0-9_${\[(+-]') id_literal_close = id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]') space_sub = _re.compile(( r'([^\047"/\000-\040]+)' r'|(%(strings)s[^\047"/\000-\040]*)' r'|(?:(?<=%(preregex1)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))' r'|(?:(?<=%(preregex2)s)%(space)s*(%(regex)s[^\047"/\000-\040]*))' r'|(?<=%(id_literal_close)s)' r'%(space)s*(?:(%(newline)s)%(space)s*)+' r'(?=%(id_literal_open)s)' r'|(?<=%(id_literal)s)(%(space)s)+(?=%(id_literal)s)' r'|%(space)s+' r'|(?:%(newline)s%(space)s*)+' ) % locals()).sub def space_subber(match): """ Substitution callback """ # pylint: disable = C0321, R0911 groups = match.groups() if groups[0]: return groups[0] elif groups[1]: return groups[1] elif groups[2]: return groups[2] elif groups[3]: return groups[3] elif groups[4]: return '\n' elif groups[5]: return ' ' else: return '' def jsmin(script): # pylint: disable = W0621 r""" Minify javascript based on `jsmin.c by Douglas Crockford`_\. 
Instead of parsing the stream char by char, it uses a regular expression approach which minifies the whole script with one big substitution regex. .. _jsmin.c by Douglas Crockford: http://www.crockford.com/javascript/jsmin.c :Parameters: `script` : ``str`` Script to minify :Return: Minified script :Rtype: ``str`` """ return space_sub(space_subber, '\n%s\n' % script).strip() else: pre_regex = r'(?:%(preregex1)s|%(preregex2)s)' % locals() not_id_literal_open = not_id_literal_(r'[a-zA-Z0-9_${\[(+-]') not_id_literal_close = not_id_literal_(r'[a-zA-Z0-9_$}\])"\047+-]') space_norm_sub = _re.compile(( r'(%(strings)s)' r'|(?:(%(pre_regex)s)%(space)s*(%(regex)s))' r'|(%(space)s)+' r'|(?:(%(newline)s)%(space)s*)+' ) % locals()).sub def space_norm_subber(match): """ Substitution callback """ # pylint: disable = C0321 groups = match.groups() if groups[0]: return groups[0] elif groups[1]: return groups[1].replace('\r', '\n') + groups[2] elif groups[3]: return ' ' elif groups[4]: return '\n' space_sub1 = _re.compile(( r'[\040\n]?(%(strings)s|%(pre_regex)s%(regex)s)' r'|\040(%(not_id_literal)s)' r'|\n(%(not_id_literal_open)s)' ) % locals()).sub def space_subber1(match): """ Substitution callback """ groups = match.groups() return groups[0] or groups[1] or groups[2] space_sub2 = _re.compile(( r'(%(strings)s)\040?' r'|(%(pre_regex)s%(regex)s)[\040\n]?' r'|(%(not_id_literal)s)\040' r'|(%(not_id_literal_close)s)\n' ) % locals()).sub def space_subber2(match): """ Substitution callback """ groups = match.groups() return groups[0] or groups[1] or groups[2] or groups[3] def jsmin(script): r""" Minify javascript based on `jsmin.c by Douglas Crockford`_\. Instead of parsing the stream char by char, it uses a regular expression approach. The script is minified with three passes: normalization Control character are mapped to spaces, spaces and newlines are squeezed and comments are stripped. space removal 1 Spaces before certain tokens are removed space removal 2 Spaces after certain tokens are remove .. _jsmin.c by Douglas Crockford: http://www.crockford.com/javascript/jsmin.c :Parameters: `script` : ``str`` Script to minify :Return: Minified script :Rtype: ``str`` """ return space_sub2(space_subber2, space_sub1(space_subber1, space_norm_sub(space_norm_subber, '\n%s\n' % script) ) ).strip() return jsmin jsmin = _make_jsmin() ##################### # EXAMPLE USAGE # ##################### # # import jsmin # jsmin.jsmin(script) # def jsmin_for_posers(script): r""" Minify javascript based on `jsmin.c by Douglas Crockford`_\. Instead of parsing the stream char by char, it uses a regular expression approach which minifies the whole script with one big substitution regex. .. _jsmin.c by Douglas Crockford: http://www.crockford.com/javascript/jsmin.c :Warning: This function is the digest of a _make_jsmin() call. It just utilizes the resulting regex. It's just for fun here and may vanish any time. Use the `jsmin` function instead. :Parameters: `script` : ``str`` Script to minify :Return: Minified script :Rtype: ``str`` """ def subber(match): """ Substitution callback """ groups = match.groups() return ( groups[0] or groups[1] or groups[2] or groups[3] or (groups[4] and '\n') or (groups[5] and ' ') or '' ) return _re.sub( r'([^\047"/\000-\040]+)|((?:(?:\047[^\047\\\r\n]*(?:\\(?:[^\r\n]|\r?' 
r'\n|\r)[^\047\\\r\n]*)*\047)|(?:"[^"\\\r\n]*(?:\\(?:[^\r\n]|\r?\n|' r'\r)[^"\\\r\n]*)*"))[^\047"/\000-\040]*)|(?:(?<=[(,=:\[!&|?{};\r\n]' r')(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/' r'))*((?:/(?![\r\n/*])[^/\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*' r'(?:\\[^\r\n][^\\\]\r\n]*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*' r'))|(?:(?<=[\000-#%-,./:-@\[-^`{-~-]return)(?:[\000-\011\013\014\01' r'6-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*((?:/(?![\r\n/*])[^/' r'\\\[\r\n]*(?:(?:\\[^\r\n]|(?:\[[^\\\]\r\n]*(?:\\[^\r\n][^\\\]\r\n]' r'*)*\]))[^/\\\[\r\n]*)*/)[^\047"/\000-\040]*))|(?<=[^\000-!#%&(*,./' r':-@\[\\^`{|~])(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/' r'*][^*]*\*+)*/))*(?:((?:(?://[^\r\n]*)?[\r\n]))(?:[\000-\011\013\01' r'4\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+(?=[^\000-#%-\04' r'7)*,./:-@\\-^`|-~])|(?<=[^\000-#%-,./:-@\[-^`{-~-])((?:[\000-\011' r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/)))+(?=[^\000-' r'#%-,./:-@\[-^`{-~-])|(?:[\000-\011\013\014\016-\040]|(?:/\*[^*]*\*' r'+(?:[^/*][^*]*\*+)*/))+|(?:(?:(?://[^\r\n]*)?[\r\n])(?:[\000-\011' r'\013\014\016-\040]|(?:/\*[^*]*\*+(?:[^/*][^*]*\*+)*/))*)+', subber, '\n%s\n' % script ).strip() if __name__ == '__main__': import sys as _sys _sys.stdout.write(jsmin(_sys.stdin.read()))
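A quick usage sketch for the minifier above, assuming the module is importable as jsmin (inside web2py it lives at gluon.contrib.minify.jsmin):

# Demonstration of the regex-based minifier defined above.
from jsmin import jsmin  # adjust the import path to where this file lives

source = """
// line comments and extra whitespace are stripped
var answer = 42;  /* block comments too */
function half(x) {
    return x / 2;
}
"""
print(jsmin(source))
# Expected output (modulo exact spacing rules):
# var answer=42;function half(x){return x/2;}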
polyfractal/elasticsearch
refs/heads/master
dev-tools/smoke_test_rc.py
2
# Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on # an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # Smoke-tests a release candidate # # 1. Downloads the tar.gz, deb, RPM and zip file from the staging URL # 2. Verifies their sha1 hashes and GPG signatures against the release key # 3. Installs all official plugins # 4. Starts one node for tar.gz and zip packages and checks: # -- if it runs with Java 1.7 # -- if the build hash given is the one that is returned by the status response # -- if the build is a release version and not a snapshot version # -- if all plugins are loaded # -- if the status response returns the correct version # # USAGE: # # python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 # # to also test other plugins try running # # python3 -B ./dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --plugins license,shield,watcher # # Note: Ensure the script is run from the elasticsearch top level directory # # For testing a release from sonatype try this: # # python3 -B dev-tools/smoke_test_rc.py --version 2.0.0-beta1 --hash bfa3e47 --fetch_url https://oss.sonatype.org/content/repositories/releases/ # import argparse import tempfile import os import signal import shutil import urllib import urllib.request import hashlib import time import socket import json import base64 from prepare_release_candidate import run from http.client import HTTPConnection DEFAULT_PLUGINS = ["analysis-icu", "analysis-kuromoji", "analysis-phonetic", "analysis-smartcn", "analysis-stempel", "delete-by-query", "discovery-azure", "discovery-ec2", "discovery-gce", "discovery-multicast", "lang-expression", "lang-groovy", "lang-javascript", "lang-plan-a", "lang-python", "mapper-murmur3", "mapper-size", "repository-azure", "repository-s3", "store-smb"] try: JAVA_HOME = os.environ['JAVA_HOME'] except KeyError: raise RuntimeError(""" Please set JAVA_HOME in the env before running release tool On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""") def java_exe(): path = JAVA_HOME return 'export JAVA_HOME="%s" PATH="%s/bin:$PATH" JAVACMD="%s/bin/java"' % (path, path, path) def verify_java_version(version): s = os.popen('%s; java -version 2>&1' % java_exe()).read() if ' version "%s.'
% version not in s: raise RuntimeError('got wrong version for java %s:\n%s' % (version, s)) def sha1(file): with open(file, 'rb') as f: return hashlib.sha1(f.read()).hexdigest() def read_fully(file): with open(file, encoding='utf-8') as f: return f.read() def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=60, header={}): print(' Waiting until node becomes available for at most %s seconds' % timeout) for _ in range(timeout): conn = HTTPConnection(host=host, port=port, timeout=timeout) try: time.sleep(1) conn.request('GET', '', headers=header) res = conn.getresponse() if res.status == 200: return True except socket.error as e: pass #that is ok it might not be there yet finally: conn.close() return False def download_and_verify(version, hash, files, base_url, plugins=DEFAULT_PLUGINS): print('Downloading and verifying release %s from %s' % (version, base_url)) tmp_dir = tempfile.mkdtemp() try: downloaded_files = [] print(' ' + '*' * 80) for file in files: name = os.path.basename(file) print(' Smoketest file: %s' % name) url = '%s/%s' % (base_url, file) print(' Downloading %s' % (url)) artifact_path = os.path.join(tmp_dir, file) downloaded_files.append(artifact_path) current_artifact_dir = os.path.dirname(artifact_path) os.makedirs(current_artifact_dir) urllib.request.urlretrieve(url, os.path.join(tmp_dir, file)) sha1_url = ''.join([url, '.sha1']) checksum_file = artifact_path + ".sha1" print(' Downloading %s' % (sha1_url)) urllib.request.urlretrieve(sha1_url, checksum_file) print(' Verifying checksum %s' % (checksum_file)) expected = read_fully(checksum_file) actual = sha1(artifact_path) if expected != actual : raise RuntimeError('sha1 hash for %s doesn\'t match %s != %s' % (name, expected, actual)) gpg_url = ''.join([url, '.asc']) gpg_file = artifact_path + ".asc" print(' Downloading %s' % (gpg_url)) urllib.request.urlretrieve(gpg_url, gpg_file) print(' Verifying gpg signature %s' % (gpg_file)) # here we create a temp gpg home where we download the release key as the only key into # when we verify the signature it will fail if the signed key is not in the keystore and that # way we keep the executing host unmodified since we don't have to import the key into the default keystore gpg_home_dir = os.path.join(current_artifact_dir, "gpg_home_dir") os.makedirs(gpg_home_dir, 0o700) run('gpg --homedir %s --keyserver pgp.mit.edu --recv-key D88E42B4' % gpg_home_dir) run('cd %s && gpg --homedir %s --verify %s' % (current_artifact_dir, gpg_home_dir, os.path.basename(gpg_file))) print(' ' + '*' * 80) print() smoke_test_release(version, downloaded_files, hash, plugins) print(' SUCCESS') finally: shutil.rmtree(tmp_dir) def smoke_test_release(release, files, expected_hash, plugins): for release_file in files: if not os.path.isfile(release_file): raise RuntimeError('Smoketest failed missing file %s' % (release_file)) tmp_dir = tempfile.mkdtemp() if release_file.endswith('tar.gz'): run('tar -xzf %s -C %s' % (release_file, tmp_dir)) elif release_file.endswith('zip'): run('unzip %s -d %s' % (release_file, tmp_dir)) else: print(' Skip SmokeTest for [%s]' % release_file) continue # nothing to do here es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch') print(' Smoke testing package [%s]' % release_file) es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/plugin') plugin_names = {} for plugin in plugins: print(' Install plugin [%s]' % (plugin)) run('%s; %s -Des.plugins.staging=true %s %s' % (java_exe(), es_plugin_path, 'install', plugin)) 
plugin_names[plugin] = True if 'shield' in plugin_names: headers = { 'Authorization' : 'Basic %s' % base64.b64encode(b"es_admin:foobar").decode("UTF-8") } es_shield_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/shield/esusers') print(" Install dummy shield user") run('%s; %s useradd es_admin -r admin -p foobar' % (java_exe(), es_shield_path)) else: headers = {} print(' Starting elasticsearch daemon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release)) try: run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.script.inline=on -Des.script.indexed=on -Des.repositories.url.allowed_urls=http://snapshot.test* %s -Des.pidfile=%s' % (java_exe(), es_run_path, '-d', os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'es-smoke.pid'))) conn = HTTPConnection(host='127.0.0.1', port=9200, timeout=20) if not wait_for_node_startup(header=headers): print("elasticsearch logs:") print('*' * 80) logs = read_fully(os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'logs/prepare_release.log')) print(logs) print('*' * 80) raise RuntimeError('server didn\'t start up') try: # we now get / and /_nodes to fetch basic info like hashes etc and the installed plugins conn.request('GET', '', headers=headers) res = conn.getresponse() if res.status == 200: version = json.loads(res.read().decode("utf-8"))['version'] if release != version['number']: raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number'])) if version['build_snapshot']: raise RuntimeError('Expected non snapshot version') if not expected_hash.startswith(version['build_hash'].strip()): raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash'])) print(' Verify if plugins are listed in _nodes') conn.request('GET', '/_nodes?plugin=true&pretty=true', headers=headers) res = conn.getresponse() if res.status == 200: nodes = json.loads(res.read().decode("utf-8"))['nodes'] for _, node in nodes.items(): node_plugins = node['plugins'] for node_plugin in node_plugins: if not plugin_names.get(node_plugin['name'].strip(), False): raise RuntimeError('Unexpected plugin %s' % node_plugin['name']) del plugin_names[node_plugin['name']] if plugin_names: raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys())) else: raise RuntimeError('Expected HTTP 200 but got %s' % res.status) else: raise RuntimeError('Expected HTTP 200 but got %s' % res.status) finally: conn.close() finally: pid_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'es-smoke.pid') if os.path.exists(pid_path): # try reading the pid and kill the node pid = int(read_fully(pid_path)) os.kill(pid, signal.SIGKILL) shutil.rmtree(tmp_dir) print(' ' + '*' * 80) print() def parse_list(string): return [x.strip() for x in string.split(',')] if __name__ == "__main__": parser = argparse.ArgumentParser(description='SmokeTests a Release Candidate from S3 staging repo') parser.add_argument('--version', '-v', dest='version', default=None, help='The Elasticsearch Version to smoke-test', required=True) parser.add_argument('--hash', '-s', dest='hash', default=None, required=True, help='The sha1 short hash of the git commit to smoketest') parser.add_argument('--plugins', '-p', dest='plugins', default=[], required=False, type=parse_list, help='A list of additional plugins to smoketest') parser.add_argument('--fetch_url', '-u', dest='url', default=None, help='Fetched from the specified URL') parser.set_defaults(hash=None) parser.set_defaults(plugins=[])
parser.set_defaults(version=None) parser.set_defaults(url=None) args = parser.parse_args() plugins = args.plugins version = args.version hash = args.hash url = args.url files = [ x % {'version': version} for x in [ 'org/elasticsearch/distribution/tar/elasticsearch/%(version)s/elasticsearch-%(version)s.tar.gz', 'org/elasticsearch/distribution/zip/elasticsearch/%(version)s/elasticsearch-%(version)s.zip', 'org/elasticsearch/distribution/deb/elasticsearch/%(version)s/elasticsearch-%(version)s.deb', 'org/elasticsearch/distribution/rpm/elasticsearch/%(version)s/elasticsearch-%(version)s.rpm' ]] verify_java_version('1.7') if url: download_url = url else: download_url = '%s/%s-%s' % ('http://download.elasticsearch.org/elasticsearch/staging', version, hash) download_and_verify(version, hash, files, download_url, plugins=DEFAULT_PLUGINS + plugins)
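download_and_verify() above pairs every artifact with a .sha1 sidecar file and compares digests before checking the GPG signature. The checksum half of that pattern in isolation, as a stdlib-only sketch (the URL layout is assumed to match the staging repository above):

# Stand-alone sketch of the sha1 sidecar verification used above.
import hashlib
import urllib.request

def sha1_of(path):
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()

def fetch_and_verify(url, dest):
    urllib.request.urlretrieve(url, dest)
    urllib.request.urlretrieve(url + '.sha1', dest + '.sha1')
    with open(dest + '.sha1') as f:
        expected = f.read().split()[0]  # tolerate "hash  filename" format
    actual = sha1_of(dest)
    if actual != expected:
        raise RuntimeError('sha1 mismatch for %s: %s != %s'
                           % (dest, expected, actual))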
r-owen/TUI
refs/heads/master
TUI/Inst/SPIcam/StatusConfigInputWdg.py
1
#!/usr/bin/env python """Configuration input panel for SPIcam. This is just a placeholder. History: 2007-05-22 ROwen First pass based on DIS; may be way off. 2007-05-24 ROwen Added corrections submitted by Craig Loomis. 2008-02-11 ROwen Modified to be compatible with the new TUI.Inst.StatusConfigWdg. 2008-04-24 ROwen Fixed bug in test code (found by pychecker). 2008-07-24 ROwen Fixed CR 809: added x,y labels to CCD controls. 2014-02-03 ROwen Added explicit stateTracker argument and the fixed test code to use it. Updated to use modernized TestData. 2014-02-05 ROwen Added config widget. 2014-03-14 ROwen Added a Presets widget """ import Tkinter import RO.Constants import RO.MathUtil import RO.Wdg import RO.KeyVariable import TUI.TUIModel import SPIcamModel _MaxDataWidth = 5 class StatusConfigInputWdg (RO.Wdg.InputContFrame): InstName = "SPIcam" HelpPrefix = 'Instruments/%s/%sWin.html#' % (InstName, InstName) # category names CCDCat = "ccd" ConfigCat = RO.Wdg.StatusConfigGridder.ConfigCat def __init__(self, master, stateTracker, **kargs): """Create a new widget to show status for and configure SPIcam Inputs: - master: parent widget - stateTracker: an RO.Wdg.StateTracker """ RO.Wdg.InputContFrame.__init__(self, master=master, stateTracker=stateTracker, **kargs) self.model = SPIcamModel.getModel() self.tuiModel = TUI.TUIModel.getModel() # set while updating user ccd binning or user window default, # to prevent storing new unbinned values for ccd window. self._freezeCCDUBWindow = False gr = RO.Wdg.StatusConfigGridder( master = self, sticky = "e", ) self.gridder = gr shutterCurrWdg = RO.Wdg.StrLabel(self, helpText = "current state of the shutter", helpURL = self.HelpPrefix + "Shutter", anchor = "w", ) self.model.shutter.addROWdg(shutterCurrWdg) gr.gridWdg ("Shutter", shutterCurrWdg, sticky="ew", colSpan=3) filterNameCurrWdg = RO.Wdg.StrLabel(self, helpText = "current filter", helpURL = self.HelpPrefix + "Filter", anchor = "w", ) self.model.filterName.addROWdg(filterNameCurrWdg) self.filterNameUserWdg = RO.Wdg.OptionMenu(self, items=[], helpText = "requested filter", helpURL = self.HelpPrefix + "Filter", defMenu = "Current", autoIsCurrent = True, ) gr.gridWdg ( label = "Filter", dataWdg = filterNameCurrWdg, units = False, cfgWdg = self.filterNameUserWdg, sticky = "ew", cfgSticky = "w", colSpan = 3, ) # ccd widgets # store user-set window in unbinned pixels # so the displayed binned value can be properly # updated when the user changes the binning self.userCCDUBWindow = None # ccd image header; the label is a toggle button # for showing ccd image info # grid that first as it is always displayed self.showCCDWdg = RO.Wdg.Checkbutton(self, text = "CCD", defValue = False, helpText = "Show binning, etc.?", helpURL = self.HelpPrefix + "ShowCCD", ) gr.addShowHideControl(self.CCDCat, self.showCCDWdg) self._stateTracker.trackCheckbutton("showCCD", self.showCCDWdg) gr.gridWdg ( label = self.showCCDWdg, ) # grid ccd labels; these show/hide along with all other CCD data axisLabels = ("x", "y") ccdLabelDict = {} for setName in ("data", "cfg"): ccdLabelDict[setName] = [ Tkinter.Label(self, text=axis, ) for axis in axisLabels ] gr.gridWdg ( label = None, dataWdg = ccdLabelDict["data"], cfgWdg = ccdLabelDict["cfg"], sticky = "e", cat = self.CCDCat, row = -1, ) ccdBinCurrWdgSet = [RO.Wdg.IntLabel(self, width = 4, helpText = "current bin factor in %s" % (axis,), helpURL=self.HelpPrefix + "Bin", ) for axis in axisLabels ] self.model.ccdBin.addROWdgSet(ccdBinCurrWdgSet) self.ccdBinUserWdgSet = [ RO.Wdg.IntEntry(self, 
minValue = 1, maxValue = 99, width = 2, helpText = "requested bin factor in %s" % (axis,), helpURL = self.HelpPrefix + "Bin", clearMenu = None, defMenu = "Current", callFunc = self._userBinChanged, autoIsCurrent = True, ) for axis in axisLabels ] self.model.ccdBin.addROWdgSet(self.ccdBinUserWdgSet, setDefault=True) gr.gridWdg ( label = "Bin", dataWdg = ccdBinCurrWdgSet, cfgWdg = self.ccdBinUserWdgSet, cat = self.CCDCat, ) # CCD window winDescr = ( "smallest x", "smallest y", "largest x", "largest y", ) ccdWindowCurrWdgSet = [RO.Wdg.IntLabel(self, width = 4, helpText = "%s of current window (binned pix)" % winDescr[ii], helpURL = self.HelpPrefix + "Window", ) for ii in range(4) ] self.model.ccdWindow.addROWdgSet(ccdWindowCurrWdgSet) self.ccdWindowUserWdgSet = [ RO.Wdg.IntEntry(self, minValue = 1, maxValue = (2048, 2048, 2048, 2048)[ii], width = 4, helpText = "%s of requested window (binned pix)" % winDescr[ii], helpURL = self.HelpPrefix + "Window", clearMenu = None, defMenu = "Current", minMenu = ("Mininum", "Minimum", None, None)[ii], maxMenu = (None, None, "Maximum", "Maximum")[ii], callFunc = self._userWindowChanged, autoIsCurrent = True, isCurrent = False, ) for ii in range(4) ] # self.model.ccdUBWindow.addCallback(self._setCCDWindowWdgDef) gr.gridWdg ( label = "Window", dataWdg = ccdWindowCurrWdgSet[0:2], cfgWdg = self.ccdWindowUserWdgSet[0:2], units = "LL bpix", cat = self.CCDCat, ) gr.gridWdg ( label = None, dataWdg = ccdWindowCurrWdgSet[2:4], cfgWdg = self.ccdWindowUserWdgSet[2:4], units = "UR bpix", cat = self.CCDCat, ) # Image size, in binned pixels self.ccdImageSizeCurrWdgSet = [RO.Wdg.IntLabel(self, width = 4, helpText = "current %s size of image (binned pix)" % winDescr[ii], helpURL = self.HelpPrefix + "Window", ) for ii in range(2) ] # self.model.ccdWindow.addCallback(self._updCurrImageSize) self.ccdImageSizeUserWdgSet = [ RO.Wdg.IntLabel(self, width = 4, helpText = "requested %s size of image (binned pix)" % ("x", "y")[ii], helpURL = self.HelpPrefix + "ImageSize", ) for ii in range(2) ] gr.gridWdg ( label = "Image Size", dataWdg = self.ccdImageSizeCurrWdgSet, cfgWdg = self.ccdImageSizeUserWdgSet, units = "bpix", cat = self.CCDCat, ) # ccd overscan ccdOverscanCurrWdgSet = [RO.Wdg.IntLabel(self, width = 4, helpText = "current overscan in %s" % (axis,), helpURL = self.HelpPrefix + "Overscan", ) for axis in axisLabels ] self.model.ccdOverscan.addROWdgSet(ccdOverscanCurrWdgSet) self.ccdOverscanUserWdgSet = [ RO.Wdg.IntEntry(self, minValue = 0, maxValue = 2048, width = 4, helpText = "requested overscan in %s" % (axis,), helpURL = self.HelpPrefix + "Overscan", clearMenu = None, defMenu = "Current", autoIsCurrent = True, ) for axis in axisLabels ] self.model.ccdOverscan.addROWdgSet(self.ccdOverscanUserWdgSet, setDefault=True) gr.gridWdg ( label = "Overscan", dataWdg = ccdOverscanCurrWdgSet, units = "bpix", cfgWdg = self.ccdOverscanUserWdgSet, cat = self.CCDCat, ) # set up format functions for the various pop-up menus # these allow us to return index values instead of names class indFormat(object): def __init__(self, indFunc, offset=1): self.indFunc = indFunc self.offset = offset def __call__(self, inputCont): valueList = inputCont.getValueList() if not valueList: return '' name = inputCont.getName() return "%s %d" % (name, self.indFunc(valueList[0]) + self.offset) # add callbacks that access widgets self.model.filterNames.addCallback(self.filterNameUserWdg.setItems) self.model.filterName.addIndexedCallback(self.filterNameUserWdg.setDefault, 0) 
self.model.ccdUBWindow.addCallback(self._setCCDWindowWdgDef) self.model.ccdWindow.addCallback(self._updCurrImageSize) # set up the input container set; this is what formats the commands # and allows saving and recalling commands self.inputCont = RO.InputCont.ContList ( conts = [ RO.InputCont.WdgCont ( name = "filter", wdgs = self.filterNameUserWdg, formatFunc = indFormat(self.filterNameUserWdg.index), ), RO.InputCont.WdgCont ( name = "bin", wdgs = self.ccdBinUserWdgSet, formatFunc = RO.InputCont.BasicFmt( rejectBlanks = True, ), ), RO.InputCont.WdgCont ( name = "window", wdgs = self.ccdWindowUserWdgSet, formatFunc = RO.InputCont.BasicFmt( rejectBlanks = True, ), ), RO.InputCont.WdgCont ( name = "overscan", wdgs = self.ccdOverscanUserWdgSet, formatFunc = RO.InputCont.BasicFmt( rejectBlanks = True, ), ), ], ) self.configWdg = RO.Wdg.InputContPresetsWdg( master = self, sysName = "%sConfig" % (self.InstName,), userPresetsDict = self.tuiModel.userPresetsDict, stdPresets = dict(), inputCont = self.inputCont, helpText = "use and manage named presets", helpURL = self.HelpPrefix + "Presets", ) self.gridder.gridWdg( "Presets", cfgWdg = self.configWdg, colSpan = 2, ) self.gridder.allGridded() def repaint(evt): self.restoreDefault() self.bind("<Map>", repaint) def _saveCCDUBWindow(self): """Save user ccd window in unbinned pixels. """ if self._freezeCCDUBWindow: return userWindow = [wdg.getNum() for wdg in self.ccdWindowUserWdgSet] if 0 in userWindow: return userBinFac = self._getUserBinFac() if 0 in userBinFac: return self.userCCDUBWindow = self.model.unbin(userWindow, userBinFac) def _setCCDWindowWdgDef(self, *args, **kargs): """Updates the default value of CCD window wdg. If this has the effect of changing the displayed values (only true if a box is blank) then update the saved unbinned window. """ if self.userCCDUBWindow is None: currUBWindow, isCurrent = self.model.ccdUBWindow.get() if isCurrent: self.userCCDUBWindow = currUBWindow initialUserCCDWindow = self._getUserCCDWindow() # print "_setCCDWindowWdgDef; initialUserCCDWindow =", initialUserCCDWindow self._updUserCCDWindow(doCurrValue=False) if initialUserCCDWindow != self._getUserCCDWindow(): # print "_setCCDWindowWdgDef; user value changed when default changed; save new unbinned value" self._saveCCDUBWindow() def _userBinChanged(self, *args, **kargs): """User bin factor changed. Update ccd window current values and default values. """ self._updUserCCDWindow() def _userWindowChanged(self, *args, **kargs): self._saveCCDUBWindow() # update user ccd image size actUserCCDWindow = self._getUserCCDWindow() if 0 in actUserCCDWindow: return for ind in range(2): imSize = 1 + actUserCCDWindow[ind+2] - actUserCCDWindow[ind] self.ccdImageSizeUserWdgSet[ind].set(imSize) def _updCurrImageSize(self, *args, **kargs): """Updates current image size. """ window, isCurrent = self.model.ccdWindow.get() if not isCurrent: return try: imageSize = [1 + window[ind+2] - window[ind] for ind in range(2)] except TypeError: imageSize = (None, None) for ind in range(2): self.ccdImageSizeCurrWdgSet[ind].set(imageSize[ind]) def _updUserCCDWindow(self, doCurrValue = True): """Update user-set ccd window. Inputs: - doCurrValue: if True, set current value and default; otherwise just set default. 
The current value is set from the cached user's unbinned value """ self._freezeCCDUBWindow = True try: if doCurrValue and self.userCCDUBWindow is None: # print "_updUserCCDWindow; unbinned = none" return userBinFac = self._getUserBinFac() # print "_updUserCCDWindow; userBinFac =", userBinFac if 0 in userBinFac: # print "_updUserCCDWindow; bin fac has 0" return # update user ccd window displayed value, default valud and limits if doCurrValue: userWindow = self.model.bin(self.userCCDUBWindow, userBinFac) currUBWindow, isCurrent = self.model.ccdUBWindow.get() if isCurrent: currWindow = self.model.bin(currUBWindow, userBinFac) else: currWindow = (None,)*4 # print "_updUserCCDWindow; currWindow=", currWindow minWindowXYXY = self.model.minCoord(userBinFac)*2 maxWindowXYXY = self.model.maxCoord(userBinFac)*2 # print "_updUserCCDWindow: setting values", userWindow for ind in range(4): wdg = self.ccdWindowUserWdgSet[ind] # disable limits wdg.setRange( minValue = None, maxValue = None, ) # set displayed and default value if doCurrValue: wdg.set(userWindow[ind], isCurrent) wdg.setDefault(currWindow[ind], isCurrent) # set correct range for this bin factor wdg.setRange( minValue = minWindowXYXY[ind], maxValue = maxWindowXYXY[ind], ) finally: self._freezeCCDUBWindow = False def _getUserBinFac(self): """Return the current user-set bin factor in x and y. """ return [wdg.getNum() for wdg in self.ccdBinUserWdgSet] def _getUserCCDWindow(self): """Return the current user-set ccd window (binned) in x and y. """ return [wdg.getNum() for wdg in self.ccdWindowUserWdgSet] if __name__ == "__main__": import TestData root = TestData.tuiModel.tkRoot stateTracker = RO.Wdg.StateTracker(logFunc=TestData.tuiModel.logFunc) testFrame = StatusConfigInputWdg(root, stateTracker=stateTracker) testFrame.pack() TestData.start() testFrame.restoreDefault() def printCmds(): cmdList = testFrame.getStringList() for cmd in cmdList: print cmd bf = Tkinter.Frame(root) cfgWdg = RO.Wdg.Checkbutton(bf, text="Config", defValue=True) cfgWdg.pack(side="left") Tkinter.Button(bf, text="Cmds", command=printCmds).pack(side="left") Tkinter.Button(bf, text="Current", command=testFrame.restoreDefault).pack(side="left") Tkinter.Button(bf, text="Demo", command=TestData.animate).pack(side="left") bf.pack() testFrame.gridder.addShowHideControl(testFrame.ConfigCat, cfgWdg) root.mainloop()
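The widget above caches the user's window in unbinned pixels (userCCDUBWindow) so the displayed binned values can be recomputed whenever the bin factor changes. The conversion it delegates to SPIcamModel.bin/unbin is not defined in this file; the sketch below shows the usual convention for 1-based, inclusive CCD windows (the exact rounding used by SPIcamModel may differ):

# Sketch of binned/unbinned conversion for (x0, y0, x1, y1) windows.
# Assumes 1-based, inclusive coordinates; rounding is an assumption.
def bin_window(ub_window, bin_fac):
    """Unbinned -> binned; each binned pixel spans bin_fac unbinned ones."""
    bx, by = bin_fac
    x0, y0, x1, y1 = ub_window
    return (1 + (x0 - 1) // bx, 1 + (y0 - 1) // by,
            1 + (x1 - 1) // bx, 1 + (y1 - 1) // by)

def unbin_window(window, bin_fac):
    """Binned -> unbinned; expand to the full extent of each binned pixel."""
    bx, by = bin_fac
    x0, y0, x1, y1 = window
    return ((x0 - 1) * bx + 1, (y0 - 1) * by + 1, x1 * bx, y1 * by)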
csliu/pinball
refs/heads/master
tests/pinball/workflow/worker_test.py
5
# Copyright 2015, Pinterest, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Validation tests for the token hierarchy inspector.""" import copy import mock import pickle import time import unittest from pinball.master.factory import Factory from pinball.workflow.event import Event from pinball.workflow.name import Name from pinball.workflow.job import ShellJob from pinball.workflow.worker import Worker from pinball.master.thrift_lib.ttypes import GroupRequest from pinball.master.thrift_lib.ttypes import ModifyRequest from pinball.master.thrift_lib.ttypes import Token from pinball.master.thrift_lib.ttypes import Query from pinball.master.thrift_lib.ttypes import QueryRequest from pinball.workflow.job_executor import ExecutionRecord from tests.pinball.persistence.ephemeral_store import EphemeralStore from pinball.workflow.signaller import Signal __author__ = 'Pawel Garbacki' __copyright__ = 'Copyright 2015, Pinterest, Inc.' __credits__ = [__author__] __license__ = 'Apache' __version__ = '2.0' class WorkerTestCase(unittest.TestCase): def setUp(self): self._factory = Factory() self._store = EphemeralStore() self._factory.create_master(self._store) self._emailer = mock.Mock() self._worker = Worker(self._factory.get_client(), self._store, self._emailer) self._client = self._factory.get_client() def _get_parent_job_token(self): name = Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='parent_job') job = ShellJob(name=name.job, inputs=[Name.WORKFLOW_START_INPUT], outputs=['child_job'], command='echo parent', emails=['[email protected]']) return Token(name=name.get_job_token_name(), data=pickle.dumps(job)) def _get_child_job_token(self): name = Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='child_job') job = ShellJob(name=name.job, inputs=['parent_job'], outputs=[], command='echo child', emails=['[email protected]']) return Token(name=name.get_job_token_name(), data=pickle.dumps(job)) def _post_job_tokens(self): """Add waiting job tokens to the master.""" request = ModifyRequest(updates=[]) request.updates.append(self._get_parent_job_token()) request.updates.append(self._get_child_job_token()) self._client.modify(request) def _post_workflow_start_event_token(self): name = Name(workflow='some_workflow', instance='12345', job='parent_job', input_name=Name.WORKFLOW_START_INPUT, event='workflow_start_event') event = Event(creator='SimpleWorkflowTest') token = Token(name=name.get_event_token_name(), data=pickle.dumps(event)) request = ModifyRequest(updates=[token]) self._client.modify(request) def _verify_token_names(self, names): request = GroupRequest(namePrefix='/workflow/') response = self._client.group(request) names = sorted(names) counts = sorted(response.counts.keys()) self.assertEqual(names, counts) def _verify_archived_token_names(self, names): active_tokens = self._store.read_active_tokens() all_tokens = self._store.read_tokens() archived_token_names = [] for token in all_tokens: if not token in active_tokens: 
archived_token_names.append(token.name) names = sorted(names) archived_token_names = sorted(archived_token_names) self.assertEqual(names, archived_token_names) def _get_token(self, name): query = Query(namePrefix=name) request = QueryRequest(queries=[query]) response = self._client.query(request) self.assertEqual(1, len(response.tokens)) self.assertEqual(1, len(response.tokens[0])) return response.tokens[0][0] def _get_stored_token(self, name): tokens = self._store.read_tokens(name_prefix=name) self.assertEqual(1, len(tokens)) return tokens[0] def _verify_parent_job_waiting(self): token_names = [ Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='parent_job').get_job_token_name(), Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='child_job').get_job_token_name(), Name(workflow='some_workflow', instance='12345', job='parent_job', input_name=Name.WORKFLOW_START_INPUT, event='workflow_start_event').get_event_token_name()] self._verify_token_names(token_names) def _verify_parent_job_runnable(self): token_names = [Name(workflow='some_workflow', instance='12345', job_state=Name.RUNNABLE_STATE, job='parent_job').get_job_token_name(), Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='child_job').get_job_token_name()] self._verify_token_names(token_names) def test_get_triggering_events(self): self.assertEqual([], Worker._get_triggering_events([])) self.assertEqual(['a'], Worker._get_triggering_events([['a']])) events = Worker._get_triggering_events([['a', 'b']]) self.assertTrue(events == ['a'] or events == ['b']) events = Worker._get_triggering_events([['a', 'b'], ['1', '2']]) self.assertTrue(events == ['a', '1'] or events == ['a', '2'] or events == ['b', '1'] or events == ['b', '2']) def test_move_job_token_to_runnable(self): self._post_job_tokens() self._post_workflow_start_event_token() job_name = Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='parent_job') job_token = self._get_token(job_name.get_job_token_name()) event_name = Name(workflow='some_workflow', instance='12345', job='parent_job', input_name=Name.WORKFLOW_START_INPUT, event='workflow_start_event') event_token = self._get_token(event_name.get_event_token_name()) self._worker._move_job_token_to_runnable(job_token, [event_token]) # Event token should have been removed and the parent job should be # runnable. self._verify_parent_job_runnable() def test_make_job_runnable(self): self._post_job_tokens() self._post_workflow_start_event_token() parent_job_name = Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='parent_job').get_job_token_name() child_job_name = Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='child_job').get_job_token_name() parent_job_token = self._get_token(parent_job_name) child_job_token = self._get_token(child_job_name) self._worker._make_job_runnable(child_job_token) # Child job is missing triggering tokens so it cannot be made runnable. self._verify_parent_job_waiting() self._worker._make_job_runnable(parent_job_token) # Parent job has all triggering tokens so it can be made runnable. self._verify_parent_job_runnable() def test_make_runnable(self): self._post_job_tokens() self._post_workflow_start_event_token() self._worker._make_runnable('some_other_workflow', '12345') # Workflow instance does not exist so nothing should have changed. 
self._verify_parent_job_waiting() self._worker._make_runnable('some_workflow', 'some_other_instance') # Workflow instance does not exist so nothing should have changed. self._verify_parent_job_waiting() self._worker._make_runnable('some_workflow', '12345') self._verify_parent_job_runnable() def test_own_runnable_job_token(self): self._post_job_tokens() self._worker._own_runnable_job_token() # Event token is not present so nothing should have changed. token_names = [Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='parent_job').get_job_token_name(), Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='child_job').get_job_token_name()] self._verify_token_names(token_names) self.assertIsNone(self._worker._owned_job_token) self._post_workflow_start_event_token() self._worker._own_runnable_job_token() # Worker should now own a runnable job token. self._verify_parent_job_runnable() parent_token = self._get_token( Name(workflow='some_workflow', instance='12345', job_state=Name.RUNNABLE_STATE, job='parent_job').get_job_token_name()) self.assertEqual(parent_token, self._worker._owned_job_token) def _add_history_to_owned_token(self): job = pickle.loads(self._worker._owned_job_token.data) execution_record = ExecutionRecord(start_time=123456, end_time=1234567, exit_code=0) job.history.append(execution_record) self._worker._owned_job_token.data = pickle.dumps(job) def test_get_output_event_tokens(self): self._post_job_tokens() self._post_workflow_start_event_token() self._worker._own_runnable_job_token() self.assertIsNotNone(self._worker._owned_job_token) job = pickle.loads(self._worker._owned_job_token.data) execution_record = ExecutionRecord(start_time=123456, end_time=1234567, exit_code=0) job.history.append(execution_record) event_tokens = self._worker._get_output_event_tokens(job) self.assertEqual(1, len(event_tokens)) event_token_name = Name.from_event_token_name(event_tokens[0].name) expected_prefix = Name(workflow='some_workflow', instance='12345', job='child_job', input_name='parent_job').get_input_prefix() self.assertEqual(expected_prefix, event_token_name.get_input_prefix()) def test_move_job_token_to_waiting(self): self._post_job_tokens() self._post_workflow_start_event_token() self._worker._own_runnable_job_token() job = pickle.loads(self._worker._owned_job_token.data) execution_record = ExecutionRecord(start_time=123456, end_time=1234567, exit_code=0) job.history.append(execution_record) self._worker._owned_job_token.data = pickle.dumps(job) self._worker._move_job_token_to_waiting(job, True) parent_token = self._get_token( Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='parent_job').get_job_token_name()) job = pickle.loads(parent_token.data) self.assertEqual(1, len(job.history)) self.assertEqual(execution_record.start_time, job.history[0].start_time) def test_keep_job_token_in_runnable(self): self._post_job_tokens() self._post_workflow_start_event_token() self._worker._own_runnable_job_token() job = pickle.loads(self._worker._owned_job_token.data) job.history.append('some_historic_record') self._worker._keep_job_token_in_runnable(job) self._verify_parent_job_runnable() parent_token = self._get_token( Name(workflow='some_workflow', instance='12345', job_state=Name.RUNNABLE_STATE, job='parent_job').get_job_token_name()) job = pickle.loads(parent_token.data) self.assertEqual(1, len(job.history)) self.assertEqual('some_historic_record', job.history[0]) @staticmethod def _from_job(workflow, 
instance, job_name, job, data_builder, emailer): execution_record = ExecutionRecord(start_time=123456, end_time=1234567, exit_code=0) executed_job = copy.copy(job) executed_job.history.append(execution_record) job_executor = mock.Mock() job_executor.job = executed_job job_executor.prepare.return_value = True job_executor.execute.return_value = True return job_executor @mock.patch('pinball.workflow.worker.JobExecutor') def test_execute_job(self, job_executor_mock): self._post_job_tokens() self._post_workflow_start_event_token() self._worker._own_runnable_job_token() job_executor = mock.Mock() job_executor_mock.from_job.return_value = job_executor job_executor_mock.from_job.side_effect = WorkerTestCase._from_job self._worker._execute_job() self.assertIsNone(self._worker._owned_job_token) parent_token = self._get_token( Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='parent_job').get_job_token_name()) job = pickle.loads(parent_token.data) self.assertEqual(1, len(job.history)) execution_record = job.history[0] self.assertEqual(0, execution_record.exit_code) self.assertEqual(1234567, execution_record.end_time) def test_send_instance_end_email(self): data_builder = mock.Mock() self._worker._data_builder = data_builder schedule_data = mock.Mock() schedule_data.emails = ['[email protected]'] data_builder.get_schedule.return_value = schedule_data instance_data = mock.Mock() data_builder.get_instance.return_value = instance_data job_data = mock.Mock() data_builder.get_jobs.return_value = [job_data] self._worker._send_instance_end_email('some_workflow', '12345') self._emailer.send_instance_end_message.assert_called_once_with( ['[email protected]'], instance_data, [job_data]) def test_send_job_failure_emails(self): self._post_job_tokens() self._post_workflow_start_event_token() self._worker._own_runnable_job_token() job = pickle.loads(self._worker._owned_job_token.data) job.history.append('some_historic_record') executor = mock.Mock() self._worker._executor = executor executor.job = job data_builder = mock.Mock() self._worker._data_builder = data_builder schedule_data = mock.Mock() schedule_data.emails = ['[email protected]'] data_builder.get_schedule.return_value = schedule_data execution_data = mock.Mock() data_builder.get_execution.return_value = execution_data self._worker._send_job_failure_emails(True) self._emailer.send_job_execution_end_message.assert_any_call( ['[email protected]', '[email protected]'], execution_data) @mock.patch('pinball.workflow.worker.JobExecutor') def test_run(self, job_executor_mock): self._post_job_tokens() self._post_workflow_start_event_token() job_executor_mock.from_job.side_effect = WorkerTestCase._from_job self._worker._test_only_end_if_no_runnable = True self._worker.run() with mock.patch('pinball.workflow.archiver.time') as time_patch: # add one day time_patch.time.return_value = time.time() + 24 * 60 * 60 self._worker.run() parent_job_token_name = Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='parent_job').get_job_token_name() child_job_token_name = Name(workflow='some_workflow', instance='12345', job_state=Name.WAITING_STATE, job='child_job').get_job_token_name() signal_string = Signal.action_to_string(Signal.ARCHIVE) signal_token_name = Name(workflow='some_workflow', instance='12345', signal=signal_string).get_signal_token_name() token_names = [parent_job_token_name, child_job_token_name, signal_token_name] self._verify_archived_token_names(token_names) self.assertEqual(2, 
job_executor_mock.from_job.call_count) parent_token = self._get_stored_token(parent_job_token_name) job = pickle.loads(parent_token.data) self.assertEqual(1, len(job.history)) execution_record = job.history[0] self.assertEqual(0, execution_record.exit_code) self.assertEqual(1234567, execution_record.end_time) child_token = self._get_stored_token(child_job_token_name) job = pickle.loads(child_token.data) self.assertEqual(1, len(job.history)) execution_record = job.history[0] self.assertEqual(0, execution_record.exit_code) self.assertEqual(1234567, execution_record.end_time) signal_token = self._get_stored_token(signal_token_name) signal = pickle.loads(signal_token.data) self.assertEqual(Signal.ARCHIVE, signal.action)
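A note on the pattern used throughout the tests above: job state is repeatedly round-tripped through a token's data field with pickle. A minimal standalone sketch of that pattern, using hypothetical stand-in classes rather than the real pinball types:

import collections
import pickle

# Hypothetical stand-ins for pinball's token and job types; illustrative only.
Token = collections.namedtuple('Token', ['name', 'data'])
ExecutionRecord = collections.namedtuple(
    'ExecutionRecord', ['start_time', 'end_time', 'exit_code'])


class Job(object):
    def __init__(self):
        self.history = []


# Serialize a job into the token's data field, as the tests do before
# invoking worker methods...
job = Job()
job.history.append(ExecutionRecord(start_time=123456, end_time=1234567,
                                   exit_code=0))
token = Token(name='/workflow/some_workflow/12345/job/waiting/parent_job',
              data=pickle.dumps(job))

# ...and deserialize it afterwards to assert on the execution history.
restored = pickle.loads(token.data)
assert restored.history[0].exit_code == 0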
jettisonjoe/openhtf
refs/heads/master
openhtf/plugs/usb/adb_message.py
2
# Copyright 2014 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This module contains a class to encapsulate ADB messages. See the following in the Android source for more details: https://android.googlesource.com/platform/system/core/+/master/adb/protocol.txt The ADB transport layer deals in "messages", which consist of a 24 byte header followed (optionally) by a payload. The header consists of 6 32 bit words which are sent across the wire in little endian format: struct message { unsigned command; /* command identifier constant */ unsigned arg0; /* first argument */ unsigned arg1; /* second argument */ unsigned data_length; /* length of payload (0 is allowed) */ unsigned data_crc32; /* crc32 of data payload */ unsigned magic; /* command ^ 0xffffffff */ }; Receipt of an invalid message header, corrupt message payload, or an unrecognized command MUST result in the closing of the remote connection. The protocol depends on shared state and any break in the message stream will result in state getting out of sync. This class does not keep any of this state, but rather represents a single message entity. See adb_protocol.py for the stateful components. """ import collections import logging import string import struct import threading from openhtf.plugs.usb import usb_exceptions from openhtf.util import timeouts import six _LOG = logging.getLogger(__name__) def make_wire_commands(*ids): """Assemble the commands.""" cmd_to_wire = { cmd: sum(ord(c) << (i * 8) for i, c in enumerate(cmd)) for cmd in ids } wire_to_cmd = {wire: cmd for cmd, wire in six.iteritems(cmd_to_wire)} return cmd_to_wire, wire_to_cmd class RawAdbMessage(collections.namedtuple('RawAdbMessage', ['cmd', 'arg0', 'arg1', 'data_length', 'data_checksum', 'magic'])): """Helper class for handling the struct -> AdbMessage mapping.""" def to_adb_message(self, data): """Turn the data into an ADB message.""" message = AdbMessage(AdbMessage.WIRE_TO_CMD.get(self.cmd), self.arg0, self.arg1, data) if (len(data) != self.data_length or message.data_crc32 != self.data_checksum): raise usb_exceptions.AdbDataIntegrityError( '%s (%s) received invalid data: %s', message, self, repr(data)) return message class AdbTransportAdapter(object): """Transport adapter for reading/writing AdbMessages to another transport. This class handles the over-the-wire sending/receiving of AdbMessages, and has a utility method for ignoring messages we don't care about. This is a 'transport' in that it has Read/Write methods, but those methods only work on AdbMessage instances, not arbitrary data, so it can't quite be dropped in anywhere a Transport is used. This class maintains its own Lock so that multiple threads can read AdbMessages from the same underlying transport without stealing each other's headers/data. 
""" def __init__(self, transport): """Create an AdbTransportAdapter that writes to/reads from 'transport'.""" self._transport = transport self._reader_lock = threading.Lock() self._writer_lock = threading.Lock() def __str__(self): trans = str(self._transport) return '<%s: (%s)' % (type(self).__name__, trans[1:]) def close(self): """Close the connection.""" self._transport.close() def write_message(self, message, timeout): """Send the given message over this transport. Args: message: The AdbMessage to send. timeout: Use this timeout for the entire write operation, it should be an instance of timeouts.PolledTimeout. """ with self._writer_lock: self._transport.write(message.header, timeout.remaining_ms) # Use any remaining time to send the data. Note that if we get this far, # we always at least try to send the data (with a minimum of 10ms timeout) # because we don't want the remote end to get out of sync because we sent # a header but no data. if timeout.has_expired(): _LOG.warning('Timed out between AdbMessage header and data, sending ' 'data anyway with 10ms timeout') timeout = timeouts.PolledTimeout.from_millis(10) self._transport.write(message.data, timeout.remaining_ms) def read_message(self, timeout): """Read an AdbMessage from this transport. Args: timeout: Timeout for the entire read operation, in the form of a timeouts.PolledTimeout instance. Note that for packets with a data payload, two USB reads are performed. Returns: The ADB message read from the device. Raises: UsbReadFailedError: There's an error during read, including timeout. AdbProtocolError: A message is incorrectly formatted. AdbTimeoutError: timeout is already expired, or expires before we read the entire message, specifically between reading header and data packets. """ with self._reader_lock: raw_header = self._transport.read( struct.calcsize(AdbMessage.HEADER_STRUCT_FORMAT), timeout.remaining_ms) if not raw_header: raise usb_exceptions.AdbProtocolError('Adb connection lost') try: raw_message = RawAdbMessage(*struct.unpack( AdbMessage.HEADER_STRUCT_FORMAT, raw_header)) except struct.error as exception: raise usb_exceptions.AdbProtocolError( 'Unable to unpack ADB command (%s): %s (%s)', AdbMessage.HEADER_STRUCT_FORMAT, raw_header, exception) if raw_message.data_length > 0: if timeout.has_expired(): _LOG.warning('Timed out between AdbMessage header and data, reading ' 'data anyway with 10ms timeout') timeout = timeouts.PolledTimeout.from_millis(10) data = self._transport.read(raw_message.data_length, timeout.remaining_ms) else: data = '' return raw_message.to_adb_message(data) def read_until(self, expected_commands, timeout): """Read AdbMessages from this transport until we get an expected command. The ADB protocol specifies that before a successful CNXN handshake, any other packets must be ignored, so this method provides the ability to ignore unwanted commands. It's primarily used during the initial connection to the device. See Read() for more details, including more exceptions that may be raised. Args: expected_commands: Iterable of expected command responses, like ('CNXN', 'AUTH'). timeout: timeouts.PolledTimeout object to use for timeout. Returns: The ADB message received that matched one of expected_commands. Raises: AdbProtocolError: If timeout expires between reads, this can happen if we are getting spammed with unexpected commands. 
""" msg = timeouts.loop_until_timeout_or_valid( timeout, lambda: self.read_message(timeout), lambda m: m.command in expected_commands, 0) if msg.command not in expected_commands: raise usb_exceptions.AdbTimeoutError( 'Timed out establishing connection, waiting for: %s', expected_commands) return msg class DebugAdbTransportAdapter(AdbTransportAdapter): """Debug transport adapter that logs messages read/written.""" def __init__(self, transport): super(DebugAdbTransportAdapter, self).__init__(transport) self.messages = [] _LOG.debug('%s logging messages', self) def close(self): _LOG.debug('%s logged messages:', self) for message in self.messages: _LOG.debug(message) super(DebugAdbTransportAdapter, self).close() def read_message(self, timeout): message = super(DebugAdbTransportAdapter, self).read_message(timeout) self.messages.append('READING: %s' % message) return message def write_message(self, message, timeout): self.messages.append('WRITING: %s' % message) super(DebugAdbTransportAdapter, self).write_message(message, timeout) class AdbMessage(object): """ADB Protocol message class. This class encapsulates all host<->device communication for ADB. The attributes of this class correspond roughly to the ADB message struct. The 'command' attribute of this class is a stringified version of the unsigned command struct value, and is one of the values in ids ('SYNC', 'CNXN', 'AUTH', etc). The arg0 and arg1 attributes have different meanings depending on the command (see adb_protocol.py for more info). This class stores the 'data' associated with a message, but some messages have no data (data will default to ''). Additionally, reading/writing messages to the wire results in two reads/writes because the data is actually sent in a second USB transaction. This may have implications to transport layers that aren't direct libusb reads/writes to a local device (ie, over a network). The 'header' attribute returns the over-the-wire format of the header for this message. To send a message over the header, send its header, followed by its data if it has any. Attributes: header command arg0 arg1 data magic """ PRINTABLE_DATA = set(string.printable) - set(string.whitespace) CMD_TO_WIRE, WIRE_TO_CMD = make_wire_commands('SYNC', 'CNXN', 'AUTH', 'OPEN', 'OKAY', 'CLSE', 'WRTE') # An ADB message is 6 words in little-endian. HEADER_STRUCT_FORMAT = '<6I' def __init__(self, command, arg0=0, arg1=0, data=''): if command not in self.CMD_TO_WIRE: raise usb_exceptions.AdbProtocolError('Unrecognized ADB command: %s', command) self._command = self.CMD_TO_WIRE[command] self.arg0 = arg0 self.arg1 = arg1 self.data = data self.magic = self._command ^ 0xFFFFFFFF @property def header(self): """The message header.""" return struct.pack( self.HEADER_STRUCT_FORMAT, self._command, self.arg0, self.arg1, len(self.data), self.data_crc32, self.magic) @property def command(self): """The ADB command.""" return self.WIRE_TO_CMD[self._command] def __str__(self): return '<%s: %s(%s, %s): %s (%s bytes)>' % ( type(self).__name__, self.command, self.arg0, self.arg1, ''.join(char if char in self.PRINTABLE_DATA else '.' for char in self.data[:64]), len(self.data)) __repr__ = __str__ @property def data_crc32(self): """Return the sum of all the data bytes. The "crc32" used by ADB is actually just a sum of all the bytes, but we name this data_crc32 to be consistent with ADB. """ return sum([ord(x) for x in self.data]) & 0xFFFFFFFF
dex4er/django
refs/heads/1.6.x
django/core/management/commands/check.py
119
from __future__ import unicode_literals import warnings from django.core.checks.compatibility.base import check_compatibility from django.core.management.base import NoArgsCommand class Command(NoArgsCommand): help = "Checks your configuration's compatibility with this version " + \ "of Django." def handle_noargs(self, **options): for message in check_compatibility(): warnings.warn(message)
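For reference, the command above can also be exercised programmatically; a sketch assuming a configured Django 1.6 settings module:

# Assumes DJANGO_SETTINGS_MODULE points at a valid settings module.
from django.core.management import call_command

# Equivalent to `python manage.py check`; each compatibility problem is
# surfaced as a warnings.warn() message.
call_command('check')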
zhujzhuo/Sahara
refs/heads/master
sahara/tests/tempest/scenario/data_processing/client_tests/test_node_group_templates.py
9
# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.scenario.data_processing.client_tests import base from tempest import test from tempest_lib.common.utils import data_utils class NodeGroupTemplateTest(base.BaseDataProcessingTest): def _check_create_node_group_template(self): template_name = data_utils.rand_name('sahara-ng-template') # create node group template resp_body = self.create_node_group_template(template_name, **self.worker_template) # check that template created successfully self.assertEqual(template_name, resp_body.name) self.assertDictContainsSubset(self.worker_template, resp_body.__dict__) return resp_body.id, template_name def _check_node_group_template_list(self, template_id, template_name): # check for node group template in list template_list = self.client.node_group_templates.list() templates_info = [(template.id, template.name) for template in template_list] self.assertIn((template_id, template_name), templates_info) def _check_node_group_template_get(self, template_id, template_name): # check node group template fetch by id template = self.client.node_group_templates.get( template_id) self.assertEqual(template_name, template.name) self.assertDictContainsSubset(self.worker_template, template.__dict__) def _check_node_group_template_delete(self, template_id): # delete node group template by id self.client.node_group_templates.delete(template_id) # check that node group really deleted templates = self.client.node_group_templates.list() self.assertNotIn(template_id, [template.id for template in templates]) @test.services('data_processing') def test_node_group_templates(self): template_id, template_name = self._check_create_node_group_template() self._check_node_group_template_list(template_id, template_name) self._check_node_group_template_get(template_id, template_name) self._check_node_group_template_delete(template_id)
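The lifecycle assertions above lean on assertDictContainsSubset; a quick standalone illustration of its semantics, with hypothetical values rather than real Sahara template data:

import unittest


class SubsetSemantics(unittest.TestCase):
    def test_subset(self):
        # Passes when every key/value pair of the first dict appears in the
        # second, which may carry extra keys.
        self.assertDictContainsSubset(
            {'flavor_id': '42'},
            {'flavor_id': '42', 'name': 'worker', 'plugin_name': 'vanilla'})


if __name__ == '__main__':
    unittest.main()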
ZuoCaiSong/Python
refs/heads/master
exercise/2.py
1
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
Problem: a company pays bonuses as a commission on profit. When profit (I) is
at or below 100,000 yuan, the bonus is 10% of it. When profit is between
100,000 and 200,000, the first 100,000 earns 10% and the portion above
100,000 earns 7.5%. The portion between 200,000 and 400,000 earns 5%;
between 400,000 and 600,000, 3%; between 600,000 and 1,000,000, 1.5%; and
the portion above 1,000,000 earns 1%. Read the month's profit I from the
keyboard and compute the total bonus due.

Analysis: use the number line to split the profit into brackets. Note that
the bonus needs to be defined as a long integer.
'''
# Reference solution 1
i = int(raw_input('net profit: '))
I = [1000000, 600000, 400000, 200000, 100000, 0]
r = [0.01, 0.015, 0.03, 0.05, 0.075, 0.1]
bonus = 0
for j in range(len(I)):
    if i > I[j]:
        b = [0, 0, 0, 0, 0, 0]
        b[j] = i - I[j]
        # Every bracket below j contributes its full width I[k-1] - I[k].
        for k in range(j + 1, len(I)):
            b[k] = I[k - 1] - I[k]
        # map takes a function and a sequence, applies the function to each
        # element in turn, and returns the results as a new list.
        bonus = sum(map(lambda (i1, i2): i1 * i2, zip(b, r)))
        # zip accepts any number of sequences (including 0 or 1) as arguments
        # and returns a list of tuples [(), ()].
        break
print 'bonus:', bonus

x = [1, 2, 3]
y = [4, 5, 6]
z = [7, 8, 9]
# several arguments of equal length
print zip(x, y, z)

x = [1, 2, ]
y = [4, 5, 6]
z = [7, 8, 9]
# several arguments of unequal length: zip truncates to the shortest
print zip(x, y, z)

# a single argument
print zip(z)

b = [0, 0, 0, 0, 2, 1]
r = [0.01, 0.015, 0.03, 0.05, 2, 1]
print map(lambda (i1, i2): i1 * i2, zip(b, r))
bonus = sum(map(lambda (i1, i2): i1 * i2, zip(b, r)))
print bonus
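The same bracket walk reads more directly as a function; a hedged Python 3 rendering of the computation above, reusing the exercise's thresholds and rates:

def bonus(profit):
    thresholds = [1000000, 600000, 400000, 200000, 100000, 0]
    rates = [0.01, 0.015, 0.03, 0.05, 0.075, 0.1]
    total = 0.0
    for threshold, rate in zip(thresholds, rates):
        if profit > threshold:
            # The slice of profit above this threshold earns this rate.
            total += (profit - threshold) * rate
            profit = threshold
    return total


assert abs(bonus(100000) - 10000) < 1e-6   # 10% of the first 100,000
assert abs(bonus(150000) - 13750) < 1e-6   # 10000 + 50000 * 0.075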
indictranstech/vestasi-erpnext
refs/heads/develop
erpnext/controllers/recurring_document.py
10
from __future__ import unicode_literals import frappe import frappe.utils import frappe.defaults from frappe.utils import add_days, cint, cstr, date_diff, flt, getdate, nowdate, \ get_first_day, get_last_day, comma_and from frappe.model.naming import make_autoname from frappe import _, msgprint, throw from erpnext.accounts.party import get_party_account, get_due_date, get_party_details from frappe.model.mapper import get_mapped_doc month_map = {'Monthly': 1, 'Quarterly': 3, 'Half-yearly': 6, 'Yearly': 12} date_field_map = { "Sales Order": "transaction_date", "Sales Invoice": "posting_date", "Purchase Order": "transaction_date", "Purchase Invoice": "posting_date" } def create_recurring_documents(): manage_recurring_documents("Sales Order") manage_recurring_documents("Sales Invoice") manage_recurring_documents("Purchase Order") manage_recurring_documents("Purchase Invoice") def manage_recurring_documents(doctype, next_date=None, commit=True): """ Create recurring documents on specific date by copying the original one and notify the concerned people """ next_date = next_date or nowdate() date_field = date_field_map[doctype] recurring_documents = frappe.db.sql("""select name, recurring_id from `tab{}` where ifnull(is_recurring, 0)=1 and docstatus=1 and next_date='{}' and next_date <= ifnull(end_date, '2199-12-31')""".format(doctype, next_date)) exception_list = [] for ref_document, recurring_id in recurring_documents: if not frappe.db.sql("""select name from `tab%s` where %s=%s and recurring_id=%s and docstatus=1""" % (doctype, date_field, '%s', '%s'), (next_date, recurring_id)): try: ref_wrapper = frappe.get_doc(doctype, ref_document) if hasattr(ref_wrapper, "before_recurring"): ref_wrapper.before_recurring() new_document_wrapper = make_new_document(ref_wrapper, date_field, next_date) send_notification(new_document_wrapper) if commit: frappe.db.commit() except: if commit: frappe.db.rollback() frappe.db.begin() frappe.db.sql("update `tab%s` \ set is_recurring = 0 where name = %s" % (doctype, '%s'), (ref_document)) notify_errors(ref_document, doctype, ref_wrapper.get("customer") or ref_wrapper.get("supplier"), ref_wrapper.owner) frappe.db.commit() exception_list.append(frappe.get_traceback()) finally: if commit: frappe.db.begin() if exception_list: exception_message = "\n\n".join([cstr(d) for d in exception_list]) frappe.throw(exception_message) def make_new_document(ref_wrapper, date_field, posting_date): from erpnext.accounts.utils import get_fiscal_year new_document = frappe.copy_doc(ref_wrapper) mcount = month_map[ref_wrapper.recurring_type] from_date = get_next_date(ref_wrapper.from_date, mcount) # get last day of the month to maintain period if the from date is first day of its own month # and to date is the last day of its own month if (cstr(get_first_day(ref_wrapper.from_date)) == \ cstr(ref_wrapper.from_date)) and \ (cstr(get_last_day(ref_wrapper.to_date)) == \ cstr(ref_wrapper.to_date)): to_date = get_last_day(get_next_date(ref_wrapper.to_date, mcount)) else: to_date = get_next_date(ref_wrapper.to_date, mcount) new_document.update({ date_field: posting_date, "from_date": from_date, "to_date": to_date, "fiscal_year": get_fiscal_year(posting_date)[0], "owner": ref_wrapper.owner, }) if ref_wrapper.doctype == "Sales Order": new_document.update({ "delivery_date": get_next_date(ref_wrapper.delivery_date, mcount, cint(ref_wrapper.repeat_on_day_of_month)) }) new_document.submit() return new_document def get_next_date(dt, mcount, day=None): dt = getdate(dt) from dateutil.relativedelta import 
relativedelta dt += relativedelta(months=mcount, day=day) return dt def send_notification(new_rv): """Notify concerned persons about recurring document generation""" frappe.sendmail(new_rv.notification_email_address, subject= _("New {0}: #{1}").format(new_rv.doctype, new_rv.name), message = _("Please find attached {0} #{1}").format(new_rv.doctype, new_rv.name), attachments = [frappe.attach_print(new_rv.doctype, new_rv.name, file_name=new_rv.name)]) def notify_errors(doc, doctype, party, owner): from frappe.utils.user import get_system_managers recipients = get_system_managers(only_name=True) frappe.sendmail(recipients + [frappe.db.get_value("User", owner, "email")], subject="[Urgent] Error while creating recurring %s for %s" % (doctype, doc), message = frappe.get_template("templates/emails/recurring_document_failed.html").render({ "type": doctype, "name": doc, "party": party })) assign_task_to_owner(doc, doctype, "Recurring Invoice Failed", recipients) def assign_task_to_owner(doc, doctype, msg, users): for d in users: from frappe.widgets.form import assign_to args = { 'assign_to' : d, 'doctype' : doctype, 'name' : doc, 'description' : msg, 'priority' : 'High' } assign_to.add(args) def validate_recurring_document(doc): if doc.is_recurring: validate_notification_email_id(doc) if not doc.recurring_type: msgprint(_("Please select {0}").format(doc.meta.get_label("recurring_type")), raise_exception=1) elif not (doc.from_date and doc.to_date): throw(_("Period From and Period To dates mandatory for recurring %s") % doc.doctype) # def convert_to_recurring(doc, posting_date): if doc.is_recurring: if not doc.recurring_id: frappe.db.set(doc, "recurring_id", doc.name) set_next_date(doc, posting_date) elif doc.recurring_id: frappe.db.sql("""update `tab%s` set is_recurring = 0 where recurring_id = %s""" % (doc.doctype, '%s'), (doc.recurring_id)) # def validate_notification_email_id(doc): if doc.notification_email_address: email_list = filter(None, [cstr(email).strip() for email in doc.notification_email_address.replace("\n", "").split(",")]) from frappe.utils import validate_email_add for email in email_list: if not validate_email_add(email): throw(_("{0} is an invalid email address in 'Notification \ Email Address'").format(email)) else: frappe.throw(_("'Notification Email Addresses' not specified for recurring %s") \ % doc.doctype) def set_next_date(doc, posting_date): """ Set next date on which recurring document will be created""" if not doc.repeat_on_day_of_month: msgprint(_("Please enter 'Repeat on Day of Month' field value"), raise_exception=1) next_date = get_next_date(posting_date, month_map[doc.recurring_type], cint(doc.repeat_on_day_of_month)) frappe.db.set(doc, 'next_date', next_date) msgprint(_("Next Recurring {0} will be created on {1}").format(doc.doctype, next_date))
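How the relativedelta call inside get_next_date behaves is worth pinning down; a standalone check, not ERPNext code:

from datetime import date

from dateutil.relativedelta import relativedelta

# months= shifts the month; day= then pins the day-of-month, clamped to the
# end of the target month, which is how repeat_on_day_of_month stays stable.
assert date(2014, 1, 15) + relativedelta(months=1) == date(2014, 2, 15)
assert date(2014, 1, 31) + relativedelta(months=1) == date(2014, 2, 28)
assert date(2014, 1, 31) + relativedelta(months=1, day=31) == date(2014, 2, 28)

# day=None (the default) leaves the day untouched, matching get_next_date's
# optional `day` argument.
assert date(2014, 3, 10) + relativedelta(months=3, day=None) == date(2014, 6, 10)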
FocusTheOne/Qomolangma
refs/heads/master
Qomolangma/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/win/gyptest-link-fixed-base.py
344
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Make sure fixed base setting is extracted properly. """ import TestGyp import sys if sys.platform == 'win32': test = TestGyp.TestGyp(formats=['msvs', 'ninja']) CHDIR = 'linker-flags' test.run_gyp('fixed-base.gyp', chdir=CHDIR) test.build('fixed-base.gyp', test.ALL, chdir=CHDIR) def GetHeaders(exe): full_path = test.built_file_path(exe, chdir=CHDIR) return test.run_dumpbin('/headers', full_path) # For exe, default is fixed, for dll, it's not fixed. if 'Relocations stripped' not in GetHeaders('test_fixed_default_exe.exe'): test.fail_test() if 'Relocations stripped' in GetHeaders('test_fixed_default_dll.dll'): test.fail_test() # Explicitly not fixed. if 'Relocations stripped' in GetHeaders('test_fixed_no.exe'): test.fail_test() # Explicitly fixed. if 'Relocations stripped' not in GetHeaders('test_fixed_yes.exe'): test.fail_test() test.pass_test()
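The substring check the test performs generalizes to a small helper; a hypothetical sketch (Windows-only, and assumes dumpbin is on PATH):

import subprocess


def relocations_stripped(binary_path):
    # /FIXED binaries drop their .reloc section, and dumpbin reports
    # "Relocations stripped" among the file header characteristics.
    headers = subprocess.check_output(
        ['dumpbin', '/headers', binary_path]).decode('ascii', 'replace')
    return 'Relocations stripped' in headers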
edofic/ggrc-core
refs/heads/develop
src/ggrc_basic_permissions/migrations/versions/20131206002015_c460b4f8cc3_place_all_public_pro.py
6
# Copyright (C) 2016 Google Inc. # Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> """Place all public programs into their own context. Create Date: 2013-12-06 00:20:15.108809 """ # disable Invalid constant name pylint warning for mandatory Alembic variables. # pylint: disable=invalid-name import sqlalchemy as sa from alembic import op from datetime import datetime from sqlalchemy.sql import table, column, select, and_ # revision identifiers, used by Alembic. revision = 'c460b4f8cc3' down_revision = '40a621571ac7' contexts_table = table( 'contexts', column('id', sa.Integer), column('context_id', sa.Integer), column('description', sa.Text), column('related_object_id', sa.Integer), column('related_object_type', sa.String), column('modified_by_id', sa.Integer), column('created_at', sa.DateTime), column('updated_at', sa.DateTime), ) roles_table = table( 'roles', column('id', sa.Integer), column('name', sa.String), ) role_implications_table = table( 'role_implications', column('id', sa.Integer), column('context_id', sa.Integer), column('source_context_id', sa.Integer), column('source_role_id', sa.Integer), column('role_id', sa.Integer), column('modified_by_id', sa.Integer), column('created_at', sa.DateTime), column('updated_at', sa.DateTime), ) programs_table = table( 'programs', column('id', sa.Integer), column('context_id', sa.Integer), ) object_documents_table = table( 'object_documents', column('id', sa.Integer), column('context_id', sa.Integer), column('documentable_id', sa.Integer), column('documentable_type', sa.String), ) object_people_table = table( 'object_people', column('id', sa.Integer), column('context_id', sa.Integer), column('personable_id', sa.Integer), column('personable_type', sa.String), ) object_objectives_table = table( 'object_objectives', column('id', sa.Integer), column('context_id', sa.Integer), column('objectiveable_id', sa.Integer), column('objectiveable_type', sa.String), ) relationships_table = table( 'relationships', column('id', sa.Integer), column('context_id', sa.Integer), column('source_id', sa.Integer), column('source_type', sa.String), column('destination_id', sa.Integer), column('destination_type', sa.String), ) program_controls_table = table( 'program_controls', column('id', sa.Integer), column('context_id', sa.Integer), column('program_id', sa.Integer), column('control_id', sa.String), ) program_directives_table = table( 'program_directives', column('id', sa.Integer), column('context_id', sa.Integer), column('program_id', sa.Integer), column('directive_id', sa.String), ) object_owners_table = table( 'object_owners', column('id', sa.Integer), column('person_id', sa.Integer), column('ownable_id', sa.Integer), column('ownable_type', sa.String), column('modified_by_id', sa.Integer), column('created_at', sa.DateTime), column('updated_at', sa.DateTime), ) user_roles_table = table( 'user_roles', column('id', sa.Integer), column('context_id', sa.Integer), column('role_id', sa.Integer), column('person_id', sa.Integer), column('modified_by_id', sa.Integer), column('created_at', sa.DateTime), column('updated_at', sa.DateTime), ) def get_role(name): connection = op.get_bind() return connection.execute( select([roles_table.c.id]).where(roles_table.c.name == name)).fetchone() def add_role_implication( context_id, source_context_id, source_role_id, role_id): current_datetime = datetime.now() connection = op.get_bind() connection.execute( role_implications_table.insert().values( context_id=context_id, source_context_id=source_context_id, 
source_role_id=source_role_id, role_id=role_id, modified_by_id=1, created_at=current_datetime, updated_at=current_datetime, )) def upgrade(): reader_role = get_role('Reader') object_editor_role = get_role('ObjectEditor') program_creator_role = get_role('ProgramCreator') program_reader_role = get_role('ProgramReader') program_owner_role = get_role('ProgramOwner') connection = op.get_bind() programs = connection.execute( select([programs_table.c.id]) .where(programs_table.c.context_id == None)) # noqa current_datetime = datetime.now() for program in programs: # Create the program context connection.execute( contexts_table.insert().values( context_id=1, description='', related_object_id=program.id, related_object_type='Program', modified_by_id=1, created_at=current_datetime, updated_at=current_datetime, )) context = connection.execute( select([contexts_table.c.id]).where( and_( contexts_table.c.related_object_id == program.id, contexts_table.c.related_object_type == 'Program') )).fetchone() context_id = context.id # Add the role implications that makes the program public for role in [reader_role, object_editor_role, program_creator_role]: add_role_implication(context_id, None, role.id, program_reader_role.id) # Move the program into the program context op.execute(programs_table.update().values(context_id=context_id) .where(programs_table.c.id == program.id)) # Add role assignments for owners and delete the object_owner relationships owners = connection.execute( select([object_owners_table.c.id, object_owners_table.c.person_id]) .where( and_( object_owners_table.c.ownable_id == program.id, object_owners_table.c.ownable_type == 'Program') )).fetchall() for owner in owners: connection.execute( user_roles_table.insert().values( context_id=context_id, role_id=program_owner_role.id, person_id=owner.person_id, modified_by_id=1, created_at=current_datetime, updated_at=current_datetime, )) connection.execute( object_owners_table.delete().where( object_owners_table.c.id == owner.id)) # Move all relationships for the program into the program context op.execute(object_documents_table.update().values(context_id=context_id) .where( and_( object_documents_table.c.documentable_id == program.id, object_documents_table.c.documentable_type == 'Program'))) op.execute(object_people_table.update().values(context_id=context_id) .where( and_( object_people_table.c.personable_id == program.id, object_people_table.c.personable_type == 'Program'))) op.execute(object_objectives_table.update().values(context_id=context_id) .where( and_( object_objectives_table.c.objectiveable_id == program.id, object_objectives_table.c.objectiveable_type == 'Program'))) op.execute(relationships_table.update().values(context_id=context_id) .where( and_( relationships_table.c.source_id == program.id, relationships_table.c.source_type == 'Program'))) op.execute(relationships_table.update().values(context_id=context_id) .where( and_( relationships_table.c.destination_id == program.id, relationships_table.c.destination_type == 'Program'))) op.execute(program_controls_table.update().values(context_id=context_id) .where(program_controls_table.c.program_id == program.id)) op.execute(program_directives_table.update().values(context_id=context_id) .where(program_directives_table.c.program_id == program.id)) def downgrade(): reader_role = get_role('Reader') program_owner_role = get_role('ProgramOwner') connection = op.get_bind() # Find public programs by finding a public role implication reader_implications = connection.execute( 
select([role_implications_table.c.context_id]) .where(role_implications_table.c.source_role_id == reader_role.id)) current_datetime = datetime.now() for public_implication in reader_implications: context_id = public_implication.context_id # Move all relationships back to the NULL context op.execute(object_documents_table.update().values(context_id=None) .where(object_documents_table.c.context_id == context_id)) op.execute(object_people_table.update().values(context_id=None) .where(object_people_table.c.context_id == context_id)) op.execute(object_objectives_table.update().values(context_id=None) .where(object_objectives_table.c.context_id == context_id)) op.execute(relationships_table.update().values(context_id=None) .where(relationships_table.c.context_id == context_id)) op.execute(program_controls_table.update().values(context_id=None) .where(program_controls_table.c.context_id == context_id)) op.execute(program_directives_table.update().values(context_id=None) .where(program_directives_table.c.context_id == context_id)) # Remove the role implications that made the program public op.execute(role_implications_table.delete().where( role_implications_table.c.context_id == context_id)) # Create ObjectOwner rows for each ProgramOwner role assignment, delete # the now defunct ProgramOwner assignments program = connection.execute( select([programs_table.c.id]) .where(programs_table.c.context_id == context_id)).fetchone() program_owners = connection.execute( select([user_roles_table.c.id, user_roles_table.c.person_id]) .where( and_( user_roles_table.c.context_id == context_id, user_roles_table.c.role_id == program_owner_role.id))) for program_owner in program_owners: connection.execute( object_owners_table.insert().values( person_id=program_owner.person_id, ownable_id=program.id, ownable_type='Program', modified_by_id=1, created_at=current_datetime, updated_at=current_datetime, )) # Delete defunct role assignments connection.execute( user_roles_table.delete().where( user_roles_table.c.context_id == context_id)) # Move the program back into the NULL context op.execute(programs_table.update().values(context_id=None) .where(programs_table.c.context_id == context_id)) # Remove the defunct context op.execute(contexts_table.delete() .where(contexts_table.c.id == context_id))
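A closing note on the pattern this migration uses throughout: lightweight table()/column() constructs let a data migration build selects and updates without importing application models. A minimal standalone sketch with a hypothetical table:

import sqlalchemy as sa
from sqlalchemy.sql import table, column, select

# A schema-only description; no model import or database reflection needed.
things = table(
    'things',
    column('id', sa.Integer),
    column('name', sa.String),
)

# These compile to plain SQL and can be executed against op.get_bind()
# inside upgrade()/downgrade(), exactly as the statements above do.
select_stmt = select([things.c.id]).where(things.c.name == 'example')
update_stmt = things.update().values(name='renamed').where(things.c.id == 1)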