function: string (length 11 to 56k)
repo_name: string (length 5 to 60)
features: sequence
def simple_separated_format(separator):
    """Construct a simple TableFormat with columns separated by a separator.

    >>> tsv = simple_separated_format("\\t") ; \
        tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
    True

    """  # noqa
    return TableFormat(None, None, None, None,
                       headerrow=DataRow('', separator, ''),
                       datarow=DataRow('', separator, ''),
                       padding=0, with_header_hide=None)
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _isnumber(string):
    """
    >>> _isnumber("123.45")
    True
    >>> _isnumber("123")
    True
    >>> _isnumber("spam")
    False
    >>> _isnumber("123e45678")
    False
    >>> _isnumber("inf")
    True
    """
    if not _isconvertible(float, string):
        return False
    elif isinstance(string, (_text_type, _binary_type)) and (
            math.isinf(float(string)) or math.isnan(float(string))):
        return string.lower() in ['inf', '-inf', 'nan']
    return True
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _isbool(string):
    """
    >>> _isbool(True)
    True
    >>> _isbool("False")
    True
    >>> _isbool(1)
    False
    """
    return (type(string) is _bool_type or
            (isinstance(string, (_binary_type, _text_type)) and
             string in ("True", "False")))
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _afterpoint(string):
    """Symbols after a decimal point, -1 if the string lacks the decimal point.

    >>> _afterpoint("123.45")
    2
    >>> _afterpoint("1001")
    -1
    >>> _afterpoint("eggs")
    -1
    >>> _afterpoint("123e45")
    2
    """
    if _isnumber(string):
        if _isint(string):
            return -1
        else:
            pos = string.rfind(".")
            pos = string.lower().rfind("e") if pos < 0 else pos
            if pos >= 0:
                return len(string) - pos - 1
            else:
                return -1  # no point
    else:
        return -1  # not a number
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
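A hedged illustration of how _afterpoint feeds tabulate's "decimal" alignment (the sample values are made up; only the helper above is from the source): each cell is right-padded so the longest fractional part decides where the decimal points line up.

# Assuming _afterpoint from above is in scope.
vals = ["123.45", "7.1", "10"]
decs = [_afterpoint(v) for v in vals]          # -> [2, 1, -1]
pad = max(decs)
aligned = [v + " " * (pad - d) for v, d in zip(vals, decs)]
# aligned == ['123.45', '7.1 ', '10   ']: once these strings are flushed
# right (what _padleft does next), the decimal points line up.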
def _padright(width, s):
    """Flush left.

    >>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430  '
    True

    """  # noqa
    fmt = "{0:<%ds}" % width
    return fmt.format(s)
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _padnone(ignore_width, s): return s
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _visible_width(s):
    """Visible width of a printed string. ANSI color codes are removed.

    >>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
    (5, 5)

    """
    # optional wide-character support
    if wcwidth is not None and WIDE_CHARS_MODE:
        len_fn = wcwidth.wcswidth
    else:
        len_fn = len
    if isinstance(s, _text_type) or isinstance(s, _binary_type):
        return len_fn(_strip_invisible(s))
    else:
        return len_fn(_text_type(s))
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _multiline_width(multiline_s, line_width_fn=len):
    """Visible width of a potentially multiline content."""
    return max(map(line_width_fn, re.split("[\r\n]", multiline_s)))
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
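A quick hedged example of _multiline_width (the input strings are made up): the width of a multi-line cell is the width of its widest line, and passing _visible_width as the measuring function keeps ANSI codes from counting.

# Assuming _multiline_width and _visible_width from above are in scope.
cell = "foo\nlonger line\nx"
print(_multiline_width(cell))                                         # 11
print(_multiline_width("\x1b[31mred\x1b[0m\nplain", _visible_width))  # 5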
def _align_column_choose_padfn(strings, alignment, has_invisible):
    if alignment == "right":
        if not PRESERVE_WHITESPACE:
            strings = [s.strip() for s in strings]
        padfn = _padleft
    elif alignment == "center":
        if not PRESERVE_WHITESPACE:
            strings = [s.strip() for s in strings]
        padfn = _padboth
    elif alignment == "decimal":
        if has_invisible:
            decimals = [_afterpoint(_strip_invisible(s)) for s in strings]
        else:
            decimals = [_afterpoint(s) for s in strings]
        maxdecimals = max(decimals)
        strings = [s + (maxdecimals - decs) * " "
                   for s, decs in zip(strings, decimals)]
        padfn = _padleft
    elif not alignment:
        padfn = _padnone
    else:
        if not PRESERVE_WHITESPACE:
            strings = [s.strip() for s in strings]
        padfn = _padright
    return strings, padfn
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _more_generic(type1, type2):
    types = {_none_type: 0, _bool_type: 1, int: 2, float: 3,
             _binary_type: 4, _text_type: 5}
    invtypes = {5: _text_type, 4: _binary_type, 3: float, 2: int,
                1: _bool_type, 0: _none_type}
    moregeneric = max(types.get(type1, 5), types.get(type2, 5))
    return invtypes[moregeneric]
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
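_more_generic is how a column ends up with a single type: folding it over the cell types lets the "widest" type win. A hedged sketch (the aliases _none_type and _text_type are the module's own names for NoneType and str on Python 3):

# Assuming _more_generic and the module's type aliases are in scope.
from functools import reduce
column_types = [int, float, _none_type]
print(reduce(_more_generic, column_types))   # <class 'float'>
print(_more_generic(float, _text_type))      # <class 'str'> -- text beats numbers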
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.

    Unicode is supported:

    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
        tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
        good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
        tabulate(tbl, headers=hrow) == good_result
    True

    """  # noqa
    if val is None:
        return missingval

    if valtype in [int, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    elif valtype is float:
        is_a_colored_number = (has_invisible and
                               isinstance(val, (_text_type, _binary_type)))
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _prepend_row_index(rows, index):
    """Add a left-most index column."""
    if index is None or index is False:
        return rows
    if len(index) != len(rows):
        print('index=', index)
        print('rows=', rows)
        raise ValueError('index must be as long as the number of data rows')
    rows = [[v] + list(row) for v, row in zip(index, rows)]
    return rows
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _normalize_tabular_data(tabular_data, headers, showindex="default"): """Transform a supported data type to a list of lists, and a list of headers. Supported tabular data types: * list-of-lists or another iterable of iterables * list of named tuples (usually used with headers="keys") * list of dicts (usually used with headers="keys") * list of OrderedDicts (usually used with headers="keys") * 2D NumPy arrays * NumPy record arrays (usually used with headers="keys") * dict of iterables (usually used with headers="keys") * pandas.DataFrame (usually used with headers="keys") The first row can be used as headers if headers="firstrow", column indices can be used as headers if headers="keys". If showindex="default", show row indices of the pandas.DataFrame. If showindex="always", show row indices for all types of data. If showindex="never", don't show row indices for all types of data. If showindex is an iterable, show its values as row indices. """ try: bool(headers) except ValueError: # numpy.ndarray, pandas.core.index.Index, ... headers = list(headers) index = None if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"): # dict-like and pandas.DataFrame? if hasattr(tabular_data.values, "__call__"): # likely a conventional dict keys = tabular_data.keys() # columns have to be transposed rows = list(izip_longest(*tabular_data.values())) elif hasattr(tabular_data, "index"): # values is a property, has .index => it's likely a # pandas.DataFrame (pandas 0.11.0) keys = list(tabular_data) if tabular_data.index.name is not None: if isinstance(tabular_data.index.name, list): keys[:0] = tabular_data.index.name else: keys[:0] = [tabular_data.index.name] # values matrix doesn't need to be transposed vals = tabular_data.values # for DataFrames add an index per default index = list(tabular_data.index) rows = [list(row) for row in vals] else: raise ValueError( "tabular data doesn't appear to be a dict or a DataFrame") if headers == "keys": headers = list(map(_text_type, keys)) # headers should be strings else: # it's a usual an iterable of iterables, or a NumPy array rows = list(tabular_data) if (headers == "keys" and not rows): # an empty table (issue #81) headers = [] elif (headers == "keys" and hasattr(tabular_data, "dtype") and getattr(tabular_data.dtype, "names")): # numpy record array headers = tabular_data.dtype.names elif (headers == "keys" and len(rows) > 0 and isinstance(rows[0], tuple) and hasattr(rows[0], "_fields")): # namedtuple headers = list(map(_text_type, rows[0]._fields)) elif (len(rows) > 0 and isinstance(rows[0], dict)): # dict or OrderedDict uniq_keys = set() # implements hashed lookup keys = [] # storage for set if headers == "firstrow": firstdict = rows[0] if len(rows) > 0 else {} keys.extend(firstdict.keys()) uniq_keys.update(keys) rows = rows[1:] for row in rows: for k in row.keys(): # Save unique items in input order if k not in uniq_keys: keys.append(k) uniq_keys.add(k) if headers == 'keys': headers = keys elif isinstance(headers, dict): # a dict of headers for a list of dicts headers = [headers.get(k, k) for k in keys] headers = list(map(_text_type, headers)) elif headers == "firstrow": if len(rows) > 0: headers = [firstdict.get(k, k) for k in keys] headers = list(map(_text_type, headers)) else: headers = [] elif headers: raise ValueError( 'headers for a list of dicts is not a dict or a keyword') rows = [[row.get(k) for k in keys] for row in rows] elif (headers == "keys" and hasattr(tabular_data, "description") and hasattr(tabular_data, "fetchone") and 
hasattr(tabular_data, "rowcount")): # Python Database API cursor object (PEP 0249) # print tabulate(cursor, headers='keys') headers = [column[0] for column in tabular_data.description] elif headers == "keys" and len(rows) > 0: # keys are column indices headers = list(map(_text_type, range(len(rows[0])))) # take headers from the first row if necessary if headers == "firstrow" and len(rows) > 0: if index is not None: headers = [index[0]] + list(rows[0]) index = index[1:] else: headers = rows[0] headers = list(map(_text_type, headers)) # headers should be strings rows = rows[1:] headers = list(map(_text_type, headers)) rows = list(map(list, rows)) # add or remove an index column showindex_is_a_str = type(showindex) in [_text_type, _binary_type] if showindex == "default" and index is not None: rows = _prepend_row_index(rows, index) elif isinstance(showindex, Iterable) and not showindex_is_a_str: rows = _prepend_row_index(rows, list(showindex)) elif showindex == "always" or (_bool(showindex) and not showindex_is_a_str): if index is None: index = list(range(len(rows))) rows = _prepend_row_index(rows, index) elif showindex == "never" or (not _bool(showindex) and not showindex_is_a_str): pass # pad with empty headers for initial columns if necessary if headers and len(rows) > 0: nhs = len(headers) ncols = len(rows[0]) if nhs < ncols: headers = [""]*(ncols - nhs) + headers return rows, headers
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _expand_numparse(disable_numparse, column_count):
    """
    Return a list of bools of length `column_count` which indicates whether
    number parsing should be used on each column.
    If `disable_numparse` is a list of indices, each of those indices are False,
    and everything else is True.
    If `disable_numparse` is a bool, then the returned list is all the same.
    """
    if isinstance(disable_numparse, Iterable):
        numparses = [True] * column_count
        for index in disable_numparse:
            numparses[index] = False
        return numparses
    else:
        return [not disable_numparse] * column_count
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
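A brief hedged check of _expand_numparse's two input shapes (the column count of 3 is arbitrary):

# Assuming _expand_numparse from above is in scope.
print(_expand_numparse(False, 3))   # [True, True, True]   -- parse numbers everywhere
print(_expand_numparse(True, 3))    # [False, False, False] -- parsing disabled globally
print(_expand_numparse([1], 3))     # [True, False, True]  -- disabled only for column 1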
def _build_simple_row(padded_cells, rowfmt):
    "Format row according to DataRow format without padding."
    begin, sep, end = rowfmt
    return (begin + sep.join(padded_cells) + end).rstrip()
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _append_basic_row(lines, padded_cells, colwidths, colaligns, rowfmt):
    lines.append(_build_row(padded_cells, colwidths, colaligns, rowfmt))
    return lines
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _build_line(colwidths, colaligns, linefmt):
    "Return a string which represents a horizontal line."
    if not linefmt:
        return None
    if hasattr(linefmt, "__call__"):
        return linefmt(colwidths, colaligns)
    else:
        begin, fill, sep, end = linefmt
        cells = [fill * w for w in colwidths]
        return _build_simple_row(cells, (begin, sep, end))
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
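A small hedged example of how _build_line and _build_simple_row cooperate (the 4-tuple stands in for the (begin, fill, sep, end) line format the function unpacks):

# Assuming _build_line and _build_simple_row from above are in scope.
print(_build_line([3, 5], ["left", "left"], ("+", "-", "+", "+")))
# '+---+-----+' -- each column is filled to its width, then joined like a data row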
def _format_table(fmt, headers, rows, colwidths, colaligns, is_multiline):
    """Produce a plain-text representation of the table."""
    lines = []
    hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
    pad = fmt.padding
    headerrow = fmt.headerrow

    padded_widths = [(w + 2 * pad) for w in colwidths]
    if is_multiline:
        # do it later, in _append_multiline_row
        pad_row = lambda row, _: row  # noqa
        append_row = partial(_append_multiline_row, pad=pad)
    else:
        pad_row = _pad_row
        append_row = _append_basic_row

    padded_headers = pad_row(headers, pad)
    padded_rows = [pad_row(row, pad) for row in rows]

    if fmt.lineabove and "lineabove" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.lineabove)

    if padded_headers:
        append_row(lines, padded_headers, padded_widths, colaligns, headerrow)
        if fmt.linebelowheader and "linebelowheader" not in hidden:
            _append_line(lines, padded_widths, colaligns, fmt.linebelowheader)

    if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
        # initial rows with a line below
        for row in padded_rows[:-1]:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)
            _append_line(lines, padded_widths, colaligns, fmt.linebetweenrows)
        # the last row without a line below
        append_row(lines, padded_rows[-1], padded_widths, colaligns, fmt.datarow)
    else:
        for row in padded_rows:
            append_row(lines, row, padded_widths, colaligns, fmt.datarow)

    if fmt.linebelow and "linebelow" not in hidden:
        _append_line(lines, padded_widths, colaligns, fmt.linebelow)

    if headers or rows:
        return "\n".join(lines)
    else:  # a completely empty table
        return ""
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def _pprint_file(fobject, headers, tablefmt, sep, floatfmt, file, colalign):
    rows = fobject.readlines()
    table = [re.split(sep, r.rstrip()) for r in rows if r.strip()]
    print(tabulate(table, headers, tablefmt, floatfmt=floatfmt,
                   colalign=colalign), file=file)
anlambert/tulip
[ 2, 1, 2, 1, 1486498884 ]
def parallelCCompile(self, sources, output_dir=None, macros=None,
                     include_dirs=None, debug=0, extra_preargs=None,
                     extra_postargs=None, depends=None):
    # those lines are copied from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
    # parallel code
peter-ch/MultiNEAT
[ 319, 101, 319, 32, 1344853505 ]
def _single_compile(obj):
    try:
        src, ext = build[obj]
    except KeyError:
        return
    self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
peter-ch/MultiNEAT
[ 319, 101, 319, 32, 1344853505 ]
def getExtensions(): platform = sys.platform extensionsList = [] sources = ['src/Genome.cpp', 'src/Innovation.cpp', 'src/NeuralNetwork.cpp', 'src/Parameters.cpp', 'src/PhenotypeBehavior.cpp', 'src/Population.cpp', 'src/Random.cpp', 'src/Species.cpp', 'src/Substrate.cpp', 'src/Utils.cpp'] extra = ['-march=native', '-mtune=native', '-g', ] if platform == 'darwin': extra += ['-stdlib=libc++', '-std=c++11',] else: extra += ['-std=gnu++11'] is_windows = 'win' in platform and platform != 'darwin' if is_windows: extra.append('/EHsc') else: extra.append('-w') prefix = os.getenv('PREFIX') if prefix and len(prefix) > 0: extra += ["-I{}/include".format(prefix)] build_sys = os.getenv('MN_BUILD') if build_sys is None: if os.path.exists('_MultiNEAT.cpp'): sources.insert(0, '_MultiNEAT.cpp') extra.append('-O3') extensionsList.extend([Extension('MultiNEAT._MultiNEAT', sources, extra_compile_args=extra)], ) else: print('Source file is missing and MN_BUILD environment variable is not set.\n' 'Specify either \'cython\' or \'boost\'. Example to build in Linux with Cython:\n' '\t$ export MN_BUILD=cython') exit(1) elif build_sys == 'cython': from Cython.Build import cythonize sources.insert(0, '_MultiNEAT.pyx') extra.append('-O3') extensionsList.extend(cythonize([Extension('MultiNEAT._MultiNEAT', sources, extra_compile_args=extra)], )) elif build_sys == 'boost': is_python_2 = sys.version_info[0] < 3 sources.insert(0, 'src/PythonBindings.cpp') if is_windows: if is_python_2: raise RuntimeError("Python prior to version 3 is not supported on Windows due to limits of VC++ compiler version") libs = ['boost_system', 'boost_serialization'] if is_python_2: libs += ['boost_python', "boost_numpy"] else: # with boost 1.67 you need boost_python3x and boost_numpy3x where x is python version 3.x libs += ['boost_python36', "boost_numpy36"] # in Ubuntu 14 there is only 'boost_python-py34' # for Windows with mingw # libraries= ['libboost_python-mgw48-mt-1_58', # 'libboost_serialization-mgw48-mt-1_58'], # include_dirs = ['C:/MinGW/include', 'C:/Users/Peter/Desktop/boost_1_58_0'], # library_dirs = ['C:/MinGW/lib', 'C:/Users/Peter/Desktop/boost_1_58_0/stage/lib'], extra.extend(['-DUSE_BOOST_PYTHON', '-DUSE_BOOST_RANDOM', #'-O0', #'-DVDEBUG', ]) exx = Extension('MultiNEAT._MultiNEAT', sources, libraries=libs, extra_compile_args=extra) print(dir(exx)) print(exx) print(exx.extra_compile_args) extensionsList.append(exx) else: raise AttributeError('Unknown tool: {}'.format(build_sys)) return extensionsList
peter-ch/MultiNEAT
[ 319, 101, 319, 32, 1344853505 ]
def __init__(self, config=None, args=None):
    # Init
    self.config = config
    self.args = args

    # Init windows positions
    self.term_w = 80
    self.term_h = 24

    # Space between stats
    self.space_between_column = 3
    self.space_between_line = 2

    # Init the curses screen
    self.screen = curses.initscr()
    if not self.screen:
        logger.critical("Cannot init the curses library.\n")
        sys.exit(1)

    # Load the 'outputs' section of the configuration file
    # - Init the theme (default is black)
    self.theme = {'name': 'black'}

    # Load configuration file
    self.load_config(config)

    # Init cursor
    self._init_cursor()

    # Init the colors
    self._init_colors()

    # Init main window
    self.term_window = self.screen.subwin(0, 0)

    # Init edit filter tag
    self.edit_filter = False

    # Init kill process tag
    self.kill_process = False

    # Init the process min/max reset
    self.args.reset_minmax_tag = False

    # Init cursor
    self.args.cursor_position = 0

    # Catch key pressed with non blocking mode
    self.term_window.keypad(1)
    self.term_window.nodelay(1)
    self.pressedkey = -1

    # History tag
    self._init_history()
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def is_theme(self, name):
    """Return True if the theme *name* should be used."""
    return getattr(self.args, 'theme_' + name) or self.theme['name'] == name
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def _init_cursor(self):
    """Init cursors."""
    if hasattr(curses, 'noecho'):
        curses.noecho()
    if hasattr(curses, 'cbreak'):
        curses.cbreak()
    self.set_cursor(0)
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def set_cursor(self, value):
    """Configure the curse cursor appearance.

    0: invisible
    1: visible
    2: very visible
    """
    if hasattr(curses, 'curs_set'):
        try:
            curses.curs_set(value)
        except Exception:
            pass
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def __catch_key(self, return_to_browser=False): # Catch the pressed key self.pressedkey = self.get_key(self.term_window) if self.pressedkey == -1: return -1 # Actions (available in the global hotkey dict)... logger.debug("Keypressed (code: {})".format(self.pressedkey)) for hotkey in self._hotkeys: if self.pressedkey == ord(hotkey) and 'switch' in self._hotkeys[hotkey]: # Get the option name # Ex: disable_foo return foo # enable_foo_bar return foo_bar option = '_'.join(self._hotkeys[hotkey]['switch'].split('_')[1:]) if self._hotkeys[hotkey]['switch'].startswith('disable_'): # disable_ switch if getattr(self.args, self._hotkeys[hotkey]['switch']): enable(self.args, option) else: disable(self.args, option) elif self._hotkeys[hotkey]['switch'].startswith('enable_'): # enable_ switch if getattr(self.args, self._hotkeys[hotkey]['switch']): disable(self.args, option) else: enable(self.args, option) else: # Others switchs options (with no enable_ or disable_) setattr( self.args, self._hotkeys[hotkey]['switch'], not getattr(self.args, self._hotkeys[hotkey]['switch']), ) if self.pressedkey == ord(hotkey) and 'sort_key' in self._hotkeys[hotkey]: glances_processes.set_sort_key( self._hotkeys[hotkey]['sort_key'], self._hotkeys[hotkey]['sort_key'] == 'auto' ) # Other actions... if self.pressedkey == ord('\n'): # 'ENTER' > Edit the process filter self.edit_filter = not self.edit_filter elif self.pressedkey == ord('4'): # '4' > Enable or disable quicklook self.args.full_quicklook = not self.args.full_quicklook if self.args.full_quicklook: self.enable_fullquicklook() else: self.disable_fullquicklook() elif self.pressedkey == ord('5'): # '5' > Enable or disable top menu self.args.disable_top = not self.args.disable_top if self.args.disable_top: self.disable_top() else: self.enable_top() elif self.pressedkey == ord('9'): # '9' > Theme from black to white and reverse self._init_colors() elif self.pressedkey == ord('e'): # 'e' > Enable/Disable process extended self.args.enable_process_extended = not self.args.enable_process_extended if not self.args.enable_process_extended: glances_processes.disable_extended() else: glances_processes.enable_extended() elif self.pressedkey == ord('E'): # 'E' > Erase the process filter glances_processes.process_filter = None elif self.pressedkey == ord('f'): # 'f' > Show/hide fs / folder stats self.args.disable_fs = not self.args.disable_fs self.args.disable_folders = not self.args.disable_folders elif self.pressedkey == ord('k'): # 'k' > Kill selected process (after confirmation) self.kill_process = not self.kill_process elif self.pressedkey == ord('w'): # 'w' > Delete finished warning logs glances_events.clean() elif self.pressedkey == ord('x'): # 'x' > Delete finished warning and critical logs glances_events.clean(critical=True) elif self.pressedkey == ord('z'): # 'z' > Enable or disable processes self.args.disable_process = not self.args.disable_process if self.args.disable_process: glances_processes.disable() else: glances_processes.enable() elif self.pressedkey == curses.KEY_LEFT: # "<" (left arrow) navigation through process sort next_sort = (self.loop_position() - 1) % len(self._sort_loop) glances_processes.set_sort_key(self._sort_loop[next_sort], False) elif self.pressedkey == curses.KEY_RIGHT: # ">" (right arrow) navigation through process sort next_sort = (self.loop_position() + 1) % len(self._sort_loop) glances_processes.set_sort_key(self._sort_loop[next_sort], False) elif self.pressedkey == curses.KEY_UP or self.pressedkey == 65: # 'UP' > Up in the server list if 
self.args.cursor_position > 0: self.args.cursor_position -= 1 elif self.pressedkey == curses.KEY_DOWN or self.pressedkey == 66: # 'DOWN' > Down in the server list # if self.args.cursor_position < glances_processes.max_processes - 2: if self.args.cursor_position < glances_processes.processes_count: self.args.cursor_position += 1 elif self.pressedkey == ord('\x1b') or self.pressedkey == ord('q'): # 'ESC'|'q' > Quit if return_to_browser: logger.info("Stop Glances client and return to the browser") else: logger.info("Stop Glances (keypressed: {})".format(self.pressedkey)) elif self.pressedkey == curses.KEY_F5: # "F5" manual refresh requested pass # Return the key code return self.pressedkey
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def disable_top(self):
    """Disable the top panel"""
    for p in ['quicklook', 'cpu', 'gpu', 'mem', 'memswap', 'load']:
        setattr(self.args, 'disable_' + p, True)
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def disable_fullquicklook(self):
    """Disable the full quicklook mode"""
    for p in ['quicklook', 'cpu', 'gpu', 'mem', 'memswap']:
        setattr(self.args, 'disable_' + p, False)
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def end(self):
    """Shutdown the curses window."""
    if hasattr(curses, 'echo'):
        curses.echo()
    if hasattr(curses, 'nocbreak'):
        curses.nocbreak()
    if hasattr(curses, 'curs_set'):
        try:
            curses.curs_set(1)
        except Exception:
            pass
    curses.endwin()
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def init_line(self):
    """Init the line position for the curses interface."""
    self.line = 0
    self.next_line = 0
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def new_line(self, separator=False):
    """New line in the curses interface."""
    self.line = self.next_line
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def separator_line(self, color='TITLE'):
    """New separator line in the curses interface."""
    if not self.args.enable_separator:
        return
    self.new_line()
    self.line -= 1
    line_width = self.term_window.getmaxyx()[1] - self.column
    self.term_window.addnstr(self.line,
                             self.column,
                             unicode_message('MEDIUM_LINE', self.args) * line_width,
                             line_width,
                             self.colors_list[color])
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def display(self, stats, cs_status=None): """Display stats on the screen. :param stats: Stats database to display :param cs_status: "None": standalone or server mode "Connected": Client is connected to a Glances server "SNMP": Client is connected to a SNMP server "Disconnected": Client is disconnected from the server :return: True if the stats have been displayed else False if the help have been displayed """ # Init the internal line/column for Glances Curses self.init_line_column() # Update the stats messages ########################### # Get all the plugins but quicklook and process list self.args.cs_status = cs_status __stat_display = self.__get_stat_display(stats, layer=cs_status) # Adapt number of processes to the available space max_processes_displayed = ( self.term_window.getmaxyx()[0] - 11 - (0 if 'docker' not in __stat_display else self.get_stats_display_height(__stat_display["docker"])) - ( 0 if 'processcount' not in __stat_display else self.get_stats_display_height(__stat_display["processcount"]) ) - (0 if 'amps' not in __stat_display else self.get_stats_display_height(__stat_display["amps"])) - (0 if 'alert' not in __stat_display else self.get_stats_display_height(__stat_display["alert"])) ) try: if self.args.enable_process_extended: max_processes_displayed -= 4 except AttributeError: pass if max_processes_displayed < 0: max_processes_displayed = 0 if glances_processes.max_processes is None or glances_processes.max_processes != max_processes_displayed: logger.debug("Set number of displayed processes to {}".format(max_processes_displayed)) glances_processes.max_processes = max_processes_displayed # Get the processlist __stat_display["processlist"] = stats.get_plugin('processlist').get_stats_display(args=self.args) # Display the stats on the curses interface ########################################### # Help screen (on top of the other stats) if self.args.help_tag: # Display the stats... self.display_plugin(stats.get_plugin('help').get_stats_display(args=self.args)) # ... 
and exit return False # ===================================== # Display first line (system+ip+uptime) # Optionally: Cloud on second line # ===================================== self.__display_header(__stat_display) self.separator_line() # ============================================================== # Display second line (<SUMMARY>+CPU|PERCPU+<GPU>+LOAD+MEM+SWAP) # ============================================================== self.__display_top(__stat_display, stats) self.init_column() self.separator_line() # ================================================================== # Display left sidebar (NETWORK+PORTS+DISKIO+FS+SENSORS+Current time) # ================================================================== self.__display_left(__stat_display) # ==================================== # Display right stats (process and co) # ==================================== self.__display_right(__stat_display) # ===================== # Others popup messages # ===================== # Display edit filter popup # Only in standalone mode (cs_status is None) if self.edit_filter and cs_status is None: new_filter = self.display_popup( 'Process filter pattern: \n\n' + 'Examples:\n' + '- python\n' + '- .*python.*\n' + '- /usr/lib.*\n' + '- name:.*nautilus.*\n' + '- cmdline:.*glances.*\n' + '- username:nicolargo\n' + '- username:^root ', popup_type='input', input_value=glances_processes.process_filter_input, ) glances_processes.process_filter = new_filter elif self.edit_filter and cs_status is not None: self.display_popup('Process filter only available in standalone mode') self.edit_filter = False # Display kill process confirmation popup # Only in standalone mode (cs_status is None) if self.kill_process and cs_status is None: selected_process_raw = stats.get_plugin('processlist').get_raw()[self.args.cursor_position] confirm = self.display_popup( 'Kill process: {} (pid: {}) ?\n\nConfirm ([y]es/[n]o): '.format( selected_process_raw['name'], selected_process_raw['pid'] ), popup_type='yesno', ) if confirm.lower().startswith('y'): try: ret_kill = glances_processes.kill(selected_process_raw['pid']) except Exception as e: logger.error('Can not kill process {} ({})'.format(selected_process_raw['name'], e)) else: logger.info( 'Kill signal has been sent to process {} (return code: {})'.format( selected_process_raw['name'], ret_kill ) ) elif self.kill_process and cs_status is not None: self.display_popup('Kill process only available in standalone mode') self.kill_process = False # Display graph generation popup if self.args.generate_graph: self.display_popup('Generate graph in {}'.format(self.args.export_graph_path)) return True
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def __display_top(self, stat_display, stats): """Display the second line in the Curses interface. <QUICKLOOK> + CPU|PERCPU + <GPU> + MEM + SWAP + LOAD """ self.init_column() self.new_line() # Init quicklook stat_display['quicklook'] = {'msgdict': []} # Dict for plugins width plugin_widths = {} for p in self._top: plugin_widths[p] = ( self.get_stats_display_width(stat_display.get(p, 0)) if hasattr(self.args, 'disable_' + p) else 0 ) # Width of all plugins stats_width = sum(itervalues(plugin_widths)) # Number of plugin but quicklook stats_number = sum( [int(stat_display[p]['msgdict'] != []) for p in self._top if not getattr(self.args, 'disable_' + p)] ) if not self.args.disable_quicklook: # Quick look is in the place ! if self.args.full_quicklook: quicklook_width = self.term_window.getmaxyx()[1] - ( stats_width + 8 + stats_number * self.space_between_column ) else: quicklook_width = min( self.term_window.getmaxyx()[1] - (stats_width + 8 + stats_number * self.space_between_column), self._quicklook_max_width - 5, ) try: stat_display["quicklook"] = stats.get_plugin('quicklook').get_stats_display( max_width=quicklook_width, args=self.args ) except AttributeError as e: logger.debug("Quicklook plugin not available (%s)" % e) else: plugin_widths['quicklook'] = self.get_stats_display_width(stat_display["quicklook"]) stats_width = sum(itervalues(plugin_widths)) + 1 self.space_between_column = 1 self.display_plugin(stat_display["quicklook"]) self.new_column() # Compute spaces between plugins # Note: Only one space between Quicklook and others plugin_display_optional = {} for p in self._top: plugin_display_optional[p] = True if stats_number > 1: self.space_between_column = max(1, int((self.term_window.getmaxyx()[1] - stats_width) / (stats_number - 1))) for p in ['mem', 'cpu']: # No space ? Remove optional stats if self.space_between_column < 3: plugin_display_optional[p] = False plugin_widths[p] = ( self.get_stats_display_width(stat_display[p], without_option=True) if hasattr(self.args, 'disable_' + p) else 0 ) stats_width = sum(itervalues(plugin_widths)) + 1 self.space_between_column = max( 1, int((self.term_window.getmaxyx()[1] - stats_width) / (stats_number - 1)) ) else: self.space_between_column = 0 # Display CPU, MEM, SWAP and LOAD for p in self._top: if p == 'quicklook': continue if p in stat_display: self.display_plugin(stat_display[p], display_optional=plugin_display_optional[p]) if p != 'load': # Skip last column self.new_column() # Space between column self.space_between_column = 3 # Backup line position self.saved_line = self.next_line
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def __display_right(self, stat_display):
    """Display the right sidebar in the Curses interface.

    docker + processcount + amps + processlist + alert
    """
    # Do not display anything if space is not available...
    if self.term_window.getmaxyx()[1] < self._left_sidebar_min_width:
        return

    # Restore line position
    self.next_line = self.saved_line

    # Display right sidebar
    self.new_column()
    for p in self._right_sidebar:
        if (hasattr(self.args, 'enable_' + p) or hasattr(self.args, 'disable_' + p)) and p in stat_display:
            if p not in p:
                # Catch for issue #1470
                continue
            self.new_line()
            if p == 'processlist':
                self.display_plugin(
                    stat_display['processlist'],
                    display_optional=(self.term_window.getmaxyx()[1] > 102),
                    display_additional=(not MACOS),
                    max_y=(
                        self.term_window.getmaxyx()[0]
                        - self.get_stats_display_height(stat_display['alert']) - 2
                    ),
                )
            else:
                self.display_plugin(stat_display[p])
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def display_plugin(self, plugin_stats, display_optional=True, display_additional=True, max_y=65535, add_space=0): """Display the plugin_stats on the screen. :param plugin_stats: :param display_optional: display the optional stats if True :param display_additional: display additional stats if True :param max_y: do not display line > max_y :param add_space: add x space (line) after the plugin """ # Exit if: # - the plugin_stats message is empty # - the display tag = False if plugin_stats is None or not plugin_stats['msgdict'] or not plugin_stats['display']: # Exit return 0 # Get the screen size screen_x = self.term_window.getmaxyx()[1] screen_y = self.term_window.getmaxyx()[0] # Set the upper/left position of the message if plugin_stats['align'] == 'right': # Right align (last column) display_x = screen_x - self.get_stats_display_width(plugin_stats) else: display_x = self.column if plugin_stats['align'] == 'bottom': # Bottom (last line) display_y = screen_y - self.get_stats_display_height(plugin_stats) else: display_y = self.line # Display x = display_x x_max = x y = display_y for m in plugin_stats['msgdict']: # New line try: if m['msg'].startswith('\n'): # Go to the next line y += 1 # Return to the first column x = display_x continue except: # Avoid exception (see issue #1692) pass # Do not display outside the screen if x < 0: continue if not m['splittable'] and (x + len(m['msg']) > screen_x): continue if y < 0 or (y + 1 > screen_y) or (y > max_y): break # If display_optional = False do not display optional stats if not display_optional and m['optional']: continue # If display_additional = False do not display additional stats if not display_additional and m['additional']: continue # Is it possible to display the stat with the current screen size # !!! Crash if not try/except... Why ??? try: self.term_window.addnstr( y, x, m['msg'], # Do not display outside the screen screen_x - x, self.colors_list[m['decoration']], ) except Exception: pass else: # New column # Python 2: we need to decode to get real screen size because # UTF-8 special tree chars occupy several bytes. # Python 3: strings are strings and bytes are bytes, all is # good. try: x += len(u(m['msg'])) except UnicodeDecodeError: # Quick and dirty hack for issue #745 pass if x > x_max: x_max = x # Compute the next Glances column/line position self.next_column = max(self.next_column, x_max + self.space_between_column) self.next_line = max(self.next_line, y + self.space_between_line) # Have empty lines after the plugins self.next_line += add_space
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def flush(self, stats, cs_status=None):
    """Clear and update the screen.

    :param stats: Stats database to display
    :param cs_status:
        "None": standalone or server mode
        "Connected": Client is connected to the server
        "Disconnected": Client is disconnected from the server
    """
    self.erase()
    self.display(stats, cs_status=cs_status)
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def wait(self, delay=100):
    """Wait delay in ms"""
    curses.napms(delay)
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def get_stats_display_height(self, curse_msg):
    """Return the height of the formatted curses message.

    The height is defined by the number of '\n' (new line).
    """
    try:
        c = [i['msg'] for i in curse_msg['msgdict']].count('\n')
    except Exception as e:
        logger.debug('ERROR: Can not compute plugin height ({})'.format(e))
        return 0
    else:
        return c + 1
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def __init__(self, *args, **kwargs):
    super(GlancesTextbox, self).__init__(*args, **kwargs)
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def __init__(self, *args, **kwargs):
    super(GlancesTextboxYesNo, self).__init__(*args, **kwargs)
nicolargo/glances
[ 22397, 1365, 22397, 239, 1322988555 ]
def forwards(self, orm): # Adding model 'Category' db.create_table(u'core_category', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=75)), )) db.send_create_signal(u'core', ['Category']) # Adding model 'Source' db.create_table(u'core_source', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('type', self.gf('django.db.models.fields.CharField')(max_length=20)), ('title', self.gf('django.db.models.fields.CharField')(max_length=75)), ('author', self.gf('django.db.models.fields.CharField')(max_length=75)), ('year_published', self.gf('django.db.models.fields.PositiveIntegerField')()), ('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), ('series_season', self.gf('django.db.models.fields.PositiveIntegerField')()), ('series_episode', self.gf('django.db.models.fields.PositiveIntegerField')()), ('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)), ('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)), ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)), )) db.send_create_signal(u'core', ['Source']) # Adding model 'Prediction' db.create_table(u'core_prediction', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Source'])), ('category', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Category'])), ('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)), ('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)), ('year_predicted', self.gf('django.db.models.fields.PositiveIntegerField')()), ('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), ('headline_E', self.gf('django.db.models.fields.TextField')(max_length=300)), ('headline_D', self.gf('django.db.models.fields.TextField')(max_length=300)), ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)), ('username', self.gf('django.db.models.fields.CharField')(max_length=75)), ('creation_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('edition_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('published', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal(u'core', ['Prediction']) # Adding model 'Realisation' db.create_table(u'core_realisation', ( (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('prediction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Prediction'])), ('description_E', self.gf('django.db.models.fields.TextField')(max_length=300)), ('description_D', self.gf('django.db.models.fields.TextField')(max_length=300)), ('year_introduced', self.gf('django.db.models.fields.PositiveIntegerField')()), ('more_info', self.gf('django.db.models.fields.URLField')(max_length=200, blank=True)), ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)), ('image_credit', self.gf('django.db.models.fields.CharField')(max_length=75, blank=True)), ('creation_date', 
self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('edition_date', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)), ('published', self.gf('django.db.models.fields.BooleanField')(default=False)), )) db.send_create_signal(u'core', ['Realisation'])
jplusplus/dystopia-tracker
[ 22, 1, 22, 15, 1395838587 ]
def compute_q_noisy_max(counts, noise_eps):
    """returns ~ Pr[outcome != winner].

    Args:
      counts: a list of scores
      noise_eps: privacy parameter for noisy_max
    Returns:
      q: the probability that outcome is different from true winner.
    """
    # For noisy max, we only get an upper bound.
    # Pr[ j beats i*] \leq (2+gap(j,i*))/ 4 exp(gap(j,i*)
    # proof at http://mathoverflow.net/questions/66763/
    # tight-bounds-on-probability-of-sum-of-laplace-random-variables
    winner = np.argmax(counts)
    counts_normalized = noise_eps * (counts - counts[winner])
    counts_rest = np.array(
        [counts_normalized[i] for i in xrange(len(counts)) if i != winner])
    q = 0.0
    for c in counts_rest:
        gap = -c
        q += (gap + 2.0) / (4.0 * math.exp(gap))
    return min(q, 1.0 - (1.0 / len(counts)))
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
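A hedged numeric sketch of compute_q_noisy_max (the teacher vote counts and noise_eps are made up; xrange, np and math are the module's own imports): the larger the scaled gap between the winner and every other class, the smaller the bound q on returning a non-winner.

# Assuming compute_q_noisy_max from above is in scope.
import numpy as np
votes = np.array([48, 1, 1])       # hypothetical teacher votes for 3 classes
q = compute_q_noisy_max(votes, noise_eps=0.1)
# Each runner-up contributes (gap + 2) / (4 * exp(gap)) with gap = 0.1 * 47,
# so q comes out around 0.03 here; it is also capped at 1 - 1/len(votes).
print(q)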
def logmgf_exact(q, priv_eps, l):
    """Computes the logmgf value given q and privacy eps.

    The bound used is the min of three terms. The first term is from
    https://arxiv.org/pdf/1605.02065.pdf.
    The second term is based on the fact that when event has probability (1-q) for
    q close to zero, q can only change by exp(eps), which corresponds to a much
    smaller multiplicative change in (1-q)
    The third term comes directly from the privacy guarantee.
    Args:
      q: pr of non-optimal outcome
      priv_eps: eps parameter for DP
      l: moment to compute.
    Returns:
      Upper bound on logmgf
    """
    if q < 0.5:
        t_one = (1 - q) * math.pow((1 - q) / (1 - math.exp(priv_eps) * q), l)
        t_two = q * math.exp(priv_eps * l)
        t = t_one + t_two
        try:
            log_t = math.log(t)
        except ValueError:
            print("Got ValueError in math.log for values :" +
                  str((q, priv_eps, l, t)))
            log_t = priv_eps * l
    else:
        log_t = priv_eps * l

    return min(0.5 * priv_eps * priv_eps * l * (l + 1), log_t, priv_eps * l)
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
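The per-moment log-MGF bounds above turn into an (eps, delta) statement via the conversion used in main() further down: delta = exp(alpha - eps * l), hence eps = (alpha - ln(delta)) / l. A hedged sketch with made-up numbers:

import math
l = 8.0            # moment order (hypothetical)
delta = 1e-5
alpha = 0.05       # hypothetical accumulated log-MGF at moment l
eps = (alpha - math.log(delta)) / l
print(eps)         # about 1.45 for these made-up values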
def sens_at_k(counts, noise_eps, l, k):
    """Return sensitivity at distance k.

    Args:
      counts: an array of scores
      noise_eps: noise parameter used
      l: moment whose sensitivity is being computed
      k: distance
    Returns:
      sensitivity: at distance k
    """
    counts_sorted = sorted(counts, reverse=True)
    if 0.5 * noise_eps * l > 1:
        print("l too large to compute sensitivity")
        return 0
    # Now we can assume that at k, gap remains positive
    # or we have reached the point where logmgf_exact is
    # determined by the first term and ind of q.
    if counts[0] < counts[1] + k:
        return 0
    counts_sorted[0] -= k
    counts_sorted[1] += k
    val = logmgf_from_counts(counts_sorted, noise_eps, l)
    counts_sorted[0] -= 1
    counts_sorted[1] += 1
    val_changed = logmgf_from_counts(counts_sorted, noise_eps, l)
    return val_changed - val
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def main(unused_argv): ################################################################## # If we are reproducing results from paper https://arxiv.org/abs/1610.05755, # download the required binaries with label information. ################################################################## # Binaries for MNIST results paper_binaries_mnist = \ ["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_labels.npy?raw=true", "https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/mnist_250_teachers_100_indices_used_by_student.npy?raw=true"] if FLAGS.counts_file == "mnist_250_teachers_labels.npy" \ or FLAGS.indices_file == "mnist_250_teachers_100_indices_used_by_student.npy": maybe_download(paper_binaries_mnist, os.getcwd()) # Binaries for SVHN results paper_binaries_svhn = ["https://github.com/npapernot/multiple-teachers-for-privacy/blob/master/svhn_250_teachers_labels.npy?raw=true"] if FLAGS.counts_file == "svhn_250_teachers_labels.npy": maybe_download(paper_binaries_svhn, os.getcwd()) input_mat = np.load(FLAGS.counts_file) if FLAGS.input_is_counts: counts_mat = input_mat else: # In this case, the input is the raw predictions. Transform num_teachers, n = input_mat.shape counts_mat = np.zeros((n, 10)).astype(np.int32) for i in range(n): for j in range(num_teachers): counts_mat[i, int(input_mat[j, i])] += 1 n = counts_mat.shape[0] num_examples = min(n, FLAGS.max_examples) if not FLAGS.indices_file: indices = np.array(range(num_examples)) else: index_list = np.load(FLAGS.indices_file) indices = index_list[:num_examples] l_list = 1.0 + np.array(xrange(FLAGS.moments)) beta = FLAGS.beta total_log_mgf_nm = np.array([0.0 for _ in l_list]) total_ss_nm = np.array([0.0 for _ in l_list]) noise_eps = FLAGS.noise_eps for i in indices: total_log_mgf_nm += np.array( [logmgf_from_counts(counts_mat[i], noise_eps, l) for l in l_list]) total_ss_nm += np.array( [smoothed_sens(counts_mat[i], noise_eps, l, beta) for l in l_list]) delta = FLAGS.delta # We want delta = exp(alpha - eps l). # Solving gives eps = (alpha - ln (delta))/l eps_list_nm = (total_log_mgf_nm - math.log(delta)) / l_list print("Epsilons (Noisy Max): " + str(eps_list_nm)) print("Smoothed sensitivities (Noisy Max): " + str(total_ss_nm / l_list)) # If beta < eps / 2 ln (1/delta), then adding noise Lap(1) * 2 SS/eps # is eps,delta DP # Also if beta < eps / 2(gamma +1), then adding noise 2(gamma+1) SS eta / eps # where eta has density proportional to 1 / (1+|z|^gamma) is eps-DP # Both from Corolloary 2.4 in # http://www.cse.psu.edu/~ads22/pubs/NRS07/NRS07-full-draft-v1.pdf # Print the first one's scale ss_eps = 2.0 * beta * math.log(1/delta) ss_scale = 2.0 / ss_eps print("To get an " + str(ss_eps) + "-DP estimate of epsilon, ") print("..add noise ~ " + str(ss_scale)) print("... times " + str(total_ss_nm / l_list)) print("Epsilon = " + str(min(eps_list_nm)) + ".") if min(eps_list_nm) == eps_list_nm[-1]: print("Warning: May not have used enough values of l") # Data independent bound, as mechanism is # 2*noise_eps DP. data_ind_log_mgf = np.array([0.0 for _ in l_list]) data_ind_log_mgf += num_examples * np.array( [logmgf_exact(1.0, 2.0 * noise_eps, l) for l in l_list]) data_ind_eps_list = (data_ind_log_mgf - math.log(delta)) / l_list print("Data independent bound = " + str(min(data_ind_eps_list)) + ".") return
cshallue/models
[ 6, 3, 6, 1, 1473384593 ]
def random_uuid(x, cache=True):
    import uuid
    return str(uuid.uuid4())
Parsl/parsl
[ 369, 114, 369, 333, 1476980871 ]
def collect_activations(
        model_path: str,
        layers: List[str],
        dataset: str,
        data_format: str = None,
        split: str = FULL,
        batch_size: int = 128,
        output_directory: str = 'results',
        gpus: List[str] = None,
        gpu_memory_limit: int = None,
        allow_parallel_threads: bool = True,
        backend: Union[Backend, str] = None,
        debug: bool = False,
        **kwargs
uber/ludwig
[ 8787, 1030, 8787, 265, 1545955092 ]
def collect_weights(
        model_path: str,
        tensors: List[str],
        output_directory: str = 'results',
        debug: bool = False,
        **kwargs
uber/ludwig
[ 8787, 1030, 8787, 265, 1545955092 ]
def save_tensors(collected_tensors, output_directory):
    filenames = []
    for tensor_name, tensor_value in collected_tensors:
        np_filename = os.path.join(
            output_directory,
            make_safe_filename(tensor_name) + '.npy'
        )
        np.save(np_filename, tensor_value.numpy())
        filenames.append(np_filename)
    return filenames
uber/ludwig
[ 8787, 1030, 8787, 265, 1545955092 ]
def cli_collect_activations(sys_argv): """Command Line Interface to communicate with the collection of tensors and there are several options that can specified when calling this function: --data_csv: Filepath for the input csv --data_hdf5: Filepath for the input hdf5 file, if there is a csv file, this is not read --d: Refers to the dataset type of the file being read, by default is *generic* --s: Refers to the split of the data, can be one of: train, test, validation, full --m: Input model that is necessary to collect to the tensors, this is a required *option* --t: Tensors to collect --od: Output directory of the model, defaults to results --bs: Batch size --g: Number of gpus that are to be used --gf: Fraction of each GPUs memory to use. --dbg: Debug if the model is to be started with python debugger --v: Verbose: Defines the logging level that the user will be exposed to """ parser = argparse.ArgumentParser( description='This script loads a pretrained model and uses it collect ' 'tensors for each datapoint in the dataset.', prog='ludwig collect_activations', usage='%(prog)s [options]') # --------------- # Data parameters # --------------- parser.add_argument( '--dataset', help='input data file path', required=True ) parser.add_argument( '--data_format', help='format of the input data', default='auto', choices=['auto', 'csv', 'excel', 'feather', 'fwf', 'hdf5', 'html' 'tables', 'json', 'jsonl', 'parquet', 'pickle', 'sas', 'spss', 'stata', 'tsv'] ) parser.add_argument( '-s', '--split', default=FULL, choices=[TRAINING, VALIDATION, TEST, FULL], help='the split to obtain the model activations from' ) # ---------------- # Model parameters # ---------------- parser.add_argument( '-m', '--model_path', help='model to load', required=True ) parser.add_argument( '-lyr', '--layers', help='tensors to collect', nargs='+', required=True ) # ------------------------- # Output results parameters # ------------------------- parser.add_argument( '-od', '--output_directory', type=str, default='results', help='directory that contains the results' ) # ------------------ # Generic parameters # ------------------ parser.add_argument( '-bs', '--batch_size', type=int, default=128, help='size of batches' ) # ------------------ # Runtime parameters # ------------------ parser.add_argument( '-g', '--gpus', type=int, default=0, help='list of gpu to use' ) parser.add_argument( '-gml', '--gpu_memory_limit', type=int, default=None, help='maximum memory in MB to allocate per GPU device' ) parser.add_argument( '-dpt', '--disable_parallel_threads', action='store_false', dest='allow_parallel_threads', help='disable TensorFlow from using multithreading for reproducibility' ) parser.add_argument( "-b", "--backend", help='specifies backend to use for parallel / distributed execution, ' 'defaults to local execution or Horovod if called using horovodrun', choices=ALL_BACKENDS, ) parser.add_argument( '-dbg', '--debug', action='store_true', default=False, help='enables debugging mode' ) parser.add_argument( '-l', '--logging_level', default='info', help='the level of logging to use', choices=['critical', 'error', 'warning', 'info', 'debug', 'notset'] ) args = parser.parse_args(sys_argv) args.logging_level = logging_level_registry[args.logging_level] logging.getLogger('ludwig').setLevel( args.logging_level ) global logger logger = logging.getLogger('ludwig.collect') print_ludwig('Collect Activations', LUDWIG_VERSION) collect_activations(**vars(args))
uber/ludwig
[ 8787, 1030, 8787, 265, 1545955092 ]
def cli_collect_summary(sys_argv):
    """Command Line Interface to collecting a summary of the model layers and weights.

    --m: Input model that is necessary to collect to the tensors, this is a
         required *option*
    --v: Verbose: Defines the logging level that the user will be exposed to
    """
    parser = argparse.ArgumentParser(
        description='This script loads a pretrained model '
                    'and prints names of weights and layers activations '
                    'to use with other collect commands',
        prog='ludwig collect_summary',
        usage='%(prog)s [options]'
    )

    # ----------------
    # Model parameters
    # ----------------
    parser.add_argument(
        '-m',
        '--model_path',
        help='model to load',
        required=True
    )

    # ------------------
    # Runtime parameters
    # ------------------
    parser.add_argument(
        '-l',
        '--logging_level',
        default='info',
        help='the level of logging to use',
        choices=['critical', 'error', 'warning', 'info', 'debug', 'notset']
    )

    args = parser.parse_args(sys_argv)

    args.logging_level = logging_level_registry[args.logging_level]
    logging.getLogger('ludwig').setLevel(
        args.logging_level
    )
    global logger
    logger = logging.getLogger('ludwig.collect')

    print_ludwig('Collect Summary', LUDWIG_VERSION)

    print_model_summary(**vars(args))
uber/ludwig
[ 8787, 1030, 8787, 265, 1545955092 ]
def setup_loader(request):
    setup_loader_modules = {pdbedit: {}}
    with pytest.helpers.loader_mock(request, setup_loader_modules) as loader_mock:
        yield loader_mock
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def test_when_no_users_returned_no_data_should_be_returned(verbose):
    expected_users = {} if verbose else []
    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": "", "stderr": "", "retcode": 0}
            )
        },
    ):
        actual_users = pdbedit.list_users(verbose=verbose)

    assert actual_users == expected_users
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def test_when_verbose_and_single_good_output_expected_data_should_be_parsed():
    expected_data = {
        "roscivs": {
            "unix username": "roscivs",
            "nt username": "bottia",
            "full name": "Roscivs Bottia",
            "user sid": "42",
            "primary group sid": "99",
            "home directory": r"\\samba\roscivs",
            "account desc": "separators! xxx so long and thanks for all the fish",
            "logoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
            "kickoff time": "Sat, 14 Aug 2010 15:06:39 UTC",
            "password must change": "never",
        }
    }
    pdb_output = dedent(
        r"""
        Unix username: roscivs
        NT username: bottia
        User SID: 42
        Primary Group SID: 99
        Full Name: Roscivs Bottia
        Home Directory: \\samba\roscivs
        Account desc: separators! xxx so long and thanks for all the fish
        Logoff time: Sat, 14 Aug 2010 15:06:39 UTC
        Kickoff time: Sat, 14 Aug 2010 15:06:39 UTC
        Password must change: never
        """
    ).strip()

    with patch.dict(
        pdbedit.__salt__,
        {
            "cmd.run_all": MagicMock(
                return_value={"stdout": pdb_output, "stderr": "", "retcode": 0}
            )
        },
    ):
        actual_data = pdbedit.list_users(verbose=True)

    assert actual_data == expected_data
saltstack/salt
[ 13089, 5388, 13089, 3074, 1298233016 ]
def __init__(self):
    self._swaps = []
    self._data = []
xunilrj/sandbox
[ 8, 4, 8, 117, 1469995922 ]
def WriteResponse(self):
    print(len(self._swaps))
    for swap in self._swaps:
        print(swap[0], swap[1])
xunilrj/sandbox
[ 8, 4, 8, 117, 1469995922 ]
def Solve(self):
    self.ReadData()
    self.GenerateSwaps()
    self.WriteResponse()
xunilrj/sandbox
[ 8, 4, 8, 117, 1469995922 ]
def __init__(self):
    self._must_match_all_bonds: bool = True
    self._smiles_with_h: bool = False
    self._smiles_with_labels: bool = True

    # A variant on matching is to consider all N and O as neutral forms during
    # matching, and then as a post processing step, see whether a valid,
    # neutral, molecule can be formed.
    self._neutral_forms_during_bond_matching: bool = False

    # If not a bond is being considered during matching.
    self._consider_not_bonded = False

    # Avoid destroying rings if not bonded is enabled.
    # Note that only the ring atom count is considered.
    self._ring_atom_count_cannot_decrease = True
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def must_match_all_bonds(self): return self._must_match_all_bonds
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def must_match_all_bonds(self, value): self._must_match_all_bonds = value
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def smiles_with_h(self): return self._smiles_with_h
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def smiles_with_h(self, value): self._smiles_with_h = value
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def smiles_with_labels(self): return self._smiles_with_labels
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def smiles_with_labels(self, value): self._smiles_with_labels = value
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def neutral_forms_during_bond_matching(self): return self._neutral_forms_during_bond_matching
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def neutral_forms_during_bond_matching(self, value): self._neutral_forms_during_bond_matching = value
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def consider_not_bonded(self): return self._consider_not_bonded
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def consider_not_bonded(self, value): self._consider_not_bonded = value
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def ring_atom_count_cannot_decrease(self): return self._ring_atom_count_cannot_decrease
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def ring_atom_count_cannot_decrease(self, value): self._ring_atom_count_cannot_decrease = value
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
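A minimal hedged sketch of how the getter/setter pairs above are typically used; the class name MatchingParameters is assumed (the actual name is not shown in this dump), and only the attribute names come from the code above.

params = MatchingParameters()                      # hypothetical class name
params.neutral_forms_during_bond_matching = True   # enable N/O neutral-form matching
params.must_match_all_bonds = False                # allow partial matches, e.g. in tests
params.consider_not_bonded = True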
def __init__(self, hydrogens_attached, bonds_to_scores, matching_parameters):
    """Class to perform bonding assessments.

    Args:
      hydrogens_attached: a BondTopology that has all atoms, and the bonds
        associated with the Hydrogen atoms.
      bonds_to_scores: A dict that maps tuples of pairs of atoms, to a numpy
        array of scores [0,3], for each possible bond type.
      matching_parameters: contains possible optional behaviour modifiers.

    Returns:
    """
    self._starting_bond_topology = hydrogens_attached

    self._natoms = len(hydrogens_attached.atoms)
    self._heavy_atoms = sum(1 for atom in hydrogens_attached.atoms
                            if atom != dataset_pb2.BondTopology.ATOM_H)

    self._contains_both_oxygen_and_nitrogen = False
    # If the molecule contains both N and O atoms, then we can
    # do more extensive atom type matching if requested.
    if matching_parameters.neutral_forms_during_bond_matching:
        self.set_contains_both_oxygen_and_nitrogen(hydrogens_attached)

    # For each atom, the maximum number of bonds that can be attached.
    self._max_bonds = np.zeros(self._natoms, dtype=np.int32)
    if (matching_parameters.neutral_forms_during_bond_matching and
            self._contains_both_oxygen_and_nitrogen):
        for i in range(0, self._natoms):
            self._max_bonds[i] = smu_utils_lib.ATOM_TYPE_TO_MAX_BONDS_ANY_FORM[
                hydrogens_attached.atoms[i]]
    else:
        for i in range(0, self._natoms):
            self._max_bonds[i] = smu_utils_lib.ATOM_TYPE_TO_MAX_BONDS[
                hydrogens_attached.atoms[i]]

    # With the Hydrogens attached, the number of bonds to each atom.
    self._bonds_with_hydrogens_attached = np.zeros((self._natoms), dtype=np.int32)
    for bond in hydrogens_attached.bonds:
        self._bonds_with_hydrogens_attached[bond.atom_a] += 1
        self._bonds_with_hydrogens_attached[bond.atom_b] += 1

    self._current_bonds_attached = np.zeros((self._natoms), dtype=np.int32)

    # We turn bonds_to_scores into two arrays. So they can be iterated
    # via itertools.
    self._bonds = list(bonds_to_scores.keys())
    self._scores = list(bonds_to_scores.values())

    # Initialize for probability type accumulation
    self._initial_score = 1.0
    self._accumulate_score = operator.mul

    # For testing, it can be convenient to allow for partial matches
    # For example this allows matching C-C and C=C without the need
    # to add explicit hydrogens
    self._must_match_all_bonds = matching_parameters.must_match_all_bonds
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def set_initial_score_and_incrementer(self, initial_score, op): """Update values used for computing scores.""" self._initial_score = initial_score self._accumulate_score = op
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
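The two values set here control how per-bond scores are folded into one topology score. A minimal, self-contained sketch of the idea (not taken from the source; the numbers are invented): multiplying probabilities from an initial score of 1.0 is equivalent, up to exp/log, to adding log-probabilities from an initial score of 0.0, which is the kind of switch this setter enables.

import functools
import math
import operator

# Invented per-bond scores, illustration only.
per_bond_scores = [0.9, 0.8, 0.95]

# Default-style accumulation: product of probabilities starting at 1.0.
product_score = functools.reduce(operator.mul, per_bond_scores, 1.0)

# Alternative accumulation: sum of log-probabilities starting at 0.0.
log_score = functools.reduce(operator.add,
                             [math.log(s) for s in per_bond_scores], 0.0)

# Same ranking information, but the additive form avoids underflow
# when many bonds are combined.
assert abs(math.exp(log_score) - product_score) < 1e-12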
def _place_bond(self, a1, a2, btype): """Possibly add a new bond to the current config.

    If the bond can be placed, updates self._current_bonds_attached
    for both `a1` and `a2`.

    Args:
      a1: index of the first atom in the bond.
      a2: index of the second atom in the bond.
      btype: the bond type to place, counted against each atom's
        maximum number of bonds.
    Returns:
      Bool, True if the bond was placed and the counts updated.
    """
    if self._current_bonds_attached[a1] + btype > self._max_bonds[a1]:
      return False
    if self._current_bonds_attached[a2] + btype > self._max_bonds[a2]:
      return False

    self._current_bonds_attached[a1] += btype
    self._current_bonds_attached[a2] += btype
    return True
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
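To make the valence check above concrete, a small hedged example with invented numbers: `btype` is treated as the number of bonds it consumes on each atom.

# Illustration only: an atom with max_bonds == 4 that already carries 2 bonds
# can accept a single (1) or double (2) bond, but not a triple (3) bond.
max_bonds = 4
current_bonds_attached = 2
for btype in (1, 2, 3):
    print(btype, current_bonds_attached + btype <= max_bonds)
# -> 1 True, 2 True, 3 False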
def place_bonds_inner(self, state): """Place bonds corresponding to `state`.

    No validity checking is done; the calling function is responsible
    for that.

    Args:
      state: for each pair of atoms, the kind of bond to be placed.

    Returns:
      If successful, a BondTopology.
    """
    self._current_bonds_attached = np.copy(self._bonds_with_hydrogens_attached)

    result = dataset_pb2.BondTopology()
    result.CopyFrom(self._starting_bond_topology)  # only Hydrogens attached.
    result.score = self._initial_score

    # Make sure each atom gets at least one bond
    atom_got_bond = np.zeros(self._heavy_atoms)
    for i, btype in enumerate(state):
      if btype != dataset_pb2.BondTopology.BOND_UNDEFINED:
        a1 = self._bonds[i][0]
        a2 = self._bonds[i][1]
        if not self._place_bond(a1, a2, btype):
          return None
        add_bond(a1, a2, btype, result)
        atom_got_bond[a1] = 1
        atom_got_bond[a2] = 1

      result.score = self._accumulate_score(result.score,
                                            self._scores[i][btype])
    if not np.all(atom_got_bond):
      return None

    return result
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
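place_bonds_inner consumes one `state` at a time; the comment in the constructor about iterating the bonds/scores arrays via itertools suggests the caller enumerates candidate states as a cross product over bond types. A hedged, self-contained sketch of that enumeration (the atom pairs, scores, and the 0-3 bond-type encoding are illustrative values, not taken from the source):

import itertools

# Hypothetical per-pair scores, one entry per possible bond type
# (0 = no bond, 1 = single, 2 = double, 3 = triple).
bonds_to_scores = {
    (0, 1): [0.05, 0.90, 0.04, 0.01],
    (1, 2): [0.10, 0.30, 0.55, 0.05],
}
num_bond_types = 4

candidates = []
for state in itertools.product(range(num_bond_types),
                               repeat=len(bonds_to_scores)):
    # In the class above, each `state` tuple would be handed to
    # place_bonds_inner, which returns a scored BondTopology or None.
    score = 1.0
    for scores, btype in zip(bonds_to_scores.values(), state):
        score *= scores[btype]
    candidates.append((score, state))

best_score, best_state = max(candidates)  # highest-scoring bond assignment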
def get(ar, index):
    l = len(ar)
    if index < 0:
        return ar[l + index]
    else:
        return ar[index]
ferventdesert/Hawk-Projects
[ 148, 102, 148, 1, 1462091270 ]
def find(ar, filter):
    for r in ar:
        if filter(r):
            return r
    return None
ferventdesert/Hawk-Projects
[ 148, 102, 148, 1, 1462091270 ]
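Assuming the two helper functions above are in scope, a made-up usage example:

# Made-up rows purely for illustration.
rows = [{'id': 1, 'name': 'foo'}, {'id': 2, 'name': 'bar'}]

last_row = get(rows, -1)                               # {'id': 2, 'name': 'bar'}
first_foo = find(rows, lambda r: r['name'] == 'foo')   # {'id': 1, 'name': 'foo'}
missing = find(rows, lambda r: r['id'] == 99)          # None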
def work2(x):
    x.Enabled = not_repeat
ferventdesert/Hawk-Projects
[ 148, 102, 148, 1, 1462091270 ]
def is_operational(method): # Decorator to check we are operational before provisioning. def wrapper(*args, **kwargs): instance = args[0] if instance.operational: try: return method(*args, **kwargs) except IOError as ioe: LOG.error('IO Error detected: %s' % method.__name__) LOG.error(str(ioe)) raise ioe else: LOG.error('Cannot execute %s. Not operational. Re-initializing.' % method.__name__) instance._init_bigips() return wrapper
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
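A hedged sketch of how the decorator above might be applied. The class, method, and service names here are invented for illustration, and the error branch assumes the module-level LOG used by the decorator is available.

class _FakeDriver(object):
    # Minimal stand-in exposing the attributes the decorator relies on.
    def __init__(self):
        self.operational = True

    def _init_bigips(self):
        # re-initialization hook invoked by the decorator when not operational
        self.operational = True

    @is_operational
    def provision_stub(self, service_name):
        return 'provisioned %s' % service_name

driver = _FakeDriver()
driver.provision_stub('lb-1')      # runs the wrapped method normally
driver.operational = False
driver.provision_stub('lb-2')      # logs an error and re-initializes instead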
def __init__(self, conf, registerOpts=True):
    # The registerOpts parameter allows a test to
    # turn off config option handling so that it can
    # set the options manually instead.
    super(iControlDriver, self).__init__(conf)
    self.conf = conf
    if registerOpts:
        self.conf.register_opts(OPTS)
    self.initialized = False
    self.hostnames = None
    self.device_type = conf.f5_device_type
    self.plugin_rpc = None  # overrides base, same value
    self.agent_report_state = None  # overrides base, same value
    self.operational = False  # overrides base, same value
    self.driver_name = 'f5-lbaasv2-icontrol'

    #
    # BIG-IP containers
    #

    # BIG-IPs which are currently active
    self.__bigips = {}
    self.__last_connect_attempt = None

    # HA and traffic group validation
    self.ha_validated = False
    self.tg_initialized = False
    # traffic groups discovered from BIG-IPs for service placement
    self.__traffic_groups = []

    # base configurations to report to Neutron agent state reports
    self.agent_configurations = {}  # overrides base, same value
    self.agent_configurations['device_drivers'] = [self.driver_name]
    self.agent_configurations['icontrol_endpoints'] = {}

    # to store the verified esd names
    self.esd_names = []

    # service component managers
    self.tenant_manager = None
    self.cluster_manager = None
    self.system_helper = None
    self.lbaas_builder = None
    self.service_adapter = None
    self.vlan_binding = None
    self.l3_binding = None
    self.cert_manager = None  # overrides register_OPTS

    # server helpers
    self.stat_helper = stat_helper.StatHelper()
    self.network_helper = network_helper.NetworkHelper()

    # f5-sdk helpers
    self.vs_manager = resource_helper.BigIPResourceHelper(
        resource_helper.ResourceType.virtual)
    self.pool_manager = resource_helper.BigIPResourceHelper(
        resource_helper.ResourceType.pool)

    try:
        # debug logging of service requests received by driver
        if self.conf.trace_service_requests:
            path = '/var/log/neutron/service/'
            if not os.path.exists(path):
                os.makedirs(path)
            self.file_name = path + strftime("%H%M%S-%m%d%Y") + '.json'
            with open(self.file_name, 'w') as fp:
                fp.write('[{}] ')

        # driver mode settings - GRM vs L2 adjacent
        if self.conf.f5_global_routed_mode:
            LOG.info('WARNING - f5_global_routed_mode enabled.'
                     ' There will be no L2 or L3 orchestration'
                     ' or tenant isolation provisioned. All vips'
                     ' and pool members must be routable through'
                     ' pre-provisioned SelfIPs.')
            self.conf.use_namespaces = False
            self.conf.f5_snat_mode = True
            self.conf.f5_snat_addresses_per_subnet = 0
            self.agent_configurations['tunnel_types'] = []
            self.agent_configurations['bridge_mappings'] = {}
        else:
            self.agent_configurations['tunnel_types'] = \
                self.conf.advertised_tunnel_types
            for net_id in self.conf.common_network_ids:
                LOG.debug('network %s will be mapped to /Common/%s'
                          % (net_id, self.conf.common_network_ids[net_id]))

            self.agent_configurations['common_networks'] = \
                self.conf.common_network_ids
            LOG.debug('Setting static ARP population to %s'
                      % self.conf.f5_populate_static_arp)
            self.agent_configurations['f5_common_external_networks'] = \
                self.conf.f5_common_external_networks
            f5const.FDB_POPULATE_STATIC_ARP = \
                self.conf.f5_populate_static_arp

        # parse the icontrol_hostname setting
        self._init_bigip_hostnames()
        # instantiate the managers
        self._init_bigip_managers()

        self.initialized = True
        LOG.debug('iControlDriver loaded successfully')
    except Exception as exc:
        LOG.error("exception in initializing driver %s" % str(exc))
        self._set_agent_status(False)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_valid_esd_names(self): LOG.debug("verified esd names in get_valid_esd_names():") LOG.debug(self.esd_names) return self.esd_names
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _init_bigip_hostnames(self): # Validate and parse bigip credentials if not self.conf.icontrol_hostname: raise f5ex.F5InvalidConfigurationOption( opt_name='icontrol_hostname', opt_value='valid hostname or IP address' ) if not self.conf.icontrol_username: raise f5ex.F5InvalidConfigurationOption( opt_name='icontrol_username', opt_value='valid username' ) if not self.conf.icontrol_password: raise f5ex.F5InvalidConfigurationOption( opt_name='icontrol_password', opt_value='valid password' ) self.hostnames = self.conf.icontrol_hostname.split(',') self.hostnames = [item.strip() for item in self.hostnames] self.hostnames = sorted(self.hostnames) # initialize per host agent_configurations for hostname in self.hostnames: self.__bigips[hostname] = bigip = type('', (), {})() bigip.hostname = hostname bigip.status = 'creating' bigip.status_message = 'creating BIG-IP from iControl hostnames' bigip.device_interfaces = dict() self.agent_configurations[ 'icontrol_endpoints'][hostname] = {} self.agent_configurations[ 'icontrol_endpoints'][hostname]['failover_state'] = \ 'undiscovered' self.agent_configurations[ 'icontrol_endpoints'][hostname]['status'] = 'unknown' self.agent_configurations[ 'icontrol_endpoints'][hostname]['status_message'] = ''
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
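The hostname handling above boils down to split, strip, and sort. A small illustration with made-up addresses:

# Made-up value for icontrol_hostname; the real one comes from the agent config.
icontrol_hostname = ' 10.1.0.12,10.1.0.11 ,bigip3.example.net'
hostnames = sorted(item.strip() for item in icontrol_hostname.split(','))
# -> ['10.1.0.11', '10.1.0.12', 'bigip3.example.net']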
def _init_errored_bigips(self):
    try:
        errored_bigips = self.get_errored_bigips_hostnames()
        if errored_bigips:
            LOG.debug('attempting to recover %s BIG-IPs' %
                      len(errored_bigips))
            for hostname in errored_bigips:
                # try to connect and set status
                bigip = self._open_bigip(hostname)
                if bigip.status == 'connected':
                    # set the status down until we assure initialized
                    bigip.status = 'initializing'
                    bigip.status_message = 'initializing HA viability'
                    LOG.debug('initializing HA viability %s' % hostname)
                    LOG.debug('proceeding to initialize %s' % hostname)
                    device_group_name = None
                    if not self.ha_validated:
                        device_group_name = self._validate_ha(bigip)
                        LOG.debug('HA validated from %s with DSG %s' %
                                  (hostname, device_group_name))
                        self.ha_validated = True
                    if not self.tg_initialized:
                        self._init_traffic_groups(bigip)
                        LOG.debug('known traffic groups initialized'
                                  ' from %s as %s' %
                                  (hostname, self.__traffic_groups))
                        self.tg_initialized = True
                    LOG.debug('initializing bigip %s' % hostname)
                    self._init_bigip(bigip, hostname, device_group_name)
                    LOG.debug('initializing agent configurations %s'
                              % hostname)
                    self._init_agent_config(bigip)

                    # Assure basic BIG-IP HA is operational
                    LOG.debug('validating HA state for %s' % hostname)
                    bigip.status = 'validating_HA'
                    bigip.status_message = \
                        'validating the current HA state'
                    if self._validate_ha_operational(bigip):
                        LOG.debug('setting status to active for %s'
                                  % hostname)
                        bigip.status = 'active'
                        bigip.status_message = \
                            'BIG-IP ready for provisioning'
                        self._post_init()
                        self._set_agent_status(True)
                    else:
                        LOG.debug('setting status to error for %s'
                                  % hostname)
                        bigip.status = 'error'
                        bigip.status_message = 'BIG-IP is not operational'
                        self._set_agent_status(False)
        else:
            LOG.debug('there are no BIG-IPs with error status')
    except Exception as exc:
        LOG.error('Invalid agent configuration: %s' % exc.message)
        raise
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _init_bigip(self, bigip, hostname, check_group_name=None): # Prepare a bigip for usage try: major_version, minor_version = self._validate_bigip_version( bigip, hostname) device_group_name = None extramb = self.system_helper.get_provision_extramb(bigip) if int(extramb) < f5const.MIN_EXTRA_MB: raise f5ex.ProvisioningExtraMBValidateFailed( 'Device %s BIG-IP not provisioned for ' 'management LARGE.' % hostname) if self.conf.f5_ha_type == 'pair' and \ self.cluster_manager.get_sync_status(bigip) == \ 'Standalone': raise f5ex.BigIPClusterInvalidHA( 'HA mode is pair and bigip %s in standalone mode' % hostname) if self.conf.f5_ha_type == 'scalen' and \ self.cluster_manager.get_sync_status(bigip) == \ 'Standalone': raise f5ex.BigIPClusterInvalidHA( 'HA mode is scalen and bigip %s in standalone mode' % hostname) if self.conf.f5_ha_type != 'standalone': device_group_name = \ self.cluster_manager.get_device_group(bigip) if not device_group_name: raise f5ex.BigIPClusterInvalidHA( 'HA mode is %s and no sync failover ' 'device group found for device %s.' % (self.conf.f5_ha_type, hostname)) if check_group_name and device_group_name != check_group_name: raise f5ex.BigIPClusterInvalidHA( 'Invalid HA. Device %s is in device group' ' %s but should be in %s.' % (hostname, device_group_name, check_group_name)) bigip.device_group_name = device_group_name if self.network_builder: for network in self.conf.common_network_ids.values(): if not self.network_builder.vlan_exists(bigip, network, folder='Common'): raise f5ex.MissingNetwork( 'Common network %s on %s does not exist' % (network, bigip.hostname)) bigip.device_name = self.cluster_manager.get_device_name(bigip) bigip.mac_addresses = self.system_helper.get_mac_addresses(bigip) LOG.debug("Initialized BIG-IP %s with MAC addresses %s" % (bigip.device_name, ', '.join(bigip.mac_addresses))) bigip.device_interfaces = \ self.system_helper.get_interface_macaddresses_dict(bigip) bigip.assured_networks = {} bigip.assured_tenant_snat_subnets = {} bigip.assured_gateway_subnets = [] if self.conf.f5_ha_type != 'standalone': self.cluster_manager.disable_auto_sync( device_group_name, bigip) # validate VTEP SelfIPs if not self.conf.f5_global_routed_mode: self.network_builder.initialize_tunneling(bigip) # Turn off tunnel syncing between BIG-IP # as our VTEPs properly use only local SelfIPs if self.system_helper.get_tunnel_sync(bigip) == 'enable': self.system_helper.set_tunnel_sync(bigip, enabled=False) LOG.debug('connected to iControl %s @ %s ver %s.%s' % (self.conf.icontrol_username, hostname, major_version, minor_version)) except Exception as exc: bigip.status = 'error' bigip.status_message = str(exc)[:80] raise return bigip
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _validate_ha(self, bigip):
    # if there was only one address supplied and
    # this is not a standalone device, get the
    # devices trusted by this device.
    device_group_name = None
    if self.conf.f5_ha_type == 'standalone':
        if len(self.hostnames) != 1:
            bigip.status = 'error'
            bigip.status_message = \
                'HA mode is standalone and %d hosts found.'\
                % len(self.hostnames)
            raise f5ex.BigIPClusterInvalidHA(
                'HA mode is standalone and %d hosts found.'
                % len(self.hostnames))
        device_group_name = 'standalone'
    elif self.conf.f5_ha_type == 'pair':
        device_group_name = self.cluster_manager.\
            get_device_group(bigip)
        if len(self.hostnames) != 2:
            mgmt_addrs = []
            devices = self.cluster_manager.devices(bigip)
            for device in devices:
                mgmt_addrs.append(
                    self.cluster_manager.get_mgmt_addr_by_device(
                        bigip, device))
            self.hostnames = mgmt_addrs
        if len(self.hostnames) != 2:
            bigip.status = 'error'
            bigip.status_message = 'HA mode is pair and %d hosts found.' \
                % len(self.hostnames)
            raise f5ex.BigIPClusterInvalidHA(
                'HA mode is pair and %d hosts found.'
                % len(self.hostnames))
    elif self.conf.f5_ha_type == 'scalen':
        device_group_name = self.cluster_manager.\
            get_device_group(bigip)
        if len(self.hostnames) < 2:
            mgmt_addrs = []
            devices = self.cluster_manager.devices(bigip)
            for device in devices:
                mgmt_addrs.append(
                    self.cluster_manager.get_mgmt_addr_by_device(
                        bigip, device)
                )
            self.hostnames = mgmt_addrs
        if len(self.hostnames) < 2:
            bigip.status = 'error'
            bigip.status_message = \
                'HA mode is scalen and fewer than 2 hosts found.'
            raise f5ex.BigIPClusterInvalidHA(
                'HA mode is scalen and fewer than 2 hosts found.')
    return device_group_name
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def _init_agent_config(self, bigip): # Init agent config ic_host = {} ic_host['version'] = self.system_helper.get_version(bigip) ic_host['device_name'] = bigip.device_name ic_host['platform'] = self.system_helper.get_platform(bigip) ic_host['serial_number'] = self.system_helper.get_serial_number(bigip) ic_host['status'] = bigip.status ic_host['status_message'] = bigip.status_message ic_host['failover_state'] = self.get_failover_state(bigip) if hasattr(bigip, 'local_ip') and bigip.local_ip: ic_host['local_ip'] = bigip.local_ip else: ic_host['local_ip'] = 'VTEP disabled' self.agent_configurations['tunnel_types'] = list() self.agent_configurations['icontrol_endpoints'][bigip.hostname] = \ ic_host if self.network_builder: self.agent_configurations['bridge_mappings'] = \ self.network_builder.interface_mapping
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_failover_state(self, bigip): try: if hasattr(bigip, 'tm'): fs = bigip.tm.sys.dbs.db.load(name='failover.state') bigip.failover_state = fs.value return bigip.failover_state else: return 'error' except Exception as exc: LOG.exception('Error getting %s failover state' % bigip.hostname) bigip.status = 'error' bigip.status_message = str(exc)[:80] self._set_agent_status(False) return 'error'
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def recover_errored_devices(self): # trigger a retry on errored BIG-IPs try: self._init_errored_bigips() except Exception as exc: LOG.error('Could not recover devices: %s' % exc.message)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def generate_capacity_score(self, capacity_policy=None): """Generate the capacity score of connected devices.""" if capacity_policy: highest_metric = 0.0 highest_metric_name = None my_methods = dir(self) bigips = self.get_all_bigips() for metric in capacity_policy: func_name = 'get_' + metric if func_name in my_methods: max_capacity = int(capacity_policy[metric]) metric_func = getattr(self, func_name) metric_value = 0 for bigip in bigips: if bigip.status == 'active': global_stats = \ self.stat_helper.get_global_statistics(bigip) value = int( metric_func(bigip=bigip, global_statistics=global_stats) ) LOG.debug('calling capacity %s on %s returned: %s' % (func_name, bigip.hostname, value)) else: value = 0 if value > metric_value: metric_value = value metric_capacity = float(metric_value) / float(max_capacity) if metric_capacity > highest_metric: highest_metric = metric_capacity highest_metric_name = metric else: LOG.warn('capacity policy has method ' '%s which is not implemented in this driver' % metric) LOG.debug('capacity score: %s based on %s' % (highest_metric, highest_metric_name)) return highest_metric return 0
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
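The capacity score above is the largest utilisation ratio seen across the metrics named in the policy, using the busiest active device for each metric. A hedged worked example with invented numbers (the metric names assume corresponding get_* helpers exist on the driver):

# Invented policy and observations, illustration only.
capacity_policy = {'node_count': '100', 'route_domain_count': '50'}
observed = {'node_count': 37, 'route_domain_count': 44}

score = 0.0
for metric, max_capacity in capacity_policy.items():
    ratio = float(observed[metric]) / float(max_capacity)
    score = max(score, ratio)

print(score)  # 0.88, dominated by route_domain_count (44 of a maximum 50)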
def set_plugin_rpc(self, plugin_rpc): # Provide Plugin RPC access self.plugin_rpc = plugin_rpc
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def set_l2pop_rpc(self, l2pop_rpc): # Provide FDB Connector with ML2 RPC access if self.network_builder: self.network_builder.set_l2pop_rpc(l2pop_rpc)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def service_exists(self, service): return self._service_exists(service)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_all_deployed_loadbalancers(self, purge_orphaned_folders=False): LOG.debug('getting all deployed loadbalancers on BIG-IPs') deployed_lb_dict = {} for bigip in self.get_all_bigips(): folders = self.system_helper.get_folders(bigip) for folder in folders: tenant_id = folder[len(self.service_adapter.prefix):] if str(folder).startswith(self.service_adapter.prefix): resource = resource_helper.BigIPResourceHelper( resource_helper.ResourceType.virtual_address) deployed_lbs = resource.get_resources(bigip, folder) if deployed_lbs: for lb in deployed_lbs: lb_id = lb.name[len(self.service_adapter.prefix):] if lb_id in deployed_lb_dict: deployed_lb_dict[lb_id][ 'hostnames'].append(bigip.hostname) else: deployed_lb_dict[lb_id] = { 'id': lb_id, 'tenant_id': tenant_id, 'hostnames': [bigip.hostname] } else: # delay to assure we are not in the tenant creation # process before a virtual address is created. greenthread.sleep(10) deployed_lbs = resource.get_resources(bigip, folder) if deployed_lbs: for lb in deployed_lbs: lb_id = lb.name[ len(self.service_adapter.prefix):] deployed_lb_dict[lb_id] = \ {'id': lb_id, 'tenant_id': tenant_id} else: # Orphaned folder! if purge_orphaned_folders: try: self.system_helper.purge_folder_contents( bigip, folder) self.system_helper.purge_folder( bigip, folder) LOG.error('orphaned folder %s on %s' % (folder, bigip.hostname)) except Exception as exc: LOG.error('error purging folder %s: %s' % (folder, str(exc))) return deployed_lb_dict
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
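For reference, a hedged example of the mapping this method builds; the IDs and hostnames below are invented:

example_deployed_lbs = {
    'a1b2c3d4-0000-1111-2222-333344445555': {
        'id': 'a1b2c3d4-0000-1111-2222-333344445555',
        'tenant_id': 'feedface-aaaa-bbbb-cccc-ddddeeeeffff',
        'hostnames': ['bigip-a.example.net', 'bigip-b.example.net'],
    },
}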
def get_all_deployed_listeners(self, expand_subcollections=False): LOG.debug('getting all deployed listeners on BIG-IPs') deployed_virtual_dict = {} for bigip in self.get_all_bigips(): folders = self.system_helper.get_folders(bigip) for folder in folders: tenant_id = folder[len(self.service_adapter.prefix):] if str(folder).startswith(self.service_adapter.prefix): resource = resource_helper.BigIPResourceHelper( resource_helper.ResourceType.virtual) deployed_listeners = resource.get_resources( bigip, folder, expand_subcollections) if deployed_listeners: for virtual in deployed_listeners: virtual_id = \ virtual.name[len(self.service_adapter.prefix):] l7_policy = '' if hasattr(virtual, 'policiesReference') and \ 'items' in virtual.policiesReference: l7_policy = \ virtual.policiesReference['items'][0] l7_policy = l7_policy['fullPath'] if virtual_id in deployed_virtual_dict: deployed_virtual_dict[virtual_id][ 'hostnames'].append(bigip.hostname) else: deployed_virtual_dict[virtual_id] = { 'id': virtual_id, 'tenant_id': tenant_id, 'hostnames': [bigip.hostname], 'l7_policy': l7_policy } return deployed_virtual_dict
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def purge_orphaned_nodes(self, tenant_members): node_helper = resource_helper.BigIPResourceHelper( resource_helper.ResourceType.node) node_dict = dict() for bigip in self.get_all_bigips(): for tenant_id, members in tenant_members.iteritems(): partition = self.service_adapter.prefix + tenant_id nodes = node_helper.get_resources(bigip, partition=partition) for n in nodes: node_dict[n.name] = n for member in members: rd = self.network_builder.find_subnet_route_domain( tenant_id, member.get('subnet_id', None)) node_name = "{}%{}".format(member['address'], rd) node_dict.pop(node_name, None) for node_name, node in node_dict.iteritems(): try: node_helper.delete(bigip, name=urllib.quote(node_name), partition=partition) except HTTPError as error: if error.response.status_code == 400: LOG.error(error.response)
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
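The node bookkeeping above keys nodes by member address plus route domain. A hedged illustration of the name format with invented values:

member = {'address': '192.168.10.5', 'subnet_id': 'subnet-1234'}
rd = 3  # in the driver this comes from network_builder.find_subnet_route_domain(...)
node_name = "{}%{}".format(member['address'], rd)
# -> '192.168.10.5%3'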
def get_all_deployed_pools(self): LOG.debug('getting all deployed pools on BIG-IPs') deployed_pool_dict = {} for bigip in self.get_all_bigips(): folders = self.system_helper.get_folders(bigip) for folder in folders: tenant_id = folder[len(self.service_adapter.prefix):] if str(folder).startswith(self.service_adapter.prefix): resource = resource_helper.BigIPResourceHelper( resource_helper.ResourceType.pool) deployed_pools = resource.get_resources(bigip, folder) if deployed_pools: for pool in deployed_pools: pool_id = \ pool.name[len(self.service_adapter.prefix):] monitor_id = '' if hasattr(pool, 'monitor'): monitor = pool.monitor.split('/')[2].strip() monitor_id = \ monitor[len(self.service_adapter.prefix):] LOG.debug( 'pool {} has monitor {}'.format( pool.name, monitor)) else: LOG.debug( 'pool {} has no healthmonitors'.format( pool.name)) if pool_id in deployed_pool_dict: deployed_pool_dict[pool_id][ 'hostnames'].append(bigip.hostname) else: deployed_pool_dict[pool_id] = { 'id': pool_id, 'tenant_id': tenant_id, 'hostnames': [bigip.hostname], 'monitors': monitor_id } return deployed_pool_dict
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def purge_orphaned_pool(self, tenant_id=None, pool_id=None,
                        hostnames=list()):
    node_helper = resource_helper.BigIPResourceHelper(
        resource_helper.ResourceType.node)
    for bigip in self.get_all_bigips():
        if bigip.hostname in hostnames:
            try:
                pool_name = self.service_adapter.prefix + pool_id
                partition = self.service_adapter.prefix + tenant_id
                pool = resource_helper.BigIPResourceHelper(
                    resource_helper.ResourceType.pool).load(
                        bigip, pool_name, partition)
                members = pool.members_s.get_collection()
                pool.delete()
                for member in members:
                    node_name = member.address
                    try:
                        node_helper.delete(bigip,
                                           name=urllib.quote(node_name),
                                           partition=partition)
                    except HTTPError as e:
                        if e.response.status_code == 404:
                            pass
                        elif e.response.status_code == 400:
                            LOG.warn("Failed to delete node -- in use")
                        else:
                            LOG.exception("Failed to delete node")
            except HTTPError as err:
                if err.response.status_code == 404:
                    LOG.debug('pool %s not on BIG-IP %s.'
                              % (pool_id, bigip.hostname))
            except Exception as exc:
                LOG.exception('Exception purging pool %s' % str(exc))
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def get_all_deployed_health_monitors(self): """Retrieve a list of all Health Monitors deployed."""
    LOG.debug('getting all deployed monitors on BIG-IPs')
    monitor_types = ['http_monitor', 'https_monitor',
                     'tcp_monitor', 'ping_monitor']
    deployed_monitor_dict = {}
    adapter_prefix = self.service_adapter.prefix
    for bigip in self.get_all_bigips():
        folders = self.system_helper.get_folders(bigip)
        for folder in folders:
            tenant_id = folder[len(adapter_prefix):]
            if str(folder).startswith(adapter_prefix):
                resources = map(
                    lambda x: resource_helper.BigIPResourceHelper(
                        getattr(resource_helper.ResourceType, x)),
                    monitor_types)
                for resource in resources:
                    deployed_monitors = resource.get_resources(
                        bigip, folder)
                    if deployed_monitors:
                        for monitor in deployed_monitors:
                            monitor_id = monitor.name[len(adapter_prefix):]
                            if monitor_id in deployed_monitor_dict:
                                deployed_monitor_dict[monitor_id][
                                    'hostnames'].append(bigip.hostname)
                            else:
                                deployed_monitor_dict[monitor_id] = {
                                    'id': monitor_id,
                                    'tenant_id': tenant_id,
                                    'hostnames': [bigip.hostname]
                                }
    return deployed_monitor_dict
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]
def purge_orphaned_health_monitor(self, tenant_id=None, monitor_id=None,
                                  hostnames=list()):
    """Purge all monitors that exist on the BIG-IP but not in Neutron."""
    resource_types = [
        resource_helper.BigIPResourceHelper(x) for x in [
            resource_helper.ResourceType.http_monitor,
            resource_helper.ResourceType.https_monitor,
            resource_helper.ResourceType.ping_monitor,
            resource_helper.ResourceType.tcp_monitor]]
    for bigip in self.get_all_bigips():
        if bigip.hostname in hostnames:
            try:
                monitor_name = self.service_adapter.prefix + monitor_id
                partition = self.service_adapter.prefix + tenant_id
                monitor = None
                for monitor_type in resource_types:
                    try:
                        monitor = monitor_type.load(bigip, monitor_name,
                                                    partition)
                        break
                    except HTTPError as err:
                        if err.response.status_code == 404:
                            continue
                monitor.delete()
            except TypeError as err:
                if 'NoneType' in str(err):
                    LOG.exception("Could not find monitor {}".format(
                        monitor_name))
            except Exception as exc:
                LOG.exception('Exception purging monitor %s' % str(exc))
F5Networks/f5-openstack-agent
[ 14, 37, 14, 83, 1444753730 ]