Dataset columns (10,000 rows; row indices 0 to 10k):

    Unnamed: 0    int64     row index
    function      string    lengths 79 to 138k
    label         string    20 distinct classes (the exception type that fills ``__HOLE__`` in ``function``)
    info          string    lengths 42 to 261 (provenance path within the ETHPy150 corpus)
6,500
def comment_line(path, regex, char='#', cmnt=True, backup='.bak'):
    r'''
    Comment or Uncomment a line in a text file.

    :param path: string
        The full path to the text file.
    :param regex: string
        A regex expression that begins with ``^`` that will find the line you
        wish to comment. Can be as simple as ``^color =``
    :param char: string
        The character used to comment a line in the type of file you're
        referencing. Default is ``#``
    :param cmnt: boolean
        True to comment the line. False to uncomment the line. Default is
        True.
    :param backup: string
        The file extension to give the backup file. Default is ``.bak``
        Set to False/None to not keep a backup.

    :return: boolean
        Returns True if successful, False if not

    CLI Example:

    The following example will comment out the ``pcspkr`` line in the
    ``/etc/modules`` file using the default ``#`` character and create a
    backup file named ``modules.bak``

    .. code-block:: bash

        salt '*' file.comment_line '/etc/modules' '^pcspkr'

    CLI Example:

    The following example will uncomment the ``log_level`` setting in the
    ``minion`` config file if it is set to either ``warning``, ``info``, or
    ``debug`` using the ``#`` character and create a backup file named
    ``minion.bk``

    .. code-block:: bash

        salt '*' file.comment_line 'C:\salt\conf\minion' '^log_level: (warning|info|debug)' '#' False '.bk'
    '''
    # Get the regex for comment or uncomment
    if cmnt:
        regex = '{0}({1}){2}'.format(
            '^' if regex.startswith('^') else '',
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')
    else:
        regex = r'^{0}\s*({1}){2}'.format(
            char,
            regex.lstrip('^').rstrip('$'),
            '$' if regex.endswith('$') else '')

    # Load the real path to the file
    path = os.path.realpath(os.path.expanduser(path))

    # Make sure the file exists
    if not os.path.isfile(path):
        raise SaltInvocationError('File not found: {0}'.format(path))

    # Make sure it is a text file
    if not salt.utils.istextfile(path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'.format(path))

    # First check the whole file, determine whether to make the replacement
    # Searching first avoids modifying the time stamp if there are no changes
    found = False
    # Lists for comparing changes
    orig_file = []
    new_file = []
    # Buffer size for fopen
    bufsize = os.path.getsize(path)
    try:
        # Use a read-only handle to open the file
        with salt.utils.fopen(path, mode='rb', buffering=bufsize) as r_file:
            # Loop through each line of the file and look for a match
            for line in r_file:
                # Is it in this line
                if re.match(regex, line):
                    # Load lines into lists, set found to True
                    orig_file.append(line)
                    if cmnt:
                        new_file.append('{0}{1}'.format(char, line))
                    else:
                        new_file.append(line.lstrip(char))
                    found = True
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )

    # We've searched the whole file.
    # If we didn't find anything, return False
    if not found:
        return False

    if not salt.utils.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = __salt__['config.manage_mode'](get_mode(path))

    # Create a copy to read from and to use as a backup later
    try:
        temp_file = _mkstemp_copy(path=path, preserve_inode=False)
    except (OSError, IOError) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))

    try:
        # Open the file in write mode
        with salt.utils.fopen(path, mode='wb', buffering=bufsize) as w_file:
            try:
                # Open the temp file in read mode
                with salt.utils.fopen(temp_file, mode='rb', buffering=bufsize) as r_file:
                    # Loop through each line of the file and look for a match
                    for line in r_file:
                        try:
                            # Is it in this line
                            if re.match(regex, line):
                                # Write the new line
                                if cmnt:
                                    w_file.write('{0}{1}'.format(char, line))
                                else:
                                    w_file.write(line.lstrip(char))
                            else:
                                # Write the existing line (no change)
                                w_file.write(line)
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
    except (OSError, __HOLE__) as exc:
        raise CommandExecutionError("Exception: {0}".format(exc))

    if backup:
        # Move the backup file to the original directory
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        os.remove(temp_file)

    if not salt.utils.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)

    # Return a diff using the two lists
    return ''.join(difflib.unified_diff(orig_file, new_file))
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/comment_line
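A minimal standalone sketch (hypothetical helper name, not part of the Salt module) of the anchor handling comment_line performs on its regex: strip any existing ``^``/``$`` anchors, wrap the body in a capture group, then re-add the anchors, plus the comment character when uncommenting.

    import re

    def _comment_regex(regex, char='#', cmnt=True):
        # Strip existing anchors, capture the body, re-anchor as needed.
        body = regex.lstrip('^').rstrip('$')
        tail = '$' if regex.endswith('$') else ''
        if cmnt:
            return '{0}({1}){2}'.format(
                '^' if regex.startswith('^') else '', body, tail)
        return r'^{0}\s*({1}){2}'.format(char, body, tail)

    assert re.match(_comment_regex('^pcspkr'), 'pcspkr')
    assert re.match(_comment_regex('^pcspkr', cmnt=False), '#  pcspkr')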
6,501
def _mkstemp_copy(path, preserve_inode=True):
    '''
    Create a temp file and move/copy the contents of ``path`` to the temp
    file. Return the path to the temp file.

    path
        The full path to the file whose contents will be moved/copied to a
        temp file. Whether it's moved or copied depends on the value of
        ``preserve_inode``.
    preserve_inode
        Preserve the inode of the file, so that any hard links continue to
        share the inode with the original filename. This works by *copying*
        the file, reading from the copy, and writing to the file at the
        original inode. If ``False``, the file will be *moved* rather than
        copied, and a new file will be written to a new inode, but using the
        original filename. Hard links will then share an inode with the
        backup, instead (if using ``backup`` to create a backup copy).
        Default is ``True``.
    '''
    temp_file = None
    # Create the temp file
    try:
        temp_file = salt.utils.mkstemp()
    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to create temp file. "
            "Exception: {0}".format(exc)
        )
    # use `copy` to preserve the inode of the
    # original file, and thus preserve hardlinks
    # to the inode. otherwise, use `move` to
    # preserve prior behavior, which results in
    # writing the file to a new inode.
    if preserve_inode:
        try:
            shutil.copy2(path, temp_file)
        except (__HOLE__, IOError) as exc:
            raise CommandExecutionError(
                "Unable to copy file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
    else:
        try:
            shutil.move(path, temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move file '{0}' to the "
                "temp file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )

    return temp_file
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/_mkstemp_copy
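A standalone rendition of the same copy-vs-move decision using only the stdlib (hypothetical function name; ``salt.utils.mkstemp`` is replaced by ``tempfile.mkstemp``):

    import os, shutil, tempfile

    def mkstemp_copy(path, preserve_inode=True):
        fd, temp_file = tempfile.mkstemp()
        os.close(fd)
        if preserve_inode:
            shutil.copy2(path, temp_file)   # original inode (and its hard links) untouched
        else:
            shutil.move(path, temp_file)    # original filename will later land on a new inode
        return temp_file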
6,502
def line(path, content, match=None, mode=None, location=None,
         before=None, after=None, show_changes=True, backup=False,
         quiet=False, indent=True):
    '''
    .. versionadded:: 2015.8.0

    Edit a line in the configuration file.

    :param path:
        Filesystem path to the file to be edited.

    :param content:
        Content of the line.

    :param match:
        Match the target line for an action by a fragment of a string or
        regular expression.

    :param mode:
        :Ensure:
            If line does not exist, it will be added.
        :Replace:
            If line already exists, it will be replaced.
        :Delete:
            Delete the line, once found.
        :Insert:
            Insert a line.

    :param location:
        :start:
            Place the content at the beginning of the file.
        :end:
            Place the content at the end of the file.

    :param before:
        Regular expression or an exact case-sensitive fragment of the string.

    :param after:
        Regular expression or an exact case-sensitive fragment of the string.

    :param show_changes:
        Output a unified diff of the old file and the new file. If ``False``
        return a boolean if any changes were made. Default is ``True``

        .. note::

            Using this option will store two copies of the file in-memory
            (the original version and the edited version) in order to
            generate the diff.

    :param backup:
        Create a backup of the original file with the extension:
        "Year-Month-Day-Hour-Minutes-Seconds".

    :param quiet:
        Do not raise any exceptions. E.g. ignore the fact that the file that
        is tried to be edited does not exist and nothing really happened.

    :param indent:
        Keep indentation with the previous line.

    If an equal sign (``=``) appears in an argument to a Salt command, it is
    interpreted as a keyword argument in the format of ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block:: bash

        salt '*' file.line /path/to/file content="CREATE_MAIL_SPOOL=no" match="CREATE_MAIL_SPOOL=yes" mode="replace"

    CLI Examples:

    .. code-block:: bash

        salt '*' file.line /etc/nsswitch.conf "networks:\tfiles dns" after="hosts:.*?" mode='ensure'
    '''
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError('File "{0}" does not exist or is not a file.'.format(path))
        return False  # No changes have happened

    mode = mode and mode.lower() or mode
    if mode not in ['insert', 'ensure', 'delete', 'replace']:
        if mode is None:
            raise CommandExecutionError('Mode was not defined. How to process the file?')
        else:
            raise CommandExecutionError('Unknown mode: "{0}"'.format(mode))

    # before/after take precedence. If neither is defined, match falls back
    # to content.
    if before is None and after is None and not match:
        match = content

    body = salt.utils.fopen(path, mode='r').read()
    body_before = hashlib.sha256(salt.utils.to_bytes(body)).hexdigest()
    after = _regex_to_static(body, after)
    before = _regex_to_static(body, before)
    match = _regex_to_static(body, match)

    if mode == 'delete':
        body = os.linesep.join([line for line in body.split(os.linesep)
                                if line.find(match) < 0])

    elif mode == 'replace':
        body = os.linesep.join([(_get_line_indent(line, content, indent)
                                 if (line.find(match) > -1 and not line == content)
                                 else line)
                                for line in body.split(os.linesep)])

    elif mode == 'insert':
        if not location and not before and not after:
            raise CommandExecutionError('On insert must be defined either "location" or "before/after" conditions.')

        if not location:
            if before and after:
                _assert_occurrence(body, before, 'before')
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1 and idx <= len(lines) and lines[idx - 1].find(after) > -1:
                        out.append(_get_line_indent(_line, content, indent))
                        out.append(_line)
                    else:
                        out.append(_line)
                body = os.linesep.join(out)

            if before and not after:
                _assert_occurrence(body, before, 'before')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    if _line.find(before) > -1:
                        cnd = _get_line_indent(_line, content, indent)
                        if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0):  # Job for replace instead
                            out.append(cnd)
                    out.append(_line)
                body = os.linesep.join(out)

            elif after and not before:
                _assert_occurrence(body, after, 'after')
                out = []
                lines = body.split(os.linesep)
                for idx in range(len(lines)):
                    _line = lines[idx]
                    out.append(_line)
                    cnd = _get_line_indent(_line, content, indent)
                    if _line.find(after) > -1:
                        # No dupes or append, if "after" is the last line
                        if (idx < len(lines) and _starts_till(lines[idx + 1], cnd) < 0) or idx + 1 == len(lines):
                            out.append(cnd)
                body = os.linesep.join(out)

        else:
            if location == 'start':
                body = ''.join([content, body])
            elif location == 'end':
                body = ''.join([body, _get_line_indent(body[-1], content, indent) if body else content])

    elif mode == 'ensure':
        after = after and after.strip()
        before = before and before.strip()
        content = content and content.strip()

        if before and after:
            _assert_occurrence(body, before, 'before')
            _assert_occurrence(body, after, 'after')
            a_idx = b_idx = -1
            idx = 0
            body = body.split(os.linesep)
            for _line in body:
                idx += 1
                if _line.find(before) > -1 and b_idx < 0:
                    b_idx = idx
                if _line.find(after) > -1 and a_idx < 0:
                    a_idx = idx

            # Add
            if not b_idx - a_idx - 1:
                body = body[:a_idx] + [content] + body[b_idx - 1:]
            elif b_idx - a_idx - 1 == 1:
                if _starts_till(body[a_idx:b_idx - 1][0], content) > -1:
                    body[a_idx] = _get_line_indent(body[a_idx - 1], content, indent)
            else:
                raise CommandExecutionError('Found more than one line between boundaries "before" and "after".')
            body = os.linesep.join(body)

        elif before and not after:
            _assert_occurrence(body, before, 'before')
            body = body.split(os.linesep)
            out = []
            for idx in range(len(body)):
                if body[idx].find(before) > -1:
                    prev = (idx > 0 and idx or 1) - 1
                    out.append(_get_line_indent(body[prev], content, indent))
                    if _starts_till(out[prev], content) > -1:
                        del out[prev]
                out.append(body[idx])
            body = os.linesep.join(out)

        elif not before and after:
            _assert_occurrence(body, after, 'after')
            body = body.split(os.linesep)
            skip = None
            out = []
            for idx in range(len(body)):
                if skip != body[idx]:
                    out.append(body[idx])

                if body[idx].find(after) > -1:
                    next_line = idx + 1 < len(body) and body[idx + 1] or None
                    if next_line is not None and _starts_till(next_line, content) > -1:
                        skip = next_line
                    out.append(_get_line_indent(body[idx], content, indent))
            body = os.linesep.join(out)

        else:
            raise CommandExecutionError("Wrong conditions? "
                                        "Unable to ensure line without knowing "
                                        "where to put it before and/or after.")

    changed = body_before != hashlib.sha256(salt.utils.to_bytes(body)).hexdigest()

    if backup and changed and __opts__['test'] is False:
        try:
            temp_file = _mkstemp_copy(path=path, preserve_inode=True)
            shutil.move(temp_file, '{0}.{1}'.format(path, time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())))
        except (OSError, __HOLE__) as exc:
            raise CommandExecutionError("Unable to create the backup file of {0}. Exception: {1}".format(path, exc))

    changes_diff = None

    if changed:
        if show_changes:
            changes_diff = ''.join(difflib.unified_diff(
                salt.utils.fopen(path, 'r').read().splitlines(), body.splitlines()))
        if __opts__['test'] is False:
            fh_ = None
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
                fh_.write(body)
            finally:
                if fh_:
                    fh_.close()

    return show_changes and changes_diff or changed
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/line
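The change-detection idiom used by line above, isolated as a sketch (hypothetical function name): compare SHA-256 digests of the before/after bodies first, then build a unified diff only when the caller asks for one.

    import difflib, hashlib

    def body_changed(old_body, new_body, show_changes=True):
        # Digest comparison is cheap; the diff costs two in-memory copies.
        changed = (hashlib.sha256(old_body.encode()).hexdigest()
                   != hashlib.sha256(new_body.encode()).hexdigest())
        if changed and show_changes:
            return ''.join(difflib.unified_diff(old_body.splitlines(True),
                                                new_body.splitlines(True)))
        return changed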
6,503
def replace(path,
            pattern,
            repl,
            count=0,
            flags=8,
            bufsize=1,
            append_if_not_found=False,
            prepend_if_not_found=False,
            not_found_content=None,
            backup='.bak',
            dry_run=False,
            search_only=False,
            show_changes=True,
            ignore_if_missing=False,
            preserve_inode=True,
            ):
    '''
    .. versionadded:: 0.17.0

    Replace occurrences of a pattern in a file. If ``show_changes`` is
    ``True``, then a diff of what changed will be returned, otherwise
    ``True`` will be returned when changes are made, and ``False`` when
    no changes are made.

    This is a pure Python implementation that wraps Python's
    :py:func:`~re.sub`.

    path
        Filesystem path to the file to be edited

    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.

    repl
        The replacement text

    count : 0
        Maximum number of pattern occurrences to be replaced. If count is a
        positive integer ``n``, only ``n`` occurrences will be replaced,
        otherwise all occurrences will be replaced.

    flags (list or int)
        A list of flags defined in the :ref:`re module documentation
        <contents-of-module-re>`. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the XOR (``|``) of all the desired flags. Defaults
        to 8 (which supports 'MULTILINE').

    bufsize (int or str)
        How much of the file to buffer into memory at once. The default
        value ``1`` processes one line at a time. The special value ``file``
        may be specified which will read the entire file into memory before
        processing.

    append_if_not_found : False
        .. versionadded:: 2014.7.0

        If set to ``True``, and pattern is not found, then the content will
        be appended to the file.

    prepend_if_not_found : False
        .. versionadded:: 2014.7.0

        If set to ``True`` and pattern is not found, then the content will
        be prepended to the file.

    not_found_content
        .. versionadded:: 2014.7.0

        Content to use for append/prepend if not found. If None (default),
        uses ``repl``. Useful when ``repl`` uses references to groups in the
        pattern.

    backup : .bak
        The file extension to use for a backup of the file before editing.
        Set to ``False`` to skip making a backup.

    dry_run : False
        If set to ``True``, no changes will be made to the file, the
        function will just return the changes that would have been made (or
        a ``True``/``False`` value if ``show_changes`` is set to ``False``).

    search_only : False
        If set to ``True``, no changes will be performed on the file, and
        this function will simply return ``True`` if the pattern was
        matched, and ``False`` if not.

    show_changes : True
        If ``True``, return a diff of changes made. Otherwise, return
        ``True`` if changes were made, and ``False`` if not.

        .. note::

            Using this option will store two copies of the file in memory
            (the original version and the edited version) in order to
            generate the diff. This may not normally be a concern, but could
            impact performance if used with large files.

    ignore_if_missing : False
        .. versionadded:: 2015.8.0

        If set to ``True``, this function will simply return ``False``
        if the file doesn't exist. Otherwise, an error will be thrown.

    preserve_inode : True
        .. versionadded:: 2015.8.0

        Preserve the inode of the file, so that any hard links continue to
        share the inode with the original filename. This works by *copying*
        the file, reading from the copy, and writing to the file at the
        original inode. If ``False``, the file will be *moved* rather than
        copied, and a new file will be written to a new inode, but using the
        original filename. Hard links will then share an inode with the
        backup, instead (if using ``backup`` to create a backup copy).

    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:

    .. code-block:: bash

        salt '*' file.replace /path/to/file pattern='=' repl=':'
        salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:'

    CLI Examples:

    .. code-block:: bash

        salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info'
        salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]'
    '''
    symlink = False
    if is_link(path):
        symlink = True
        target_path = os.readlink(path)
        given_path = os.path.expanduser(path)

    path = os.path.realpath(os.path.expanduser(path))

    if not os.path.exists(path):
        if ignore_if_missing:
            return False
        else:
            raise SaltInvocationError('File not found: {0}'.format(path))

    if not salt.utils.istextfile(path):
        raise SaltInvocationError(
            'Cannot perform string replacements on a binary file: {0}'
            .format(path)
        )

    if search_only and (append_if_not_found or prepend_if_not_found):
        raise SaltInvocationError(
            'search_only cannot be used with append/prepend_if_not_found'
        )

    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            'Only one of append and prepend_if_not_found is permitted'
        )

    flags_num = _get_flags(flags)
    cpattern = re.compile(str(pattern), flags_num)
    filesize = os.path.getsize(path)
    if bufsize == 'file':
        bufsize = filesize

    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []  # used if show_changes
    new_file = []   # used if show_changes
    if not salt.utils.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = __salt__['config.manage_mode'](get_mode(path))

    # Avoid TypeErrors by forcing repl to be a string
    repl = str(repl)

    found = False
    temp_file = None
    content = str(not_found_content) if not_found_content and \
        (prepend_if_not_found or append_if_not_found) \
        else repl

    try:
        # First check the whole file, determine whether to make the replacement
        # Searching first avoids modifying the time stamp if there are no changes
        r_data = None
        # Use a read-only handle to open the file
        with salt.utils.fopen(path, mode='rb', buffering=bufsize) as r_file:
            try:
                # mmap throws a ValueError if the file is empty.
                r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ)
            except (ValueError, mmap.error):
                # size of file in /proc is 0, but contains data
                r_data = "".join(r_file)
            if search_only:
                # Just search; bail as early as a match is found
                if re.search(cpattern, r_data):
                    return True  # `with` block handles file closure
            else:
                result, nrepl = re.subn(cpattern, repl, r_data, count)

                # found anything? (even if no change)
                if nrepl > 0:
                    found = True
                    # Identity check the potential change
                    has_changes = True if pattern != repl else has_changes

                if prepend_if_not_found or append_if_not_found:
                    # Search for content, to avoid pre/appending the
                    # content if it was pre/appended in a previous run.
                    if re.search('^{0}$'.format(re.escape(content)),
                                 r_data,
                                 flags=flags_num):
                        # Content was found, so set found.
                        found = True

                # Keep track of show_changes here, in case the file isn't
                # modified
                if show_changes or append_if_not_found or \
                        prepend_if_not_found:
                    orig_file = r_data.read(filesize).splitlines(True) \
                        if hasattr(r_data, 'read') \
                        else r_data.splitlines(True)
                    new_file = result.splitlines(True)

    except (OSError, IOError) as exc:
        raise CommandExecutionError(
            "Unable to open file '{0}'. "
            "Exception: {1}".format(path, exc)
        )
    finally:
        if r_data and isinstance(r_data, mmap.mmap):
            r_data.close()

    if has_changes and not dry_run:
        # Write the replacement text in this block.
        try:
            # Create a copy to read from and to use as a backup later
            temp_file = _mkstemp_copy(path=path,
                                      preserve_inode=preserve_inode)
        except (__HOLE__, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))

        r_data = None
        try:
            # Open the file in write mode
            with salt.utils.fopen(path, mode='w', buffering=bufsize) as w_file:
                try:
                    # Open the temp file in read mode
                    with salt.utils.fopen(temp_file, mode='r', buffering=bufsize) as r_file:
                        r_data = mmap.mmap(r_file.fileno(), 0,
                                           access=mmap.ACCESS_READ)
                        result, nrepl = re.subn(cpattern, repl, r_data, count)
                        try:
                            w_file.write(result)
                        except (OSError, IOError) as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{0}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{1}'. "
                                "Exception: {2}".format(path, temp_file, exc)
                            )
                except (OSError, IOError) as exc:
                    raise CommandExecutionError("Exception: {0}".format(exc))
                finally:
                    if r_data and isinstance(r_data, mmap.mmap):
                        r_data.close()
        except (OSError, IOError) as exc:
            raise CommandExecutionError("Exception: {0}".format(exc))

    if not found and (append_if_not_found or prepend_if_not_found):
        if not_found_content is None:
            not_found_content = repl
        if prepend_if_not_found:
            new_file.insert(0, not_found_content + '\n')
        else:
            # append_if_not_found
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith('\n'):
                    new_file[-1] += '\n'
            new_file.append(not_found_content + '\n')
        has_changes = True
        if not dry_run:
            try:
                # Create a copy to read from and for later use as a backup
                temp_file = _mkstemp_copy(path=path,
                                          preserve_inode=preserve_inode)
            except (OSError, IOError) as exc:
                raise CommandExecutionError("Exception: {0}".format(exc))
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
                for line in new_file:
                    fh_.write(line)
            finally:
                fh_.close()

    if backup and has_changes and not dry_run:
        # keep the backup only if it was requested
        # and only if there were any changes
        backup_name = '{0}{1}'.format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{0}' to the "
                "backup file '{1}'. "
                "Exception: {2}".format(path, temp_file, exc)
            )
        if symlink:
            symlink_backup = '{0}{1}'.format(given_path, backup)
            target_backup = '{0}{1}'.format(target_path, backup)
            # Always clobber any existing symlink backup
            # to match the behaviour of the 'backup' option
            try:
                os.symlink(target_backup, symlink_backup)
            except OSError:
                os.remove(symlink_backup)
                os.symlink(target_backup, symlink_backup)
            except Exception as exc:
                raise CommandExecutionError(
                    "Unable to create backup symlink '{0}'. "
                    "Target was '{1}'. "
                    "Exception: {2}".format(symlink_backup, target_backup, exc)
                )
    elif temp_file:
        try:
            os.remove(temp_file)
        except (OSError, IOError) as exc:
            raise CommandExecutionError(
                "Unable to delete temp file '{0}'. "
                "Exception: {1}".format(temp_file, exc)
            )

    if not dry_run and not salt.utils.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)

    if show_changes:
        return ''.join(difflib.unified_diff(orig_file, new_file))

    return has_changes
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/replace
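The search half of replace, isolated as a sketch (hypothetical function name): mmap lets re scan the file without slurping it into a Python string, and mmap raises ValueError on an empty file, which here simply means "no match".

    import mmap, re

    def file_search(path, pattern, flags=re.MULTILINE):
        with open(path, 'rb') as r_file:
            try:
                r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ)
            except ValueError:  # empty file
                return False
            try:
                # bytes pattern, since mmap exposes a bytes-like buffer
                return re.search(re.compile(pattern.encode(), flags), r_data) is not None
            finally:
                r_data.close()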
6,504
def contains(path, text):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return ``True`` if the file at ``path`` contains ``text``

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    stripped_text = str(text).strip()
    try:
        with salt.utils.filebuffer.BufferedReader(path) as breader:
            for chunk in breader:
                if stripped_text in chunk:
                    return True
        return False
    except (__HOLE__, OSError):
        return False
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/contains
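A stdlib-only sketch of the same chunked scan (hypothetical function name, in the spirit of Salt's BufferedReader); the carried-over tail keeps a match that straddles a chunk boundary from being missed.

    def contains_text(path, text, chunk_size=262144):
        needle = str(text).strip()
        tail = ''
        try:
            with open(path) as fh:
                while True:
                    chunk = fh.read(chunk_size)
                    if not chunk:
                        return False
                    if needle in tail + chunk:
                        return True
                    # keep the last len(needle)-1 chars for the next round
                    tail = chunk[-(len(needle) - 1):] if len(needle) > 1 else ''
        except (IOError, OSError):
            return False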
6,505
def contains_regex(path, regex, lchar=''):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return True if the given regular expression matches on any line in the
    text of a given file.

    If the lchar argument (leading char) is specified, it will strip `lchar`
    from the left side of each line before trying to match

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains_regex /etc/crontab
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    try:
        with salt.utils.fopen(path, 'r') as target:
            for line in target:
                if lchar:
                    line = line.lstrip(lchar)
                if re.search(regex, line):
                    return True
            return False
    except (__HOLE__, OSError):
        return False
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/contains_regex
6,506
def contains_glob(path, glob_expr):
    '''
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return ``True`` if the given glob matches a string in the named file

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains_glob /etc/foobar '*cheese*'
    '''
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    try:
        with salt.utils.filebuffer.BufferedReader(path) as breader:
            for chunk in breader:
                if fnmatch.fnmatch(chunk, glob_expr):
                    return True
            return False
    except (__HOLE__, OSError):
        return False
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/contains_glob
6,507
def append(path, *args, **kwargs):
    '''
    .. versionadded:: 0.9.5

    Append text to the end of a file

    path
        path to file

    `*args`
        strings to append to file

    CLI Example:

    .. code-block:: bash

        salt '*' file.append /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

        .. code-block:: bash

            salt '*' file.append /etc/motd args='cheese=spam'

            salt '*' file.append /etc/motd args="['cheese=spam','spam=cheese']"

    '''
    path = os.path.expanduser(path)

    # Largely inspired by Fabric's contrib.files.append()

    if 'args' in kwargs:
        if isinstance(kwargs['args'], list):
            args = kwargs['args']
        else:
            args = [kwargs['args']]

    # Make sure we have a newline at the end of the file. Do this in binary
    # mode so SEEK_END with nonzero offset will work.
    with salt.utils.fopen(path, 'rb+') as ofile:
        linesep = salt.utils.to_bytes(os.linesep)
        try:
            ofile.seek(-len(linesep), os.SEEK_END)
        except __HOLE__ as exc:
            if exc.errno in (errno.EINVAL, errno.ESPIPE):
                # Empty file, simply append lines at the beginning of the file
                pass
            else:
                raise
        else:
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)

    # Append lines in text mode
    with salt.utils.fopen(path, 'r+') as ofile:
        ofile.seek(0, os.SEEK_END)
        for line in args:
            ofile.write('{0}\n'.format(line))

    return 'Wrote {0} lines to "{1}"'.format(len(args), path)
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/append
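A hypothetical helper mirroring the SEEK_END dance in append above: binary mode makes the negative offset legal, and seeking before the start of an empty file raises EINVAL (ESPIPE on unseekable streams), which simply means there is nothing to fix yet.

    import errno, os

    def ensure_trailing_newline(path):
        linesep = os.linesep.encode()
        with open(path, 'rb+') as ofile:
            try:
                ofile.seek(-len(linesep), os.SEEK_END)
            except (IOError, OSError) as exc:
                if exc.errno not in (errno.EINVAL, errno.ESPIPE):
                    raise
                return  # empty file: nothing to terminate
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)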
6,508
def prepend(path, *args, **kwargs):
    '''
    .. versionadded:: 2014.7.0

    Prepend text to the beginning of a file

    path
        path to file

    `*args`
        strings to prepend to the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.prepend /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."

    .. admonition:: Attention

        If you need to pass a string to prepend and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

        .. code-block:: bash

            salt '*' file.prepend /etc/motd args='cheese=spam'

            salt '*' file.prepend /etc/motd args="['cheese=spam','spam=cheese']"

    '''
    path = os.path.expanduser(path)

    if 'args' in kwargs:
        if isinstance(kwargs['args'], list):
            args = kwargs['args']
        else:
            args = [kwargs['args']]

    try:
        with salt.utils.fopen(path) as fhr:
            contents = fhr.readlines()
    except __HOLE__:
        contents = []

    preface = []
    for line in args:
        preface.append('{0}\n'.format(line))

    with salt.utils.fopen(path, "w") as ofile:
        contents = preface + contents
        ofile.write(''.join(contents))
    return 'Prepended {0} lines to "{1}"'.format(len(args), path)
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/prepend
6,509
def touch(name, atime=None, mtime=None):
    '''
    .. versionadded:: 0.9.5

    Just like the ``touch`` command, create a file if it doesn't exist or
    simply update the atime and mtime if it already does.

    atime:
        Access time in Unix epoch time
    mtime:
        Last modification in Unix epoch time

    CLI Example:

    .. code-block:: bash

        salt '*' file.touch /var/log/emptyfile
    '''
    name = os.path.expanduser(name)

    if atime and atime.isdigit():
        atime = int(atime)
    if mtime and mtime.isdigit():
        mtime = int(mtime)
    try:
        if not os.path.exists(name):
            with salt.utils.fopen(name, 'a') as fhw:
                fhw.write('')

        if not atime and not mtime:
            times = None
        elif not mtime and atime:
            times = (atime, time.time())
        elif not atime and mtime:
            times = (time.time(), mtime)
        else:
            times = (atime, mtime)
        os.utime(name, times)

    except __HOLE__:
        raise SaltInvocationError('atime and mtime must be integers')
    except (IOError, OSError) as exc:
        raise CommandExecutionError(exc.strerror)

    return os.path.exists(name)
TypeError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/touch
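A hedged sketch of the stdlib calls behind touch above: open-for-append creates the file, and os.utime takes an (atime, mtime) pair or None for "now". Passing a non-numeric time raises TypeError, the masked exception in this row.

    import os, time

    def touch_file(name, atime=None, mtime=None):
        open(name, 'a').close()  # create if missing, leave contents alone
        if atime is None and mtime is None:
            times = None
        else:
            now = time.time()
            times = (atime if atime is not None else now,
                     mtime if mtime is not None else now)
        os.utime(name, times)   # TypeError if atime/mtime are not numbers
        return os.path.exists(name)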
6,510
def link(src, path):
    '''
    .. versionadded:: 2014.1.0

    Create a hard link to a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.link /path/to/file /path/to/link
    '''
    src = os.path.expanduser(src)

    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.link(src, path)
        return True
    except (OSError, __HOLE__):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    return False
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/link
6,511
def symlink(src, path):
    '''
    Create a symbolic link (symlink, soft link) to a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.symlink /path/to/file /path/to/link
    '''
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.symlink(src, path)
        return True
    except (OSError, __HOLE__):
        raise CommandExecutionError('Could not create \'{0}\''.format(path))
    return False
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/symlink
6,512
def rename(src, dst):
    '''
    Rename a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.rename /path/to/src /path/to/dst
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    try:
        os.rename(src, dst)
        return True
    except __HOLE__:
        raise CommandExecutionError(
            'Could not rename \'{0}\' to \'{1}\''.format(src, dst)
        )
    return False
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/rename
6,513
def copy(src, dst, recurse=False, remove_existing=False):
    '''
    Copy a file or directory from source to dst

    In order to copy a directory, the recurse flag is required, and
    will by default overwrite files in the destination with the same path,
    and retain all other existing files. (similar to cp -r on unix)

    remove_existing will remove all files in the target directory,
    and then copy files from the source.

    CLI Example:

    .. code-block:: bash

        salt '*' file.copy /path/to/src /path/to/dst
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True remove_existing=True

    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('File path must be absolute.')

    if not os.path.exists(src):
        raise CommandExecutionError('No such file or directory \'{0}\''.format(src))

    if not salt.utils.is_windows():
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = __salt__['config.manage_mode'](get_mode(src))

    try:
        if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
            if not recurse:
                raise SaltInvocationError(
                    "Cannot copy overwriting a directory without recurse flag set to true!")
            if remove_existing:
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                salt.utils.files.recursive_copy(src, dst)
        else:
            shutil.copyfile(src, dst)
    except __HOLE__:
        raise CommandExecutionError(
            'Could not copy \'{0}\' to \'{1}\''.format(src, dst)
        )

    if not salt.utils.is_windows():
        check_perms(dst, None, pre_user, pre_group, pre_mode)
    return True
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/copy
6,514
def statvfs(path):
    '''
    .. versionadded:: 2014.1.0

    Perform a statvfs call against the filesystem that the file resides on

    CLI Example:

    .. code-block:: bash

        salt '*' file.statvfs /path/to/file
    '''
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')
    try:
        stv = os.statvfs(path)
        return dict((key, getattr(stv, key)) for key in
                    ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize',
                     'f_favail', 'f_ffree', 'f_files', 'f_flag',
                     'f_frsize', 'f_namemax'))
    except (OSError, __HOLE__):
        raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
    return False
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/statvfs
6,515
def stats(path, hash_type=None, follow_symlinks=True):
    '''
    Return a dict containing the stats for a given file

    CLI Example:

    .. code-block:: bash

        salt '*' file.stats /etc/passwd
    '''
    path = os.path.expanduser(path)

    ret = {}
    if not os.path.exists(path):
        try:
            # Broken symlinks will return False for os.path.exists(), but
            # still have a uid and gid
            pstat = os.lstat(path)
        except __HOLE__:
            # Not a broken symlink, just a nonexistent path
            return ret
    else:
        if follow_symlinks:
            pstat = os.stat(path)
        else:
            pstat = os.lstat(path)
    ret['inode'] = pstat.st_ino
    ret['uid'] = pstat.st_uid
    ret['gid'] = pstat.st_gid
    ret['group'] = gid_to_group(pstat.st_gid)
    ret['user'] = uid_to_user(pstat.st_uid)
    ret['atime'] = pstat.st_atime
    ret['mtime'] = pstat.st_mtime
    ret['ctime'] = pstat.st_ctime
    ret['size'] = pstat.st_size
    ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode)))
    if hash_type:
        ret['sum'] = get_hash(path, hash_type)
    ret['type'] = 'file'
    if stat.S_ISDIR(pstat.st_mode):
        ret['type'] = 'dir'
    if stat.S_ISCHR(pstat.st_mode):
        ret['type'] = 'char'
    if stat.S_ISBLK(pstat.st_mode):
        ret['type'] = 'block'
    if stat.S_ISREG(pstat.st_mode):
        ret['type'] = 'file'
    if stat.S_ISLNK(pstat.st_mode):
        ret['type'] = 'link'
    if stat.S_ISFIFO(pstat.st_mode):
        ret['type'] = 'pipe'
    if stat.S_ISSOCK(pstat.st_mode):
        ret['type'] = 'socket'
    ret['target'] = os.path.realpath(path)
    return ret
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/stats
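The S_IS* type dispatch from stats, condensed into a sketch (hypothetical function name); os.lstat is used here so symlinks report as 'link' rather than their target's type.

    import os, stat

    def file_type(path):
        mode = os.lstat(path).st_mode
        for check, label in ((stat.S_ISLNK, 'link'),
                             (stat.S_ISDIR, 'dir'),
                             (stat.S_ISCHR, 'char'),
                             (stat.S_ISBLK, 'block'),
                             (stat.S_ISFIFO, 'pipe'),
                             (stat.S_ISSOCK, 'socket')):
            if check(mode):
                return label
        return 'file'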
6,516
def rmdir(path):
    '''
    .. versionadded:: 2014.1.0

    Remove the specified directory. Fails if a directory is not empty.

    CLI Example:

    .. code-block:: bash

        salt '*' file.rmdir /tmp/foo/
    '''
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute.')

    if not os.path.isdir(path):
        raise SaltInvocationError('A valid directory was not specified.')

    try:
        os.rmdir(path)
        return True
    except __HOLE__ as exc:
        return exc.strerror
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/rmdir
6,517
def remove(path):
    '''
    Remove the named file. If a directory is supplied, it will be
    recursively deleted.

    CLI Example:

    .. code-block:: bash

        salt '*' file.remove /tmp/foo
    '''
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError('File path must be absolute: {0}'.format(path))

    try:
        if os.path.isfile(path) or os.path.islink(path):
            os.remove(path)
            return True
        elif os.path.isdir(path):
            shutil.rmtree(path)
            return True
    except (__HOLE__, IOError) as exc:
        raise CommandExecutionError(
            'Could not remove \'{0}\': {1}'.format(path, exc)
        )
    return False
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/remove
6,518
def get_selinux_context(path):
    '''
    Get an SELinux context from a given path

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_selinux_context /etc/hosts
    '''
    out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)

    try:
        ret = re.search(r'\w+:\w+:\w+:\w+', out).group(0)
    except __HOLE__:
        ret = (
            'No selinux context information is available for {0}'.format(path)
        )

    return ret
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/get_selinux_context
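A self-contained sketch of the failure mode this row masks (hypothetical function name): re.search returns None when nothing matches, so calling .group(0) on the result raises AttributeError.

    import re

    def first_selinux_context(ls_z_output):
        try:
            return re.search(r'\w+:\w+:\w+:\w+', ls_z_output).group(0)
        except AttributeError:  # re.search returned None
            return None

    print(first_selinux_context('system_u:object_r:etc_t:s0 /etc/hosts'))  # the context
    print(first_selinux_context('ls: no SELinux support'))                 # None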
6,519
def get_managed(
        name,
        template,
        source,
        source_hash,
        user,
        group,
        mode,
        saltenv,
        context,
        defaults,
        skip_verify,
        **kwargs):
    '''
    Return the managed file data for file.managed

    name
        location where the file lives on the server

    template
        template format

    source
        managed source file

    source_hash
        hash of the source file

    user
        user owner

    group
        group owner

    mode
        file mode

    context
        variables to add to the environment

    defaults
        default values for context_dict

    skip_verify
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base None None
    '''
    # Copy the file to the minion and templatize it
    sfn = ''
    source_sum = {}

    def _get_local_file_source_sum(path):
        '''
        DRY helper for getting the source_sum value from a locally cached
        path.
        '''
        return {'hsum': get_hash(path), 'hash_type': 'sha256'}

    # If we have a source defined, let's figure out what the hash is
    if source:
        urlparsed_source = _urlparse(source)
        if urlparsed_source.scheme == 'salt':
            source_sum = __salt__['cp.hash_file'](source, saltenv)
            if not source_sum:
                return '', {}, 'Source file {0} not found'.format(source)
        elif not source_hash and urlparsed_source.scheme == 'file':
            source_sum = _get_local_file_source_sum(urlparsed_source.path)
        elif not source_hash and source.startswith('/'):
            source_sum = _get_local_file_source_sum(source)
        else:
            if not skip_verify:
                if source_hash:
                    protos = ('salt', 'http', 'https', 'ftp', 'swift',
                              's3', 'file')

                    def _invalid_source_hash_format():
                        '''
                        DRY helper for reporting invalid source_hash input
                        '''
                        msg = (
                            'Source hash {0} format is invalid. It '
                            'must be in the format <hash type>=<hash>, '
                            'or it must be a supported protocol: {1}'
                            .format(source_hash, ', '.join(protos))
                        )
                        return '', {}, msg

                    try:
                        source_hash_scheme = _urlparse(source_hash).scheme
                    except __HOLE__:
                        return '', {}, ('Invalid format for source_hash '
                                        'parameter')
                    if source_hash_scheme in protos:
                        # The source_hash is a file on a server
                        hash_fn = __salt__['cp.cache_file'](
                            source_hash, saltenv)
                        if not hash_fn:
                            return '', {}, ('Source hash file {0} not found'
                                            .format(source_hash))
                        source_sum = extract_hash(hash_fn, '', name)
                        if source_sum is None:
                            return _invalid_source_hash_format()
                    else:
                        # The source_hash is a hash string
                        comps = source_hash.split('=')
                        if len(comps) < 2:
                            return _invalid_source_hash_format()
                        source_sum['hsum'] = comps[1].strip()
                        source_sum['hash_type'] = comps[0].strip()
                else:
                    return '', {}, ('Unable to determine upstream hash of '
                                    'source file {0}'.format(source))

    # if the file is a template we need to actually template the file to get
    # a checksum, but we can cache the template itself, but only if there is
    # a template source (it could be a templated contents)
    if template and source:
        # Check if we have the template cached
        template_dest = __salt__['cp.is_cached'](source, saltenv)
        if template_dest and source_hash:
            comps = source_hash.split('=')
            cached_template_sum = get_hash(template_dest,
                                           form=source_sum['hash_type'])
            if cached_template_sum == source_sum['hsum']:
                sfn = template_dest

        # If we didn't have the template file, let's get it
        if not sfn:
            try:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            except Exception as exc:
                # A 404 or other error code may raise an exception, catch it
                # and return a comment that will fail the calling state.
                return '', {}, ('Failed to cache template file {0}: {1}'
                                .format(source, exc))

        # exists doesn't play nice with sfn as bool
        # but if cache failed, sfn == False
        if not sfn or not os.path.exists(sfn):
            return sfn, {}, 'Source file \'{0}\' not found'.format(source)
        if sfn == name:
            raise SaltInvocationError(
                'Source file cannot be the same as destination'
            )

        if template in salt.utils.templates.TEMPLATE_REGISTRY:
            context_dict = defaults if defaults else {}
            if context:
                context_dict.update(context)
            data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                sfn,
                name=name,
                source=source,
                user=user,
                group=group,
                mode=mode,
                saltenv=saltenv,
                context=context_dict,
                salt=__salt__,
                pillar=__pillar__,
                grains=__grains__,
                opts=__opts__,
                **kwargs)
        else:
            return sfn, {}, ('Specified template format {0} is not supported'
                             ).format(template)

        if data['result']:
            sfn = data['data']
            hsum = get_hash(sfn)
            source_sum = {'hash_type': 'sha256',
                          'hsum': hsum}
        else:
            __clean_tmp(sfn)
            return sfn, {}, data['data']

    return sfn, source_sum, ''
TypeError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/get_managed
6,520
def check_perms(name, ret, user, group, mode, follow_symlinks=False):
    '''
    Check the permissions on files and chown if needed

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_perms /etc/sudoers '{}' root root 400

    .. versionchanged:: 2014.1.3
        ``follow_symlinks`` option added
    '''
    name = os.path.expanduser(name)

    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': [],
               'result': True}
        orig_comment = ''
    else:
        orig_comment = ret['comment']
        ret['comment'] = []

    # Check permissions
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    if not cur:
        raise CommandExecutionError('{0} does not exist'.format(name))
    perms['luser'] = cur['user']
    perms['lgroup'] = cur['group']
    perms['lmode'] = __salt__['config.manage_mode'](cur['mode'])

    # Mode changes if needed
    if mode is not None:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = __salt__['config.manage_mode'](mode)
            if mode != perms['lmode']:
                if __opts__['test'] is True:
                    ret['changes']['mode'] = mode
                else:
                    set_mode(name, mode)
                    if mode != __salt__['config.manage_mode'](get_mode(name)):
                        ret['result'] = False
                        ret['comment'].append(
                            'Failed to change mode to {0}'.format(mode)
                        )
                    else:
                        ret['changes']['mode'] = mode

    # user/group changes if needed, then check if it worked
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if user != perms['luser']:
            perms['cuser'] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if group != perms['lgroup']:
            perms['cgroup'] = group

    if 'cuser' in perms or 'cgroup' in perms:
        if not __opts__['test']:
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            if user is None:
                user = perms['luser']
            if group is None:
                group = perms['lgroup']
            try:
                chown_func(name, user, group)
            except __HOLE__:
                ret['result'] = False

    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if user != get_user(name, follow_symlinks=follow_symlinks) and user != '':
            if __opts__['test'] is True:
                ret['changes']['user'] = user
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change user to {0}'
                                      .format(user))
        elif 'cuser' in perms and user != '':
            ret['changes']['user'] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if group != get_group(name, follow_symlinks=follow_symlinks) and user != '':
            if __opts__['test'] is True:
                ret['changes']['group'] = group
            else:
                ret['result'] = False
                ret['comment'].append('Failed to change group to {0}'
                                      .format(group))
        elif 'cgroup' in perms and user != '':
            ret['changes']['group'] = group

    if isinstance(orig_comment, six.string_types):
        if orig_comment:
            ret['comment'].insert(0, orig_comment)
        ret['comment'] = '; '.join(ret['comment'])
    if __opts__['test'] is True and ret['changes']:
        ret['result'] = None

    return ret, perms
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/check_perms
6,521
def manage_file(name,
                sfn,
                ret,
                source,
                source_sum,
                user,
                group,
                mode,
                saltenv,
                backup,
                makedirs=False,
                template=None,   # pylint: disable=W0613
                show_changes=True,
                contents=None,
                dir_mode=None,
                follow_symlinks=True,
                skip_verify=False):
    '''
    Checks the destination against what was retrieved with get_managed and
    makes the appropriate modifications (if necessary).

    name
        location to place the file

    sfn
        location of cached file on the minion

        This is the path to the file stored on the minion. This file is
        placed on the minion using cp.cache_file. If the hash sum of that
        file matches the source_sum, we do not transfer the file to the
        minion again.

        This file is then grabbed and if it has template set, it renders the
        file to be placed into the correct place on the system using
        salt.files.utils.copyfile()

    ret
        The initial state return data structure. Pass in ``None`` to use the
        default structure.

    source
        file reference on the master

    source_hash
        sum hash for source

    user
        user owner

    group
        group owner

    backup
        backup_mode

    makedirs
        make directories if they do not exist

    template
        format of templating

    show_changes
        Include diff in state return

    contents:
        contents to be placed in the file

    dir_mode
        mode for directories created with makedirs

    skip_verify : False
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base ''

    .. versionchanged:: 2014.7.0
        ``follow_symlinks`` option added
    '''
    name = os.path.expanduser(name)

    if not ret:
        ret = {'name': name,
               'changes': {},
               'comment': '',
               'result': True}

    if source and not sfn:
        # File is not present, cache it
        sfn = __salt__['cp.cache_file'](source, saltenv)
        if not sfn:
            return _error(
                ret, 'Source file \'{0}\' not found'.format(source))
        htype = source_sum.get('hash_type', __opts__.get('hash_type', 'md5'))
        # Recalculate source sum now that file has been cached
        source_sum = {
            'hash_type': htype,
            'hsum': get_hash(sfn, form=htype)
        }

    # Check changes if the target file exists
    if os.path.isfile(name) or os.path.islink(name):
        if os.path.islink(name) and follow_symlinks:
            real_name = os.path.realpath(name)
        else:
            real_name = name

        # Only test the checksums on files with managed contents
        if source and not (not follow_symlinks and os.path.islink(real_name)):
            name_sum = get_hash(real_name, source_sum['hash_type'])
        else:
            name_sum = None

        # Check if file needs to be replaced
        if source and (name_sum is None or source_sum['hsum'] != name_sum):
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            # If the downloaded file came from a non salt server or local
            # source, and we are not skipping checksum verification, then
            # verify that it matches the specified checksum.
            if not skip_verify \
                    and _urlparse(source).scheme not in ('salt', ''):
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3})'.format(
                            source_sum['hash_type'],
                            name,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret

            # Print a diff equivalent to diff -u old new
            if __salt__['config.option']('obfuscate_templates'):
                ret['changes']['diff'] = '<Obfuscated Template>'
            elif not show_changes:
                ret['changes']['diff'] = '<show_changes=False>'
            else:
                # Check to see if the files are bins
                bdiff = _binary_replace(real_name, sfn)
                if bdiff:
                    ret['changes']['diff'] = bdiff
                else:
                    with contextlib.nested(
                            salt.utils.fopen(sfn, 'r'),
                            salt.utils.fopen(real_name, 'r')) as (src, name_):
                        slines = src.readlines()
                        nlines = name_.readlines()

                    sndiff = ''.join(difflib.unified_diff(nlines, slines))
                    if sndiff:
                        ret['changes']['diff'] = sndiff

            # Pre requisites are met, and the file needs to be replaced, do it
            try:
                salt.utils.files.copyfile(sfn,
                                          real_name,
                                          __salt__['config.backup_mode'](backup),
                                          __opts__['cachedir'])
            except __HOLE__ as io_error:
                __clean_tmp(sfn)
                return _error(
                    ret, 'Failed to commit change: {0}'.format(io_error))

        if contents is not None:
            # Write the static contents to a temporary file
            tmp = salt.utils.mkstemp(text=True)
            if salt.utils.is_windows():
                contents = os.linesep.join(contents.splitlines())
            with salt.utils.fopen(tmp, 'w') as tmp_:
                tmp_.write(str(contents))

            # Compare contents of files to know if we need to replace
            with contextlib.nested(
                    salt.utils.fopen(tmp, 'r'),
                    salt.utils.fopen(real_name, 'r')) as (src, name_):
                slines = src.readlines()
                nlines = name_.readlines()
                different = ''.join(slines) != ''.join(nlines)

            if different:
                if __salt__['config.option']('obfuscate_templates'):
                    ret['changes']['diff'] = '<Obfuscated Template>'
                elif not show_changes:
                    ret['changes']['diff'] = '<show_changes=False>'
                else:
                    if salt.utils.istextfile(real_name):
                        ret['changes']['diff'] = \
                            ''.join(difflib.unified_diff(nlines, slines))
                    else:
                        ret['changes']['diff'] = \
                            'Replace binary file with text file'

                # Pre requisites are met, the file needs to be replaced, do it
                try:
                    salt.utils.files.copyfile(tmp,
                                              real_name,
                                              __salt__['config.backup_mode'](backup),
                                              __opts__['cachedir'])
                except IOError as io_error:
                    __clean_tmp(tmp)
                    return _error(
                        ret, 'Failed to commit change: {0}'.format(io_error))
            __clean_tmp(tmp)

        # Check for changing symlink to regular file here
        if os.path.islink(name) and not follow_symlinks:
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            # If the downloaded file came from a non salt server source
            # verify that it matches the intended sum value
            if not skip_verify and _urlparse(source).scheme != 'salt':
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3})'.format(
                            source_sum['hash_type'],
                            name,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret

            try:
                salt.utils.files.copyfile(sfn,
                                          name,
                                          __salt__['config.backup_mode'](backup),
                                          __opts__['cachedir'])
            except IOError as io_error:
                __clean_tmp(sfn)
                return _error(
                    ret, 'Failed to commit change: {0}'.format(io_error))

            ret['changes']['diff'] = \
                'Replace symbolic link with regular file'

        ret, _ = check_perms(name, ret, user, group, mode, follow_symlinks)

        if ret['changes']:
            ret['comment'] = 'File {0} updated'.format(name)

        elif not ret['changes'] and ret['result']:
            ret['comment'] = u'File {0} is in the correct state'.format(name)
        if sfn:
            __clean_tmp(sfn)
        return ret
    else:  # target file does not exist
        contain_dir = os.path.dirname(name)

        def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
            # check for existence of windows drive letter
            if salt.utils.is_windows():
                drive, _ = os.path.splitdrive(name)
                if drive and not os.path.exists(drive):
                    __clean_tmp(sfn)
                    return _error(ret,
                                  '{0} drive not present'.format(drive))
            if dir_mode is None and mode is not None:
                # Add execute bit to each nonzero digit in the mode, if
                # dir_mode was not specified. Otherwise, any
                # directories created with makedirs_() below can't be
                # listed via a shell.
                mode_list = [x for x in str(mode)][-3:]
                for idx in range(len(mode_list)):
                    if mode_list[idx] != '0':
                        mode_list[idx] = str(int(mode_list[idx]) | 1)
                dir_mode = ''.join(mode_list)
            makedirs_(name, user=user, group=group, mode=dir_mode)

        if source:
            # It is a new file, set the diff accordingly
            ret['changes']['diff'] = 'New file'
            # Apply the new file
            if not sfn:
                sfn = __salt__['cp.cache_file'](source, saltenv)
            if not sfn:
                return _error(
                    ret, 'Source file \'{0}\' not found'.format(source))
            # If the downloaded file came from a non salt server source
            # verify that it matches the intended sum value
            if not skip_verify \
                    and _urlparse(source).scheme != 'salt':
                dl_sum = get_hash(sfn, source_sum['hash_type'])
                if dl_sum != source_sum['hsum']:
                    ret['comment'] = (
                        'Specified {0} checksum for {1} ({2}) does not match '
                        'actual checksum ({3})'.format(
                            source_sum['hash_type'],
                            name,
                            source_sum['hsum'],
                            dl_sum
                        )
                    )
                    ret['result'] = False
                    return ret
            if not os.path.isdir(contain_dir):
                if makedirs:
                    _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
                else:
                    __clean_tmp(sfn)
                    # No changes actually made
                    ret['changes'].pop('diff', None)
                    return _error(ret, 'Parent directory not present')
        else:  # source != True
            if not os.path.isdir(contain_dir):
                if makedirs:
                    _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
                else:
                    __clean_tmp(sfn)
                    # No changes actually made
                    ret['changes'].pop('diff', None)
                    return _error(ret, 'Parent directory not present')

            # Create the file, user rw-only if mode will be set to prevent
            # a small security race problem before the permissions are set
            if mode:
                current_umask = os.umask(0o77)

            # Create a new file when test is False and source is None
            if contents is None:
                if not __opts__['test']:
                    if touch(name):
                        ret['changes']['new'] = 'file {0} created'.format(name)
                        ret['comment'] = 'Empty file'
                    else:
                        return _error(
                            ret, 'Empty file {0} not created'.format(name)
                        )
            else:
                if not __opts__['test']:
                    if touch(name):
                        ret['changes']['diff'] = 'New file'
                    else:
                        return _error(
                            ret, 'File {0} not created'.format(name)
                        )

            if mode:
                os.umask(current_umask)

        if contents is not None:
            # Write the static contents to a temporary file
            tmp = salt.utils.mkstemp(text=True)
            if salt.utils.is_windows():
                contents = os.linesep.join(contents.splitlines())
            with salt.utils.fopen(tmp, 'w') as tmp_:
                tmp_.write(str(contents))

            # Copy into place
            salt.utils.files.copyfile(tmp,
                                      name,
                                      __salt__['config.backup_mode'](backup),
                                      __opts__['cachedir'])
            __clean_tmp(tmp)
        # Now copy the file contents if there is a source file
        elif sfn:
            salt.utils.files.copyfile(sfn,
                                      name,
                                      __salt__['config.backup_mode'](backup),
                                      __opts__['cachedir'])
            __clean_tmp(sfn)

        # This is a new file, if no mode specified, use the umask to figure
        # out what mode to use for the new file.
        if mode is None and not salt.utils.is_windows():
            # Get current umask
            mask = os.umask(0)
            os.umask(mask)
            # Calculate the mode value that results from the umask
            mode = oct((0o777 ^ mask) & 0o666)

        ret, _ = check_perms(name, ret, user, group, mode)

        if not ret['comment']:
            ret['comment'] = 'File ' + name + ' updated'

        if __opts__['test']:
            ret['comment'] = 'File ' + name + ' not updated'
        elif not ret['changes'] and ret['result']:
            ret['comment'] = 'File ' + name + ' is in the correct state'
        if sfn:
            __clean_tmp(sfn)
        return ret
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/manage_file
6,522
def makedirs_perms(name, user=None, group=None, mode='0755'):
    '''
    Taken and modified from os.makedirs to set user, group and mode for each
    directory created.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs_perms /opt/code
    '''
    name = os.path.expanduser(name)

    path = os.path
    head, tail = path.split(name)
    if not tail:
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            makedirs_perms(head, user, group, mode)
        except __HOLE__ as exc:
            # be happy if someone already created the path
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:  # xxx/newdir/. exists if xxx/newdir exists
            return
    os.mkdir(name)
    check_perms(name,
                None,
                user,
                group,
                int('{0}'.format(mode)) if mode else None)
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/makedirs_perms
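The EEXIST-tolerant recursion in the spirit of makedirs_perms above, as a stdlib-only sketch (hypothetical function name): a parent created concurrently by another process is treated as success rather than an error.

    import errno, os

    def makedirs_quiet(name):
        head, tail = os.path.split(name)
        if not tail:
            head, tail = os.path.split(head)
        if head and tail and not os.path.exists(head):
            try:
                makedirs_quiet(head)
            except OSError as exc:
                if exc.errno != errno.EEXIST:  # someone else made it: fine
                    raise
            if tail == os.curdir:  # xxx/newdir/. exists if xxx/newdir exists
                return
        os.mkdir(name)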
6,523
def is_chrdev(name):
    '''
    Check if a file exists and is a character device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_chrdev /dev/chr
    '''
    name = os.path.expanduser(name)

    stat_structure = None
    try:
        stat_structure = os.stat(name)
    except __HOLE__ as exc:
        if exc.errno == errno.ENOENT:
            # If the character device does not exist in the first place
            return False
        else:
            raise
    return stat.S_ISCHR(stat_structure.st_mode)
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/is_chrdev
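The same probe appears in the is_blkdev and is_fifo samples below; only the stat predicate changes. A generic sketch of the pattern (the predicate parameter is an illustrative generalization, not in the samples):

import errno
import os
import stat

def is_special(path, predicate=stat.S_ISCHR):
    try:
        mode = os.stat(path).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            return False          # a missing file is an ordinary "no"
        raise                     # EACCES and friends remain errors
    return predicate(mode)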
6,524
def mknod_chrdev(name, major, minor, user=None, group=None, mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a character device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_chrdev /dev/chr 180 31
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating character device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
            ret['result'] = None
        else:
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
                ret['result'] = True
    except __HOLE__ as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created character device
    check_perms(name, None, user, group, int('{0}'.format(mode)) if mode else None)
    return ret
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/mknod_chrdev
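mknod packs the major/minor pair into a single device number via os.makedev. A tiny demonstration of that packing and its inverse (POSIX-only; no privileges needed since no device node is created):

import os

dev = os.makedev(180, 31)     # the numbers from the CLI example above
assert os.major(dev) == 180
assert os.minor(dev) == 31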
6,525
def is_blkdev(name):
    '''
    Check if a file exists and is a block device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_blkdev /dev/blk
    '''
    name = os.path.expanduser(name)

    stat_structure = None
    try:
        stat_structure = os.stat(name)
    except __HOLE__ as exc:
        if exc.errno == errno.ENOENT:
            # If the block device does not exist in the first place
            return False
        else:
            raise
    return stat.S_ISBLK(stat_structure.st_mode)
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/is_blkdev
6,526
def mknod_blkdev(name, major, minor, user=None, group=None, mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a block device.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_blkdev /dev/blk 8 999
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating block device name:{0} major:{1} minor:{2} mode:{3}'
              .format(name, major, minor, mode))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
            ret['result'] = None
        else:
            if os.mknod(name,
                        int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
                        os.makedev(major, minor)) is None:
                ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
                ret['result'] = True
    except __HOLE__ as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created block device
    check_perms(name, None, user, group, int('{0}'.format(mode)) if mode else None)
    return ret
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/mknod_blkdev
6,527
def is_fifo(name):
    '''
    Check if a file exists and is a FIFO.

    CLI Example:

    .. code-block:: bash

        salt '*' file.is_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)

    stat_structure = None
    try:
        stat_structure = os.stat(name)
    except __HOLE__ as exc:
        if exc.errno == errno.ENOENT:
            # If the fifo does not exist in the first place
            return False
        else:
            raise
    return stat.S_ISFIFO(stat_structure.st_mode)
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/is_fifo
6,528
def mknod_fifo(name, user=None, group=None, mode='0660'):
    '''
    .. versionadded:: 0.17.0

    Create a FIFO pipe.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mknod_fifo /dev/fifo
    '''
    name = os.path.expanduser(name)

    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    log.debug('Creating FIFO name: {0}'.format(name))
    try:
        if __opts__['test']:
            ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
            ret['result'] = None
        else:
            if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None:
                ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
                ret['result'] = True
    except __HOLE__ as exc:
        # be happy it is already there
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
    # quick pass at verifying the permissions of the newly created fifo
    check_perms(name, None, user, group, int('{0}'.format(mode)) if mode else None)
    return ret
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/mknod_fifo
6,529
def list_backups(path, limit=None): ''' .. versionadded:: 0.17.0 Lists the previous versions of a file backed up using Salt's :doc:`file state backup </ref/states/backup_mode>` system. path The path on the minion to check for backups limit Limit the number of results to the most recent N backups CLI Example: .. code-block:: bash salt '*' file.list_backups /foo/bar/baz.txt ''' path = os.path.expanduser(path) try: limit = int(limit) except TypeError: pass except __HOLE__: log.error('file.list_backups: \'limit\' value must be numeric') limit = None bkroot = _get_bkroot() parent_dir, basename = os.path.split(path) if salt.utils.is_windows(): # ':' is an illegal filesystem path character on Windows src_dir = parent_dir.replace(':', '_') else: src_dir = parent_dir[1:] # Figure out full path of location of backup file in minion cache bkdir = os.path.join(bkroot, src_dir) if not os.path.isdir(bkdir): return {} files = {} for fname in [x for x in os.listdir(bkdir) if os.path.isfile(os.path.join(bkdir, x))]: if salt.utils.is_windows(): # ':' is an illegal filesystem path character on Windows strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename) else: strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename) try: timestamp = datetime.datetime.strptime(fname, strpfmt) except ValueError: # File didn't match the strp format string, so it's not a backup # for this file. Move on to the next one. continue if salt.utils.is_windows(): str_format = '%a %b %d %Y %H-%M-%S.%f' else: str_format = '%a %b %d %Y %H:%M:%S.%f' files.setdefault(timestamp, {})['Backup Time'] = \ timestamp.strftime(str_format) location = os.path.join(bkdir, fname) files[timestamp]['Size'] = os.stat(location).st_size files[timestamp]['Location'] = location return dict(list(zip( list(range(len(files))), [files[x] for x in sorted(files, reverse=True)[:limit]] )))
ValueError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/list_backups
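The limit handling above distinguishes int(None), which raises TypeError and means "no limit", from int('five'), which raises ValueError and is logged as bad input. A compact sketch of that split (helper name illustrative):

def coerce_limit(limit):
    try:
        return int(limit)
    except TypeError:
        return None               # limit was omitted entirely
    except ValueError:
        return None               # limit was given but is not numeric

assert coerce_limit(None) is None
assert coerce_limit('3') == 3
assert coerce_limit('five') is None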
6,530
def list_backups_dir(path, limit=None): ''' Lists the previous versions of a directory backed up using Salt's :doc:`file state backup </ref/states/backup_mode>` system. path The directory on the minion to check for backups limit Limit the number of results to the most recent N backups CLI Example: .. code-block:: bash salt '*' file.list_backups_dir /foo/bar/baz/ ''' path = os.path.expanduser(path) try: limit = int(limit) except TypeError: pass except __HOLE__: log.error('file.list_backups_dir: \'limit\' value must be numeric') limit = None bkroot = _get_bkroot() parent_dir, basename = os.path.split(path) # Figure out full path of location of backup folder in minion cache bkdir = os.path.join(bkroot, parent_dir[1:]) if not os.path.isdir(bkdir): return {} files = {} f = dict([(i, len(list(n))) for i, n in itertools.groupby([x.split("_")[0] for x in sorted(os.listdir(bkdir))])]) ff = os.listdir(bkdir) for i, n in six.iteritems(f): ssfile = {} for x in sorted(ff): basename = x.split('_')[0] if i == basename: strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename) try: timestamp = datetime.datetime.strptime(x, strpfmt) except ValueError: # Folder didn't match the strp format string, so it's not a backup # for this folder. Move on to the next one. continue ssfile.setdefault(timestamp, {})['Backup Time'] = \ timestamp.strftime('%a %b %d %Y %H:%M:%S.%f') location = os.path.join(bkdir, x) ssfile[timestamp]['Size'] = os.stat(location).st_size ssfile[timestamp]['Location'] = location sfiles = dict(list(zip(list(range(n)), [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]]))) sefiles = {i: sfiles} files.update(sefiles) return files
ValueError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/list_backups_dir
6,531
def restore_backup(path, backup_id):
    '''
    .. versionadded:: 0.17.0

    Restore a previous version of a file that was backed up using Salt's
    :doc:`file state backup </ref/states/backup_mode>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to restore, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.restore_backup /foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)

    # Note: This only supports minion backups, so this function will need to
    # be modified if/when master backups are implemented.
    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        if len(str(backup_id)) == len(str(int(backup_id))):
            backup = list_backups(path)[int(backup_id)]
        else:
            return ret
    except ValueError:
        return ret
    except __HOLE__:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret

    salt.utils.backup_minion(path, _get_bkroot())
    try:
        shutil.copyfile(backup['Location'], path)
    except IOError as exc:
        ret['comment'] = \
            'Unable to restore {0} to {1}: ' \
            '{2}'.format(backup['Location'], path, exc)
        return ret
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully restored {0} to ' \
                         '{1}'.format(backup['Location'], path)

    # Try to set proper ownership
    if not salt.utils.is_windows():
        try:
            fstat = os.stat(path)
        except (OSError, IOError):
            ret['comment'] += ', but was unable to set ownership'
        else:
            os.chown(path, fstat.st_uid, fstat.st_gid)

    return ret
KeyError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/restore_backup
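The lookup above separates two failure modes: a backup_id that is not a number at all (ValueError from int()) and one that is numeric but absent from the backups mapping (KeyError). A sketch with hypothetical data:

backups = {0: 'baz.txt_Mon', 1: 'baz.txt_Tue'}    # illustrative only

def pick_backup(backup_id):
    try:
        return backups[int(backup_id)]
    except ValueError:
        return None               # e.g. 'abc' is not an id
    except KeyError:
        return None               # numeric, but no such backup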
6,532
def delete_backup(path, backup_id):
    '''
    .. versionadded:: 0.17.0

    Delete a previous version of a file that was backed up using Salt's
    :doc:`file state backup </ref/states/backup_mode>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to delete, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.delete_backup /var/cache/salt/minion/file_backup/home/foo/bar/baz.txt 0
    '''
    path = os.path.expanduser(path)

    ret = {'result': False,
           'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
    try:
        if len(str(backup_id)) == len(str(int(backup_id))):
            backup = list_backups(path)[int(backup_id)]
        else:
            return ret
    except ValueError:
        return ret
    except KeyError:
        ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
                         '{1}'.format(backup_id, path)
        return ret

    try:
        os.remove(backup['Location'])
    except __HOLE__ as exc:
        ret['comment'] = 'Unable to remove {0}: {1}'.format(backup['Location'], exc)
    else:
        ret['result'] = True
        ret['comment'] = 'Successfully removed {0}'.format(backup['Location'])

    return ret
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/delete_backup
6,533
def grep(path, pattern, *opts): ''' Grep for a string in the specified file .. note:: This function's return value is slated for refinement in future versions of Salt path Path to the file to be searched .. note:: Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing is being used then the path should be quoted to keep the shell from attempting to expand the glob expression. pattern Pattern to match. For example: ``test``, or ``a[0-5]`` opts Additional command-line flags to pass to the grep command. For example: ``-v``, or ``-i -B2`` .. note:: The options should come after a double-dash (as shown in the examples below) to keep Salt's own argument parser from interpreting them. CLI Example: .. code-block:: bash salt '*' file.grep /etc/passwd nobody salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2 salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l ''' path = os.path.expanduser(path) split_opts = [] for opt in opts: try: opt = salt.utils.shlex_split(opt) except AttributeError: opt = salt.utils.shlex_split(str(opt)) if len(opt) > 1: salt.utils.warn_until( 'Carbon', 'Additional command line options for file.grep should be ' 'passed one at a time, please do not pass more than one in a ' 'single argument.' ) split_opts.extend(opt) cmd = ['grep'] + split_opts + [pattern, path] try: ret = __salt__['cmd.run_all'](cmd, python_shell=False) except (IOError, __HOLE__) as exc: raise CommandExecutionError(exc.strerror) return ret
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/grep
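The AttributeError fallback above handles option values the CLI parser has already coerced to non-strings (e.g. -2 arriving as an int); somewhere inside shlex the non-string eventually lacks an attribute the tokenizer needs. A sketch using plain shlex, assuming salt.utils.shlex_split behaves like shlex.split here:

import shlex

def split_opt(opt):
    try:
        return shlex.split(opt)
    except AttributeError:
        return shlex.split(str(opt))   # retry on the string form

assert split_opt('-i -B2') == ['-i', '-B2']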
6,534
def open_files(by_pid=False): ''' Return a list of all physical open files on the system. CLI Examples: .. code-block:: bash salt '*' file.open_files salt '*' file.open_files by_pid=True ''' # First we collect valid PIDs pids = {} procfs = os.listdir('/proc/') for pfile in procfs: try: pids[int(pfile)] = [] except ValueError: # Not a valid PID, move on pass # Then we look at the open files for each PID files = {} for pid in pids: ppath = '/proc/{0}'.format(pid) try: tids = os.listdir('{0}/task'.format(ppath)) except __HOLE__: continue # Collect the names of all of the file descriptors fd_ = [] #try: # fd_.append(os.path.realpath('{0}/task/{1}exe'.format(ppath, tid))) #except: # pass for fpath in os.listdir('{0}/fd'.format(ppath)): fd_.append('{0}/fd/{1}'.format(ppath, fpath)) for tid in tids: try: fd_.append( os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid)) ) except OSError: continue for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)): fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath)) fd_ = sorted(set(fd_)) # Loop through file descriptors and return useful data for each file for fdpath in fd_: # Sometimes PIDs and TIDs disappear before we can query them try: name = os.path.realpath(fdpath) # Running stat on the file cuts out all of the sockets and # deleted files from the list os.stat(name) except OSError: continue if name not in files: files[name] = [pid] else: # We still want to know which PIDs are using each file files[name].append(pid) files[name] = sorted(set(files[name])) pids[pid].append(name) pids[pid] = sorted(set(pids[pid])) if by_pid: return pids return files
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/open_files
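Everything in open_files is racy by nature: a PID listed a moment ago may be gone by the next syscall, so each per-process access is individually wrapped and OSError simply means "skip it". A minimal Linux-only sketch of that defensive scan:

import os

def live_pids():
    result = []
    for entry in os.listdir('/proc'):
        if not entry.isdigit():
            continue
        try:
            os.stat('/proc/' + entry)   # may vanish between list and stat
        except OSError:
            continue                    # the process exited mid-scan
        result.append(int(entry))
    return result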
6,535
def move(src, dst):
    '''
    Move a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.move /path/to/src /path/to/dst
    '''
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError('Source path must be absolute.')

    if not os.path.isabs(dst):
        raise SaltInvocationError('Destination path must be absolute.')

    ret = {
        'result': True,
        'comment': "'{0}' moved to '{1}'".format(src, dst),
    }

    try:
        shutil.move(src, dst)
    except (OSError, __HOLE__) as exc:
        raise CommandExecutionError(
            "Unable to move '{0}' to '{1}': {2}".format(src, dst, exc)
        )

    return ret
IOError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/move
6,536
def diskusage(path):
    '''
    Recursively calculate disk usage of path and return it in bytes

    CLI Example:

    .. code-block:: bash

        salt '*' file.diskusage /path/to/check
    '''
    total_size = 0
    seen = set()
    if os.path.isfile(path):
        stat_structure = os.stat(path)
        ret = stat_structure.st_size
        return ret

    for dirpath, dirnames, filenames in os.walk(path):
        for f in filenames:
            fp = os.path.join(dirpath, f)
            try:
                stat_structure = os.stat(fp)
            except __HOLE__:
                continue
            if stat_structure.st_ino in seen:
                continue
            seen.add(stat_structure.st_ino)
            total_size += stat_structure.st_size

    ret = total_size
    return ret
OSError
dataset/ETHPy150Open saltstack/salt/salt/modules/file.py/diskusage
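Deduplicating on st_ino matters because hard links share one inode and one set of disk blocks; summing st_size naively would count them twice. A small POSIX-only demonstration:

import os
import tempfile

d = tempfile.mkdtemp()
a = os.path.join(d, 'a')
with open(a, 'w') as f:
    f.write('x' * 1000)
os.link(a, os.path.join(d, 'b'))   # same inode, same 1000 bytes
# a naive per-path sum reports 2000 bytes; the inode-deduplicated
# walk in the sample above reports 1000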
6,537
def _get_db_settings(self):
    """Create DB settings according to the configuration file."""
    config_path = os.path.expanduser(
        self.config.FrameworkConfigGet('DATABASE_SETTINGS_FILE'))
    settings = {}
    with FileOperations.open(config_path, 'r') as f:
        for line in f:
            line = line.rstrip()
            # Ignore empty/comment lines.
            if not line or line.startswith('#'):
                continue
            try:
                key, value = line.split(':')
                settings[key.strip()] = value.strip()
            except __HOLE__:
                self.error_handler.FrameworkAbort(
                    "Problem in config file: '%s' -> Cannot parse line: %s"
                    % (config_path, line))
    return settings
ValueError
dataset/ETHPy150Open owtf/owtf/framework/db/db.py/DB._get_db_settings
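Unpacking line.split(':') into exactly two names is what raises the ValueError: any line with zero or multiple colons yields a list whose length is not 2. Note this also means values containing colons (an IPv6 address, say) would trip the same error. A sketch:

def parse_line(line):
    key, value = line.split(':')      # ValueError unless exactly one ':'
    return key.strip(), value.strip()

assert parse_line('DATABASE_PORT: 5432') == ('DATABASE_PORT', '5432')
# parse_line('DATABASE_IP: ::1') would raise ValueError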
6,538
def CreateEngine(self, BaseClass):
    try:
        engine = create_engine(
            "postgresql+psycopg2://{0}:{1}@{2}:{3}/{4}".format(
                self._db_settings['DATABASE_USER'],
                self._db_settings['DATABASE_PASS'],
                self._db_settings['DATABASE_IP'],
                self._db_settings['DATABASE_PORT'],
                self._db_settings['DATABASE_NAME']),
            poolclass=QueuePool,
            pool_size=5,
            max_overflow=10,)
        BaseClass.metadata.create_all(engine)
        # Fix for forking
        register_after_fork(engine, engine.dispose)
        return engine
    except __HOLE__ as e:
        # Potentially corrupted DB config.
        self.error_handler.FrameworkAbort(
            'Database configuration file is potentially corrupted. '
            'Please check ' +
            self.config.FrameworkConfigGet('DATABASE_SETTINGS_FILE') + '\n'
            '[DB] ' + str(e))
    except KeyError:
        # Indicates incomplete db config file
        self.error_handler.FrameworkAbort(
            "Incomplete database configuration settings in "
            "" + self.config.FrameworkConfigGet('DATABASE_SETTINGS_FILE'))
    except exc.OperationalError as e:
        self.error_handler.FrameworkAbort(
            "[DB] " + str(e) +
            "\nRun scripts/db_run.sh to start/setup db")
ValueError
dataset/ETHPy150Open owtf/owtf/framework/db/db.py/DB.CreateEngine
6,539
@packageCallback
def packageCallback_importControllers(packagename):
    try:
        __import__('{}._webapp'.format(packagename))
    except __HOLE__, e:
        print "packageCallback_importControllers", packagename, e
        #raise
        pass
ImportError
dataset/ETHPy150Open elistevens/solari/wsgi/solariwsgi/core.py/packageCallback_importControllers
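Probing for an optional plugin module is the one place ImportError is expected rather than fatal, which is why the sample logs and swallows it. A sketch of the same probe (helper name illustrative):

def try_import(modname):
    try:
        __import__(modname)
        return True
    except ImportError as e:
        print('optional module %s not importable: %s' % (modname, e))
        return False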
6,540
def _get_servers(self, req, is_detail): """Returns a list of servers, based on any search options specified.""" search_opts = {} search_opts.update(req.GET) context = req.environ['nova.context'] remove_invalid_options(context, search_opts, self._get_server_search_options()) # Verify search by 'status' contains a valid status. # Convert it to filter by vm_state or task_state for compute_api. search_opts.pop('status', None) if 'status' in req.GET.keys(): statuses = req.GET.getall('status') states = common.task_and_vm_state_from_status(statuses) vm_state, task_state = states if not vm_state and not task_state: return {'servers': []} search_opts['vm_state'] = vm_state # When we search by vm state, task state will return 'default'. # So we don't need task_state search_opt. if 'default' not in task_state: search_opts['task_state'] = task_state if 'changes-since' in search_opts: try: parsed = timeutils.parse_isotime(search_opts['changes-since']) except __HOLE__: msg = _('Invalid changes-since value') raise exc.HTTPBadRequest(explanation=msg) search_opts['changes-since'] = parsed # By default, compute's get_all() will return deleted instances. # If an admin hasn't specified a 'deleted' search option, we need # to filter out deleted instances by setting the filter ourselves. # ... Unless 'changes-since' is specified, because 'changes-since' # should return recently deleted images according to the API spec. if 'deleted' not in search_opts: if 'changes-since' not in search_opts: # No 'changes-since', so we only want non-deleted servers search_opts['deleted'] = False else: # Convert deleted filter value to a valid boolean. # Return non-deleted servers if an invalid value # is passed with deleted filter. search_opts['deleted'] = strutils.bool_from_string( search_opts['deleted'], default=False) if search_opts.get("vm_state") == ['deleted']: if context.is_admin: search_opts['deleted'] = True else: msg = _("Only administrators may list deleted instances") raise exc.HTTPForbidden(explanation=msg) all_tenants = common.is_all_tenants(search_opts) # use the boolean from here on out so remove the entry from search_opts # if it's present search_opts.pop('all_tenants', None) elevated = None if all_tenants: policy.enforce(context, 'compute:get_all_tenants', {'project_id': context.project_id, 'user_id': context.user_id}) elevated = context.elevated() else: if context.project_id: search_opts['project_id'] = context.project_id else: search_opts['user_id'] = context.user_id limit, marker = common.get_limit_and_marker(req) # Sorting by multiple keys and directions is conditionally enabled sort_keys, sort_dirs = None, None if self.ext_mgr.is_loaded('os-server-sort-keys'): sort_keys, sort_dirs = common.get_sort_params(req.params) expected_attrs = None if is_detail: # merge our expected attrs with what the view builder needs for # showing details expected_attrs = self._view_builder.get_show_expected_attrs( expected_attrs) try: instance_list = self.compute_api.get_all(elevated or context, search_opts=search_opts, limit=limit, marker=marker, want_objects=True, expected_attrs=expected_attrs, sort_keys=sort_keys, sort_dirs=sort_dirs) except exception.MarkerNotFound: msg = _('marker [%s] not found') % marker raise exc.HTTPBadRequest(explanation=msg) except exception.FlavorNotFound: LOG.debug("Flavor '%s' could not be found", search_opts['flavor']) instance_list = objects.InstanceList() if is_detail: instance_list._context = context instance_list.fill_faults() response = self._view_builder.detail(req, instance_list) else: 
response = self._view_builder.index(req, instance_list) req.cache_db_instances(instance_list) return response
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._get_servers
6,541
def _get_injected_files(self, personality):
    """Create a list of injected files from the personality attribute.

    At this time, injected_files must be formatted as a list of
    (file_path, file_content) pairs for compatibility with the
    underlying compute service.
    """
    injected_files = []

    for item in personality:
        try:
            path = item['path']
            contents = item['contents']
        except KeyError as key:
            expl = _('Bad personality format: missing %s') % key
            raise exc.HTTPBadRequest(explanation=expl)
        except __HOLE__:
            expl = _('Bad personality format')
            raise exc.HTTPBadRequest(explanation=expl)
        if self._decode_base64(contents) is None:
            expl = _('Personality content for %s cannot be decoded') % path
            raise exc.HTTPBadRequest(explanation=expl)
        injected_files.append((path, contents))
    return injected_files
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._get_injected_files
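The two except clauses above separate "dict missing a key" (KeyError, which names the missing key) from "not a dict at all" (indexing a string or list with 'path' raises TypeError). A sketch with an illustrative helper:

def unpack_item(item):
    try:
        return item['path'], item['contents']
    except KeyError as key:
        raise ValueError('missing %s' % key)   # e.g. "missing 'contents'"
    except TypeError:
        raise ValueError('bad personality format')

assert unpack_item({'path': '/etc/motd', 'contents': 'aGk='})
# unpack_item('not-a-dict') raises ValueError('bad personality format')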
6,542
def _get_requested_networks(self, requested_networks): """Create a list of requested networks from the networks attribute.""" networks = [] network_uuids = [] for network in requested_networks: request = objects.NetworkRequest() try: try: request.port_id = network.get('port', None) except ValueError: msg = _("Bad port format: port uuid is " "not in proper format " "(%s)") % network.get('port') raise exc.HTTPBadRequest(explanation=msg) if request.port_id: request.network_id = None if not utils.is_neutron(): # port parameter is only for neutron v2.0 msg = _("Unknown argument : port") raise exc.HTTPBadRequest(explanation=msg) else: request.network_id = network['uuid'] if (not request.port_id and not uuidutils.is_uuid_like(request.network_id)): br_uuid = request.network_id.split('-', 1)[-1] if not uuidutils.is_uuid_like(br_uuid): msg = _("Bad networks format: network uuid is " "not in proper format " "(%s)") % request.network_id raise exc.HTTPBadRequest(explanation=msg) # fixed IP address is optional # if the fixed IP address is not provided then # it will use one of the available IP address from the network try: request.address = network.get('fixed_ip', None) except ValueError: msg = (_("Invalid fixed IP address (%s)") % network.get('fixed_ip')) raise exc.HTTPBadRequest(explanation=msg) # duplicate networks are allowed only for neutron v2.0 if (not utils.is_neutron() and request.network_id and request.network_id in network_uuids): expl = (_("Duplicate networks" " (%s) are not allowed") % request.network_id) raise exc.HTTPBadRequest(explanation=expl) network_uuids.append(request.network_id) networks.append(request) except KeyError as key: expl = _('Bad network format: missing %s') % key raise exc.HTTPBadRequest(explanation=expl) except __HOLE__: expl = _('Bad networks format') raise exc.HTTPBadRequest(explanation=expl) return objects.NetworkRequestList(objects=networks) # NOTE(vish): Without this regex, b64decode will happily # ignore illegal bytes in the base64 encoded # data.
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._get_requested_networks
6,543
def _decode_base64(self, data):
    if isinstance(data, six.binary_type) and hasattr(data, "decode"):
        try:
            data = data.decode("utf-8")
        except __HOLE__:
            return None
    data = re.sub(r'\s', '', data)
    if not self.B64_REGEX.match(data):
        return None
    try:
        return base64.b64decode(data)
    except TypeError:
        return None
UnicodeDecodeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._decode_base64
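Decoding here can fail at two distinct layers: the bytes may not be UTF-8 text at all (UnicodeDecodeError), or they may be text that is not valid base64. A version-tolerant sketch (Python 2 raises TypeError from b64decode; Python 3 raises binascii.Error, a ValueError subclass):

import base64
import re

def decode_b64(data):
    if isinstance(data, bytes):
        try:
            data = data.decode('utf-8')
        except UnicodeDecodeError:
            return None               # arbitrary binary, not base64 text
    data = re.sub(r'\s', '', data)
    try:
        return base64.b64decode(data)
    except (TypeError, ValueError):
        return None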
6,544
@wsgi.response(202) def create(self, req, body): """Creates a new server for a given user.""" if not self.is_valid_body(body, 'server'): raise exc.HTTPUnprocessableEntity() context = req.environ['nova.context'] server_dict = body['server'] password = self._get_server_admin_password(server_dict) if 'name' not in server_dict: msg = _("Server name is not defined") raise exc.HTTPBadRequest(explanation=msg) name = server_dict['name'] self._validate_server_name(name) name = name.strip() image_uuid = self._image_from_req_data(body) personality = server_dict.get('personality') config_drive = None if self.ext_mgr.is_loaded('os-config-drive'): config_drive = server_dict.get('config_drive') injected_files = [] if personality: injected_files = self._get_injected_files(personality) sg_names = [] if self.ext_mgr.is_loaded('os-security-groups'): security_groups = server_dict.get('security_groups') if security_groups is not None: try: sg_names = [sg['name'] for sg in security_groups if sg.get('name')] except AttributeError: msg = _("Invalid input for field/attribute %(path)s." " Value: %(value)s. %(message)s") % { 'path': 'security_groups', 'value': security_groups, 'message': '' } raise exc.HTTPBadRequest(explanation=msg) if not sg_names: sg_names.append('default') sg_names = list(set(sg_names)) requested_networks = self._determine_requested_networks(server_dict) (access_ip_v4, ) = server_dict.get('accessIPv4'), if access_ip_v4 is not None: self._validate_access_ipv4(access_ip_v4) (access_ip_v6, ) = server_dict.get('accessIPv6'), if access_ip_v6 is not None: self._validate_access_ipv6(access_ip_v6) flavor_id = self._flavor_id_from_req_data(body) # optional openstack extensions: key_name = self._extract(server_dict, 'os-keypairs', 'key_name') availability_zone = self._extract(server_dict, 'os-availability-zone', 'availability_zone') user_data = self._extract(server_dict, 'os-user-data', 'user_data') self._validate_user_data(user_data) image_uuid_specified = bool(image_uuid) legacy_bdm, block_device_mapping = self._extract_bdm(server_dict, image_uuid_specified) ret_resv_id = False # min_count and max_count are optional. If they exist, they may come # in as strings. Verify that they are valid integers and > 0. # Also, we want to default 'min_count' to 1, and default # 'max_count' to be 'min_count'. 
min_count = 1 max_count = 1 if self.ext_mgr.is_loaded('os-multiple-create'): ret_resv_id = server_dict.get('return_reservation_id', False) min_count = server_dict.get('min_count', 1) max_count = server_dict.get('max_count', min_count) try: min_count = utils.validate_integer( min_count, "min_count", min_value=1) max_count = utils.validate_integer( max_count, "max_count", min_value=1) except exception.InvalidInput as e: raise exc.HTTPBadRequest(explanation=e.format_message()) if min_count > max_count: msg = _('min_count must be <= max_count') raise exc.HTTPBadRequest(explanation=msg) auto_disk_config = False if self.ext_mgr.is_loaded('OS-DCF'): auto_disk_config = server_dict.get('auto_disk_config') scheduler_hints = {} if self.ext_mgr.is_loaded('OS-SCH-HNT'): scheduler_hints = server_dict.get('scheduler_hints', {}) parse_az = self.compute_api.parse_availability_zone availability_zone, host, node = parse_az(context, availability_zone) check_server_group_quota = self.ext_mgr.is_loaded( 'os-server-group-quotas') try: _get_inst_type = flavors.get_flavor_by_flavor_id inst_type = _get_inst_type(flavor_id, ctxt=context, read_deleted="no") (instances, resv_id) = self.compute_api.create(context, inst_type, image_uuid, display_name=name, display_description=name, key_name=key_name, metadata=server_dict.get('metadata', {}), access_ip_v4=access_ip_v4, access_ip_v6=access_ip_v6, injected_files=injected_files, admin_password=password, min_count=min_count, max_count=max_count, requested_networks=requested_networks, security_group=sg_names, user_data=user_data, availability_zone=availability_zone, forced_host=host, forced_node=node, config_drive=config_drive, block_device_mapping=block_device_mapping, auto_disk_config=auto_disk_config, scheduler_hints=scheduler_hints, legacy_bdm=legacy_bdm, check_server_group_quota=check_server_group_quota) except (exception.QuotaError, exception.PortLimitExceeded) as error: raise exc.HTTPForbidden( explanation=error.format_message()) except messaging.RemoteError as err: msg = "%(err_type)s: %(err_msg)s" % {'err_type': err.exc_type, 'err_msg': err.value} raise exc.HTTPBadRequest(explanation=msg) except __HOLE__ as error: msg = "UnicodeError: %s" % error raise exc.HTTPBadRequest(explanation=msg) except Exception: # The remaining cases can be handled in a standard fashion. self._handle_create_exception(*sys.exc_info()) # If the caller wanted a reservation_id, return it if ret_resv_id: return wsgi.ResponseObject({'reservation_id': resv_id}) req.cache_db_instances(instances) server = self._view_builder.create(req, instances[0]) if CONF.enable_instance_password: server['server']['adminPass'] = password robj = wsgi.ResponseObject(server) return self._add_location(robj)
UnicodeDecodeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller.create
6,545
def _image_ref_from_req_data(self, data):
    try:
        return six.text_type(data['server']['imageRef'])
    except (__HOLE__, KeyError):
        msg = _("Missing imageRef attribute")
        raise exc.HTTPBadRequest(explanation=msg)
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._image_ref_from_req_data
6,546
def _flavor_id_from_req_data(self, data):
    try:
        flavor_ref = data['server']['flavorRef']
    except (__HOLE__, KeyError):
        msg = _("Missing flavorRef attribute")
        raise exc.HTTPBadRequest(explanation=msg)

    try:
        return common.get_id_from_href(flavor_ref)
    except ValueError:
        msg = _("Invalid flavorRef provided.")
        raise exc.HTTPBadRequest(explanation=msg)
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._flavor_id_from_req_data
6,547
@wsgi.response(202)
@wsgi.action('changePassword')
def _action_change_password(self, req, id, body):
    context = req.environ['nova.context']
    if (not body.get('changePassword')
            or 'adminPass' not in body['changePassword']):
        msg = _("No adminPass was specified")
        raise exc.HTTPBadRequest(explanation=msg)
    password = self._get_server_admin_password(body['changePassword'])
    server = self._get_server(context, req, id)
    try:
        self.compute_api.set_admin_password(context, server, password)
    except exception.InstancePasswordSetFailed as e:
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.InstanceInvalidState as e:
        raise common.raise_http_conflict_for_instance_invalid_state(
            e, 'changePassword', id)
    except __HOLE__:
        msg = _("Unable to set password on instance")
        raise exc.HTTPNotImplemented(explanation=msg)
    return webob.Response(status_int=202)
NotImplementedError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._action_change_password
6,548
def _validate_metadata(self, metadata):
    """Ensure that we can work with the metadata given."""
    try:
        six.iteritems(metadata)
    except __HOLE__:
        msg = _("Unable to parse metadata key/value pairs.")
        LOG.debug(msg)
        raise exc.HTTPBadRequest(explanation=msg)
AttributeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._validate_metadata
6,549
@wsgi.response(202)
@wsgi.action('resize')
def _action_resize(self, req, id, body):
    """Resizes a given instance to the flavor size requested."""
    try:
        flavor_ref = str(body["resize"]["flavorRef"])
        if not flavor_ref:
            msg = _("Resize request has invalid 'flavorRef' attribute.")
            raise exc.HTTPBadRequest(explanation=msg)
    except (__HOLE__, TypeError):
        msg = _("Resize requests require 'flavorRef' attribute.")
        raise exc.HTTPBadRequest(explanation=msg)

    kwargs = {}
    if 'auto_disk_config' in body['resize']:
        kwargs['auto_disk_config'] = body['resize']['auto_disk_config']

    return self._resize(req, id, flavor_ref, **kwargs)
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._action_resize
6,550
@wsgi.response(202) @wsgi.action('rebuild') def _action_rebuild(self, req, id, body): """Rebuild an instance with the given attributes.""" body = body['rebuild'] try: image_href = body["imageRef"] except (KeyError, TypeError): msg = _("Could not parse imageRef from request.") raise exc.HTTPBadRequest(explanation=msg) image_href = self._image_uuid_from_href(image_href) password = self._get_server_admin_password(body) context = req.environ['nova.context'] instance = self._get_server(context, req, id) attr_map = { 'personality': 'files_to_inject', 'name': 'display_name', 'accessIPv4': 'access_ip_v4', 'accessIPv6': 'access_ip_v6', 'metadata': 'metadata', 'auto_disk_config': 'auto_disk_config', } kwargs = {} # take the preserve_ephemeral value into account only when the # corresponding extension is active if (self.ext_mgr.is_loaded('os-preserve-ephemeral-rebuild') and 'preserve_ephemeral' in body): kwargs['preserve_ephemeral'] = strutils.bool_from_string( body['preserve_ephemeral'], strict=True) if 'accessIPv4' in body: self._validate_access_ipv4(body['accessIPv4']) if 'accessIPv6' in body: self._validate_access_ipv6(body['accessIPv6']) if 'name' in body: self._validate_server_name(body['name']) for request_attribute, instance_attribute in attr_map.items(): try: kwargs[instance_attribute] = body[request_attribute] except (KeyError, __HOLE__): pass self._validate_metadata(kwargs.get('metadata', {})) if 'files_to_inject' in kwargs: personality = kwargs.pop('files_to_inject') files_to_inject = self._get_injected_files(personality) else: files_to_inject = None try: self.compute_api.rebuild(context, instance, image_href, password, files_to_inject=files_to_inject, **kwargs) except exception.InstanceIsLocked as e: raise exc.HTTPConflict(explanation=e.format_message()) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'rebuild', id) except exception.InstanceNotFound: msg = _("Instance could not be found") raise exc.HTTPNotFound(explanation=msg) except exception.InvalidMetadataSize as error: raise exc.HTTPRequestEntityTooLarge( explanation=error.format_message()) except exception.ImageNotFound: msg = _("Cannot find image for rebuild") raise exc.HTTPBadRequest(explanation=msg) except exception.QuotaError as error: raise exc.HTTPForbidden(explanation=error.format_message()) except (exception.ImageNotActive, exception.FlavorDiskTooSmall, exception.FlavorMemoryTooSmall, exception.InvalidMetadata, exception.AutoDiskConfigDisabledByImage) as error: raise exc.HTTPBadRequest(explanation=error.format_message()) instance = self._get_server(context, req, id, is_detail=True) view = self._view_builder.show(req, instance) # Add on the adminPass attribute since the view doesn't do it # unless instance passwords are disabled if CONF.enable_instance_password: view['server']['adminPass'] = password robj = wsgi.ResponseObject(view) return self._add_location(robj)
TypeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._action_rebuild
6,551
@wsgi.response(202) @wsgi.action('createImage') @common.check_snapshots_enabled def _action_create_image(self, req, id, body): """Snapshot a server instance.""" context = req.environ['nova.context'] entity = body.get("createImage", {}) image_name = entity.get("name") if not image_name: msg = _("createImage entity requires name attribute") raise exc.HTTPBadRequest(explanation=msg) props = {} metadata = entity.get('metadata', {}) common.check_img_metadata_properties_quota(context, metadata) try: props.update(metadata) except __HOLE__: msg = _("Invalid metadata") raise exc.HTTPBadRequest(explanation=msg) instance = self._get_server(context, req, id) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( context, instance.uuid) try: if self.compute_api.is_volume_backed_instance(context, instance, bdms): policy.enforce(context, 'compute:snapshot_volume_backed', {'project_id': context.project_id, 'user_id': context.user_id}) image = self.compute_api.snapshot_volume_backed( context, instance, image_name, extra_properties=props) else: image = self.compute_api.snapshot(context, instance, image_name, extra_properties=props) except exception.InstanceInvalidState as state_error: common.raise_http_conflict_for_instance_invalid_state(state_error, 'createImage', id) except exception.Invalid as err: raise exc.HTTPBadRequest(explanation=err.format_message()) # build location of newly-created image entity image_id = str(image['id']) url_prefix = self._view_builder._update_glance_link_prefix( req.application_url) image_ref = common.url_join(url_prefix, context.project_id, 'images', image_id) resp = webob.Response(status_int=202) resp.headers['Location'] = image_ref return resp
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._action_create_image
6,552
def _get_server_admin_password(self, server):
    """Determine the admin password for a server on creation."""
    try:
        password = server['adminPass']
        self._validate_admin_password(password)
    except __HOLE__:
        password = utils.generate_password()
    except ValueError:
        raise exc.HTTPBadRequest(explanation=_("Invalid adminPass"))

    return password
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/servers.py/Controller._get_server_admin_password
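KeyError serves as the "caller supplied nothing" signal here: an absent adminPass means generate one, while a present-but-invalid one must fail loudly, so the two cases get separate except clauses. A sketch with an illustrative validity check and generator:

import binascii
import os

def admin_password(server):
    try:
        password = server['adminPass']
        if not password:
            raise ValueError('empty adminPass')
    except KeyError:
        # no password supplied at all: fall back to a random one
        password = binascii.hexlify(os.urandom(8)).decode()
    return password

# admin_password({}) yields a random 16-hex-char fallback password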
6,553
def getSession(self, sessionInterface = None):
    # Session management
    if not self.session:
        cookiename = string.join(['TWISTED_SESSION'] + self.sitepath, "_")
        sessionCookie = self.getCookie(cookiename)
        if sessionCookie:
            try:
                self.session = self.site.getSession(sessionCookie)
            except __HOLE__:
                pass
        # if it still hasn't been set, fix it up.
        if not self.session:
            self.session = self.site.makeSession()
            self.addCookie(cookiename, self.session.uid, path='/')
    self.session.touch()
    if sessionInterface:
        return self.session.getComponent(sessionInterface)
    return self.session
KeyError
dataset/ETHPy150Open kuri65536/python-for-android/python-modules/twisted/twisted/web/server.py/Request.getSession
6,554
def test_transientlogger():
    import random, time
    happy, nightmare = luv(), bad_things()
    try:
        while True:
            if random.randrange(60) == 0:
                logger.warning(nightmare.next())
            else:
                logger.debug(happy.next())
            time.sleep(0.02)
    except __HOLE__:
        pass
StopIteration
dataset/ETHPy150Open jart/fabulous/fabulous/test_transientlogging.py/test_transientlogger
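The bare gen.next() calls are Python 2 spelling; the portable form is next(gen). Either way an exhausted generator signals completion with StopIteration, which this test loop uses as its exit condition (the next sample does the same). A sketch:

def take(gen, n):
    out = []
    try:
        for _ in range(n):
            out.append(next(gen))   # raises StopIteration when gen runs dry
    except StopIteration:
        pass
    return out

assert take(iter([1, 2]), 5) == [1, 2]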
6,555
def test_transientlogger2():
    import time, random
    gothic = lorem_gotham()
    try:
        while True:
            if random.randrange(20) == 0:
                logger.warning(red(gothic.next()))
            else:
                logger.debug(gothic.next())
            time.sleep(0.1)
    except __HOLE__:
        pass
StopIteration
dataset/ETHPy150Open jart/fabulous/fabulous/test_transientlogging.py/test_transientlogger2
6,556
def installed( name, version=None, refresh=None, fromrepo=None, skip_verify=False, skip_suggestions=False, pkgs=None, sources=None, allow_updates=False, pkg_verify=False, normalize=True, ignore_epoch=False, reinstall=False, **kwargs): ''' Ensure that the package is installed, and that it is the correct version (if specified). :param str name: The name of the package to be installed. This parameter is ignored if either "pkgs" or "sources" is used. Additionally, please note that this option can only be used to install packages from a software repository. To install a package file manually, use the "sources" option detailed below. :param str version: Install a specific version of a package. This option is ignored if "sources" is used. Currently, this option is supported for the following pkg providers: :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`. The version number includes the release designation where applicable, to allow Salt to target a specific release of a given version. When in doubt, using the ``pkg.latest_version`` function for an uninstalled package will tell you the version available. .. code-block:: bash # salt myminion pkg.latest_version vim-enhanced myminion: 2:7.4.160-1.el7 .. important:: As of version 2015.8.7, for distros which use yum/dnf, packages which have a version with a nonzero epoch (that is, versions which start with a number followed by a colon like in the ``pkg.latest_version`` output above) must have the epoch included when specifying the version number. For example: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 In version 2015.8.9, an **ignore_epoch** argument has been added to :py:mod:`pkg.installed <salt.states.pkg.installed>`, :py:mod:`pkg.removed <salt.states.pkg.installed>`, and :py:mod:`pkg.purged <salt.states.pkg.installed>` states, which causes the epoch to be disregarded when the state checks to see if the desired version was installed. Also, while this function is not yet implemented for all pkg frontends, :mod:`pkg.list_repo_pkgs <salt.modules.yumpkg.list_repo_pkgs>` will show all versions available in the various repositories for a given package, irrespective of whether or not it is installed. .. code-block:: bash # salt myminion pkg.list_repo_pkgs httpd myminion: ---------- base: |_ ---------- httpd: 2.2.15-29.el6.centos updates: |_ ---------- httpd: 2.2.15-30.el6.centos The version strings returned by either of these functions can be used as version specifiers in pkg states. You can install a specific version when using the ``pkgs`` argument by including the version after the package: .. code-block:: yaml common_packages: pkg.installed: - pkgs: - unzip - dos2unix - salt-minion: 2015.8.5-1.el6 :param bool refresh: Update the repo database of available packages prior to installing the requested package. :param str fromrepo: Specify a repository from which to install .. note:: Distros which use APT (Debian, Ubuntu, etc.) do not have a concept of repositories, in the same way as YUM-based distros do. When a source is added, it is assigned to a given release. Consider the following source configuration: .. code-block:: text deb http://ppa.launchpad.net/saltstack/salt/ubuntu precise main The packages provided by this source would be made available via the ``precise`` release, therefore ``fromrepo`` would need to be set to ``precise`` for Salt to install the package from this source. 
Having multiple sources in the same release may result in the default install candidate being newer than what is desired. If this is the case, the desired version must be specified using the ``version`` parameter. If the ``pkgs`` parameter is being used to install multiple packages in the same state, then instead of using ``version``, use the method of version specification described in the **Multiple Package Installation Options** section below. Running the shell command ``apt-cache policy pkgname`` on a minion can help elucidate the APT configuration and aid in properly configuring states: .. code-block:: bash root@saltmaster:~# salt ubuntu01 cmd.run 'apt-cache policy ffmpeg' ubuntu01: ffmpeg: Installed: (none) Candidate: 7:0.10.11-1~precise1 Version table: 7:0.10.11-1~precise1 0 500 http://ppa.launchpad.net/jon-severinsson/ffmpeg/ubuntu/ precise/main amd64 Packages 4:0.8.10-0ubuntu0.12.04.1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise-updates/main amd64 Packages 500 http://security.ubuntu.com/ubuntu/ precise-security/main amd64 Packages 4:0.8.1-0ubuntu1 0 500 http://us.archive.ubuntu.com/ubuntu/ precise/main amd64 Packages The release is located directly after the source's URL. The actual release name is the part before the slash, so to install version **4:0.8.10-0ubuntu0.12.04.1** either ``precise-updates`` or ``precise-security`` could be used for the ``fromrepo`` value. :param bool skip_verify: Skip the GPG verification check for the package to be installed :param bool skip_suggestions: Force strict package naming. Disables lookup of package alternatives. .. versionadded:: 2014.1.1 :param bool allow_updates: Allow the package to be updated outside Salt's control (e.g. auto updates on Windows). This means a package on the Minion can have a newer version than the latest available in the repository without enforcing a re-installation of the package. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml httpd: pkg.installed: - fromrepo: mycustomrepo - skip_verify: True - skip_suggestions: True - version: 2.0.6~ubuntu3 - refresh: True - allow_updates: True - hold: False :param bool pkg_verify: .. versionadded:: 2014.7.0 For requested packages that are already installed and would not be targeted for upgrade or downgrade, use pkg.verify to determine if any of the files installed by the package have been altered. If files have been altered, the reinstall option of pkg.install is used to force a reinstall. Types to ignore can be passed to pkg.verify (see example below). Currently, this option is supported for the following pkg providers: :mod:`yumpkg <salt.modules.yumpkg>`. Examples: .. code-block:: yaml httpd: pkg.installed: - version: 2.2.15-30.el6.centos - pkg_verify: True .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz - pkg_verify: - ignore_types: [config,doc] :param bool normalize: Normalize the package name by removing the architecture, if the architecture of the package is different from the architecture of the operating system. The ability to disable this behavior is useful for poorly-created packages which include the architecture as an actual part of the name, such as kernel modules which match a specific kernel version. .. versionadded:: 2014.7.0 Example: .. code-block:: yaml gpfs.gplbin-2.6.32-279.31.1.el6.x86_64: pkg.installed: - normalize: False :param bool ignore_epoch: When a package version contains an non-zero epoch (e.g. 
``1:3.14.159-2.el7``, and a specific version of a package is desired, set this option to ``True`` to ignore the epoch when comparing versions. This allows for the following SLS to be used: .. code-block:: yaml # Actual vim-enhanced version: 2:7.4.160-1.el7 vim-enhanced: pkg.installed: - version: 7.4.160-1.el7 - ignore_epoch: True Without this option set to ``True`` in the above example, the package would be installed, but the state would report as failed because the actual installed version would be ``2:7.4.160-1.el7``. Alternatively, this option can be left as ``False`` and the full version string (with epoch) can be specified in the SLS file: .. code-block:: yaml vim-enhanced: pkg.installed: - version: 2:7.4.160-1.el7 .. versionadded:: 2015.8.9 | **MULTIPLE PACKAGE INSTALLATION OPTIONS: (not supported in Windows or pkgng)** :param list pkgs: A list of packages to install from a software repository. All packages listed under ``pkgs`` will be installed via a single command. Example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar - baz - hold: True ``NOTE:`` For :mod:`apt <salt.modules.aptpkg>`, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>`, :mod:`yumpkg <salt.modules.yumpkg>`, and :mod:`zypper <salt.modules.zypper>`, version numbers can be specified in the ``pkgs`` argument. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: 1.2.3-4 - baz Additionally, :mod:`ebuild <salt.modules.ebuild>`, :mod:`pacman <salt.modules.pacman>` and :mod:`zypper <salt.modules.zypper>` support the ``<``, ``<=``, ``>=``, and ``>`` operators for more control over what versions will be installed. For Example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo - bar: '>=1.2.3-4' - baz ``NOTE:`` When using comparison operators, the expression must be enclosed in quotes to avoid a YAML render error. With :mod:`ebuild <salt.modules.ebuild>` is also possible to specify a use flag list and/or if the given packages should be in package.accept_keywords file and/or the overlay from which you want the package to be installed. For example: .. code-block:: yaml mypkgs: pkg.installed: - pkgs: - foo: '~' - bar: '~>=1.2:slot::overlay[use,-otheruse]' - baz :param list sources: A list of packages to install, along with the source URI or local path from which to install each package. In the example below, ``foo``, ``bar``, ``baz``, etc. refer to the name of the package, as it would appear in the output of the ``pkg.version`` or ``pkg.list_pkgs`` salt CLI commands. .. code-block:: yaml mypkgs: pkg.installed: - sources: - foo: salt://rpms/foo.rpm - bar: http://somesite.org/bar.rpm - baz: ftp://someothersite.org/baz.rpm - qux: /minion/path/to/qux.rpm **PLATFORM-SPECIFIC ARGUMENTS** These are specific to each OS. If it does not apply to the execution module for your OS, it is ignored. :param bool hold: Force the package to be held at the current installed version. Currently works with YUM & APT based systems. .. versionadded:: 2014.7.0 :param list names: A list of packages to install from a software repository. Each package will be installed individually by the package manager. .. warning:: Unlike ``pkgs``, the ``names`` parameter cannot specify a version. In addition, it makes a separate call to the package management frontend to install each package, whereas ``pkgs`` makes just a single call. 
It is therefore recommended to use ``pkgs`` instead of ``names`` to install multiple packages, both for the additional features and the performance improvement that it brings. :param bool install_recommends: Whether to install the packages marked as recommended. Default is ``True``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - install_recommends: False :param bool only_upgrade: Only upgrade the packages, if they are already installed. Default is ``False``. Currently only works with APT-based systems. .. versionadded:: 2015.5.0 .. code-block:: yaml httpd: pkg.installed: - only_upgrade: True :return: A dictionary containing the state of the software installation :rtype dict: ''' if isinstance(pkgs, list) and len(pkgs) == 0: return {'name': name, 'changes': {}, 'result': True, 'comment': 'No packages to install provided'} kwargs['saltenv'] = __env__ rtag = __gen_rtag() refresh = bool(salt.utils.is_true(refresh) or (os.path.isfile(rtag) and salt.utils.is_true(refresh)) ) if not isinstance(pkg_verify, list): pkg_verify = pkg_verify is True if (pkg_verify or isinstance(pkg_verify, list)) \ and 'pkg.verify' not in __salt__: return {'name': name, 'changes': {}, 'result': False, 'comment': 'pkg.verify not implemented'} if not isinstance(version, six.string_types) and version is not None: version = str(version) if version is not None and version == 'latest': version = __salt__['pkg.latest_version'](name) # If version is empty, it means the latest version is installed # so we grab that version to avoid passing an empty string if not version: version = __salt__['pkg.version'](name) kwargs['allow_updates'] = allow_updates result = _find_install_targets(name, version, pkgs, sources, fromrepo=fromrepo, skip_suggestions=skip_suggestions, pkg_verify=pkg_verify, normalize=normalize, ignore_epoch=ignore_epoch, reinstall=reinstall, **kwargs) try: (desired, targets, to_unpurge, to_reinstall, altered_files, warnings) = result except __HOLE__: # _find_install_targets() found no targets or encountered an error # check that the hold function is available if 'pkg.hold' in __salt__: if 'hold' in kwargs: try: if kwargs['hold']: hold_ret = __salt__['pkg.hold']( name=name, pkgs=pkgs, sources=sources ) else: hold_ret = __salt__['pkg.unhold']( name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: return {'name': name, 'changes': {}, 'result': False, 'comment': str(exc)} if 'result' in hold_ret and not hold_ret['result']: return {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if modified_hold: for i in modified_hold: result['comment'] += ' {0}'.format(i['comment']) result['result'] = i['result'] result['changes'][i['name']] = i['changes'] if not_modified_hold: for i in not_modified_hold: result['comment'] += ' {0}'.format(i['comment']) result['result'] = i['result'] if failed_hold: for i in failed_hold: result['comment'] += ' {0}'.format(i['comment']) result['result'] = i['result'] return result if to_unpurge and 'lowpkg.unpurge' not in __salt__: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'lowpkg.unpurge not 
implemented'} if warnings: ret['comment'] += '.' + '. '.join(warnings) + '.' return ret # Remove any targets not returned by _find_install_targets if pkgs: pkgs = [dict([(x, y)]) for x, y in six.iteritems(targets)] pkgs.extend([dict([(x, y)]) for x, y in six.iteritems(to_reinstall)]) elif sources: oldsources = sources sources = [x for x in oldsources if next(iter(list(x.keys()))) in targets] sources.extend([x for x in oldsources if next(iter(list(x.keys()))) in to_reinstall]) comment = [] if __opts__['test']: if targets: if sources: summary = ', '.join(targets) else: summary = ', '.join([_get_desired_pkg(x, targets) for x in targets]) comment.append('The following packages would be ' 'installed/updated: {0}'.format(summary)) if to_unpurge: comment.append( 'The following packages would have their selection status ' 'changed from \'purge\' to \'install\': {0}' .format(', '.join(to_unpurge)) ) if to_reinstall: # Add a comment for each package in to_reinstall with its # pkg.verify output if reinstall: reinstall_targets = [] for reinstall_pkg in to_reinstall: if sources: reinstall_targets.append(reinstall_pkg) else: reinstall_targets.append( _get_desired_pkg(reinstall_pkg, to_reinstall) ) msg = 'The following packages would be reinstalled: ' msg += ', '.join(reinstall_targets) comment.append(msg) else: for reinstall_pkg in to_reinstall: if sources: pkgstr = reinstall_pkg else: pkgstr = _get_desired_pkg(reinstall_pkg, to_reinstall) comment.append( 'Package \'{0}\' would be reinstalled because the ' 'following files have been altered:'.format(pkgstr) ) comment.append( _nested_output(altered_files[reinstall_pkg]) ) ret = {'name': name, 'changes': {}, 'result': None, 'comment': '\n'.join(comment)} if warnings: ret['comment'] += '\n' + '. '.join(warnings) + '.' return ret changes = {'installed': {}} modified_hold = None not_modified_hold = None failed_hold = None if targets or to_reinstall: try: pkg_ret = __salt__['pkg.install'](name, refresh=refresh, version=version, fromrepo=fromrepo, skip_verify=skip_verify, pkgs=pkgs, sources=sources, reinstall=bool(to_reinstall), normalize=normalize, **kwargs) if os.path.isfile(rtag) and refresh: os.remove(rtag) except CommandExecutionError as exc: ret = {'name': name, 'result': False} if exc.info: # Get information for state return from the exception. ret['changes'] = exc.info.get('changes', {}) ret['comment'] = exc.strerror_without_changes else: ret['changes'] = {} ret['comment'] = ('An error was encountered while installing ' 'package(s): {0}'.format(exc)) if warnings: ret['comment'] += '\n\n' + '. '.join(warnings) + '.' return ret if isinstance(pkg_ret, dict): changes['installed'].update(pkg_ret) elif isinstance(pkg_ret, six.string_types): comment.append(pkg_ret) # Code below will be looking for a dictionary. If this is a string # it means that there was an exception raised and that no packages # changed, so now that we have added this error to the comments we # set this to an empty dictionary so that the code below which # checks reinstall targets works. pkg_ret = {} if 'pkg.hold' in __salt__: if 'hold' in kwargs: try: if kwargs['hold']: hold_ret = __salt__['pkg.hold']( name=name, pkgs=pkgs, sources=sources ) else: hold_ret = __salt__['pkg.unhold']( name=name, pkgs=pkgs, sources=sources ) except (CommandExecutionError, SaltInvocationError) as exc: comment.append(str(exc)) ret = {'name': name, 'changes': changes, 'result': False, 'comment': '\n'.join(comment)} if warnings: ret['comment'] += '.' + '. '.join(warnings) + '.' 
return ret else: if 'result' in hold_ret and not hold_ret['result']: ret = {'name': name, 'changes': {}, 'result': False, 'comment': 'An error was encountered while ' 'holding/unholding package(s): {0}' .format(hold_ret['comment'])} if warnings: ret['comment'] += '.' + '. '.join(warnings) + '.' return ret else: modified_hold = [hold_ret[x] for x in hold_ret if hold_ret[x]['changes']] not_modified_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['changes'] and hold_ret[x]['result']] failed_hold = [hold_ret[x] for x in hold_ret if not hold_ret[x]['result']] if to_unpurge: changes['purge_desired'] = __salt__['lowpkg.unpurge'](*to_unpurge) # Analyze pkg.install results for packages in targets if sources: modified = [x for x in changes['installed'] if x in targets] not_modified = [x for x in desired if x not in targets and x not in to_reinstall] failed = [x for x in targets if x not in modified] else: if __grains__['os'] == 'FreeBSD': kwargs['with_origin'] = True new_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True, **kwargs) ok, failed = _verify_install(desired, new_pkgs, ignore_epoch=ignore_epoch) modified = [x for x in ok if x in targets] not_modified = [x for x in ok if x not in targets and x not in to_reinstall] failed = [x for x in failed if x in targets] # If there was nothing unpurged, just set the changes dict to the contents # of changes['installed']. if not changes.get('purge_desired'): changes = changes['installed'] if modified: if sources: summary = ', '.join(modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in modified]) if len(summary) < 20: comment.append('The following packages were installed/updated: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} installed/updated.'.format( len(modified), 's' if len(modified) > 1 else '', 'were' if len(modified) > 1 else 'was' ) ) if modified_hold: for i in modified_hold: change_name = i['name'] if change_name in changes: comment.append(i['comment']) if len(changes[change_name]['new']) > 0: changes[change_name]['new'] += '\n' changes[change_name]['new'] += '{0}'.format(i['changes']['new']) if len(changes[change_name]['old']) > 0: changes[change_name]['old'] += '\n' changes[change_name]['old'] += '{0}'.format(i['changes']['old']) # Any requested packages that were not targeted for install or reinstall if not_modified: if sources: summary = ', '.join(not_modified) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in not_modified]) if len(not_modified) <= 20: comment.append('The following packages were already installed: ' '{0}'.format(summary)) else: comment.append( '{0} targeted package{1} {2} already installed'.format( len(not_modified), 's' if len(not_modified) > 1 else '', 'were' if len(not_modified) > 1 else 'was' ) ) if not_modified_hold: for i in not_modified_hold: comment.append(i['comment']) result = True if failed: if sources: summary = ', '.join(failed) else: summary = ', '.join([_get_desired_pkg(x, desired) for x in failed]) comment.insert(0, 'The following packages failed to ' 'install/update: {0}'.format(summary)) result = False if failed_hold: for i in failed_hold: comment.append(i['comment']) result = False # Get the ignore_types list if any from the pkg_verify argument if isinstance(pkg_verify, list) \ and any(x.get('ignore_types') is not None for x in pkg_verify if isinstance(x, _OrderedDict) and 'ignore_types' in x): ignore_types = next(x.get('ignore_types') for x in pkg_verify if 'ignore_types' in x) else: ignore_types = [] # Rerun pkg.verify for packages in to_reinstall to determine failed modified = [] failed = [] for reinstall_pkg in to_reinstall: if reinstall: if reinstall_pkg in pkg_ret: modified.append(reinstall_pkg) else: failed.append(reinstall_pkg) elif pkg_verify: verify_result = __salt__['pkg.verify'](reinstall_pkg, ignore_types=ignore_types) if verify_result: failed.append(reinstall_pkg) altered_files[reinstall_pkg] = verify_result else: modified.append(reinstall_pkg) if modified: # Add a comment for each package in modified with its pkg.verify output for modified_pkg in modified: if sources: pkgstr = modified_pkg else: pkgstr = _get_desired_pkg(modified_pkg, desired) msg = 'Package {0} was reinstalled.'.format(pkgstr) if modified_pkg in altered_files: msg += ' The following files were remediated:' comment.append(msg) comment.append(_nested_output(altered_files[modified_pkg])) else: comment.append(msg) if failed: # Add a comment for each package in failed with its pkg.verify output for failed_pkg in failed: if sources: pkgstr = failed_pkg else: pkgstr = _get_desired_pkg(failed_pkg, desired) msg = ('Reinstall was not successful for package {0}.' .format(pkgstr)) if failed_pkg in altered_files: msg += ' The following files could not be remediated:' comment.append(msg) comment.append(_nested_output(altered_files[failed_pkg])) else: comment.append(msg) result = False ret = {'name': name, 'changes': changes, 'result': result, 'comment': '\n'.join(comment)} if warnings: ret['comment'] += '\n' + '. '.join(warnings) + '.' return ret
ValueError
dataset/ETHPy150Open saltstack/salt/salt/states/pkg.py/installed
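Why the hole above is ValueError: on error, _find_install_targets() hands back a result dict instead of a 6-tuple, and unpacking an iterable of the wrong length raises ValueError. A minimal sketch with hypothetical values:

result = {'name': 'httpd', 'changes': {}, 'result': False, 'comment': 'error'}
try:
    # iterating a dict yields its 4 keys, not the 6 values expected here
    desired, targets, to_unpurge, to_reinstall, altered_files, warnings = result
except ValueError:
    print('got an error dict, not a targets tuple; return it as the state result')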
6,557
def testViewSignatures(self): for app_name in settings.INSTALLED_APPS: try: views = import_module(app_name+'.views') except __HOLE__: continue for view_name in dir(views): view = getattr(views, view_name) if not isinstance(view, type): continue if not BaseView in view.__mro__: continue metadata_sig = None breadcrumb_sig = None handler_sigs = [] initial_context_sig = None for func_name in dir(view): func = getattr(view, func_name) if func_name == 'get_metadata': metadata_sig = getargspec(func) elif func_name == 'initial_context': initial_context_sig = getargspec(func) elif func_name.startswith('handle_') and func_name[7:].upper() == func_name[7:]: handler_sigs.append( (func_name, getargspec(func)) ) elif func_name == 'breadcrumb': if func is BaseView.breadcrumb: breadcrumb_sig = True continue # If it's not gone through BreadcrumbFactory elif type(func) == type(BaseView.breadcrumb): breadcrumb_sig = getargspec(func) else: breadcrumb_sig = getargspec(func.breadcrumb_func) else: continue if not handler_sigs: continue if not breadcrumb_sig: self.fail('%s.%s does not define a breadcrumb' % (app_name, view_name)) # Keep track of the first handler sig to compare things to fhn, fhs = handler_sigs[0] self.assertEqual( fhs.args[:3], ['self','request','context'], "View handler %s.views.%s.%s must take (self, request, context) as its first three arguments" % ( app_name, view_name, fhn, ) ) for handler_name, argspec in handler_sigs: if handler_name != 'handle_HEAD': self.assertEqual( fhs.args, argspec.args, 'View handler signatures differ for %s.views.%s: %s and %s' % ( app_name, view_name, fhn, handler_name ), ) #self.assertEqual( # argspec.varargs, None, # "View handler %s.views.%s.%s takes *%s when it shouldn't" % ( # app_name, view_name, handler_name, argspec.varargs # ), #) #self.assertEqual( # argspec.keywords, None, # "View handler %s.views.%s.%s takes **%s when it shouldn't" % ( # app_name, view_name, handler_name, argspec.keywords # ), #) if not (initial_context_sig.varargs or initial_context_sig.keywords): self.assertEqual( initial_context_sig.args, fhs.args[:2] + fhs.args[3:], "initial_context for %s.views.%s has a signature inconsistent with the handlers" % ( app_name, view_name, ) ) if metadata_sig: self.assertEqual( metadata_sig.args, fhs.args[:2] + fhs.args[3:], "get_metadata for %s.views.%s has a signature inconsistent with the handlers" % ( app_name, view_name, ) ) self.assertEqual( metadata_sig.varargs, None, "get_metadata() for %s.views.%s takes *%s when it shouldn't" % ( app_name, view_name, metadata_sig.varargs ), ) self.assertEqual( metadata_sig.keywords, None, "get_metadata() for %s.views.%s takes **%s when it shouldn't" % ( app_name, view_name, metadata_sig.keywords ), ) if breadcrumb_sig != True: if breadcrumb_sig[0][0] != 'self': fhs = (fhs[0][1:], fhs[1], fhs[2], fhs[3]) self.assertEqual( breadcrumb_sig, fhs, "breadcrumb signature for %s.%s differs from its view handlers (%s, %s)" % ( app_name, view_name, breadcrumb_sig, fhs ) ) else: self.assertEqual( breadcrumb_sig, fhs, "breadcrumb signature for %s.%s differs from its view handlers (%s, %s)" % ( app_name, view_name, breadcrumb_sig, fhs ) )
ImportError
dataset/ETHPy150Open mollyproject/mollyproject/molly/apps/search/tests.py/GenericSearchTestCase.testViewSignatures
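The hole guards import_module for installed apps that ship no views module; the test simply skips them. A minimal sketch (app name hypothetical):

from importlib import import_module

try:
    views = import_module('some_app' + '.views')  # hypothetical app with no views module
except ImportError:
    views = None  # skipped, mirroring the continue above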
6,558
def parse_timestamp(s): ''' Returns (datetime, tz offset in minutes) or (None, None). ''' m = re.match(""" ^ (?P<year>-?[0-9]{4}) - (?P<month>[0-9]{2}) - (?P<day>[0-9]{2}) T (?P<hour>[0-9]{2}) : (?P<minute>[0-9]{2}) : (?P<second>[0-9]{2}) (?P<microsecond>\.[0-9]{1,6})? (?P<tz> Z | (?P<tz_hr>[-+][0-9]{2}) : (?P<tz_min>[0-9]{2}) )? $ """, s, re.X) if m is not None: values = m.groupdict() if values["tz"] in ("Z", None): tz = 0 else: tz = int(values["tz_hr"]) * 60 + int(values["tz_min"]) if values["microsecond"] is None: values["microsecond"] = 0 else: values["microsecond"] = values["microsecond"][1:] values["microsecond"] += "0" * (6 - len(values["microsecond"])) values = dict((k, int(v)) for k, v in list(values.items()) if not k.startswith("tz")) try: return datetime(**values), tz except __HOLE__: pass return None, None
ValueError
dataset/ETHPy150Open Esri/solutions-geoprocessing-toolbox/data_management/toolboxes/scripts/ImportPatrolRptXML.py/parse_timestamp
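The regex accepts any two digits for month and day, so the datetime constructor is the real validator; out-of-range fields raise ValueError, which parse_timestamp turns into (None, None). A runnable sketch:

from datetime import datetime

try:
    datetime(year=2016, month=13, day=1, hour=0, minute=0, second=0, microsecond=0)
except ValueError as exc:
    print(exc)  # month must be in 1..12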
6,559
def _open_write(self): try: backend.SaveFile(self.name, '') except __HOLE__ as e: self._closed = True raise e
IOError
dataset/ETHPy150Open mockfs/mockfs/mockfs/storage.py/file._open_write
6,560
def provision_persistent_stores(self, app_names, options): """ Provision all persistent stores for all apps or for only the app name given. """ # Set refresh parameter database_refresh = options['refresh'] # Get the app harvester app_harvester = SingletonAppHarvester() # Define the list of target apps target_apps = [] target_apps_check = [] # Execute on all apps loaded if ALL_APPS in app_names: target_apps = app_harvester.apps # Execute only on apps given else: for app in app_harvester.apps: # Derive app_name from the index which follows the pattern app_name:home if app.package in app_names: target_apps.append(app) target_apps_check.append(app.package) # Verify all apps included in target apps for app_name in app_names: if app_name not in target_apps_check: self.stdout.write('{0}WARNING:{1} The app named "{2}" cannot be found. Please make sure it is installed ' 'and try again.'.format(TerminalColors.WARNING, TerminalColors.ENDC, app_name)) # Notify user of database provisioning self.stdout.write(TerminalColors.BLUE + '\nProvisioning Persistent Stores...' + TerminalColors.ENDC) # Get database manager url from the config database_manager_db = settings.TETHYS_DATABASES['tethys_db_manager'] database_manager_url = 'postgresql://{0}:{1}@{2}:{3}/{4}'.format(database_manager_db['USER'] if 'USER' in database_manager_db else 'tethys_db_manager', database_manager_db['PASSWORD'] if 'PASSWORD' in database_manager_db else 'pass', database_manager_db['HOST'] if 'HOST' in database_manager_db else '127.0.0.1', database_manager_db['PORT'] if 'PORT' in database_manager_db else '5435', database_manager_db['NAME'] if 'NAME' in database_manager_db else 'tethys_db_manager') database_manager_name = database_manager_url.split('://')[1].split(':')[0] #--------------------------------------------------------------------------------------------------------------# # Get a list of existing databases #--------------------------------------------------------------------------------------------------------------# # Create connection engine engine = create_engine(database_manager_url) # Cannot create databases in a transaction: connect and commit to close transaction connection = engine.connect() # Check for Database existing_dbs_statement = ''' SELECT d.datname as name FROM pg_catalog.pg_database d LEFT JOIN pg_catalog.pg_user u ON d.datdba = u.usesysid ORDER BY 1; ''' existing_dbs = connection.execute(existing_dbs_statement) connection.close() # Compile list of db names existing_db_names = [] for existing_db in existing_dbs: existing_db_names.append(existing_db.name) # Get apps and provision persistent stores if not already created for app in target_apps: # Create multiple persistent stores if necessary persistent_stores = app.persistent_stores() if persistent_stores: # Assemble list of target persistent stores target_persistent_stores = [] # Target the persistent store provided if options['database']: for persistent_store in persistent_stores: if options['database'] == persistent_store.name: target_persistent_stores.append(persistent_store) # Target all persistent stores else: target_persistent_stores = persistent_stores for persistent_store in target_persistent_stores: full_db_name = '_'.join((app.package, persistent_store.name)) new_database = True #--------------------------------------------------------------------------------------------------# # 1. Drop database if refresh option is included #--------------------------------------------------------------------------------------------------# if database_refresh and full_db_name in existing_db_names: # Provide update for user self.stdout.write('Dropping database {2}"{0}"{3} for app {2}"{1}"{3}...'.format( persistent_store.name, app.package, TerminalColors.BLUE, TerminalColors.ENDC )) # Connection delete_connection = engine.connect() # Drop db drop_db_statement = 'DROP DATABASE IF EXISTS {0}'.format(full_db_name) # Close transaction first then execute. delete_connection.execute('commit') delete_connection.execute(drop_db_statement) delete_connection.close() # Update the existing dbs query existing_db_names.pop(existing_db_names.index(full_db_name)) #--------------------------------------------------------------------------------------------------# # 2. Create the database if it does not already exist #--------------------------------------------------------------------------------------------------# if full_db_name not in existing_db_names: # Provide Update for User self.stdout.write('Creating database {2}"{0}"{3} for app {2}"{1}"{3}...'.format( persistent_store.name, app.package, TerminalColors.BLUE, TerminalColors.ENDC )) # Cannot create databases in a transaction: connect and commit to close transaction create_connection = engine.connect() # Create db create_db_statement = ''' CREATE DATABASE {0} WITH OWNER {1} TEMPLATE template0 ENCODING 'UTF8' '''.format(full_db_name, database_manager_name) # Close transaction first and then execute create_connection.execute('commit') create_connection.execute(create_db_statement) create_connection.close() else: # Provide Update for User self.stdout.write('Database {2}"{0}"{3} already exists for app {2}"{1}"{3}, skipping...'.format( persistent_store.name, app.package, TerminalColors.BLUE, TerminalColors.ENDC )) # Set var that is passed to initialization functions new_database = False #--------------------------------------------------------------------------------------------------# # 3. Enable PostGIS extension #--------------------------------------------------------------------------------------------------# if (hasattr(persistent_store, 'spatial') and persistent_store.spatial) or persistent_store.postgis: # Get URL for Tethys Superuser to enable extensions super_db = settings.TETHYS_DATABASES['tethys_super'] super_url = 'postgresql://{0}:{1}@{2}:{3}/{4}'.format(super_db['USER'] if 'USER' in super_db else 'tethys_super', super_db['PASSWORD'] if 'PASSWORD' in super_db else 'pass', super_db['HOST'] if 'HOST' in super_db else '127.0.0.1', super_db['PORT'] if 'PORT' in super_db else '5435', super_db['NAME'] if 'NAME' in super_db else 'tethys_super') super_parts = super_url.split('/') new_db_url = '{0}//{1}/{2}'.format(super_parts[0], super_parts[2], full_db_name) # Connect to new database new_db_engine = create_engine(new_db_url) new_db_connection = new_db_engine.connect() # Notify user self.stdout.write('Enabling PostGIS on database {2}"{0}"{3} for app {2}"{1}"{3}...'.format( persistent_store.name, app.package, TerminalColors.BLUE, TerminalColors.ENDC )) enable_postgis_statement = 'CREATE EXTENSION IF NOT EXISTS postgis' # Execute postgis statement new_db_connection.execute(enable_postgis_statement) new_db_connection.close() #------------------------------------------------------------------------------------------------------# # 4. Run initialization functions for each store here #------------------------------------------------------------------------------------------------------# for persistent_store in target_persistent_stores: if persistent_store.initializer_is_valid: initializer = persistent_store.initializer_function else: if ':' in persistent_store.initializer: print('DEPRECATION WARNING: The initializer attribute of a PersistentStore should now be in the form: "my_first_app.init_stores.init_spatial_db". The form "init_stores:init_spatial_db" is now deprecated.') # Split into module name and function name initializer_mod, initializer_function = persistent_store.initializer.split(':') # Pre-process initializer path initializer_path = '.'.join(('tethys_apps.tethysapp', app.package, initializer_mod)) try: # Import module module = __import__(initializer_path, fromlist=[initializer_function]) except __HOLE__: pass else: # Get the function initializer = getattr(module, initializer_function) try: if not initializer: raise ValueError('"{0}" is not a valid function.'.format(persistent_store.initializer)) except UnboundLocalError: raise ValueError('"{0}" is not a valid function.'.format(persistent_store.initializer)) self.stdout.write('Initializing database {3}"{0}"{4} for app {3}"{1}"{4} using initializer ' '{3}"{2}"{4}...'.format(persistent_store.name, app.package, initializer.__name__, TerminalColors.BLUE, TerminalColors.ENDC )) if options['first_time']: initializer(True) else: initializer(new_database)
ImportError
dataset/ETHPy150Open tethysplatform/tethys/tethys_apps/management/commands/syncstores.py/Command.provision_persistent_stores
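A sketch of the try/except/else import around the hole (module path hypothetical): a missing initializer module is swallowed here, and the later check on the still-unset initializer surfaces it as ValueError.

initializer = None
try:
    module = __import__('tethys_apps.tethysapp.my_app.init_stores',  # hypothetical path
                        fromlist=['init_db'])
except ImportError:
    pass  # leave initializer unset; validated just below
else:
    initializer = getattr(module, 'init_db')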
6,561
def is_waiting_to_run(self): if self.status != self.WAITING: LOG.info("Migration: {} has already run!".format(self)) return False inspect = control.inspect() scheduled_tasks = inspect.scheduled() try: hosts = scheduled_tasks.keys() except Exception as e: LOG.info("Could not retrieve celery scheduled tasks: {}".format(e)) return False for host in hosts: try: scheduled_tasks = scheduled_tasks[host] except __HOLE__: LOG.warn("There are no scheduled tasks") LOG.info(scheduled_tasks) continue for task in scheduled_tasks: if task['request']['id'] == self.celery_task_id: return True return False
TypeError
dataset/ETHPy150Open globocom/database-as-a-service/dbaas/region_migration/models.py/DatabaseRegionMigrationDetail.is_waiting_to_run
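A likely reading of why the hole is TypeError: the loop rebinds scheduled_tasks from the per-host dict to a task list on the first pass, so indexing it with the next host name fails. A self-contained sketch (worker names hypothetical):

scheduled_tasks = {'worker1@box': [], 'worker2@box': []}
for host in list(scheduled_tasks):
    try:
        scheduled_tasks = scheduled_tasks[host]  # rebinds the dict to a list
    except TypeError:
        print('There are no scheduled tasks')  # list indices must be integers, not str
        continue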
6,562
def run(self): from docutils.core import publish_cmdline from docutils.nodes import raw from docutils.parsers import rst from genshi.input import HTMLParser from genshi.template import TemplateLoader docutils_conf = os.path.join(TOOLS_DIR, 'conf', 'docutils.ini') epydoc_conf = os.path.join(TOOLS_DIR, 'conf', 'epydoc.ini') try: from pygments import highlight from pygments.lexers import get_lexer_by_name from pygments.formatters import HtmlFormatter def code_block(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): lexer = get_lexer_by_name(arguments[0]) html = highlight('\n'.join(content), lexer, HtmlFormatter()) return [raw('', html, format='html')] code_block.arguments = (1, 0, 0) code_block.options = {'language' : rst.directives.unchanged} code_block.content = 1 rst.directives.register_directive('code-block', code_block) except __HOLE__: print('Pygments not installed, syntax highlighting disabled') loader = TemplateLoader(['doc', 'doc/common'], variable_lookup='strict') for source in glob('doc/*.txt'): dest = os.path.splitext(source)[0] + '.html' if self.force or not os.path.exists(dest) or \ os.path.getmtime(dest) < os.path.getmtime(source): print('building documentation file %s' % dest) publish_cmdline(writer_name='html', argv=['--config=%s' % docutils_conf, source, dest]) fileobj = open(dest) try: html = HTMLParser(fileobj, encoding='utf-8') template = loader.load('template.html') output = template.generate( html=html, project=self.distribution ).render('html', encoding='utf-8') finally: fileobj.close() fileobj = open(dest, 'w') try: fileobj.write(output) finally: fileobj.close() if not self.without_apidocs: try: from epydoc import cli old_argv = sys.argv[1:] sys.argv[1:] = [ '--config=%s' % epydoc_conf, '--top=%s' % self.distribution.packages[0], '--no-private', # epydoc bug, not read from config '--simple-term', '--verbose' ] + self.distribution.packages cli.cli() sys.argv[1:] = old_argv except ImportError: print('epydoc not installed, skipping API documentation.')
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Babel-0.9.6/doc/common/doctools.py/build_doc.run
6,563
def get_theme(self, matcher_info): if not matcher_info or matcher_info not in self.local_themes: return self.theme match = self.local_themes[matcher_info] try: return match['theme'] except __HOLE__: match['theme'] = Theme( theme_config=match['config'], main_theme_config=self.theme_config, **self.theme_kwargs ) return match['theme']
KeyError
dataset/ETHPy150Open powerline/powerline/powerline/renderers/lemonbar.py/LemonbarRenderer.get_theme
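get_theme builds the local theme lazily and caches it on the match record, using try/except KeyError instead of a pre-check. The idiom in isolation (values hypothetical):

match = {'config': 'theme-config'}  # a local-theme record with no cached 'theme' yet
try:
    theme = match['theme']
except KeyError:
    match['theme'] = theme = 'built Theme'  # build once, reuse on later calls
print(theme)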
6,564
def run(self): # We cannot use os.waitpid because it works only for child processes. from errno import EINTR while True: try: if os.getppid() == 1: os._exit(1) time.sleep(1.0) except __HOLE__ as e: if e.errno == EINTR: continue raise
OSError
dataset/ETHPy150Open ipython/ipython-py3k/IPython/zmq/parentpoller.py/ParentPollerUnix.run
6,565
def __getattr__(self, name): frame = self._storage while frame: try: return getattr(frame, name) except __HOLE__: frame = frame._parent_storage #raise AttributeError(name) raise AttributeError("{} has no attribute {}".format( self.__class__.__name__, name))
AttributeError
dataset/ETHPy150Open SmartTeleMax/iktomi/iktomi/utils/storage.py/VersionedStorage.__getattr__
6,566
def init(names, host=None, saltcloud_mode=False, quiet=False, **kwargs): ''' Initialize a new container .. code-block:: bash salt-run lxc.init name host=minion_id [cpuset=cgroups_cpuset] \\ [cpushare=cgroups_cpushare] [memory=cgroups_memory] \\ [template=lxc_template_name] [clone=original name] \\ [profile=lxc_profile] [network_proflile=network_profile] \\ [nic=network_profile] [nic_opts=nic_opts] \\ [start=(true|false)] [seed=(true|false)] \\ [install=(true|false)] [config=minion_config] \\ [snapshot=(true|false)] names Name of the containers, supports a single name or a comma delimited list of names. host Minion on which to initialize the container **(required)** path path to the container parent default: /var/lib/lxc (system default) .. versionadded:: 2015.8.0 saltcloud_mode init the container with the saltcloud opts format instead See lxc.init_interface module documentation cpuset cgroups cpuset. cpushare cgroups cpu shares. memory cgroups memory limit, in MB .. versionchanged:: 2015.5.0 If no value is passed, no limit is set. In earlier Salt versions, not passing this value causes a 1024MB memory limit to be set, and it was necessary to pass ``memory=0`` to set no limit. template Name of LXC template on which to base this container clone Clone this container from an existing container profile A LXC profile (defined in config or pillar). network_profile Network profile to use for the container .. versionadded:: 2015.5.2 nic .. deprecated:: 2015.5.0 Use ``network_profile`` instead nic_opts Extra options for network interfaces. E.g.: ``{"eth0": {"mac": "aa:bb:cc:dd:ee:ff", "ipv4": "10.1.1.1", "ipv6": "2001:db8::ff00:42:8329"}}`` start Start the newly created container. seed Seed the container with the minion config and autosign its key. Default: true install If salt-minion is not already installed, install it. Default: true config Optional config parameters. By default, the id is set to the name of the container. ''' path = kwargs.get('path', None) if quiet: log.warning("'quiet' argument is being deprecated." ' Please migrate to --quiet') ret = {'comment': '', 'result': True} if host is None: # TODO: Support selection of host based on available memory/cpu/etc. 
ret['comment'] = 'A host must be provided' ret['result'] = False return ret if isinstance(names, six.string_types): names = names.split(',') if not isinstance(names, list): ret['comment'] = 'Container names are not formed as a list' ret['result'] = False return ret # check that the host is alive client = salt.client.get_local_client(__opts__['conf_file']) alive = False try: if client.cmd(host, 'test.ping', timeout=20).get(host, None): alive = True except (__HOLE__, KeyError): pass if not alive: ret['comment'] = 'Host {0} is not reachable'.format(host) ret['result'] = False return ret log.info('Searching for LXC Hosts') data = __salt__['lxc.list'](host, quiet=True, path=path) for host, containers in six.iteritems(data): for name in names: if name in sum(six.itervalues(containers), []): log.info('Container \'{0}\' already exists' ' on host \'{1}\',' ' init can be a NO-OP'.format( name, host)) if host not in data: ret['comment'] = 'Host \'{0}\' was not found'.format(host) ret['result'] = False return ret kw = salt.utils.clean_kwargs(**kwargs) pub_key = kw.get('pub_key', None) priv_key = kw.get('priv_key', None) explicit_auth = pub_key and priv_key approve_key = kw.get('approve_key', True) seeds = {} seed_arg = kwargs.get('seed', True) if approve_key and not explicit_auth: skey = salt.key.Key(__opts__) all_minions = skey.all_keys().get('minions', []) for name in names: seed = seed_arg if name in all_minions: try: if client.cmd(name, 'test.ping', timeout=20).get(name, None): seed = False except (TypeError, KeyError): pass seeds[name] = seed kv = salt.utils.virt.VirtKey(host, name, __opts__) if kv.authorize(): log.info('Container key will be preauthorized') else: ret['comment'] = 'Container key preauthorization failed' ret['result'] = False return ret log.info('Creating container(s) \'{0}\'' ' on host \'{1}\''.format(names, host)) cmds = [] for name in names: args = [name] kw = salt.utils.clean_kwargs(**kwargs) if saltcloud_mode: kw = copy.deepcopy(kw) kw['name'] = name saved_kwargs = {} kw = client.cmd( host, 'lxc.cloud_init_interface', args + [kw], expr_form='list', timeout=600).get(host, {}) name = kw.pop('name', name) # be sure not to seed an already seeded host kw['seed'] = seeds.get(name, seed_arg) if not kw['seed']: kw.pop('seed_cmd', '') kw.update(saved_kwargs) cmds.append( (host, name, client.cmd_iter(host, 'lxc.init', args, kwarg=kw, timeout=600))) done = ret.setdefault('done', []) errors = ret.setdefault('errors', _OrderedDict()) for ix, acmd in enumerate(cmds): hst, container_name, cmd = acmd containers = ret.setdefault(hst, []) herrs = errors.setdefault(hst, _OrderedDict()) serrs = herrs.setdefault(container_name, []) sub_ret = next(cmd) error = None if isinstance(sub_ret, dict) and host in sub_ret: j_ret = sub_ret[hst] container = j_ret.get('ret', {}) if container and isinstance(container, dict): if not container.get('result', False): error = container else: error = 'Invalid return for {0}: {1} {2}'.format( container_name, container, sub_ret) else: error = sub_ret if not error: error = 'unknown error (no return)' if error: ret['result'] = False serrs.append(error) else: container['container_name'] = name containers.append(container) done.append(container) # marking ping status as True only and only if we have at # least provisioned one container ret['ping_status'] = bool(len(done)) # for all provisioned containers, last job is to verify # - the key status # - we can reach them for container in done: # explicitly check and update # the minion key/pair stored on the master 
container_name = container['container_name'] key = os.path.join(__opts__['pki_dir'], 'minions', container_name) if explicit_auth: fcontent = '' if os.path.exists(key): with salt.utils.fopen(key) as fic: fcontent = fic.read().strip() if pub_key.strip() != fcontent: with salt.utils.fopen(key, 'w') as fic: fic.write(pub_key) fic.flush() mid = j_ret.get('mid', None) if not mid: continue def testping(**kw): mid_ = kw['mid'] ping = client.cmd(mid_, 'test.ping', timeout=20) time.sleep(1) if ping: return 'OK' raise Exception('Unresponsive {0}'.format(mid_)) ping = salt.utils.cloud.wait_for_fun(testping, timeout=21, mid=mid) if ping != 'OK': ret['ping_status'] = False ret['result'] = False # if no lxc detected as touched (either inited or verified) # we result to False if not done: ret['result'] = False if not quiet: __jid_event__.fire_event({'message': ret}, 'progress') return ret
TypeError
dataset/ETHPy150Open saltstack/salt/salt/runners/lxc.py/init
6,567
def get_filename_variant(file_name, resource_suffix_map): # Given a filename # Get a list of variant IDs, and the root file name file_name_parts = os.path.splitext(file_name) if file_name_parts[0] == '~': raise Exception('Cannot start a file name with a ~ character') split = file_name_parts[0].split("~") tags = split[1:] try: ids = [resource_suffix_map['~'+tag] for tag in tags] except __HOLE__ as key: raise ValueError('Unrecognised tag %s' % key) root_file_name = split[0] + file_name_parts[1] return ids, root_file_name
KeyError
dataset/ETHPy150Open pebble/cloudpebble/ide/tasks/archive.py/get_filename_variant
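Note the except KeyError as key rebinding: formatting a KeyError keeps the repr quotes around the key, so the message reads like Unrecognised tag '~round'. Sketch with a hypothetical suffix map:

resource_suffix_map = {'~bw': 1, '~color': 2}  # hypothetical tag ids
try:
    ids = [resource_suffix_map['~' + tag] for tag in ['bw', 'round']]
except KeyError as key:
    print('Unrecognised tag %s' % key)  # Unrecognised tag '~round'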
6,568
@task(acks_late=True) def do_import_archive(project_id, archive, delete_project=False): project = Project.objects.get(pk=project_id) try: with tempfile.NamedTemporaryFile(suffix='.zip') as archive_file: archive_file.write(archive) archive_file.flush() with zipfile.ZipFile(str(archive_file.name), 'r') as z: contents = z.infolist() # Requirements: # - Find the folder containing the project. This may or may not be at the root level. # - Read in the source files, resources and resource map. # Observations: # - Legal projects must keep their source in a directory called 'src' containing at least one *.c file. # - Legal projects must have a resource map at resources/src/resource_map.json # Strategy: # - Find the shortest common prefix for 'resources/src/resource_map.json' and 'src/'. # - This is taken to be the project directory. # - Import every file in 'src/' with the extension .c or .h as a source file # - Parse resource_map.json and import files it references MANIFEST = 'appinfo.json' SRC_DIR = 'src/' WORKER_SRC_DIR = 'worker_src/' RES_PATH = 'resources' if len(contents) > 400: raise Exception("Too many files in zip file.") file_list = [x.filename for x in contents] base_dir = find_project_root(file_list) dir_end = len(base_dir) def make_valid_filename(zip_entry): entry_filename = zip_entry.filename if entry_filename[:dir_end] != base_dir: return False entry_filename = entry_filename[dir_end:] if entry_filename == '': return False if not os.path.normpath('/SENTINEL_DO_NOT_ACTUALLY_USE_THIS_NAME/%s' % entry_filename).startswith('/SENTINEL_DO_NOT_ACTUALLY_USE_THIS_NAME/'): raise SuspiciousOperation("Invalid zip file contents.") if zip_entry.file_size > 5242880: # 5 MB raise Exception("Excessively large compressed file.") return entry_filename # Now iterate over the things we found with transaction.atomic(): for entry in contents: filename = make_valid_filename(entry) if not filename: continue if filename == MANIFEST: # We have a resource map! We can now try importing things from it. with z.open(entry) as f: m = json.loads(f.read()) project.app_uuid = m['uuid'] project.app_short_name = m['shortName'] project.app_long_name = m['longName'] project.app_company_name = m['companyName'] project.app_version_label = m['versionLabel'] project.sdk_version = m.get('sdkVersion', '2') project.app_is_watchface = m.get('watchapp', {}).get('watchface', False) project.app_is_hidden = m.get('watchapp', {}).get('hiddenApp', False) project.app_is_shown_on_communication = m.get('watchapp', {}).get('onlyShownOnCommunication', False) project.app_capabilities = ','.join(m.get('capabilities', [])) project.app_modern_multi_js = m.get('enableMultiJS', False) if 'targetPlatforms' in m: project.app_platforms = ','.join(m['targetPlatforms']) project.app_keys = dict_to_pretty_json(m.get('appKeys', {})) project.project_type = m.get('projectType', 'native') if project.project_type not in [x[0] for x in Project.PROJECT_TYPES]: raise Exception("Illegal project type %s" % project.project_type) media_map = m['resources']['media'] tag_map = {v: k for k, v in ResourceVariant.VARIANT_STRINGS.iteritems() if v} desired_resources = {} resources_files = {} resource_variants = {} file_exists_for_root = {} # Go through the media map and look for resources for resource in media_map: file_name = resource['file'] identifier = resource['name'] # Pebble.js and simply.js both have some internal resources that we don't import. 
if project.project_type in {'pebblejs', 'simplyjs'}: if identifier in {'MONO_FONT_14', 'IMAGE_MENU_ICON', 'IMAGE_LOGO_SPLASH', 'IMAGE_TILE_SPLASH'}: continue tags, root_file_name = get_filename_variant(file_name, tag_map) if (len(tags) != 0): raise ValueError("Generic resource filenames cannot contain a tilde (~)") if file_name not in desired_resources: desired_resources[root_file_name] = [] print "Desired resource: %s" % root_file_name desired_resources[root_file_name].append(resource) file_exists_for_root[root_file_name] = False for zipitem in contents: # Let's just try opening the file filename = make_valid_filename(zipitem) if filename is False or not filename.startswith(RES_PATH): continue filename = filename[len(RES_PATH)+1:] try: extracted = z.open("%s%s/%s"%(base_dir, RES_PATH, filename)) except __HOLE__: print "Failed to open %s" % filename continue # Now we know the file exists and is in the resource directory - is it one we want? tags, root_file_name = get_filename_variant(filename, tag_map) tags_string = ",".join(str(int(t)) for t in tags) print "Importing file %s with root %s " % (zipitem.filename, root_file_name) if root_file_name in desired_resources: medias = desired_resources[root_file_name] print "Looking for variants of %s" % root_file_name # Because 'kind' and 'is_menu_icons' are properties of ResourceFile in the database, # we just use the first one. resource = medias[0] # Make only one resource file per base resource. if root_file_name not in resources_files: kind = resource['type'] is_menu_icon = resource.get('menuIcon', False) resources_files[root_file_name] = ResourceFile.objects.create( project=project, file_name=os.path.basename(root_file_name), kind=kind, is_menu_icon=is_menu_icon) # But add a resource variant for every file print "Adding variant %s with tags [%s]" % (root_file_name, tags_string) actual_file_name = resource['file'] resource_variants[actual_file_name] = ResourceVariant.objects.create(resource_file=resources_files[root_file_name], tags=tags_string) resource_variants[actual_file_name].save_file(extracted) file_exists_for_root[root_file_name] = True # Now add all the resource identifiers for root_file_name in desired_resources: for resource in desired_resources[root_file_name]: target_platforms = json.dumps(resource['targetPlatforms']) if 'targetPlatforms' in resource else None ResourceIdentifier.objects.create( resource_file=resources_files[root_file_name], resource_id=resource['name'], target_platforms=target_platforms, # Font options character_regex=resource.get('characterRegex', None), tracking=resource.get('trackingAdjust', None), compatibility=resource.get('compatibility', None), # Bitmap options memory_format=resource.get('memoryFormat', None), storage_format=resource.get('storageFormat', None), space_optimisation=resource.get('spaceOptimization', None) ) # Check that at least one variant of each specified resource exists. 
for root_file_name, loaded in file_exists_for_root.iteritems(): if not loaded: raise KeyError("No file was found to satisfy the manifest filename: {}".format(root_file_name)) elif filename.startswith(SRC_DIR): if (not filename.startswith('.')) and (filename.endswith('.c') or filename.endswith('.h') or filename.endswith('.js')): base_filename = filename[len(SRC_DIR):] if project.app_modern_multi_js and filename.endswith('.js') and filename.startswith('js/'): base_filename = base_filename[len('js/'):] source = SourceFile.objects.create(project=project, file_name=base_filename) with z.open(entry.filename) as f: source.save_file(f.read().decode('utf-8')) elif filename.startswith(WORKER_SRC_DIR): if (not filename.startswith('.')) and (filename.endswith('.c') or filename.endswith('.h') or filename.endswith('.js')): base_filename = filename[len(WORKER_SRC_DIR):] source = SourceFile.objects.create(project=project, file_name=base_filename, target='worker') with z.open(entry.filename) as f: source.save_file(f.read().decode('utf-8')) project.save() send_td_event('cloudpebble_zip_import_succeeded', project=project) # At this point we're supposed to have successfully created the project. return True except Exception as e: if delete_project: try: Project.objects.get(pk=project_id).delete() except: pass send_td_event('cloudpebble_zip_import_failed', data={ 'data': { 'reason': e.message } }, user=project.owner) raise
KeyError
dataset/ETHPy150Open pebble/cloudpebble/ide/tasks/archive.py/do_import_archive
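The hole sits on ZipFile.open(), which raises KeyError (not IOError) when the archive has no member by that name; the importer logs the name and skips it. A self-contained demonstration:

import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as z:
    z.writestr('resources/logo.png', b'png-bytes')  # hypothetical member
with zipfile.ZipFile(buf) as z:
    try:
        z.open('resources/missing~color.png')
    except KeyError:
        print('Failed to open resources/missing~color.png')  # skipped, as above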
6,569
def _parseLabelSpec(label_spec): if not ':' in label_spec: raise error.TopologyError('Invalid label description: %s' % label_spec) label_type_alias, label_range = label_spec.split(':', 1) try: label_type = LABEL_TYPES[label_type_alias] except __HOLE__: raise error.TopologyError('Label type %s does not map to proper label.' % label_type_alias) return nsa.Label(label_type, label_range) # range is parsed in nsa.Label
KeyError
dataset/ETHPy150Open NORDUnet/opennsa/opennsa/topology/nrm.py/_parseLabelSpec
6,570
def parsePortSpec(source): # Parse the entries like the following: ## type name remote label bandwidth interface authorization # #ethernet ps - vlan:1780-1783 1000 em0 [email protected] #ethernet netherlight netherlight#nordunet-(in|out) vlan:1780-1783 1000 em1 - #ethernet uvalight uvalight#nordunet-(in|out) vlan:1780-1783 1000 em2 nsa=aruba.net:nsa # Line starting with # and blank lines should be ignored assert isinstance(source, file) or isinstance(source, StringIO.StringIO), 'Topology source must be file or StringIO instance' nrm_ports = [] for line in source: line = line.strip() if not line or line.startswith('#'): continue tokens = [ t for t in line.split(' ') if t != '' ] if len(tokens) != 7: raise NRMSpecificationError('Invalid number of entries for entry: %s' % line) port_type, port_name, remote_spec, label_spec, bandwidth, interface, authz_spec = tokens if not port_type in PORT_TYPES: raise error.TopologyError('Port type %s is not a valid port type' % port_type) remote_network, remote_port, in_suffix, out_suffix = _parseRemoteSpec(remote_spec) label = _parseLabelSpec(label_spec) try: bandwidth = int(bandwidth) except __HOLE__ as e: raise NRMSpecificationError('Invalid bandwidth: %s' % str(e)) if port_type == cnt.NRM_ETHERNET: if remote_network is None: remote_port = None remote_in = None remote_out = None else: if not in_suffix or not out_suffix: raise NRMSpecificationError('Suffix not defined for bidirectional port %s' % port_name) remote_port = remote_network + ':' + remote_port remote_in = remote_port + in_suffix remote_out = remote_port + out_suffix else: raise AssertionError('do not know what to with port of type %s' % port_type) # these are more than auth attributes, but thats what they where for initially authz_attributes = [] link_vectors = {} transit_restricted = False if authz_spec != '-': for aa in authz_spec.split(','): if '=' in aa: ak, av = aa.split('=',2) if ak in authz.AUTH_ATTRIBUTES: # warn about bad authz if ak in authz.HEADER_ATTRIBUTES: log.msg("WARNING: Port %s: Using header attribute %s as authorization isn't really secure. Be careful." % (port_name, ak) ) authz_attributes.append( authz.AuthorizationAttribute(ak, av) ) elif ak in PATH_ATTRIBUTES: if not '@' in av: raise config.ConfigurationError('Invalid path value: %s' % av) network, weight = av.split('@', 1) link_vectors[network] = int(weight) else: raise config.ConfigurationError('Invalid attribute: %s' % aa) elif aa in ATTRIBUTES and aa == cnt.NRM_RESTRICTTRANSIT: transit_restricted = True else: raise config.ConfigurationError('Invalid attribute: %s' % aa) nrm_ports.append( NRMPort(port_type, port_name, remote_network, remote_port, remote_in, remote_out, label, bandwidth, interface, authz_attributes, link_vectors, transit_restricted) ) return nrm_ports
ValueError
dataset/ETHPy150Open NORDUnet/opennsa/opennsa/topology/nrm.py/parsePortSpec
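int() does the bandwidth validation, and the caught ValueError's own message is folded into the NRMSpecificationError. In isolation (entry hypothetical):

try:
    bandwidth = int('1000M')  # an NRM line with a unit suffix is rejected
except ValueError as e:
    print('Invalid bandwidth: %s' % str(e))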
6,571
def __call__(self, doc, context=None): items = _evaluate_items_expression(self._items_expression, doc, context) # all items should be iterable, if not return empty list for item in items: if not isinstance(item, list): return [] try: return(list(itertools.chain(*items))) except __HOLE__: return []
TypeError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/userreports/expressions/list_specs.py/FlattenExpressionSpec.__call__
6,572
def __call__(self, doc, context=None): items = _evaluate_items_expression(self._items_expression, doc, context) try: return sorted( items, key=lambda i: self._sort_expression(i, context), reverse=True if self.order == self.DESC else False ) except __HOLE__: return []
TypeError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/userreports/expressions/list_specs.py/SortItemsExpressionSpec.__call__
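sorted() applies the key expression to every item eagerly, so one malformed item aborts the whole sort with TypeError and the spec falls back to an empty list. Sketch:

items = [{'prop': 2}, None, {'prop': 1}]  # one malformed item
try:
    ordered = sorted(items, key=lambda i: i['prop'])
except TypeError:
    ordered = []  # subscripting None raises TypeError
print(ordered)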
6,573
def validate_int_range(parsed_args, attr_name, min_value=None, max_value=None): val = getattr(parsed_args, attr_name, None) if val is None: return try: if not isinstance(val, int): int_val = int(val, 0) else: int_val = val if ((min_value is None or min_value <= int_val) and (max_value is None or int_val <= max_value)): return except (ValueError, __HOLE__): pass if min_value is not None and max_value is not None: msg = (_('%(attr_name)s "%(val)s" should be an integer ' '[%(min)i:%(max)i].') % {'attr_name': attr_name.replace('_', '-'), 'val': val, 'min': min_value, 'max': max_value}) elif min_value is not None: msg = (_('%(attr_name)s "%(val)s" should be an integer ' 'greater than or equal to %(min)i.') % {'attr_name': attr_name.replace('_', '-'), 'val': val, 'min': min_value}) elif max_value is not None: msg = (_('%(attr_name)s "%(val)s" should be an integer ' 'smaller than or equal to %(max)i.') % {'attr_name': attr_name.replace('_', '-'), 'val': val, 'max': max_value}) else: msg = (_('%(attr_name)s "%(val)s" should be an integer.') % {'attr_name': attr_name.replace('_', '-'), 'val': val}) raise exceptions.CommandError(msg)
TypeError
dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/common/validators.py/validate_int_range
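int(val, 0) accepts decimal, hex, octal and binary strings, but a non-string with an explicit base raises TypeError rather than ValueError, which is why both are caught. A quick tour:

for val in ('0x10', 'ten', 3.5):
    try:
        print(int(val, 0))
    except (ValueError, TypeError) as exc:
        print('rejected:', exc)  # 'ten' -> ValueError, 3.5 -> TypeError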
6,574
def validate_ip_subnet(parsed_args, attr_name): val = getattr(parsed_args, attr_name) if not val: return try: netaddr.IPNetwork(val) except (netaddr.AddrFormatError, __HOLE__): raise exceptions.CommandError( (_('%(attr_name)s "%(val)s" is not a valid CIDR.') % {'attr_name': attr_name.replace('_', '-'), 'val': val}))
ValueError
dataset/ETHPy150Open openstack/python-neutronclient/neutronclient/common/validators.py/validate_ip_subnet
6,575
def OnData(self, x, y, default_drag_result): """ Called when OnDrop returns True. """ # First, if we have a source in the clipboard and the source # doesn't allow moves then change the default to copy if clipboard.drop_source is not None and \ not clipboard.drop_source.allow_move: default_drag_result = wx.DragCopy elif clipboard.drop_source is None: # This means we might be receiving a file; try to import # the right packages to nicely handle a file drop. If those # packages can't be imported, then just pass through. if self.GetData(): try: from apptools.io import File from apptools.naming.api import Binding names = self.file_data.GetFilenames() files = [] bindings = [] for name in names: f = File(name) files.append(f) bindings.append(Binding(name = name, obj = f)) clipboard.data = files clipboard.node = bindings except __HOLE__: pass # Pass the object on the clipboard it to the handler. # # fixme: We allow 'wx_dropped_on' and 'on_drop' because both Dave # and Martin want different things! Unify! if hasattr(self.handler, 'wx_dropped_on'): drag_result = self.handler.wx_dropped_on( x, y, clipboard.data, default_drag_result ) elif hasattr(self.handler, 'on_drop'): drag_result = self.handler.on_drop( x, y, clipboard.data, default_drag_result ) else: self.handler(x, y, clipboard.data) drag_result = default_drag_result # Let the source of the drag/drop know that the operation is complete. drop_source = clipboard.drop_source if drop_source is not None: drop_source.on_dropped(drag_result) # Clean out the drop source! clipboard.drop_source = None # The return value tells the source what to do with the original data # (move, copy, etc.). In this case we just return the suggested value # given to us. return default_drag_result # Some virtual methods that track the progress of the drag.
ImportError
dataset/ETHPy150Open enthought/pyface/pyface/wx/drag_and_drop.py/PythonDropTarget.OnData
6,576
def test_no_state_var_err(self): try: self.prob.setup(check=False) except __HOLE__ as err: self.assertEqual(str(err), "'state_var' option in Brent solver of root must be specified") else: self.fail('ValueError Expected')
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/solvers/test/test_brent_solver.py/TestBrentSolver.test_no_state_var_err
6,577
def test_data_pass_bounds(self): p = Problem() p.root = Group() p.root.add('lower', ExecComp('low = 2*a'), promotes=['low', 'a']) p.root.add('upper', ExecComp('high = 2*b'), promotes=['high', 'b']) sub = p.root.add('sub', Group(), promotes=['x','low', 'high']) sub.add('comp', CompTest(), promotes=['a','x','n','b','c']) sub.add('dummy1', ExecComp('d=low'), promotes=['low']) sub.add('dummy2', ExecComp('d=high'), promotes=['high']) sub.nl_solver = Brent() sub.nl_solver.options['state_var'] = 'x' # sub.nl_solver.options['lower_bound'] = -10. # sub.nl_solver.options['upper_bound'] = 110. sub.nl_solver.options['var_lower_bound'] = 'flow' # bad value for testing error sub.nl_solver.options['var_upper_bound'] = 'high' try: p.setup(check=False) except __HOLE__ as err: self.assertEqual(str(err), "'var_lower_bound' variable 'flow' was not found as a parameter on any component in sub") else: self.fail('ValueError expected') sub.ln_solver=ScipyGMRES() sub.nl_solver.options['var_lower_bound'] = 'low' # correct value p.setup(check=False) p['a'] = -5. p['b'] = 55. p.run() assert_rel_error(self, p.root.unknowns['x'], 110, .0001)
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/solvers/test/test_brent_solver.py/TestBrentSolver.test_data_pass_bounds
6,578
def is_valid_javascript_identifier(identifier, escape=r'\u', ucd_cat=category): """Return whether the given ``id`` is a valid Javascript identifier.""" if not identifier: return False if not isinstance(identifier, unicode): try: identifier = unicode(identifier, 'utf-8') except __HOLE__: return False if escape in identifier: new = []; add_char = new.append split_id = identifier.split(escape) add_char(split_id.pop(0)) for segment in split_id: if len(segment) < 4: return False try: add_char(unichr(int('0x' + segment[:4], 16))) except Exception: return False add_char(segment[4:]) identifier = u''.join(new) if is_reserved_js_word(identifier): return False first_char = identifier[0] if not ((first_char in valid_jsid_chars) or (ucd_cat(first_char) in valid_jsid_categories_start)): return False for char in identifier[1:]: if not ((char in valid_jsid_chars) or (ucd_cat(char) in valid_jsid_categories)): return False return True
UnicodeDecodeError
dataset/ETHPy150Open dgraziotin/dycapo/piston/validate_jsonp.py/is_valid_javascript_identifier
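The Python 2 call unicode(identifier, 'utf-8') decodes a byte string and fails on invalid UTF-8; the validator treats that as not-an-identifier. The Python 3 equivalent of the failure:

try:
    identifier = b'\xff\xfe'.decode('utf-8')  # bytes that are not valid UTF-8
except UnicodeDecodeError:
    identifier = None  # is_valid_javascript_identifier returns False here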
6,579
def Node(*args): kind = args[0] if nodes.has_key(kind): try: return nodes[kind](*args[1:]) except __HOLE__: print nodes[kind], len(args), args raise else: raise WalkerError, "Can't find appropriate Node type: %s" % str(args) #return apply(ast.Node, args)
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/compiler/transformer.py/Node
6,580
def __deepcopy__(self, memo): """Method used by copy.deepcopy(). This also uses the state_pickler to work correctly. """ # Create a new instance. new = self.__class__() # If we have a saved state, use it for the new instance. If # not, get our state and save that. saved_state = self._saved_state if len(saved_state) == 0: state = state_pickler.get_state(self) #FIXME: This is for streamline seed point widget position which #does not get serialized correctly if not is_old_pipeline(): try: st = state.children[0].children[4] l_pos = st.seed.widget.position st.seed.widget.position = [pos.item() for pos in l_pos] except (__HOLE__, AttributeError): pass saved_state = pickle.dumps(state) new._saved_state = saved_state # In the unlikely case that a new instance is running, load # the saved state. if new.running: new._load_saved_state() return new ###################################################################### # `Base` interface ######################################################################
IndexError
dataset/ETHPy150Open enthought/mayavi/mayavi/core/base.py/Base.__deepcopy__
6,581
def _load_view_non_cached(self, name, view_element): """ Loads the view by execing a file. Useful when tweaking views. """ result = {} view_filename = self._view_filename try: exec(compile( open(view_filename).read(), view_filename, 'exec'), {}, result ) view = result['view'] except __HOLE__: logger.debug("No view found for [%s] in [%s]. " "Using the base class trait_view instead.", self, view_filename) view = super(Base, self).trait_view(name, view_element) return view
IOError
dataset/ETHPy150Open enthought/mayavi/mayavi/core/base.py/Base._load_view_non_cached
6,582
def run(): setup_logger() logger.info('Started') event_handler = EventHandler() observer = Observer(timeout=0.1) observer.event_queue.maxsize = EVENT_QUEUE_MAX_SIZE try: delete_all_files(FRAMES_PATH) observer.schedule(event_handler, path=FRAMES_PATH, recursive=True) observer.start() while True: time.sleep(1) now = datetime.datetime.now() if now - event_handler.last_event > datetime.timedelta(minutes=1): logger.warning("No events received in the last minute.") # Sometimes watchdog stops receiving events. # We exit, so the process can be restarted. break except __HOLE__ as err: logger.warning("Keyboard interruption") except Exception as err: logger.exception(err) finally: observer.stop() observer.join() logger.warning("Bye")
KeyboardInterrupt
dataset/ETHPy150Open jbochi/live_thumb/broadcaster.py/run
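KeyboardInterrupt derives from BaseException, not Exception, so the broad except Exception in the snippet would never see Ctrl-C; it has to be caught explicitly and first. A compact demonstration:

try:
    raise KeyboardInterrupt  # stand-in for Ctrl-C during the sleep loop
except Exception:
    print('unreached: KeyboardInterrupt is not an Exception subclass')
except KeyboardInterrupt:
    print('Keyboard interruption')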
6,583
def get(self, service_id, bay_ident): try: return self._list(self._path(service_id, bay_ident))[0] except __HOLE__: return None
IndexError
dataset/ETHPy150Open openstack/python-magnumclient/magnumclient/v1/services.py/ServiceManager.get
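The [0] subscript doubles as the existence check: an empty listing raises IndexError, which get() maps to None. In isolation:

results = []  # no service matched
try:
    first = results[0]
except IndexError:
    first = None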
6,584
def parse(self, ofx): try: for line in ofx.splitlines(): if line.strip() == "": break header, value = line.split(":") self.headers[header] = value except __HOLE__: pass except: raise finally: if "OFXHEADER" not in self.headers: self.headers["OFXHEADER"] = "100" if "VERSION" not in self.headers: self.headers["VERSION"] = "102" if "SECURITY" not in self.headers: self.headers["SECURITY"] = "NONE" if "OLDFILEUID" not in self.headers: self.headers["OLDFILEUID"] = "NONE" if "NEWFILEUID" not in self.headers: self.headers["NEWFILEUID"] = "NONE" try: tags = ofx.split("<") if len(tags) > 1: tags = ["<" + t.strip() for t in tags[1:]] heirarchy = [] can_open = True for i, tag in enumerate(tags): gt = tag.index(">") if tag[1] != "/": # Is an opening tag if not can_open: tags[i - 1] = tags[i - 1] + "</" + \ heirarchy.pop() + ">" can_open = True tag_name = tag[1:gt].split()[0] heirarchy.append(tag_name) if len(tag) > gt + 1: can_open = False else: # Is a closing tag tag_name = tag[2:gt].split()[0] if tag_name not in heirarchy: # Close tag with no matching open, so delete it tags[i] = tag[gt + 1:] else: # Close tag with matching open, but other open # tags that need to be closed first while(tag_name != heirarchy[-1]): tags[i - 1] = tags[i - 1] + "</" + \ heirarchy.pop() + ">" can_open = True heirarchy.pop() self.xml = ET.fromstringlist(tags) self.load_from_xml(self, self.xml) except: raise InvalidOFXStructureException
ValueError
dataset/ETHPy150Open jseutter/ofxparse/ofxparse/ofxutil.py/OfxUtil.parse
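Header parsing leans on tuple unpacking: a line without a colon splits into a single piece, binding it to two names raises ValueError, and the parser stops reading headers and falls back to the defaults. Sketch (line hypothetical):

line = 'OFXHEADER 100'  # malformed header line without a colon
try:
    header, value = line.split(':')
except ValueError:
    pass  # end of headers; defaults are filled in afterwards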
6,585
def start_config_thread(self, filename, section=None, refresh_config_seconds=10): """ Start a daemon thread to reload the given config file and section periodically. Load the config once before returning. This function must be called at most once. """ assert not self._loading_thread_started, "start_config_thread called twice!" self._update_from_config(filename, section=section) def refresh_config_loop(): while True: time.sleep(refresh_config_seconds) try: self._update_from_config(filename, section=section) stats.set_gauge("successful-config-update", 1) except (__HOLE__, yaml.parser.ParserError): stats.set_gauge("successful-config-update", 0) thread = threading.Thread(target=refresh_config_loop) thread.daemon = True thread.start() self._loading_thread_started = True
IOError
dataset/ETHPy150Open dropbox/grouper/grouper/settings.py/Settings.start_config_thread
6,586
def __getattr__(self, name): with self.lock: try: return self.settings[name] except __HOLE__ as err: raise AttributeError(err)
KeyError
dataset/ETHPy150Open dropbox/grouper/grouper/settings.py/Settings.__getattr__
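Re-raising the KeyError as AttributeError keeps the dict-backed settings compatible with attribute access, so getattr() and hasattr() behave as callers expect. The idiom by itself (key hypothetical):

def setting(settings, name):
    try:
        return settings[name]
    except KeyError as err:
        raise AttributeError(err)

print(setting({'log_format': 'json'}, 'log_format'))  # hypothetical settings dict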
6,587
def domains_to_metadata(domains): '''Construct a metadata dict out of the domains dict. The domains dict has the following form: keys: variable names from a factor graph vals: list of possible values the variable can have The metadata dict has the following form: keys: (same as above) vals: A string representing the sqlite data type (i.e 'integer' for bool and 'varchar' for str)''' metadata = dict() for k, v in domains.items(): # Assume that all values in the domain # are of the same type. TODO: verify this! try: metadata[k.name] = P2S_MAPPING[type(v[0])] except __HOLE__: print k, v raise UnsupportedTypeException return metadata
KeyError
dataset/ETHPy150Open eBay/bayesian-belief-networks/bayesian/persistance.py/domains_to_metadata
6,588
def build_row_factory(conn): ''' Introspect the samples table to build the row_factory function. We will assume that numeric values are Boolean and all other values are Strings. Should we encounter a numeric value not in (0, 1) we will raise an error. ''' cur = conn.cursor() cur.execute("pragma table_info('samples')") cols = cur.fetchall() column_metadata = dict([(col[1], col[2]) for col in cols]) def row_factory(cursor, row): row_dict = dict() for idx, desc in enumerate(cursor.description): col_name = desc[0] col_val = row[idx] try: row_dict[col_name] = \ S2P_MAPPING[column_metadata[col_name]](col_val) except __HOLE__: raise UnsupportedTypeException( 'A column in the SQLite samples ' 'database has an unsupported type. ' 'Supported types are %s. ' % str(S2P_MAPPING.keys())) return row_dict return row_factory
KeyError
dataset/ETHPy150Open eBay/bayesian-belief-networks/bayesian/persistance.py/build_row_factory
6,589
@classmethod def resolve_contents(cls, contents, env): """Resolve bundle names.""" result = [] for f in contents: try: result.append(env[f]) except __HOLE__: result.append(f) return result
KeyError
dataset/ETHPy150Open miracle2k/webassets/src/webassets/ext/jinja2.py/AssetsExtension.resolve_contents
6,590
def main(argv=None):

    parser = E.OptionParser(
        version="%prog version: $Id: quality2masks.py 2781 2009-09-10 11:33:14Z andreas $", usage=globals()["__doc__"])

    parser.add_option("--quality-threshold", dest="quality_threshold", type="int",
                      help="quality threshold for masking positions [default=%default]")

    parser.add_option("--random", dest="random", action="store_true",
                      help="shuffle quality scores before masking [default=%default]")

    parser.add_option("--map-tsv-file", dest="filename_map", type="string",
                      help="filename in psl format mapping entries in multiple alignment to the genome [default=%default]")

    parser.add_option("-q", "--quality-file", dest="quality_file", type="string",
                      help="filename with genomic base quality information [default=%default].")

    parser.set_defaults(
        quality_threshold=40,
        quality_file="quality",
        filename_map=None,
        frame=3,
    )

    (options, args) = E.Start(parser)

    ##################################################
    ##################################################
    ##################################################
    # read map
    ##################################################
    infile = open(options.filename_map)
    map_genes2genome = {}
    for match in Blat.iterator(infile):
        assert match.mQueryId not in map_genes2genome, "duplicate entry %s" % match.mQueryId
        map_genes2genome[match.mQueryId] = match
    infile.close()

    ##################################################
    ##################################################
    ##################################################
    # get quality scores
    ##################################################
    quality = IndexedFasta.IndexedFasta(options.quality_file)
    quality.setTranslator(IndexedFasta.TranslatorBytes())

    ##################################################
    ##################################################
    ##################################################
    # main loop
    ##################################################
    ninput, noutput, nmissed = 0, 0, 0

    options.stdout.write("cluster_id\tstart\tend\n")

    for line in options.stdin:
        if line.startswith("cluster_id"):
            continue

        ninput += 1
        cluster_id, gene_id, alignment = line[:-1].split("\t")

        if gene_id not in map_genes2genome:
            nmissed += 1
            E.warn("gene_id %s not found in map." % gene_id)
            continue

        match = map_genes2genome[gene_id]
        map_gene2genome = match.getMapQuery2Target()
        is_negative = match.strand == "-"

        # if strand is negative, the coordinates are
        # on the negative strand of the gene/query
        # in order to work in the right coordinate system
        # revert the sequence
        if is_negative:
            alignment = alignment[::-1]

        # get map of gene to alignment
        map_gene2mali = alignlib_lite.py_makeAlignmentVector()
        fillAlignment(map_gene2mali, alignment)

        # get quality scores
        try:
            quality_scores = quality.getSequence(
                match.mSbjctId, "+", match.mSbjctFrom, match.mSbjctTo)
        except __HOLE__, msg:
            nmissed += 1
            E.warn("could not retrieve quality scores for %s:%i-%i: %s" %
                   (match.mSbjctId, match.mSbjctFrom, match.mSbjctTo, msg))
            continue

        # print str(alignlib_lite.py_AlignmentFormatEmissions( map_gene2genome))
        # print str(alignlib_lite.py_AlignmentFormatEmissions( map_gene2mali))
        # print quality_scores

        map_mali2genome = alignlib_lite.py_makeAlignmentVector()
        alignlib_lite.py_combineAlignment(
            map_mali2genome, map_gene2mali, map_gene2genome, alignlib_lite.py_RR)
        # print str(alignlib_lite.py_AlignmentFormatEmissions(
        # map_mali2genome))

        # shuffle quality scores, but only those that are aligned
        if options.random:
            positions = []
            for fp, c in enumerate(alignment):
                if c == "-":
                    continue
                y = map_mali2genome.mapRowToCol(fp) - match.mSbjctFrom
                if y < 0:
                    continue
                positions.append(y)
            scores = [quality_scores[x] for x in positions]
            random.shuffle(scores)
            for p, q in zip(positions, scores):
                quality_scores[p] = q

        # negative strand
        to_mask = []
        # reverse position
        rp = len(alignment)
        for fp, c in enumerate(alignment):
            rp -= 1
            if c == "-":
                continue
            y = map_mali2genome.mapRowToCol(fp) - match.mSbjctFrom
            if y < 0:
                continue

            if quality_scores[y] < options.quality_threshold:
                if is_negative:
                    p = rp
                else:
                    p = fp

                E.debug("low quality base: id=%s, mali=%i, char=%s, contig=%s, strand=%s, pos=%i, quality=%i" %
                        (cluster_id, p, c, match.mSbjctId, match.strand, map_mali2genome.mapRowToCol(fp), quality_scores[y]))

                if options.frame > 1:
                    start = (p // options.frame) * options.frame
                    to_mask.extend(list(range(start, start + options.frame)))
                else:
                    to_mask.append(p)

        regions = Iterators.group_by_distance(sorted(to_mask))

        for start, end in regions:
            options.stdout.write("%s\t%i\t%i\n" % (cluster_id, start, end))

        noutput += 1

    E.info("ninput=%i, noutput=%i, nmissed=%i" % (ninput, noutput, nmissed))

    E.Stop()
ValueError
dataset/ETHPy150Open CGATOxford/cgat/scripts/quality2masks.py/main
6,591
def test_works_with_unconfigured_configuration(self): try: # reset class level attributes on Configuration set in test helper imp.reload(braintree.configuration) config = Configuration( environment=braintree.Environment.Sandbox, merchant_id='my_merchant_id', public_key='public_key', private_key='private_key' ) config.http_strategy() except __HOLE__ as e: print(e) self.assertTrue(False) finally: # repopulate class level attributes on Configuration import tests.test_helper imp.reload(tests.test_helper)
AttributeError
dataset/ETHPy150Open braintree/braintree_python/tests/unit/test_configuration.py/TestConfiguration.test_works_with_unconfigured_configuration
6,592
def shrink_case(case): toks = case.split("-") def shrink_if_number(x): try: cvt = int(x) return str(cvt) except __HOLE__: return x return "-".join([shrink_if_number(t) for t in toks])
ValueError
dataset/ETHPy150Open woshialex/diagnose-heart/CNN_A/preprocess.py/shrink_case
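A quick worked example: the int() round-trip is what strips leading zeros, and the ValueError branch lets non-numeric tokens through untouched:

.. code-block:: python

    print(shrink_case('IM-0001-0010'))  # -> 'IM-1-10'
    print(shrink_case('sax-5-100'))     # -> 'sax-5-100' (already minimal)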
6,593
def publish_display_data(data, source='bokeh'): ''' Compatibility wrapper for IPython ``publish_display_data`` Later versions of IPython remove the ``source`` (first) argument. This function insulates Bokeh library code from this change. Args: source (str, optional) : the source arg for IPython (default: "bokeh") data (dict) : the data dict to pass to ``publish_display_data`` Typically has the form ``{'text/html': html}`` ''' import IPython.core.displaypub as displaypub try: displaypub.publish_display_data(source, data) except __HOLE__: displaypub.publish_display_data(data)
TypeError
dataset/ETHPy150Open bokeh/bokeh/bokeh/util/notebook.py/publish_display_data
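A typical call, sketched under the assumption that an IPython kernel is active (outside a kernel the display message has nowhere to go):

.. code-block:: python

    # The old IPython signature publish_display_data(source, data) is tried
    # first; newer IPython raises TypeError on it, which triggers the fallback.
    publish_display_data({'text/html': '<b>rendered in the notebook</b>'})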
6,594
def api_request(host, url, data=None, method=None): if data: method = method or 'POST' else: method = method or 'GET' if ssl is False: msg.warn('Error importing ssl. Using system python...') return proxy_api_request(host, url, data, method) try: r = hit_url(host, url, data, method) except __HOLE__ as e: r = e except URLError as e: msg.warn('Error hitting url ', url, ': ', e) r = e if not PY2: msg.warn('Retrying using system python...') return proxy_api_request(host, url, data, method) return APIResponse(r)
HTTPError
dataset/ETHPy150Open Floobits/floobits-vim/plugin/floo/common/api.py/api_request
6,595
def get_mysql_credentials(cfg_file): """Get the credentials and database name from options in config file.""" try: parser = ConfigParser.ConfigParser() cfg_fp = open(cfg_file) parser.readfp(cfg_fp) cfg_fp.close() except ConfigParser.NoOptionError: cfg_fp.close() print('Failed to find mysql connections credentials.') sys.exit(1) except IOError: print('ERROR: Cannot open %s.', cfg_file) sys.exit(1) value = parser.get('dfa_mysql', 'connection') try: # Find location of pattern in connection parameter as shown below: # http://username:password@host/databasename?characterset=encoding' sobj = re.search(r"(://).*(@).*(/).*(\?)", value) # The list parameter contains: # indices[0], is the index of '://' # indices[1], is the index of '@' # indices[2], is the index of '/' # indices[3], is the index of '?' indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)] # Get the credentials cred = value[indices[0] + 3:indices[1]].split(':') # Get the host name host = value[indices[1] + 1:indices[2]] # Get the database name db_name = value[indices[2] + 1:indices[3]] # Get the character encoding charset = value[indices[3] + 1:].split('=')[1] return cred[0], cred[1], host, db_name, charset except (__HOLE__, IndexError, AttributeError): print('Failed to find mysql connections credentials.') sys.exit(1)
ValueError
dataset/ETHPy150Open openstack/networking-cisco/tools/saf_prepare_setup.py/get_mysql_credentials
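The slicing logic is easiest to follow on a concrete (made-up) connection URL; this standalone sketch replays the parsing above and runs on its own:

.. code-block:: python

    import re

    value = 'mysql://dfa_user:secret@10.0.0.5/dfa_db?charset=utf8'  # example URL
    sobj = re.search(r"(://).*(@).*(/).*(\?)", value)
    indices = [sobj.start(1), sobj.start(2), sobj.start(3), sobj.start(4)]
    user, passwd = value[indices[0] + 3:indices[1]].split(':')
    host = value[indices[1] + 1:indices[2]]
    db_name = value[indices[2] + 1:indices[3]]
    charset = value[indices[3] + 1:].split('=')[1]
    print(user, passwd, host, db_name, charset)
    # -> dfa_user secret 10.0.0.5 dfa_db utf8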
6,596
def modify_conf(cfgfile, service_name, outfn): """Modify config file neutron and keystone to include enabler options.""" if not cfgfile or not outfn: print('ERROR: There is no config file.') sys.exit(0) options = service_options[service_name] with open(cfgfile, 'r') as cf: lines = cf.readlines() for opt in options: op = opt.get('option') res = [line for line in lines if line.startswith(op)] if len(res) > 1: print('ERROR: There are more than one %s option.' % res) sys.exit(0) if res: (op, sep, val) = (res[0].strip('\n').replace(' ', ''). partition('=')) new_val = None if opt.get('is_list'): # Value for this option can contain list of values. # Append the value if it does not exist. if not any(opt.get('value') == value for value in val.split(',')): new_val = ','.join((val, opt.get('value'))) else: if val != opt.get('value'): new_val = opt.get('value') if new_val: opt_idx = lines.index(res[0]) # The setting is different, replace it with new one. lines.pop(opt_idx) lines.insert(opt_idx, '='.join((opt.get('option'), new_val + '\n'))) else: # Option does not exist. Add the option. try: sec_idx = lines.index('[' + opt.get('section') + ']\n') lines.insert(sec_idx + 1, '='.join( (opt.get('option'), opt.get('value') + '\n'))) except __HOLE__: print('Invalid %s section name.' % opt.get('section')) sys.exit(0) with open(outfn, 'w') as fwp: all_lines = '' for line in lines: all_lines += line fwp.write(all_lines)
ValueError
dataset/ETHPy150Open openstack/networking-cisco/tools/saf_prepare_setup.py/modify_conf
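The loop above reads only a handful of keys per option entry; a plausible shape for service_options, inferred from those accesses (the section, option, and value strings here are illustrative, not taken from the tool):

.. code-block:: python

    service_options = {
        'neutron': [
            # is_list=True values are appended to an existing list, not overwritten.
            {'section': 'DEFAULT', 'option': 'service_plugins',
             'value': 'cisco_dfa_rpc', 'is_list': True},
            {'section': 'DEFAULT', 'option': 'notification_driver',
             'value': 'messaging', 'is_list': False},
        ],
    }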
6,597
def Cleanup(self, vm): """Clean up RDS instances, cleanup the extra subnet created for the creation of the RDS instance. Args: vm: The VM that was used as the test client, which also stores states for clean-up. """ # Now, we can delete the DB instance. vm.db_instance_id is the id to call. # We need to keep querying the status of the deletion here before we let # this go. RDS DB deletion takes some time to finish. And we have to # wait until this DB is deleted before we proceed because this DB holds # references to various other resources: subnet groups, subnets, vpc, etc. delete_db_cmd = util.AWS_PREFIX + [ 'rds', 'delete-db-instance', '--db-instance-identifier', vm.db_instance_id, '--skip-final-snapshot'] logging.info('Deleting db instance %s...', vm.db_instance_id) # Note below, the status of this deletion command is validated below in the # loop. both stdout and stderr are checked. stdout, stderr, _ = vm_util.IssueCommand(delete_db_cmd) logging.info('Request to delete the DB has been issued, stdout:\n%s\n' 'stderr:%s\n', stdout, stderr) status_query_cmd = util.AWS_PREFIX + [ 'rds', 'describe-db-instances', '--db-instance-id', vm.db_instance_id] db_status = None for status_query_count in xrange(1, DB_STATUS_QUERY_LIMIT + 1): try: response = json.loads(stdout) except __HOLE__: # stdout cannot be parsed into json, it might simply be empty because # deletion has been completed. break db_status = _RDSParseDBInstanceStatus(response) if db_status == 'deleting': logging.info('DB is still in the deleting state, status_query_count ' 'is %d', status_query_count) # Wait for a few seconds and query status time.sleep(DB_STATUS_QUERY_INTERVAL) stdout, stderr, _ = vm_util.IssueCommand(status_query_cmd) else: logging.info('DB deletion status is no longer in deleting, it is %s', db_status) break else: logging.warn('DB is still in deleting state after long wait, bail.') db_instance_deletion_failed = False if db_status == 'deleted' or re.findall('DBInstanceNotFound', stderr): # Sometimes we get a 'deleted' status from DB status query command, # but even more times, the DB status query command would fail with # an "not found" error, both are positive confirmation that the DB has # been deleted. logging.info('DB has been successfully deleted, got confirmation.') else: # We did not get a positive confirmation that the DB is deleted even after # long wait, we have to bail. But we will log an error message, and # then raise an exception at the end of this function so this particular # run will show as a failed run to the user and allow them to examine # the logs db_instance_deletion_failed = True logging.error( 'RDS DB instance %s failed to be deleted, we did not get ' 'final confirmation from stderr, which is:\n %s', vm.db_instance_id, stderr) if hasattr(vm, 'db_subnet_group_name'): delete_db_subnet_group_cmd = util.AWS_PREFIX + [ 'rds', 'delete-db-subnet-group', '--db-subnet-group-name', vm.db_subnet_group_name] stdout, stderr, _ = vm_util.IssueCommand(delete_db_subnet_group_cmd) logging.info('Deleted the db subnet group. stdout is:\n%s, stderr: \n%s', stdout, stderr) if hasattr(vm, 'extra_subnet_for_db'): vm.extra_subnet_for_db.Delete() if db_instance_deletion_failed: raise DBStatusQueryError('Failed to get confirmation of DB instance ' 'deletion! Check the log for details!')
ValueError
dataset/ETHPy150Open GoogleCloudPlatform/PerfKitBenchmarker/perfkitbenchmarker/linux_benchmarks/mysql_service_benchmark.py/RDSMySQLBenchmark.Cleanup
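Stripped of the AWS specifics, the deletion poll above is a bounded retry loop; a generic sketch with a hypothetical query_status() callable in place of the CLI round-trip:

.. code-block:: python

    import time

    def wait_for_deletion(query_status, limit=60, interval=30):
        """Poll until the status leaves 'deleting' or the attempt limit is hit."""
        for attempt in range(1, limit + 1):
            status = query_status()  # hypothetical helper returning a status string
            if status != 'deleting':
                return status        # e.g. 'deleted', or an error once the DB is gone
            time.sleep(interval)
        return 'deleting'            # unchanged after all polls; caller logs and bails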
6,598
def loadfile(self, filename): try: # open the file in binary mode so that we can handle # end-of-line convention ourselves. with open(filename, 'rb') as f: chars = f.read() except __HOLE__ as msg: tkMessageBox.showerror("I/O Error", str(msg), master=self.text) return False chars = self.decode(chars) # We now convert all end-of-lines to '\n's firsteol = self.eol_re.search(chars) if firsteol: self.eol_convention = firsteol.group(0) if isinstance(self.eol_convention, unicode): # Make sure it is an ASCII string self.eol_convention = self.eol_convention.encode("ascii") chars = self.eol_re.sub(r"\n", chars) self.text.delete("1.0", "end") self.set_filename(None) self.text.insert("1.0", chars) self.reset_undo() self.set_filename(filename) self.text.mark_set("insert", "1.0") self.text.yview("insert") self.updaterecentfileslist(filename) return True
IOError
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/idlelib/IOBinding.py/IOBinding.loadfile
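The eol_re/eol_convention pair comes from idlelib's IOBinding module; its effect can be reproduced with a plain regex (a sketch, not the module's exact source):

.. code-block:: python

    import re

    eol_re = re.compile(r"\r\n|\n|\r")  # try CRLF before the single-character forms
    chars = "one\r\ntwo\r\n"
    firsteol = eol_re.search(chars)
    print(repr(firsteol.group(0)))         # '\r\n', remembered as eol_convention
    print(repr(eol_re.sub(r"\n", chars)))  # buffer text is normalised to '\n'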
6,599
def save(self, event): if not self.filename: self.save_as(event) else: if self.writefile(self.filename): self.set_saved(True) try: self.editwin.store_file_breaks() except __HOLE__: # may be a PyShell pass self.text.focus_set() return "break"
AttributeError
dataset/ETHPy150Open francelabs/datafari/windows/python/Lib/idlelib/IOBinding.py/IOBinding.save