Dataset schema (each row below follows this column order): `Unnamed: 0` — int64 row index (0 to 10k); `function` — string of 79 to 138k characters, the Python source of a function with an `__HOLE__` placeholder where an exception class was removed; `label` — string, one of 20 exception classes that fills the hole; `info` — string of 42 to 261 characters, the ETHPy150Open path of the source function.
7,200
def update(self, recursive=False, init=True, to_latest_revision=False, progress=None, dry_run=False, force=False, keep_going=False): """Update the repository of this submodule to point to the checkout we point at with the binsha of this instance. :param recursive: if True, we will operate recursively and update child- modules as well. :param init: if True, the module repository will be cloned into place if necessary :param to_latest_revision: if True, the submodule's sha will be ignored during checkout. Instead, the remote will be fetched, and the local tracking branch updated. This only works if we have a local tracking branch, which is the case if the remote repository had a master branch, or of the 'branch' option was specified for this submodule and the branch existed remotely :param progress: UpdateProgress instance or None if no progress should be shown :param dry_run: if True, the operation will only be simulated, but not performed. All performed operations are read-only :param force: If True, we may reset heads even if the repository in question is dirty. Additinoally we will be allowed to set a tracking branch which is ahead of its remote branch back into the past or the location of the remote branch. This will essentially 'forget' commits. If False, local tracking branches that are in the future of their respective remote branches will simply not be moved. :param keep_going: if True, we will ignore but log all errors, and keep going recursively. Unless dry_run is set as well, keep_going could cause subsequent/inherited errors you wouldn't see otherwise. In conjunction with dry_run, it can be useful to anticipate all errors when updating submodules :note: does nothing in bare repositories :note: method is definitely not atomic if recurisve is True :return: self""" if self.repo.bare: return self # END pass in bare mode if progress is None: progress = UpdateProgress() # END handle progress prefix = '' if dry_run: prefix = "DRY-RUN: " # END handle prefix # to keep things plausible in dry-run mode if dry_run: mrepo = None # END init mrepo try: # ASSURE REPO IS PRESENT AND UPTODATE ##################################### try: mrepo = self.module() rmts = mrepo.remotes len_rmts = len(rmts) for i, remote in enumerate(rmts): op = FETCH if i == 0: op |= BEGIN # END handle start progress.update(op, i, len_rmts, prefix + "Fetching remote %s of submodule %r" % (remote, self.name)) #=============================== if not dry_run: remote.fetch(progress=progress) # END handle dry-run #=============================== if i == len_rmts - 1: op |= END # END handle end progress.update(op, i, len_rmts, prefix + "Done fetching remote of submodule %r" % self.name) # END fetch new data except InvalidGitRepositoryError: if not init: return self # END early abort if init is not allowed # there is no git-repository yet - but delete empty paths checkout_module_abspath = self.abspath if not dry_run and os.path.isdir(checkout_module_abspath): try: os.rmdir(checkout_module_abspath) except OSError: raise OSError("Module directory at %r does already exist and is non-empty" % checkout_module_abspath) # END handle OSError # END handle directory removal # don't check it out at first - nonetheless it will create a local # branch according to the remote-HEAD if possible progress.update(BEGIN | CLONE, 0, 1, prefix + "Cloning url '%s' to '%s' in submodule %r" % (self.url, checkout_module_abspath, self.name)) if not dry_run: mrepo = self._clone_repo(self.repo, self.url, self.path, self.name, n=True) # END handle dry-run 
progress.update(END | CLONE, 0, 1, prefix + "Done cloning to %s" % checkout_module_abspath) if not dry_run: # see whether we have a valid branch to checkout try: # find a remote which has our branch - we try to be flexible remote_branch = find_first_remote_branch(mrepo.remotes, self.branch_name) local_branch = mkhead(mrepo, self.branch_path) # have a valid branch, but no checkout - make sure we can figure # that out by marking the commit with a null_sha local_branch.set_object(util.Object(mrepo, self.NULL_BIN_SHA)) # END initial checkout + branch creation # make sure HEAD is not detached mrepo.head.set_reference(local_branch, logmsg="submodule: attaching head to %s" % local_branch) mrepo.head.ref.set_tracking_branch(remote_branch) except __HOLE__: log.warn("Failed to checkout tracking branch %s", self.branch_path) # END handle tracking branch # NOTE: Have to write the repo config file as well, otherwise # the default implementation will be offended and not update the repository # Maybe this is a good way to assure it doesn't get into our way, but # we want to stay backwards compatible too ... . Its so redundant ! writer = self.repo.config_writer() writer.set_value(sm_section(self.name), 'url', self.url) writer.release() # END handle dry_run # END handle initalization # DETERMINE SHAS TO CHECKOUT ############################ binsha = self.binsha hexsha = self.hexsha if mrepo is not None: # mrepo is only set if we are not in dry-run mode or if the module existed is_detached = mrepo.head.is_detached # END handle dry_run if mrepo is not None and to_latest_revision: msg_base = "Cannot update to latest revision in repository at %r as " % mrepo.working_dir if not is_detached: rref = mrepo.head.ref.tracking_branch() if rref is not None: rcommit = rref.commit binsha = rcommit.binsha hexsha = rcommit.hexsha else: log.error("%s a tracking branch was not set for local branch '%s'", msg_base, mrepo.head.ref) # END handle remote ref else: log.error("%s there was no local tracking branch", msg_base) # END handle detached head # END handle to_latest_revision option # update the working tree # handles dry_run if mrepo is not None and mrepo.head.commit.binsha != binsha: # We must assure that our destination sha (the one to point to) is in the future of our current head. # Otherwise, we will reset changes that might have been done on the submodule, but were not yet pushed # We also handle the case that history has been rewritten, leaving no merge-base. 
In that case # we behave conservatively, protecting possible changes the user had done may_reset = True if mrepo.head.commit.binsha != self.NULL_BIN_SHA: base_commit = mrepo.merge_base(mrepo.head.commit, hexsha) if len(base_commit) == 0 or base_commit[0].hexsha == hexsha: if force: msg = "Will force checkout or reset on local branch that is possibly in the future of" msg += "the commit it will be checked out to, effectively 'forgetting' new commits" log.debug(msg) else: msg = "Skipping %s on branch '%s' of submodule repo '%s' as it contains un-pushed commits" msg %= (is_detached and "checkout" or "reset", mrepo.head, mrepo) log.info(msg) may_reset = False # end handle force # end handle if we are in the future if may_reset and not force and mrepo.is_dirty(index=True, working_tree=True, untracked_files=True): raise RepositoryDirtyError(mrepo, "Cannot reset a dirty repository") # end handle force and dirty state # end handle empty repo # end verify future/past progress.update(BEGIN | UPDWKTREE, 0, 1, prefix + "Updating working tree at %s for submodule %r to revision %s" % (self.path, self.name, hexsha)) if not dry_run and may_reset: if is_detached: # NOTE: for now we force, the user is no supposed to change detached # submodules anyway. Maybe at some point this becomes an option, to # properly handle user modifications - see below for future options # regarding rebase and merge. mrepo.git.checkout(hexsha, force=force) else: mrepo.head.reset(hexsha, index=True, working_tree=True) # END handle checkout # if we may reset/checkout progress.update(END | UPDWKTREE, 0, 1, prefix + "Done updating working tree for submodule %r" % self.name) # END update to new commit only if needed except Exception as err: if not keep_going: raise log.error(str(err)) # end handle keep_going # HANDLE RECURSION ################## if recursive: # in dry_run mode, the module might not exist if mrepo is not None: for submodule in self.iter_items(self.module()): submodule.update(recursive, init, to_latest_revision, progress=progress, dry_run=dry_run, force=force, keep_going=keep_going) # END handle recursive update # END handle dry run # END for each submodule return self
IndexError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule.update
7,201
@unbare_repo def move(self, module_path, configuration=True, module=True): """Move the submodule to a another module path. This involves physically moving the repository at our current path, changing the configuration, as well as adjusting our index entry accordingly. :param module_path: the path to which to move our module in the parent repostory's working tree, given as repository-relative or absolute path. Intermediate directories will be created accordingly. If the path already exists, it must be empty. Trailing (back)slashes are removed automatically :param configuration: if True, the configuration will be adjusted to let the submodule point to the given path. :param module: if True, the repository managed by this submodule will be moved as well. If False, we don't move the submodule's checkout, which may leave the parent repository in an inconsistent state. :return: self :raise ValueError: if the module path existed and was not empty, or was a file :note: Currently the method is not atomic, and it could leave the repository in an inconsistent state if a sub-step fails for some reason """ if module + configuration < 1: raise ValueError("You must specify to move at least the module or the configuration of the submodule") # END handle input module_checkout_path = self._to_relative_path(self.repo, module_path) # VERIFY DESTINATION if module_checkout_path == self.path: return self # END handle no change module_checkout_abspath = join_path_native(self.repo.working_tree_dir, module_checkout_path) if os.path.isfile(module_checkout_abspath): raise ValueError("Cannot move repository onto a file: %s" % module_checkout_abspath) # END handle target files index = self.repo.index tekey = index.entry_key(module_checkout_path, 0) # if the target item already exists, fail if configuration and tekey in index.entries: raise ValueError("Index entry for target path did already exist") # END handle index key already there # remove existing destination if module: if os.path.exists(module_checkout_abspath): if len(os.listdir(module_checkout_abspath)): raise ValueError("Destination module directory was not empty") # END handle non-emptiness if os.path.islink(module_checkout_abspath): os.remove(module_checkout_abspath) else: os.rmdir(module_checkout_abspath) # END handle link else: # recreate parent directories # NOTE: renames() does that now pass # END handle existence # END handle module # move the module into place if possible cur_path = self.abspath renamed_module = False if module and os.path.exists(cur_path): os.renames(cur_path, module_checkout_abspath) renamed_module = True if os.path.isfile(os.path.join(module_checkout_abspath, '.git')): module_abspath = self._module_abspath(self.repo, self.path, self.name) self._write_git_file_and_module_config(module_checkout_abspath, module_abspath) # end handle git file rewrite # END move physical module # rename the index entry - have to manipulate the index directly as # git-mv cannot be used on submodules ... 
yeah previous_sm_path = self.path try: if configuration: try: ekey = index.entry_key(self.path, 0) entry = index.entries[ekey] del(index.entries[ekey]) nentry = git.IndexEntry(entry[:3] + (module_checkout_path,) + entry[4:]) index.entries[tekey] = nentry except __HOLE__: raise InvalidGitRepositoryError("Submodule's entry at %r did not exist" % (self.path)) # END handle submodule doesn't exist # update configuration writer = self.config_writer(index=index) # auto-write writer.set_value('path', module_checkout_path) self.path = module_checkout_path writer.release() del(writer) # END handle configuration flag except Exception: if renamed_module: os.renames(module_checkout_abspath, cur_path) # END undo module renaming raise # END handle undo rename # Auto-rename submodule if it's name was 'default', that is, the checkout directory if previous_sm_path == self.name: self.rename(module_checkout_path) # end return self
KeyError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule.move
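The `__HOLE__` in the `move` snippet above guards the lookup `index.entries[ekey]`, and the row's label is KeyError: a plain dictionary lookup of a missing entry key. Below is a minimal, hypothetical sketch of the same pattern with a stand-in dict instead of a real GitPython index; the names are illustrative only.

    # Minimal sketch: a missing key in a dict-like index raises KeyError,
    # which the caller translates into a domain-specific error.
    class InvalidGitRepositoryError(Exception):
        pass

    def rename_entry(entries, old_path, new_path):
        key = (old_path, 0)                      # mimics index.entry_key(path, stage)
        try:
            entry = entries[key]                 # KeyError if the entry is absent
        except KeyError:
            raise InvalidGitRepositoryError(
                "Submodule's entry at %r did not exist" % old_path)
        del entries[key]
        entries[(new_path, 0)] = entry

    entries = {("old/sm", 0): "entry-data"}
    rename_entry(entries, "old/sm", "new/sm")    # ok
    # rename_entry(entries, "missing", "x")      # would raise InvalidGitRepositoryError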
7,202
@unbare_repo def remove(self, module=True, force=False, configuration=True, dry_run=False): """Remove this submodule from the repository. This will remove our entry from the .gitmodules file and the entry in the .git/config file. :param module: If True, the module checkout we point to will be deleted as well. If the module is currently on a commit which is not part of any branch in the remote, if the currently checked out branch working tree, or untracked files, is ahead of its tracking branch, if you have modifications in the In case the removal of the repository fails for these reasons, the submodule status will not have been altered. If this submodule has child-modules on its own, these will be deleted prior to touching the own module. :param force: Enforces the deletion of the module even though it contains modifications. This basically enforces a brute-force file system based deletion. :param configuration: if True, the submodule is deleted from the configuration, otherwise it isn't. Although this should be enabled most of the times, this flag enables you to safely delete the repository of your submodule. :param dry_run: if True, we will not actually do anything, but throw the errors we would usually throw :return: self :note: doesn't work in bare repositories :note: doesn't work atomically, as failure to remove any part of the submodule will leave an inconsistent state :raise InvalidGitRepositoryError: thrown if the repository cannot be deleted :raise OSError: if directories or files could not be removed""" if not (module or configuration): raise ValueError("Need to specify to delete at least the module, or the configuration") # END handle parameters # Recursively remove children of this submodule nc = 0 for csm in self.children(): nc += 1 csm.remove(module, force, configuration, dry_run) del(csm) # end if configuration and not dry_run and nc > 0: # Assure we don't leave the parent repository in a dirty state, and commit our changes # It's important for recursive, unforced, deletions to work as expected self.module().index.commit("Removed at least one of child-modules of '%s'" % self.name) # end handle recursion # DELETE REPOSITORY WORKING TREE ################################ if module and self.module_exists(): mod = self.module() git_dir = mod.git_dir if force: # take the fast lane and just delete everything in our module path # TODO: If we run into permission problems, we have a highly inconsistent # state. Delete the .git folders last, start with the submodules first mp = self.abspath method = None if os.path.islink(mp): method = os.remove elif os.path.isdir(mp): method = rmtree elif os.path.exists(mp): raise AssertionError("Cannot forcibly delete repository as it was neither a link, nor a directory") # END handle brutal deletion if not dry_run: assert method method(mp) # END apply deletion method else: # verify we may delete our module if mod.is_dirty(index=True, working_tree=True, untracked_files=True): raise InvalidGitRepositoryError( "Cannot delete module at %s with any modifications, unless force is specified" % mod.working_tree_dir) # END check for dirt # figure out whether we have new commits compared to the remotes # NOTE: If the user pulled all the time, the remote heads might # not have been updated, so commits coming from the remote look # as if they come from us. But we stay strictly read-only and # don't fetch beforehand. 
for remote in mod.remotes: num_branches_with_new_commits = 0 rrefs = remote.refs for rref in rrefs: num_branches_with_new_commits += len(mod.git.cherry(rref)) != 0 # END for each remote ref # not a single remote branch contained all our commits if num_branches_with_new_commits == len(rrefs): raise InvalidGitRepositoryError( "Cannot delete module at %s as there are new commits" % mod.working_tree_dir) # END handle new commits # have to manually delete references as python's scoping is # not existing, they could keep handles open ( on windows this is a problem ) if len(rrefs): del(rref) # END handle remotes del(rrefs) del(remote) # END for each remote # finally delete our own submodule if not dry_run: wtd = mod.working_tree_dir del(mod) # release file-handles (windows) rmtree(wtd) # END delete tree if possible # END handle force if not dry_run and os.path.isdir(git_dir): rmtree(git_dir) # end handle separate bare repository # END handle module deletion # void our data not to delay invalid access if not dry_run: self._clear_cache() # DELETE CONFIGURATION ###################### if configuration and not dry_run: # first the index-entry parent_index = self.repo.index try: del(parent_index.entries[parent_index.entry_key(self.path, 0)]) except __HOLE__: pass # END delete entry parent_index.write() # now git config - need the config intact, otherwise we can't query # information anymore writer = self.repo.config_writer() writer.remove_section(sm_section(self.name)) writer.release() writer = self.config_writer() writer.remove_section() writer.release() # END delete configuration return self
KeyError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule.remove
7,203
def set_parent_commit(self, commit, check=True): """Set this instance to use the given commit whose tree is supposed to contain the .gitmodules blob. :param commit: Commit'ish reference pointing at the root_tree, or None to always point to the most recent commit :param check: if True, relatively expensive checks will be performed to verify validity of the submodule. :raise ValueError: if the commit's tree didn't contain the .gitmodules blob. :raise ValueError: if the parent commit didn't store this submodule under the current path :return: self""" if commit is None: self._parent_commit = None return self # end handle None pcommit = self.repo.commit(commit) pctree = pcommit.tree if self.k_modules_file not in pctree: raise ValueError("Tree of commit %s did not contain the %s file" % (commit, self.k_modules_file)) # END handle exceptions prev_pc = self._parent_commit self._parent_commit = pcommit if check: parser = self._config_parser(self.repo, self._parent_commit, read_only=True) if not parser.has_section(sm_section(self.name)): self._parent_commit = prev_pc raise ValueError("Submodule at path %r did not exist in parent commit %s" % (self.path, commit)) # END handle submodule did not exist # END handle checking mode # update our sha, it could have changed # If check is False, we might see a parent-commit that doens't even contain the submodule anymore. # in that case, mark our sha as being NULL try: self.binsha = pctree[self.path].binsha except __HOLE__: self.binsha = self.NULL_BIN_SHA # end self._clear_cache() return self
KeyError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule.set_parent_commit
7,204
def exists(self): """ :return: True if the submodule exists, False otherwise. Please note that a submodule may exist (in the .gitmodules file) even though its module doesn't exist on disk""" # keep attributes for later, and restore them if we have no valid data # this way we do not actually alter the state of the object loc = locals() for attr in self._cache_attrs: try: if hasattr(self, attr): loc[attr] = getattr(self, attr) # END if we have the attribute cache except (cp.NoSectionError, __HOLE__): # on PY3, this can happen apparently ... don't know why this doesn't happen on PY2 pass # END for each attr self._clear_cache() try: try: self.path return True except Exception: return False # END handle exceptions finally: for attr in self._cache_attrs: if attr in loc: setattr(self, attr, loc[attr]) # END if we have a cache # END reapply each attribute # END handle object state consistency
ValueError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule.exists
7,205
@classmethod def iter_items(cls, repo, parent_commit='HEAD'): """:return: iterator yielding Submodule instances available in the given repository""" pc = repo.commit(parent_commit) # parent commit instance try: parser = cls._config_parser(repo, pc, read_only=True) except IOError: raise StopIteration # END handle empty iterator rt = pc.tree # root tree for sms in parser.sections(): n = sm_name(sms) p = parser.get_value(sms, 'path') u = parser.get_value(sms, 'url') b = cls.k_head_default if parser.has_option(sms, cls.k_head_option): b = str(parser.get_value(sms, cls.k_head_option)) # END handle optional information # get the binsha index = repo.index try: sm = rt[p] except __HOLE__: # try the index, maybe it was just added try: entry = index.entries[index.entry_key(p, 0)] sm = Submodule(repo, entry.binsha, entry.mode, entry.path) except KeyError: raise InvalidGitRepositoryError( "Gitmodule path %r did not exist in revision of parent commit %s" % (p, parent_commit)) # END handle keyerror # END handle critical error # fill in remaining info - saves time as it doesn't have to be parsed again sm._name = n if pc != repo.commit(): sm._parent_commit = pc # end set only if not most recent ! sm._branch_path = git.Head.to_full_path(b) sm._url = u yield sm # END for each section #} END iterable interface
KeyError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule.iter_items
7,206
def get_result_and_row_class(cl, field_name, result): if django.VERSION >= (1, 9): empty_value_display = cl.model_admin.get_empty_value_display() else: from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE empty_value_display = EMPTY_CHANGELIST_VALUE row_classes = ['field-%s' % field_name] try: f, attr, value = lookup_field(field_name, result, cl.model_admin) except __HOLE__: result_repr = empty_value_display else: if django.VERSION >= (1, 9): empty_value_display = getattr( attr, 'empty_value_display', empty_value_display) if f is None: if field_name == 'action_checkbox': row_classes = ['action-checkbox'] allow_tags = getattr(attr, 'allow_tags', False) boolean = getattr(attr, 'boolean', False) if django.VERSION >= (1, 9): result_repr = display_for_value( value, empty_value_display, boolean) else: result_repr = display_for_value(value, boolean) # Strip HTML tags in the resulting text, except if the # function has an "allow_tags" attribute set to True. # WARNING: this will be deprecated in Django 2.0 if allow_tags: result_repr = mark_safe(result_repr) if isinstance(value, (datetime.date, datetime.time)): row_classes.append('nowrap') else: if isinstance(f.rel, models.ManyToOneRel): field_val = getattr(result, f.name) if field_val is None: result_repr = empty_value_display else: result_repr = field_val else: if django.VERSION >= (1, 9): result_repr = display_for_field( value, f, empty_value_display) else: result_repr = display_for_field(value, f) if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)): row_classes.append('nowrap') if force_str(result_repr) == '': result_repr = mark_safe('&nbsp;') row_class = mark_safe(' class="%s"' % ' '.join(row_classes)) return result_repr, row_class
ObjectDoesNotExist
dataset/ETHPy150Open tabo/django-treebeard/treebeard/templatetags/admin_tree.py/get_result_and_row_class
7,207
def find_in_migration(self, anchor, needles, terminus='\n', distance=None): """Assert the presence of the given anchor in the output. Once the anchor is found, assert the presence of *each* provided needle within bounds. If `terminus` is provided, each needle must appear before the next instance of `terminus`. If `distance` is provided, each needle must appear within `distance` characters, and `terminus` is ignored. """ # The anchor must exist. If it doesn't, we have another problem. assert anchor in self.migration_code, 'Could not find: %s\nIn: %s' % ( anchor, self.migration_code, ) start = self.migration_code.index(anchor) + len(anchor) # If a distance is provided, get the substring based on it. # Otherwise, use the terminus. if distance: block = self.migration_code[start:start + distance] else: try: end = self.migration_code.index(terminus, start) except __HOLE__: end = None block = self.migration_code[start:end] # Assert that each of my expected needles is found in the # given haystack. for needle in needles: assert needle in block, 'Could not find text:\n%s' % needle
ValueError
dataset/ETHPy150Open lukesneeringer/django-pgfields/tests/south_migrations/tests.py/MigrationCreationSuite.find_in_migration
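The hole here wraps `self.migration_code.index(terminus, start)`, and the label is ValueError: `str.index` raises ValueError when the substring is not found (unlike `str.find`, which returns -1). A minimal sketch of the slicing fallback used above, with hypothetical names:

    def block_after(text, anchor, terminus='\n'):
        """Return the text between anchor and the next terminus (or end of text)."""
        start = text.index(anchor) + len(anchor)   # ValueError if anchor is missing
        try:
            end = text.index(terminus, start)
        except ValueError:                          # no terminus after anchor
            end = None                              # slice to the end of the string
        return text[start:end]

    code = "class Migration:\n    ops = [AddField]"
    assert block_after(code, "class Migration:") == ""
    assert block_after(code, "ops = [") == "AddField]"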
7,208
def get_url(self, urlbase, *args, **kw): #~ kw[constants.URL_PARAM_USER_LANGUAGE] = self.language if self.ar.bound_action.action.action_name == 'detail': if len(args) == 0: try: args = [str(iter(self.ar).next().pk)] except __HOLE__: return None return urlbase + self.ar.get_request_url(*args, **kw) #~ return '%s%s?ul=%s' % (root,url,self.language.django_code) #~ def register_screenshot(*args,**kw): #~ ss = Screenshot(*args,**kw) #~ SCREENSHOTS[ss.name] = ss #~ return ss
StopIteration
dataset/ETHPy150Open lsaffre/lino/lino/utils/screenshots.py/Screenshot.get_url
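The label is StopIteration: calling `.next()` (or `next()`) on an iterator over an empty result set raises StopIteration, which the method above turns into a `return None`. A small standalone sketch of that pattern (names are hypothetical):

    def first_pk_or_none(rows):
        """Return the primary key of the first row, or None if there are no rows."""
        try:
            first = next(iter(rows))      # StopIteration when rows is empty
        except StopIteration:
            return None
        return str(first["pk"])

    print(first_pk_or_none([{"pk": 42}]))  # "42"
    print(first_pk_or_none([]))            # None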
7,209
def subtopics(store, folders, folder_id, subfolder_id, ann_id=None): '''Yields an unordered generator of subtopics in a subfolder. Each item of the generator is a 4-tuple of ``content_id``, ``subtopic_id``, ``subtopic_type`` and ``data``. Subtopic type is one of the following Unicode strings: ``text``, ``image`` or ``manual``. The type of ``data`` is dependent on the subtopic type. For ``image``, ``data`` is a ``(unicode, str)``, where the first element is the URL and the second element is the binary image data. For all other types, ``data`` is a ``unicode`` string. :param str folder_id: Folder id :param str subfolder_id: Subfolder id :param str ann_id: Username :rtype: generator of ``(content_id, subtopic_id, url, subtopic_type, data)`` ''' # This code will be changed soon. In essence, it implements the # convention established in SortingDesk for storing subtopic data. # Currently, subtopic data is stored in the FC that the data (i.e., # image or snippet) came from. This is bad because it causes pretty # severe race conditions. # # Our current plan is to put each subtopic datum in its own FC. It will # require this code to make more FC fetches, but we should be able to # do it with one `store.get_many` call. items = folders.grouped_items(folder_id, subfolder_id, ann_id=ann_id) fcs = dict([(cid, fc) for cid, fc in store.get_many(items.keys())]) for cid, subids in items.iteritems(): fc = fcs[cid] for subid in subids: try: data = typed_subtopic_data(fc, subid) except __HOLE__: # We have a dangling label folks! continue yield cid, subid, fc['meta_url'], subtopic_type(subid), data
KeyError
dataset/ETHPy150Open dossier/dossier.models/dossier/models/subtopic.py/subtopics
7,210
def runloop(self): while self.keep_going: eventlet.sleep(0.1) ## Only start the number of children we need number = self.num_processes - self.children_count() if number > 0: self.log.debug('Should start %d new children', number) self.spawn_children(number=number) continue if not self.children: ## If we don't yet have children, let's loop continue pid, result = None, None try: pid, result = os.wait() except __HOLE__, e: if e.errno != errno.EINTR: raise if pid and self.children.get(pid): try: child = self.children.pop(pid) os.close(child.kill_pipe) except (IOError, OSError): pass if result: signum = os.WTERMSIG(result) exitcode = os.WEXITSTATUS(result) self.log.info('(%s) Child died from signal %s with code %s', pid, signum, exitcode)
OSError
dataset/ETHPy150Open rtyler/Spawning/spawning/spawning_controller.py/Controller.runloop
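The hole wraps `os.wait()` and the label is OSError: on Python 2, a signal arriving during the blocking wait surfaces as OSError with errno EINTR, which the loop ignores and retries (Python 3.5+ retries automatically per PEP 475, so the branch rarely triggers there). A minimal retry sketch, assuming a generic reaping helper:

    import errno
    import os

    def reap_one_child():
        """Wait for any child to exit; ignore interruptions from signals."""
        while True:
            try:
                return os.wait()                    # (pid, status)
            except OSError as e:
                if e.errno != errno.EINTR:          # real error: re-raise
                    raise
                # EINTR: a signal interrupted the wait; just retry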
7,211
def kill_children(self): for pid, child in self.children.items(): try: os.write(child.kill_pipe, 'k') child.active = False # all maintenance of children's membership happens in runloop() # as children die and os.wait() gets results except __HOLE__, e: if e.errno != errno.EPIPE: raise
OSError
dataset/ETHPy150Open rtyler/Spawning/spawning/spawning_controller.py/Controller.kill_children
7,212
def run(self): self.log.info('(%s) *** Controller starting at %s' % (self.controller_pid, time.asctime())) if self.config.get('pidfile'): with open(self.config.get('pidfile'), 'w') as fd: fd.write('%s\n' % self.controller_pid) spawning.setproctitle("spawn: controller " + self.args.get('argv_str', '')) if self.sock is None: self.sock = bind_socket(self.config) signal.signal(signal.SIGHUP, self.handle_sighup) signal.signal(signal.SIGUSR1, self.handle_deadlychild) if self.config.get('status_port'): from spawning.util import status eventlet.spawn(status.Server, self, self.config['status_host'], self.config['status_port']) try: self.runloop() except __HOLE__: self.keep_going = False self.kill_children() self.log.info('(%s) *** Controller exiting' % (self.controller_pid))
KeyboardInterrupt
dataset/ETHPy150Open rtyler/Spawning/spawning/spawning_controller.py/Controller.run
7,213
def main(): current_directory = os.path.realpath('.') if current_directory not in sys.path: sys.path.append(current_directory) parser = optparse.OptionParser(description="Spawning is an easy-to-use and flexible wsgi server. It supports graceful restarting so that your site finishes serving any old requests while starting new processes to handle new requests with the new code. For the simplest usage, simply pass the dotted path to your wsgi application: 'spawn my_module.my_wsgi_app'", version=spawning.__version__) parser.add_option('-v', '--verbose', dest='verbose', action='store_true', help='Display verbose configuration ' 'information when starting up or restarting.') parser.add_option("-f", "--factory", dest='factory', default='spawning.wsgi_factory.config_factory', help="""Dotted path (eg mypackage.mymodule.myfunc) to a callable which takes a dictionary containing the command line arguments and figures out what needs to be done to start the wsgi application. Current valid values are: spawning.wsgi_factory.config_factory, spawning.paste_factory.config_factory, and spawning.django_factory.config_factory. The factory used determines what the required positional command line arguments will be. See the spawning.wsgi_factory module for documentation on how to write a new factory. """) parser.add_option("-i", "--host", dest='host', default=DEFAULTS['host'], help='The local ip address to bind.') parser.add_option("-p", "--port", dest='port', type='int', default=DEFAULTS['port'], help='The local port address to bind.') parser.add_option("-s", "--processes", dest='processes', type='int', default=DEFAULTS['num_processes'], help='The number of unix processes to start to use for handling web i/o.') parser.add_option("-t", "--threads", dest='threads', type='int', default=DEFAULTS['threadpool_workers'], help="The number of posix threads to use for handling web requests. " "If threads is 0, do not use threads but instead use eventlet's cooperative " "greenlet-based microthreads, monkeypatching the socket and pipe operations which normally block " "to cooperate instead. Note that most blocking database api modules will not " "automatically cooperate.") parser.add_option('-d', '--daemonize', dest='daemonize', action='store_true', help="Daemonize after starting children.") parser.add_option('-u', '--chuid', dest='chuid', metavar="ID", help="Change user ID in daemon mode (and group ID if given, " "separate with colon.)") parser.add_option('--pidfile', dest='pidfile', metavar="FILE", help="Write own process ID to FILE in daemon mode.") parser.add_option('--stdout', dest='stdout', metavar="FILE", help="Redirect stdout to FILE in daemon mode.") parser.add_option('--stderr', dest='stderr', metavar="FILE", help="Redirect stderr to FILE in daemon mode.") parser.add_option('-w', '--watch', dest='watch', action='append', help="Watch the given file's modification time. 
If the file changes, the web server will " 'restart gracefully, allowing old requests to complete in the old processes ' 'while starting new processes with the latest code or configuration.') ## TODO Hook up the svn reloader again parser.add_option("-r", "--reload", type='str', dest='reload', help='If --reload=dev is passed, reload any time ' 'a loaded module or configuration file changes.') parser.add_option("--deadman", "--deadman_timeout", type='int', dest='deadman_timeout', default=DEFAULTS['deadman_timeout'], help='When killing an old i/o process because the code has changed, don\'t wait ' 'any longer than the deadman timeout value for the process to gracefully exit. ' 'If all requests have not completed by the deadman timeout, the process will be mercilessly killed.') parser.add_option('-l', '--access-log-file', dest='access_log_file', default=None, help='The file to log access log lines to. If not given, log to stdout. Pass /dev/null to discard logs.') parser.add_option('-c', '--coverage', dest='coverage', action='store_true', help='If given, gather coverage data from the running program and make the ' 'coverage report available from the /_coverage url. See the figleaf docs ' 'for more info: http://darcs.idyll.org/~t/projects/figleaf/doc/') parser.add_option('--sysinfo', dest='sysinfo', action='store_true', help='If given, gather system information data and make the ' 'report available from the /_sysinfo url.') parser.add_option('-m', '--max-memory', dest='max_memory', type='int', default=0, help='If given, the maximum amount of memory this instance of Spawning ' 'is allowed to use. If all of the processes started by this Spawning controller ' 'use more than this amount of memory, send a SIGHUP to the controller ' 'to get the children to restart.') parser.add_option('--backdoor', dest='backdoor', action='store_true', help='Start a backdoor bound to localhost:3000') parser.add_option('-a', '--max-age', dest='max_age', type='int', help='If given, the maximum amount of time (in seconds) an instance of spawning_child ' 'is allowed to run. Once this time limit has expired the child will' 'gracefully kill itself while the server starts a replacement.') parser.add_option('--no-keepalive', dest='no_keepalive', action='store_true', help='Disable HTTP/1.1 KeepAlive') parser.add_option('-z', '--z-restart-args', dest='restart_args', help='For internal use only') parser.add_option('--status-port', dest='status_port', type='int', default=0, help='If given, hosts a server status page at that port. Two pages are served: a human-readable HTML version at http://host:status_port/status, and a machine-readable version at http://host:status_port/status.json') parser.add_option('--status-host', dest='status_host', type='string', default='', help='If given, binds the server status page to the specified local ip address. Defaults to the same value as --host. If --status-port is not supplied, the status page will not be activated.') options, positional_args = parser.parse_args() if len(positional_args) < 1 and not options.restart_args: parser.error("At least one argument is required. " "For the default factory, it is the dotted path to the wsgi application " "(eg my_package.my_module.my_wsgi_application). For the paste factory, it " "is the ini file to load. 
Pass --help for detailed information about available options.") if options.backdoor: try: eventlet.spawn(eventlet.backdoor.backdoor_server, eventlet.listen(('localhost', 3000))) except Exception, ex: sys.stderr.write('**> Error opening backdoor: %s\n' % ex) sock = None if options.restart_args: restart_args = json.loads(options.restart_args) factory = restart_args['factory'] factory_args = restart_args['factory_args'] start_delay = restart_args.get('start_delay') if start_delay is not None: factory_args['start_delay'] = start_delay print "(%s) delaying startup by %s" % (os.getpid(), start_delay) time.sleep(start_delay) fd = restart_args.get('fd') if fd is not None: sock = socket.fromfd(restart_args['fd'], socket.AF_INET, socket.SOCK_STREAM) ## socket.fromfd doesn't result in a socket object that has the same fd. ## The old fd is still open however, so we close it so we don't leak. os.close(restart_args['fd']) return start_controller(sock, factory, factory_args) ## We're starting up for the first time. if options.daemonize: # Do the daemon dance. Note that this isn't what is considered good # daemonization, because frankly it's convenient to keep the file # descriptiors open (especially when there are prints scattered all # over the codebase.) # What we do instead is fork off, create a new session, fork again. # This leaves the process group in a state without a session # leader. pid = os.fork() if not pid: os.setsid() pid = os.fork() if pid: os._exit(0) else: os._exit(0) print "(%s) now daemonized" % (os.getpid(),) # Close _all_ open (and othewise!) files. import resource maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: maxfd = 4096 for fdnum in xrange(maxfd): try: os.close(fdnum) except __HOLE__, e: if e.errno != errno.EBADF: raise # Remap std{in,out,err} devnull = os.open(os.path.devnull, os.O_RDWR) oflags = os.O_WRONLY | os.O_CREAT | os.O_APPEND if devnull != 0: # stdin os.dup2(devnull, 0) if options.stdout: stdout_fd = os.open(options.stdout, oflags) if stdout_fd != 1: os.dup2(stdout_fd, 1) os.close(stdout_fd) else: os.dup2(devnull, 1) if options.stderr: stderr_fd = os.open(options.stderr, oflags) if stderr_fd != 2: os.dup2(stderr_fd, 2) os.close(stderr_fd) else: os.dup2(devnull, 2) # Change user & group ID. if options.chuid: user, group = set_process_owner(options.chuid) print "(%s) set user=%s group=%s" % (os.getpid(), user, group) else: # Become a process group leader only if not daemonizing. os.setpgrp() ## Fork off the thing that watches memory for this process group. 
controller_pid = os.getpid() if options.max_memory and not os.fork(): env = environ() from spawning import memory_watcher basedir, cmdname = os.path.split(memory_watcher.__file__) if cmdname.endswith('.pyc'): cmdname = cmdname[:-1] os.chdir(basedir) command = [ sys.executable, cmdname, '--max-age', str(options.max_age), str(controller_pid), str(options.max_memory)] os.execve(sys.executable, command, env) factory = options.factory # If you tell me to watch something, I'm going to reload then if options.watch: options.reload = True if options.status_port == options.port: options.status_port = None sys.stderr.write('**> Status port cannot be the same as the service port, disabling status.\n') factory_args = { 'verbose': options.verbose, 'host': options.host, 'port': options.port, 'num_processes': options.processes, 'threadpool_workers': options.threads, 'watch': options.watch, 'reload': options.reload, 'deadman_timeout': options.deadman_timeout, 'access_log_file': options.access_log_file, 'pidfile': options.pidfile, 'coverage': options.coverage, 'sysinfo': options.sysinfo, 'no_keepalive' : options.no_keepalive, 'max_age' : options.max_age, 'argv_str': " ".join(sys.argv[1:]), 'args': positional_args, 'status_port': options.status_port, 'status_host': options.status_host or options.host } start_controller(sock, factory, factory_args)
OSError
dataset/ETHPy150Open rtyler/Spawning/spawning/spawning_controller.py/main
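In the daemonizing branch above, the hole wraps `os.close(fdnum)` over every possible descriptor number, and the label is OSError: closing a descriptor that was never open fails with errno EBADF, which is expected and ignored. A small sketch of that idiom (the range and helper name are illustrative):

    import errno
    import os

    def close_all_fds(maxfd=256):
        """Close every descriptor up to maxfd, ignoring ones that are not open."""
        for fd in range(3, maxfd):          # keep stdin/stdout/stderr open here
            try:
                os.close(fd)
            except OSError as e:
                if e.errno != errno.EBADF:  # EBADF: fd was simply not open
                    raise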
7,214
def clean_and_validate(self, cleaned_data): file = cleaned_data.get('file') if file: # check extension of the data file _, fname, ext = split_filename(file.name) if not ext.lower() in [".nii.gz", ".nii", ".img"]: self._errors["file"] = self.error_class( ["Doesn't have proper extension"] ) del cleaned_data["file"] return cleaned_data # prepare file to loading into memory file.open() fileobj = file.file if file.name.lower().endswith(".gz"): fileobj = GzipFile(filename=file.name, mode='rb', fileobj=fileobj) file_map = {'image': nb.FileHolder(file.name, fileobj)} try: tmp_dir = tempfile.mkdtemp() if ext.lower() == ".img": hdr_file = cleaned_data.get('hdr_file') if hdr_file: # check extension of the hdr file _, _, hdr_ext = split_filename(hdr_file.name) if not hdr_ext.lower() in [".hdr"]: self._errors["hdr_file"] = self.error_class( ["Doesn't have proper extension"]) del cleaned_data["hdr_file"] return cleaned_data else: hdr_file.open() file_map["header"] = nb.FileHolder(hdr_file.name, hdr_file.file) else: self._errors["hdr_file"] = self.error_class( [".img file requires .hdr file"] ) del cleaned_data["hdr_file"] return cleaned_data # check if it is really nifti try: print file_map if "header" in file_map: nii = nb.Nifti1Pair.from_file_map(file_map) else: nii = nb.Nifti1Image.from_file_map(file_map) except Exception as e: raise # detect AFNI 4D files and prepare 3D slices if nii is not None and detect_4D(nii): self.afni_subbricks = split_4D_to_3D(nii, tmp_dir=tmp_dir) else: squeezable_dimensions = len( filter(lambda a: a not in [0, 1], nii.shape) ) if squeezable_dimensions != 3: self._errors["file"] = self.error_class( ["4D files are not supported.\n " "If it's multiple maps in one " "file please split them and " "upload separately"]) del cleaned_data["file"] return cleaned_data # convert to nii.gz if needed if (ext.lower() != ".nii.gz" or squeezable_dimensions < len(nii.shape)): # convert pseudo 4D to 3D if squeezable_dimensions < len(nii.shape): new_data = np.squeeze(nii.get_data()) nii = nb.Nifti1Image(new_data, nii.get_affine(), nii.get_header()) # Papaya does not handle float64, but by converting # files we loose precision # if nii.get_data_dtype() == np.float64: # ii.set_data_dtype(np.float32) new_name = fname + ".nii.gz" nii_tmp = os.path.join(tmp_dir, new_name) nb.save(nii, nii_tmp) print "updating file in cleaned_data" cleaned_data['file'] = memory_uploadfile( nii_tmp, new_name, cleaned_data['file'] ) finally: try: if self.afni_subbricks: # keep temp dir for AFNI slicing self.afni_tmp = tmp_dir else: print "removing %s"%tmp_dir shutil.rmtree(tmp_dir) except __HOLE__ as exc: if exc.errno != 2: # code 2 - no such file or directory raise # re-raise exception elif not getattr(self, 'partial', False): # Skip validation error if this is a partial update from the API raise ValidationError("Couldn't read uploaded file") return cleaned_data
OSError
dataset/ETHPy150Open NeuroVault/NeuroVault/neurovault/apps/statmaps/forms.py/ImageValidationMixin.clean_and_validate
7,215
def save_afni_slices(self, commit): try: orig_img = self.instance for n, (label, brick) in enumerate(self.afni_subbricks): brick_fname = os.path.split(brick)[-1] mfile = memory_uploadfile(brick, brick_fname, orig_img.file) brick_img = StatisticMap(name='%s - %s' % (orig_img.name, label), collection=orig_img.collection, file=mfile) for field in set(self.Meta.fields) - set(['file', 'hdr_file', 'name', 'collection']): if field in self.cleaned_data: setattr(brick_img, field, self.cleaned_data[field]) brick_img.save() return orig_img.collection finally: try: shutil.rmtree(self.afni_tmp) except __HOLE__ as exc: if exc.errno != 2: raise
OSError
dataset/ETHPy150Open NeuroVault/NeuroVault/neurovault/apps/statmaps/forms.py/StatisticMapForm.save_afni_slices
7,216
def _get_current_user():
    session = web.cookies(session="").session
    try:
        email, login_time, digest = session.split(',')
    except __HOLE__:
        return
    if check_salted_hash(email + "," + login_time, digest):
        return User.find(email=email)
ValueError
dataset/ETHPy150Open anandology/broadgauge/broadgauge/account.py/_get_current_user
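The hole guards the unpacking `email, login_time, digest = session.split(',')`, and the label is ValueError: unpacking a split that does not produce exactly three parts raises ValueError, which is how a missing or malformed cookie is detected. A self-contained sketch:

    def parse_session_cookie(raw):
        """Return (email, login_time, digest) or None for a malformed cookie."""
        try:
            email, login_time, digest = raw.split(',')   # ValueError unless exactly 3 parts
        except ValueError:
            return None
        return email, login_time, digest

    print(parse_session_cookie("a@example.com,1700000000,deadbeef"))
    print(parse_session_cookie(""))   # None: split yields 1 part, unpacking fails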
7,217
def _setup_environment(environ): import platform # Cygwin requires some special voodoo to set the environment variables # properly so that Oracle will see them. if platform.system().upper().startswith('CYGWIN'): try: import ctypes except __HOLE__, e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Error loading ctypes: %s; " "the Oracle backend requires ctypes to " "operate correctly under Cygwin." % e) kernel32 = ctypes.CDLL('kernel32') for name, value in environ: kernel32.SetEnvironmentVariableA(name, value) else: import os os.environ.update(environ)
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/db/backends/oracle/base.py/_setup_environment
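The label is ImportError: under Cygwin the code imports `ctypes` lazily and converts a failed import into a configuration error. A minimal sketch of the same optional-dependency pattern, with a local stand-in for Django's ImproperlyConfigured:

    class ImproperlyConfigured(Exception):
        pass

    def load_ctypes_or_explain():
        try:
            import ctypes                      # ImportError if the module is unavailable
        except ImportError as e:
            raise ImproperlyConfigured(
                "Error loading ctypes: %s; the Oracle backend requires ctypes "
                "to operate correctly under Cygwin." % e)
        return ctypes

    ctypes_module = load_ctypes_or_explain()   # succeeds on a normal install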
7,218
def _cursor(self): cursor = None if not self._valid_connection(): conn_string = convert_unicode(self._connect_string()) self.connection = Database.connect(conn_string, **self.settings_dict['OPTIONS']) cursor = FormatStylePlaceholderCursor(self.connection) # Set oracle date to ansi date format. This only needs to execute # once when we create a new connection. We also set the Territory # to 'AMERICA' which forces Sunday to evaluate to a '1' in TO_CHAR(). cursor.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' " "NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF' " "NLS_TERRITORY = 'AMERICA'") if 'operators' not in self.__dict__: # Ticket #14149: Check whether our LIKE implementation will # work for this connection or we need to fall back on LIKEC. # This check is performed only once per DatabaseWrapper # instance per thread, since subsequent connections will use # the same settings. try: cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s" % self._standard_operators['contains'], ['X']) except utils.DatabaseError: self.operators = self._likec_operators else: self.operators = self._standard_operators try: self.oracle_version = int(self.connection.version.split('.')[0]) # There's no way for the DatabaseOperations class to know the # currently active Oracle version, so we do some setups here. # TODO: Multi-db support will need a better solution (a way to # communicate the current version). if self.oracle_version <= 9: self.ops.regex_lookup = self.ops.regex_lookup_9 else: self.ops.regex_lookup = self.ops.regex_lookup_10 except __HOLE__: pass try: self.connection.stmtcachesize = 20 except: # Django docs specify cx_Oracle version 4.3.1 or higher, but # stmtcachesize is available only in 4.3.2 and up. pass connection_created.send(sender=self.__class__, connection=self) if not cursor: cursor = FormatStylePlaceholderCursor(self.connection) return cursor # Oracle doesn't support savepoint commits. Ignore them.
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/db/backends/oracle/base.py/DatabaseWrapper._cursor
7,219
def executemany(self, query, params=None): try: args = [(':arg%d' % i) for i in range(len(params[0]))] except (IndexError, __HOLE__): # No params given, nothing to do return None # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. if query.endswith(';') or query.endswith('/'): query = query[:-1] query = convert_unicode(query % tuple(args), self.charset) formatted = [self._format_params(i) for i in params] self._guess_input_sizes(formatted) try: return self.cursor.executemany(query, [self._param_generator(p) for p in formatted]) except Database.IntegrityError, e: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] except Database.DatabaseError, e: # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400. if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError): raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2] raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/db/backends/oracle/base.py/FormatStylePlaceholderCursor.executemany
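The hole catches the failure of `len(params[0])` when no parameters were supplied, and the label is TypeError: subscripting None raises TypeError, while subscripting an empty list raises IndexError, so the original catches both to mean "nothing to execute". A small sketch:

    def placeholder_names(params):
        """Build ':arg0', ':arg1', ... for the first parameter row, or None if empty."""
        try:
            return [':arg%d' % i for i in range(len(params[0]))]
        except (IndexError, TypeError):   # [] -> IndexError, None -> TypeError
            return None

    print(placeholder_names([(1, 2, 3)]))  # [':arg0', ':arg1', ':arg2']
    print(placeholder_names([]))           # None (IndexError path)
    print(placeholder_names(None))         # None (TypeError path)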
7,220
def __init__(self, table, params):
    BaseCache.__init__(self, params)
    self._table = table
    max_entries = params.get('max_entries', 300)
    try:
        self._max_entries = int(max_entries)
    except (ValueError, TypeError):
        self._max_entries = 300
    cull_frequency = params.get('cull_frequency', 3)
    try:
        self._cull_frequency = int(cull_frequency)
    except (ValueError, __HOLE__):
        self._cull_frequency = 3
TypeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/core/cache/backends/db.py/CacheClass.__init__
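The label is TypeError: `int()` raises ValueError for an unparsable string and TypeError for None (or another non-numeric type), so the constructor catches both and falls back to a default. A generic sketch of that coercion:

    def to_int_or_default(value, default):
        """Coerce a cache option to int, falling back to a default on bad input."""
        try:
            return int(value)
        except (ValueError, TypeError):
            return default

    print(to_int_or_default("300", 300))   # 300
    print(to_int_or_default("many", 300))  # 300 (ValueError)
    print(to_int_or_default(None, 300))    # 300 (TypeError)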
7,221
def enheap(self, i):
    try:
        item = next(i)
        heappush(self.collection, (item, i))
    except __HOLE__:
        return
StopIteration
dataset/ETHPy150Open chango/inferno/inferno/lib/sorted_iterator.py/SortedIterator.enheap
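The hole wraps `next(i)` and the label is StopIteration: an already-exhausted iterator simply contributes nothing to the heap. The same idea in a tiny, self-contained k-way merge sketch (not the inferno implementation itself):

    from heapq import heappush, heappop

    def merge_sorted(*iterables):
        """Yield items from several sorted iterables in global order."""
        heap = []
        for n, it in enumerate(map(iter, iterables)):
            try:
                heappush(heap, (next(it), n, it))   # StopIteration: source was empty
            except StopIteration:
                pass
        while heap:
            item, n, it = heappop(heap)
            yield item
            try:
                heappush(heap, (next(it), n, it))
            except StopIteration:
                pass                                # this source is exhausted

    print(list(merge_sorted([1, 4], [], [2, 3])))   # [1, 2, 3, 4]

The extra counter `n` only breaks ties so the heap never has to compare iterator objects.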
7,222
def next(self): removes = [] reinsert = None rval = None for stream in self.collection: try: rval = stream.next() reinsert = stream break except __HOLE__: removes.append(stream) if rval: for remove in removes: self.collection.remove(remove) if reinsert: self.collection.remove(reinsert) try: reinsert.peek() except: pass else: removes = [] reinsert_index = 0 for stream in self.collection: try: stream.peek() if self._key(reinsert) < self._key(stream): break except: removes.append(stream) reinsert_index += 1 self.collection.insert(reinsert_index, reinsert) for remove in removes: self.collection.remove(remove) return rval raise StopIteration
StopIteration
dataset/ETHPy150Open chango/inferno/inferno/lib/sorted_iterator.py/AltSortedIterator.next
7,223
def _key(self, stream):
    try:
        key, value = stream.peek()
        return tuple(key)
    except __HOLE__:
        return tuple()
StopIteration
dataset/ETHPy150Open chango/inferno/inferno/lib/sorted_iterator.py/AltSortedIterator._key
7,224
@classmethod def _new_aspaths (cls, data, asn4, klass=None): as_set = [] as_seq = [] as_cset = [] as_cseq = [] backup = data unpacker = { False: '!H', True: '!L', } size = { False: 2, True: 4, } as_choice = { ASPath.AS_SEQUENCE: as_seq, ASPath.AS_SET: as_set, ASPath.AS_CONFED_SEQUENCE: as_cseq, ASPath.AS_CONFED_SET: as_cset, } upr = unpacker[asn4] length = size[asn4] try: while data: stype = ord(data[0]) slen = ord(data[1]) if stype not in (ASPath.AS_SET, ASPath.AS_SEQUENCE, ASPath.AS_CONFED_SEQUENCE, ASPath.AS_CONFED_SET): raise Notify(3,11,'invalid AS Path type sent %d' % stype) end = 2+(slen*length) sdata = data[2:end] data = data[end:] # Eat the data and ignore it if the ASPath attribute is know known asns = as_choice.get(stype,[]) for _ in range(slen): asn = unpack(upr,sdata[:length])[0] asns.append(ASN(asn)) sdata = sdata[length:] except __HOLE__: raise Notify(3,11,'not enough data to decode AS_PATH or AS4_PATH') except error: # struct raise Notify(3,11,'not enough data to decode AS_PATH or AS4_PATH') if klass: return klass(as_seq,as_set,as_cseq,as_cset,backup) return cls(as_seq,as_set,as_cseq,as_cset,backup)
IndexError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/bgp/message/update/attribute/aspath.py/ASPath._new_aspaths
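The hole catches IndexError: indexing into the attribute payload (`data[0]`, `data[1]`) past its end raises IndexError, which the parser converts into a BGP Notify, the same truncation that `struct.error` signals for a short unpack. A minimal sketch with a hypothetical ProtocolError standing in for Notify:

    from struct import unpack, error as struct_error

    class ProtocolError(Exception):
        pass

    def read_segment_header(data):
        """Read (segment_type, length, first_asn) from a raw AS_PATH segment."""
        try:
            stype, slen = data[0], data[1]        # IndexError if fewer than 2 bytes
            asn = unpack('!H', data[2:4])[0]      # struct.error if the body is short
        except (IndexError, struct_error):
            raise ProtocolError('not enough data to decode AS_PATH')
        return stype, slen, asn

    print(read_segment_header(bytes([2, 1, 0xFD, 0xE8])))   # (2, 1, 65000)
    # read_segment_header(b'\x02')  # would raise ProtocolError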
7,225
def __init__(self, *args, **kw): from django.conf import settings gettext_module.GNUTranslations.__init__(self, *args, **kw) # Starting with Python 2.4, there's a function to define # the output charset. Before 2.4, the output charset is # identical with the translation file charset. try: self.set_output_charset(settings.DEFAULT_CHARSET) except __HOLE__: pass self.django_output_charset = settings.DEFAULT_CHARSET self.__language = '??'
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/translation/trans_real.py/DjangoTranslation.__init__
7,226
def translation(language): """ Returns a translation object. This translation object will be constructed out of multiple GNUTranslations objects by merging their catalogs. It will construct a object for the requested language and add a fallback to the default language, if it's different from the requested language. """ global _translations t = _translations.get(language, None) if t is not None: return t from django.conf import settings # set up the right translation class klass = DjangoTranslation if sys.version_info < (2, 4): klass = DjangoTranslation23 globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale') if settings.SETTINGS_MODULE is not None: parts = settings.SETTINGS_MODULE.split('.') project = __import__(parts[0], {}, {}, []) projectpath = os.path.join(os.path.dirname(project.__file__), 'locale') else: projectpath = None def _fetch(lang, fallback=None): global _translations loc = to_locale(lang) res = _translations.get(lang, None) if res is not None: return res def _translation(path): try: t = gettext_module.translation('django', path, [loc], klass) t.set_language(lang) return t except __HOLE__, e: return None res = _translation(globalpath) def _merge(path): t = _translation(path) if t is not None: if res is None: return t else: res.merge(t) return res if hasattr(settings, 'LOCALE_PATHS'): for localepath in settings.LOCALE_PATHS: if os.path.isdir(localepath): res = _merge(localepath) if projectpath and os.path.isdir(projectpath): res = _merge(projectpath) for appname in settings.INSTALLED_APPS: p = appname.rfind('.') if p >= 0: app = getattr(__import__(appname[:p], {}, {}, [appname[p+1:]]), appname[p+1:]) else: app = __import__(appname, {}, {}, []) apppath = os.path.join(os.path.dirname(app.__file__), 'locale') if os.path.isdir(apppath): res = _merge(apppath) if res is None: if fallback is not None: res = fallback else: return gettext_module.NullTranslations() _translations[lang] = res return res default_translation = _fetch(settings.LANGUAGE_CODE) current_translation = _fetch(language, fallback=default_translation) return current_translation
IOError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/translation/trans_real.py/translation
7,227
def get_language(): "Returns the currently selected language." t = _active.get(currentThread(), None) if t is not None: try: return to_language(t.language()) except __HOLE__: pass # If we don't have a real translation object, assume it's the default language. from django.conf import settings return settings.LANGUAGE_CODE
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/utils/translation/trans_real.py/get_language
7,228
def get_value(self):
    basetype = _libsmi.SmiValue_basetype_get(self)
    rawval = _libsmi.SmiValue_value_get(self)
    try:
        return Value._BASIC[basetype](rawval)
    except __HOLE__:
        return Value._SPECIAL[basetype](rawval, _libsmi.SmiValue_len_get(self))
KeyError
dataset/ETHPy150Open kdart/pycopia/SMI/pycopia/SMI/SMI.py/Value.get_value
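The label is KeyError: the decoder first tries a dispatch table of basic types and, when the base type is not a key there, falls back to a second table of special types. A generic sketch of that two-table dispatch (the tables and tags are invented for illustration):

    # Dispatch on a type tag: basic decoders first, special decoders on KeyError.
    BASIC = {'int': int, 'float': float}
    SPECIAL = {'hex': lambda raw, length: int(raw, 16) & ((1 << (8 * length)) - 1)}

    def decode(tag, raw, length=4):
        try:
            return BASIC[tag](raw)          # KeyError if the tag is not a basic type
        except KeyError:
            return SPECIAL[tag](raw, length)

    print(decode('int', '42'))       # 42
    print(decode('hex', 'ff', 1))    # 255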
7,229
def merge_spans(spans):
    """
    Merge spans *in-place* within parent doc so that each takes up a single token.

    Args:
        spans (iterable(``spacy.Span``))
    """
    for span in spans:
        try:
            span.merge(span.root.tag_, span.text, span.root.ent_type_)
        except __HOLE__ as e:
            logger.error(e)
IndexError
dataset/ETHPy150Open chartbeat-labs/textacy/textacy/spacy_utils.py/merge_spans
7,230
def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="", prefix=template, dir=None, delete=True): """Create and return a temporary file. Arguments: 'prefix', 'suffix', 'dir', 'mode', 'bufsize', 'delete' are all ignored. Returns an object with a file-like interface; the name of the file is accessible as file.name. The file will be not be automatically deleted when it is closed. """ _os = imp.load_source('os','/usr/lib/python2.7/os.py') names = _RandomNameSequence() flags = _os.O_RDWR|_os.O_CREAT|_os.O_EXCL for seq in xrange(TMP_MAX): name = names.next() #fname = _os.path.join(tempdir, name) fname = _os.path.abspath('/' + str(template) + '/' + str(name)) try: fd = _os.open(fname,flags, 0600) fobj = _os.fdopen(fd,'w+b',-1) return _TemporaryFileWrapper(fobj, fname, False) except __HOLE__, e: if e.errno == _errno.EEXIST: continue # try again raise raise IOError, (_errno.EEXIST, "No usable temporary file name found")
OSError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/dist/tempfile.py/NamedTemporaryFile
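The hole catches OSError with errno EEXIST: opening with `O_CREAT | O_EXCL` fails if the candidate name already exists, and the loop simply tries the next random name (on Python 3 this also surfaces as FileExistsError, a subclass of OSError). A small, hypothetical sketch of that retry loop:

    import errno
    import os
    import tempfile

    def create_unique_file(directory, names, tries=100):
        """Open a new file with O_EXCL, retrying on name collisions."""
        flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
        for _ in range(tries):
            path = os.path.join(directory, next(names))
            try:
                return os.fdopen(os.open(path, flags, 0o600), 'w+b'), path
            except OSError as e:
                if e.errno != errno.EEXIST:   # EEXIST: name taken, try another one
                    raise
        raise IOError(errno.EEXIST, "No usable temporary file name found")

    names = ("sketch_tmp%04d" % i for i in range(10000))
    fobj, path = create_unique_file(tempfile.gettempdir(), names)
    fobj.close(); os.remove(path)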
7,231
def testGetModelClass(self):
    """Test GetModelClass()."""
    try:
        gaeserver.Auth1ServerDatastoreSession.GetModelClass()
        self.fail('GetModelClass() did not raise NotImplementedError')
    except __HOLE__:
        pass
NotImplementedError
dataset/ETHPy150Open google/simian/src/tests/simian/auth/gaeserver_test.py/Auth1ServerDatastoreSessionTest.testGetModelClass
7,232
def _get_disk_allocation_ratio(self, host_state, spec_obj):
    aggregate_vals = utils.aggregate_values_from_key(
        host_state, 'disk_allocation_ratio')
    try:
        ratio = utils.validate_num_values(
            aggregate_vals, host_state.disk_allocation_ratio, cast_to=float)
    except __HOLE__ as e:
        LOG.warning(_LW("Could not decode disk_allocation_ratio: '%s'"), e)
        ratio = host_state.disk_allocation_ratio
    return ratio
ValueError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/scheduler/filters/disk_filter.py/AggregateDiskFilter._get_disk_allocation_ratio
7,233
def clean(self, require_validation=True): key_attr = self.key.replace('-', '_') # aa stands for auxilarary attribute. if (not hasattr(self, key_attr) and not hasattr(self, "_aa_" + key_attr)): # ??? Do we want this? if self.force_validation and require_validation: raise ValidationError("No validator for key %s" % self.key) else: return if hasattr(self, key_attr): validate = getattr(self, key_attr) else: validate = getattr(self, "_aa_" + key_attr) if not callable(validate): raise ValidationError("No validator for key %s not callable" % key_attr) try: validate() except __HOLE__, e: # We want to catch when the validator didn't accept the correct # number of arguements. raise ValidationError("%s" % str(e)) self.validate_unique()
TypeError
dataset/ETHPy150Open rtucker-mozilla/mozilla_inventory/core/keyvalue/models.py/KeyValue.clean
7,234
def index(self, req):
    """Return a list of available DNS domains."""
    context = req.environ['nova.context']
    authorize(context)
    try:
        domains = self.network_api.get_dns_domains(context)
    except __HOLE__:
        msg = _("Unable to get dns domain")
        raise webob.exc.HTTPNotImplemented(explanation=msg)

    domainlist = [_create_domain_entry(domain['domain'],
                                       domain.get('scope'),
                                       domain.get('project'),
                                       domain.get('availability_zone'))
                  for domain in domains]

    return _translate_domain_entries_view(domainlist)
NotImplementedError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/floating_ip_dns.py/FloatingIPDNSDomainController.index
7,235
def update(self, req, id, body): """Add or modify domain entry.""" context = req.environ['nova.context'] authorize(context) # NOTE(shaohe-feng): back-compatible with db layer hard-code # admin permission checks. nova_context.require_admin_context(context) fqdomain = _unquote_domain(id) try: entry = body['domain_entry'] scope = entry['scope'] except (TypeError, __HOLE__): raise webob.exc.HTTPUnprocessableEntity() project = entry.get('project', None) av_zone = entry.get('availability_zone', None) if (scope not in ('private', 'public') or project and av_zone or scope == 'private' and project or scope == 'public' and av_zone): raise webob.exc.HTTPUnprocessableEntity() if scope == 'private': create_dns_domain = self.network_api.create_private_dns_domain area_name, area = 'availability_zone', av_zone else: create_dns_domain = self.network_api.create_public_dns_domain area_name, area = 'project', project try: create_dns_domain(context, fqdomain, area) except NotImplementedError: msg = _("Unable to create dns domain") raise webob.exc.HTTPNotImplemented(explanation=msg) return _translate_domain_entry_view({'domain': fqdomain, 'scope': scope, area_name: area})
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/floating_ip_dns.py/FloatingIPDNSDomainController.update
7,236
def delete(self, req, id):
    """Delete the domain identified by id."""
    context = req.environ['nova.context']
    authorize(context)
    # NOTE(shaohe-feng): back-compatible with db layer hard-code
    # admin permission checks.
    nova_context.require_admin_context(context)
    domain = _unquote_domain(id)

    # Delete the whole domain
    try:
        self.network_api.delete_dns_domain(context, domain)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.format_message())
    except __HOLE__:
        msg = _("Unable to delete dns domain")
        raise webob.exc.HTTPNotImplemented(explanation=msg)

    return webob.Response(status_int=202)
NotImplementedError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/floating_ip_dns.py/FloatingIPDNSDomainController.delete
7,237
def show(self, req, domain_id, id): """Return the DNS entry that corresponds to domain_id and id.""" context = req.environ['nova.context'] authorize(context) domain = _unquote_domain(domain_id) floating_ip = None # Check whether id is a valid ipv4/ipv6 address. if netutils.is_valid_ip(id): floating_ip = id try: if floating_ip: entries = self.network_api.get_dns_entries_by_address( context, floating_ip, domain) else: entries = self.network_api.get_dns_entries_by_name( context, id, domain) except __HOLE__: msg = _("Unable to get dns entry") raise webob.exc.HTTPNotImplemented(explanation=msg) if not entries: explanation = _("DNS entries not found.") raise webob.exc.HTTPNotFound(explanation=explanation) if floating_ip: entrylist = [_create_dns_entry(floating_ip, entry, domain) for entry in entries] dns_entries = _translate_dns_entries_view(entrylist) return wsgi.ResponseObject(dns_entries) entry = _create_dns_entry(entries[0], id, domain) return _translate_dns_entry_view(entry)
NotImplementedError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/floating_ip_dns.py/FloatingIPDNSEntryController.show
7,238
def update(self, req, domain_id, id, body): """Add or modify dns entry.""" context = req.environ['nova.context'] authorize(context) domain = _unquote_domain(domain_id) name = id try: entry = body['dns_entry'] address = entry['ip'] dns_type = entry['dns_type'] except (TypeError, __HOLE__): raise webob.exc.HTTPUnprocessableEntity() try: entries = self.network_api.get_dns_entries_by_name( context, name, domain) if not entries: # create! self.network_api.add_dns_entry(context, address, name, dns_type, domain) else: # modify! self.network_api.modify_dns_entry(context, name, address, domain) except NotImplementedError: msg = _("Unable to create dns entry") raise webob.exc.HTTPNotImplemented(explanation=msg) return _translate_dns_entry_view({'ip': address, 'name': name, 'type': dns_type, 'domain': domain})
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/floating_ip_dns.py/FloatingIPDNSEntryController.update
7,239
def delete(self, req, domain_id, id):
    """Delete the entry identified by req and id."""
    context = req.environ['nova.context']
    authorize(context)
    domain = _unquote_domain(domain_id)
    name = id

    try:
        self.network_api.delete_dns_entry(context, name, domain)
    except exception.NotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.format_message())
    except __HOLE__:
        msg = _("Unable to delete dns entry")
        raise webob.exc.HTTPNotImplemented(explanation=msg)

    return webob.Response(status_int=202)
NotImplementedError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/api/openstack/compute/legacy_v2/contrib/floating_ip_dns.py/FloatingIPDNSEntryController.delete
7,240
def checkDerivedCatch():
    class A(BaseException):
        pass

    class B(A):
        def __init__(self):
            pass

    a = A()
    b = B()

    try:
        raise A, b
    except B, v:
        print("Caught B", v)
    except A, v:
        print("Didn't catch as B, but as A, Python3 does that", v)
    else:
        print("Not caught A class, not allowed to happen.")

    try:
        raise B, a
    except __HOLE__, e:
        print("TypeError with pair form for class not taking args:", e)
TypeError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/checkDerivedCatch
7,241
def checkNonCatch1():
    print("Testing if the else branch is executed in the optimizable case:")
    try:
        0
    except __HOLE__:
        print("Should not catch")
    else:
        print("Executed else branch correctly")
TypeError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/checkNonCatch1
7,242
def checkNonCatch2():
    try:
        print("Testing if the else branch is executed in the non-optimizable case:")
    except __HOLE__:
        print("Should not catch")
    else:
        print("Executed else branch correctly")
TypeError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/checkNonCatch2
7,243
def unpackingCatcher():
    try:
        raise ValueError(1,2)
    except __HOLE__ as (a,b):
        print("Unpacking caught exception and unpacked", a, b)
ValueError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/unpackingCatcher
7,244
def yieldExceptionInteraction():
    def yield_raise():
        print("Yield finds at generator entry", sys.exc_info()[0])
        try:
            raise KeyError("caught")
        except __HOLE__:
            yield sys.exc_info()[0]
            yield sys.exc_info()[0]
        yield sys.exc_info()[0]

    g = yield_raise()
    print("Initial yield from catch in generator", next(g))
    print("Checking from outside of generator", sys.exc_info()[0])
    print("Second yield from the catch reentered", next(g))
    print("Checking from outside of generator", sys.exc_info()[0])
    print("After leaving the catch generator yielded", next(g))
KeyError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/yieldExceptionInteraction
7,245
def yieldExceptionInteraction2(): def yield_raise(): print("Yield finds at generator entry", sys.exc_info()[0]) try: raise ValueError("caught") except __HOLE__: yield sys.exc_info()[0] yield sys.exc_info()[0] yield sys.exc_info()[0] try: undefined_global # @UndefinedVariable except Exception: print("Checking from outside of generator with", sys.exc_info()[0]) g = yield_raise() v = next(g) print("Initial yield from catch in generator:", v) print("Checking from outside the generator:", sys.exc_info()[0]) print("Second yield from the catch reentered:", next(g)) print("Checking from outside the generation again:", sys.exc_info()[0]) print("After leaving the catch generator yielded:", next(g)) print("After exiting the trying branch:", sys.exc_info()[0])
ValueError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/yieldExceptionInteraction2
7,246
def clearingException():
    def clearit():
        try:
            if sys.version_info[0] < 3:
                sys.exc_clear()
        except __HOLE__:
            pass

    try:
        raise KeyError
    except:
        print("Before clearing, it's", sys.exc_info())
        clearit()
        print("After clearing, it's", sys.exc_info())
KeyError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/clearingException
7,247
def raiseWithFinallyNotCorruptingLineNumber():
    try:
        try:
            raising()
        finally:
            not_raising()
    except __HOLE__:
        print("Traceback is in tried block line", sys.exc_info()[2].tb_lineno)
ValueError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/raiseWithFinallyNotCorruptingLineNumber
7,248
def checkReraiseAfterNestedTryExcept():
    def reraise():
        try:
            raise TypeError("outer")
        except Exception:
            try:
                raise KeyError("nested")
            except __HOLE__:
                print("Current exception inside nested handler", sys.exc_info())
                pass

            print("Current exception after nested handler exited", sys.exc_info())

            # Which one does this pick
            raise

    try:
        reraise()
    except Exception as e:
        print("Catched", repr(e))
KeyError
dataset/ETHPy150Open kayhayen/Nuitka/tests/basics/ExceptionRaising.py/checkReraiseAfterNestedTryExcept
7,249
def get_extractor(coarse, fine):
    log.debug("getting fine extractor for '{}: {}'".format(coarse, fine))
    try:
        extractor = importlib.import_module(__package__+'.'+question_types[fine])
    except (ImportError, __HOLE__):
        log.warn("Extractor for fine type '{}: {}' not implemented".format(coarse, fine))
        raise NoExtractorError(coarse, fine)
    return extractor.Extractor
KeyError
dataset/ETHPy150Open jcelliott/inquire/inquire/extraction/numeric/extractors.py/get_extractor
7,250
def unix_getpass(prompt='Password: ', stream=None): """Prompt for a password, with echo turned off. Args: prompt: Written on stream to ask for the input. Default: 'Password: ' stream: A writable file object to display the prompt. Defaults to the tty. If no tty is available defaults to sys.stderr. Returns: The seKr3t input. Raises: EOFError: If our input tty or stdin was closed. GetPassWarning: When we were unable to turn echo off on the input. Always restores terminal settings before returning. """ fd = None tty = None try: # Always try reading and writing directly on the tty first. fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY) tty = os.fdopen(fd, 'w+', 1) input = tty if not stream: stream = tty except EnvironmentError, e: # If that fails, see if stdin can be controlled. try: fd = sys.stdin.fileno() except (AttributeError, __HOLE__): passwd = fallback_getpass(prompt, stream) input = sys.stdin if not stream: stream = sys.stderr if fd is not None: passwd = None try: old = termios.tcgetattr(fd) # a copy to save new = old[:] new[3] &= ~termios.ECHO # 3 == 'lflags' tcsetattr_flags = termios.TCSAFLUSH if hasattr(termios, 'TCSASOFT'): tcsetattr_flags |= termios.TCSASOFT try: termios.tcsetattr(fd, tcsetattr_flags, new) passwd = _raw_input(prompt, stream, input=input) finally: termios.tcsetattr(fd, tcsetattr_flags, old) stream.flush() # issue7208 except termios.error, e: if passwd is not None: # _raw_input succeeded. The final tcsetattr failed. Reraise # instead of leaving the terminal in an unknown state. raise # We can't control the tty or stdin. Give up and use normal IO. # fallback_getpass() raises an appropriate warning. del input, tty # clean up unused file objects before blocking passwd = fallback_getpass(prompt, stream) stream.write('\n') return passwd
ValueError
dataset/ETHPy150Open ctxis/canape/CANAPE.Scripting/Lib/getpass.py/unix_getpass
7,251
def do_tag_cloud_for_model(parser, token): """ Retrieves a list of ``Tag`` objects for a given model, with tag cloud attributes set, and stores them in a context variable. Usage:: {% tag_cloud_for_model [model] as [varname] %} The model is specified in ``[appname].[modelname]`` format. Extended usage:: {% tag_cloud_for_model [model] as [varname] with [options] %} Extra options can be provided after an optional ``with`` argument, with each option being specified in ``[name]=[value]`` format. Valid extra options are: ``steps`` Integer. Defines the range of font sizes. ``min_count`` Integer. Defines the minimum number of times a tag must have been used to appear in the cloud. ``distribution`` One of ``linear`` or ``log``. Defines the font-size distribution algorithm to use when generating the tag cloud. Examples:: {% tag_cloud_for_model products.Widget as widget_tags %} {% tag_cloud_for_model products.Widget as widget_tags with steps=9 min_count=3 distribution=log %} """ bits = token.contents.split() len_bits = len(bits) if len_bits != 4 and len_bits not in range(6, 9): raise TemplateSyntaxError(_('%s tag requires either three or between five and seven arguments') % bits[0]) if bits[2] != 'as': raise TemplateSyntaxError(_("second argument to %s tag must be 'as'") % bits[0]) kwargs = {} if len_bits > 5: if bits[4] != 'with': raise TemplateSyntaxError(_("if given, fourth argument to %s tag must be 'with'") % bits[0]) for i in range(5, len_bits): try: name, value = bits[i].split('=') if name == 'steps' or name == 'min_count': try: kwargs[str(name)] = int(value) except ValueError: raise TemplateSyntaxError(_("%(tag)s tag's '%(option)s' option was not a valid integer: '%(value)s'") % { 'tag': bits[0], 'option': name, 'value': value, }) elif name == 'distribution': if value in ['linear', 'log']: kwargs[str(name)] = {'linear': LINEAR, 'log': LOGARITHMIC}[value] else: raise TemplateSyntaxError(_("%(tag)s tag's '%(option)s' option was not a valid choice: '%(value)s'") % { 'tag': bits[0], 'option': name, 'value': value, }) else: raise TemplateSyntaxError(_("%(tag)s tag was given an invalid option: '%(option)s'") % { 'tag': bits[0], 'option': name, }) except __HOLE__: raise TemplateSyntaxError(_("%(tag)s tag was given a badly formatted option: '%(option)s'") % { 'tag': bits[0], 'option': bits[i], }) return TagCloudForModelNode(bits[1], bits[3], **kwargs)
ValueError
dataset/ETHPy150Open hzlf/openbroadcast/website/tagging_ng/templatetags/tagging_tags.py/do_tag_cloud_for_model
7,252
def run(self): self.socket.setblocking(1) pool = Pool(self.worker_connections) self.server_class.base_env['wsgi.multiprocess'] = (self.cfg.workers > 1) server = self.server_class(self.socket, application=self.wsgi, spawn=pool, handler_class=self.wsgi_handler, namespace=self.namespace, policy_server=self.policy_server) server.start() try: while self.alive: self.notify() if self.ppid != os.getppid(): self.log.info("Parent changed, shutting down: %s", self) break gevent.sleep(1.0) except __HOLE__: pass # try to stop the connections try: self.notify() server.stop(timeout=self.timeout) except: pass
KeyboardInterrupt
dataset/ETHPy150Open abourget/pyramid_socketio/pyramid_socketio/gunicorn/workers.py/GeventSocketIOBaseWorker.run
7,253
def isIPAddress(addr):
    """
    Determine whether the given string represents an IPv4 address.

    @type addr: C{str}
    @param addr: A string which may or may not be the decimal dotted
        representation of an IPv4 address.

    @rtype: C{bool}
    @return: C{True} if C{addr} represents an IPv4 address, C{False}
        otherwise.
    """
    dottedParts = addr.split('.')
    if len(dottedParts) == 4:
        for octet in dottedParts:
            try:
                value = int(octet)
            except __HOLE__:
                return False
            else:
                if value < 0 or value > 255:
                    return False
        return True
    return False
ValueError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/internet/abstract.py/isIPAddress
7,254
def isIPv6Address(addr):
    """
    Determine whether the given string represents an IPv6 address.

    @param addr: A string which may or may not be the hex
        representation of an IPv6 address.
    @type addr: C{str}

    @return: C{True} if C{addr} represents an IPv6 address, C{False}
        otherwise.
    @rtype: C{bool}
    """
    if '%' in addr:
        addr = addr.split('%', 1)[0]
    if not addr:
        return False
    try:
        # This might be a native implementation or the one from
        # twisted.python.compat.
        inet_pton(AF_INET6, addr)
    except (__HOLE__, error):
        return False
    return True
ValueError
dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/internet/abstract.py/isIPv6Address
7,255
def _expect_success(self, command, text):
    try:
        stdout, stderr, status = self._execute_raw(command, text)
        if not self.expected_returns or status in self.expected_returns:
            return stdout.decode('UTF-8')
    except __HOLE__ as e:
        stdout, stderr, status = (None, str(e), e.errno)
    if self.errors_on_statusbar:
        sublime.status_message(
            'Error %i executing command [%s]: %s' %
            (status, self.get_command_as_str(), stderr))
    print(
        'Error %i executing command [%s]:\n%s\n' %
        (status, self.get_command_as_str(False), stderr))
    return None
OSError
dataset/ETHPy150Open tylerl/FilterPipes/filterpipes.py/FilterPipesProcessCommand._expect_success
7,256
def AllocMem_to_kB(AllocMem):
    """Convert the AllocMem string to bytes, an int.

    AllocMem is a string from `scontrol show node'.
    Since, comparing to /proc/meminfo, RealMemory MB is 10**3 kB (and NOT 2**10 kB),
    this assumes slurm is treating AllocMem the same.
    """
    try:
        return int(AllocMem)*1000
    except (ValueError,__HOLE__):
        raise Exception("un-parsable MaxRSS [%r]" % MaxRSS)
TypeError
dataset/ETHPy150Open fasrc/slurmmon/lib/python/site-packages/slurmmon/__init__.py/AllocMem_to_kB
7,257
def __init__(self): self._revivers = _revivers = {} def loads(text): return JSON.parse(text, _reviver) def saves(obj): return JSON.stringify(obj, _replacer) def add_reviver(type_name, func): assert isinstance(type_name, str) _revivers[type_name] = func def _reviver(dct, val=undefined): if val is not undefined: # pragma: no cover dct = val if isinstance(dct, dict): type = dct.get('__type__', None) if type is not None: func = _revivers.get(type, None) if func is not None: return func(dct) return dct def _replacer(obj, val=undefined): if val is undefined: # Py try: return obj.__json__() # same as in Pyramid except __HOLE__: raise TypeError('Cannot serialize object to JSON: %r' % obj) else: # JS - pragma: no cover if (val is not None) and val.__json__: return val.__json__() return val self.loads = loads self.saves = saves self.add_reviver = add_reviver
AttributeError
dataset/ETHPy150Open zoofIO/flexx/flexx/app/serialize.py/Serializer.__init__
7,258
def find_edges(states, relname):
    """
    Use find() to recursively find objects at keys matching
    relname, yielding a node name for every result.
    """
    try:
        deps = find(states, relname)
        for dep in deps:
            for dep_type, dep_name in dep.items():
                yield make_node_name(dep_type, dep_name)
    except __HOLE__ as e:
        sys.stderr.write("Bad state: {0}\n".format(str(states)))
        raise e
AttributeError
dataset/ETHPy150Open ceralena/salt-state-graph/salt_state_graph/__init__.py/find_edges
7,259
@internationalizeDocstring def nicks(self, irc, msg, args, channel): """[<channel>] Returns the nicks of the people in the channel on the various networks the bot is connected to. <channel> is only necessary if the message isn't sent on the channel itself. """ realIrc = self._getRealIrc(irc) if channel not in self.registryValue('channels'): irc.error(format('I\'m not relaying in %s.', channel)) return users = [] for otherIrc in world.ircs: network = self._getIrcName(otherIrc) ops = [] halfops = [] voices = [] usersS = [] if network != self._getIrcName(realIrc): try: Channel = otherIrc.state.channels[channel] except __HOLE__: users.append(format('(not in %s on %s)',channel,network)) continue numUsers = 0 for s in Channel.users: s = s.strip() if not s: continue numUsers += 1 if s in Channel.ops: ops.append('@' + s) elif s in Channel.halfops: halfops.append('%' + s) elif s in Channel.voices: voices.append('+' + s) else: usersS.append(s) utils.sortBy(ircutils.toLower, ops) utils.sortBy(ircutils.toLower, voices) utils.sortBy(ircutils.toLower, halfops) utils.sortBy(ircutils.toLower, usersS) usersS = ', '.join(filter(None, list(map(', '.join, (ops,halfops,voices,usersS))))) users.append(format('%s (%i): %s', ircutils.bold(network), numUsers, usersS)) users.sort() irc.reply('; '.join(users))
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Relay/plugin.py/Relay.nicks
7,260
def doTopic(self, irc, msg): irc = self._getRealIrc(irc) (channel, newTopic) = msg.args if channel not in self.registryValue('channels'): return network = self._getIrcName(irc) if self.registryValue('topicSync', channel): m = ircmsgs.topic(channel, newTopic) for otherIrc in world.ircs: if irc != otherIrc: try: if otherIrc.state.getTopic(channel) != newTopic: if (otherIrc, newTopic) not in self.queuedTopics: self.queuedTopics.add((otherIrc, newTopic)) otherIrc.queueMsg(m) else: self.queuedTopics.remove((otherIrc, newTopic)) except __HOLE__: self.log.warning('Not on %s on %s, ' 'can\'t sync topics.', channel, otherIrc.network) else: s = format(_('topic change by %s on %s: %s'), msg.nick, network, newTopic) m = self._msgmaker(channel, s) self._sendToOthers(irc, m)
KeyError
dataset/ETHPy150Open ProgVal/Limnoria/plugins/Relay/plugin.py/Relay.doTopic
7,261
def get_xlwt():
    try:
        return py_import('xlwt', {
                             'pip': 'xlwt',
                             'linux-debian': 'python-xlwt',
                             'linux-ubuntu': 'python-xlwt',
                             'linux-fedora': 'python-xlwt'},
                         True)
    except __HOLE__:  # pragma: no cover
        return None
ImportError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/tabledata/write/write_excel.py/get_xlwt
7,262
@classmethod
def installed(cls):
    """
    Used in ``mezzanine.pages.views.page`` to ensure ``PageMiddleware``
    or a subclass has been installed. We cache the result on the
    ``PageMiddleware._installed`` to only run this once. Short path is
    to just check for the dotted path to ``PageMiddleware`` in
    ``MIDDLEWARE_CLASSES`` - if not found, we need to load each
    middleware class to match a subclass.
    """
    try:
        return cls._installed
    except __HOLE__:
        name = "mezzanine.pages.middleware.PageMiddleware"
        installed = name in settings.MIDDLEWARE_CLASSES
        if not installed:
            for name in settings.MIDDLEWARE_CLASSES:
                if issubclass(import_dotted_path(name), cls):
                    installed = True
                    break
        setattr(cls, "_installed", installed)
        return installed
AttributeError
dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/pages/middleware.py/PageMiddleware.installed
7,263
def process_view(self, request, view_func, view_args, view_kwargs): """ Per-request mechanics for the current page object. """ # Load the closest matching page by slug, and assign it to the # request object. If none found, skip all further processing. slug = path_to_slug(request.path_info) pages = Page.objects.with_ascendants_for_slug(slug, for_user=request.user, include_login_required=True) if pages: page = pages[0] setattr(request, "page", page) context_processors.page(request) else: return # Handle ``page.login_required``. if page.login_required and not request.user.is_authenticated(): return redirect_to_login(request.get_full_path()) # If the view isn't Mezzanine's page view, try to return the result # immediately. In the case of a 404 with an URL slug that matches a # page exactly, swallow the exception and try Mezzanine's page view. # # This allows us to set up pages with URLs that also match non-page # urlpatterns. For example, a page could be created with the URL # /blog/about/, which would match the blog urlpattern, and assuming # there wasn't a blog post with the slug "about", would raise a 404 # and subsequently be rendered by Mezzanine's page view. if view_func != page_view: try: return view_func(request, *view_args, **view_kwargs) except Http404: if page.slug != slug: raise # Run page processors. extra_context = {} model_processors = page_processors.processors[page.content_model] slug_processors = page_processors.processors["slug:%s" % page.slug] for (processor, exact_page) in slug_processors + model_processors: if exact_page and not page.is_current: continue processor_response = processor(request, page) if isinstance(processor_response, HttpResponse): return processor_response elif processor_response: try: for k, v in processor_response.items(): if k not in extra_context: extra_context[k] = v except (__HOLE__, ValueError): name = "%s.%s" % (processor.__module__, processor.__name__) error = ("The page processor %s returned %s but must " "return HttpResponse or dict." % (name, type(processor_response))) raise ValueError(error) return page_view(request, slug, extra_context=extra_context)
TypeError
dataset/ETHPy150Open stephenmcd/mezzanine/mezzanine/pages/middleware.py/PageMiddleware.process_view
7,264
def __init__(self, config, batchSystem):
    """
    :type config: Config
    :type batchSystem: AbstractBatchSystem
    """
    super(AWSProvisioner, self).__init__()
    self.batchSystem = batchSystem
    ami, instanceType = ':'.split(config.nodeOptions)
    preemptableAmi, preemptableInstanceType = ':'.split(config.preemptableNodeOptions)
    self.ami = switch(ami, preemptableAmi)
    self.instanceType = switch(instanceType, preemptableInstanceType)
    for instanceType in self.instanceType.values():
        try:
            instanceType = ec2_instance_types[instanceType]
        except __HOLE__:
            raise RuntimeError("Invalid or unknown instance type '%s'" % instanceType)
        else:
            # FIXME: add support for EBS volumes
            if instanceType.disks == 0:
                raise RuntimeError("This provisioner only supports instance types with one or "
                                   "more ephemeral volumes. The requested type '%s' does not "
                                   "have any." % instanceType.name)
    self.spotBid = config.preemptableBidPrice
KeyError
dataset/ETHPy150Open BD2KGenomics/toil/src/toil/provisioners/aws/provisioner.py/AWSProvisioner.__init__
7,265
def test_compat():
    # test we have compat with our version of nu

    from pandas.computation import _NUMEXPR_INSTALLED
    try:
        import numexpr as ne
        ver = ne.__version__
        if ver == LooseVersion('2.4.4'):
            assert not _NUMEXPR_INSTALLED
        elif ver < LooseVersion('2.1'):
            with tm.assert_produces_warning(UserWarning,
                                            check_stacklevel=False):
                assert not _NUMEXPR_INSTALLED
        else:
            assert _NUMEXPR_INSTALLED
    except __HOLE__:
        raise nose.SkipTest("not testing numexpr version compat")
ImportError
dataset/ETHPy150Open pydata/pandas/pandas/computation/tests/test_compat.py/test_compat
7,266
def check_invalid_numexpr_version(engine, parser): def testit(): a, b = 1, 2 res = pd.eval('a + b', engine=engine, parser=parser) tm.assert_equal(res, 3) if engine == 'numexpr': try: import numexpr as ne except __HOLE__: raise nose.SkipTest("no numexpr") else: if ne.__version__ < LooseVersion('2.1'): with tm.assertRaisesRegexp(ImportError, "'numexpr' version is " ".+, must be >= 2.1"): testit() elif ne.__version__ == LooseVersion('2.4.4'): raise nose.SkipTest("numexpr version==2.4.4") else: testit() else: testit()
ImportError
dataset/ETHPy150Open pydata/pandas/pandas/computation/tests/test_compat.py/check_invalid_numexpr_version
7,267
def _get_storage_attributes(self, name):
    try:
        (
            model_class_path,
            content_field,
            filename_field,
            mimetype_field,
            filename
        ) = name.split(os.sep)
    except __HOLE__:
        raise NameException('Wrong name format. Should be {}'.format(
            NAME_FORMAT_HINT))
    return {
        'model_class_path': model_class_path,
        'content_field': content_field,
        'filename_field': filename_field,
        'mimetype_field': mimetype_field,
        'filename': filename,
    }
ValueError
dataset/ETHPy150Open victor-o-silva/db_file_storage/db_file_storage/storage.py/DatabaseFileStorage._get_storage_attributes
7,268
def __init__(self, *args, **kwargs):
    try:
        self.model_class_path = kwargs.pop('model_class_path')
        self.content_field = kwargs.pop('content_field')
        self.filename_field = kwargs.pop('filename_field')
        self.mimetype_field = kwargs.pop('mimetype_field')
    except __HOLE__:
        raise KeyError(
            "keyword args 'model_class_path', 'content_field', "
            "'filename_field' and 'mimetype_field' are required."
        )
    super(FixedModelDatabaseFileStorage, self).__init__(*args, **kwargs)
KeyError
dataset/ETHPy150Open victor-o-silva/db_file_storage/db_file_storage/storage.py/FixedModelDatabaseFileStorage.__init__
7,269
def prep_ticks(ax, index, ax_type, props): """Prepare axis obj belonging to axes obj. positional arguments: ax - the mpl axes instance index - the index of the axis in `props` ax_type - 'x' or 'y' (for now) props - an mplexporter poperties dictionary """ axis_dict = dict() if ax_type == 'x': axis = ax.get_xaxis() elif ax_type == 'y': axis = ax.get_yaxis() else: return dict() # whoops! scale = props['axes'][index]['scale'] if scale == 'linear': # get tick location information try: tickvalues = props['axes'][index]['tickvalues'] tick0 = tickvalues[0] dticks = [round(tickvalues[i]-tickvalues[i-1], 12) for i in range(1, len(tickvalues) - 1)] if all([dticks[i] == dticks[i-1] for i in range(1, len(dticks) - 1)]): dtick = tickvalues[1] - tickvalues[0] else: warnings.warn("'linear' {0}-axis tick spacing not even, " "ignoring mpl tick formatting.".format(ax_type)) raise TypeError except (__HOLE__, TypeError): axis_dict['nticks'] = props['axes'][index]['nticks'] else: axis_dict['tick0'] = tick0 axis_dict['dtick'] = dtick axis_dict['tickmode'] = False elif scale == 'log': try: axis_dict['tick0'] = props['axes'][index]['tickvalues'][0] axis_dict['dtick'] = props['axes'][index]['tickvalues'][1] - \ props['axes'][index]['tickvalues'][0] axis_dict['tickmode'] = False except (IndexError, TypeError): axis_dict = dict(nticks=props['axes'][index]['nticks']) base = axis.get_transform().base if base == 10: if ax_type == 'x': axis_dict['range'] = [math.log10(props['xlim'][0]), math.log10(props['xlim'][1])] elif ax_type == 'y': axis_dict['range'] = [math.log10(props['ylim'][0]), math.log10(props['ylim'][1])] else: axis_dict = dict(range=None, type='linear') warnings.warn("Converted non-base10 {0}-axis log scale to 'linear'" "".format(ax_type)) else: return dict() # get tick label formatting information formatter = axis.get_major_formatter().__class__.__name__ if ax_type == 'x' and 'DateFormatter' in formatter: axis_dict['type'] = 'date' try: axis_dict['tick0'] = mpl_dates_to_datestrings( axis_dict['tick0'], formatter ) except KeyError: pass finally: axis_dict.pop('dtick', None) axis_dict.pop('tickmode', None) axis_dict['range'] = mpl_dates_to_datestrings( props['xlim'], formatter ) if formatter == 'LogFormatterMathtext': axis_dict['exponentformat'] = 'e' return axis_dict
IndexError
dataset/ETHPy150Open plotly/plotly.py/plotly/matplotlylib/mpltools.py/prep_ticks
7,270
def parse_line(line, import_date):
    award = {}
    award['cfda'] = line[0:7].strip()
    if not RE_CFDA.match(award['cfda']):
        raise InvalidCFDAProgramNumber(award['cfda'])
    award['action'] = line[135]
    award['award_id'] = line[142:158].strip()
    try:
        award['award_mod'] = int(line[158:162].strip())
    except __HOLE__:
        award['award_mod'] = None
    award['fed_amount'] = int(line[162:173])
    award['correction_indicator'] = line[223]
    # for aggregates obligation date is the last day of the quarter
    award['obligation_date'] = date(year=int(line[196:200]),
                                    month=int(line[200:202]),
                                    day=int(line[202:204]))
    award['import_date'] = import_date
    award['reporting_lag'] = (award['import_date'] - award['obligation_date']).days
    fiscal_year = get_fiscal_year(award['obligation_date'])
    award['fiscal_year_lag'] = (import_date - date(year=fiscal_year, month=9, day=30)).days
    award['fiscal_year'] = fiscal_year
    return award
ValueError
dataset/ETHPy150Open sunlightlabs/clearspending/timeliness/parser.py/parse_line
7,271
def find_files_to_process():
    files_from_crawler = list(flattened(recursive_listdir(DOWNLOAD_DIR)))
    files_to_process = []
    files_to_ignore = []
    for path in files_from_crawler:
        try:
            import_date = find_date(path)
            size = os.path.getsize(path)
            files_to_process.append((path, import_date, os.path.getsize(path)))
        except __HOLE__:
            files_to_ignore.append(path)

    def _import_date((_1, import_date, _2)):
        return import_date

    def _size((_1, _2, size)):
        return size

    bytes_accumulator = Accumulator()
    files_to_process.sort(key=_import_date)
    files_to_process = [(f, bytes_accumulator(_size(f))) for f in files_to_process]
    bytes_to_process = bytes_accumulator.getvalue()
    return (bytes_to_process, files_to_process, files_to_ignore)
ValueError
dataset/ETHPy150Open sunlightlabs/clearspending/timeliness/parser.py/find_files_to_process
7,272
def parser_main(): (bytes_to_process, files_to_process, files_to_ignore) = find_files_to_process() for path in files_to_ignore: print "Unparseable filename: {0}".format(os.path.basename(path)) print "Files to process: {0}".format(len(files_to_process)) print "Bytes to process: {0}".format(pretty_bytes(bytes_to_process)) print "Continue?" user_input = raw_input() if not 'yes'.startswith(user_input.lower()): return transactions = {} failed_lines = file(os.path.join(DATA_DIR, 'failed_lines.out'), 'w') failed_files = file(os.path.join(DATA_DIR, 'failed_files.out'), 'w') begin_time = time.time() for files_processed, ((filepath, import_date, filesize), bytes_processed) in enumerate(files_to_process, start=1): try: print print "Parsing {0}".format(os.path.basename(filepath)) file_transactions = parse_file(filepath, import_date) for (award_id, t) in file_transactions: if award_id not in transactions: transactions[award_id] = t except UnicodeDecodeError, error: log_error(db, filepath, "Unable to parse file: {0}".format(unicode(error))) except __HOLE__: break now_time = time.time() bytes_per_second = bytes_processed / max(now_time - begin_time, 1) bytes_processed_pct = bytes_processed * 100 / bytes_to_process eta_seconds = (bytes_to_process - bytes_processed) / max(bytes_per_second, 1) print "{0}/{1} ({2}%), {3}/s, ETA {4}".format( pretty_bytes(bytes_processed), pretty_bytes(bytes_to_process), bytes_processed_pct, pretty_bytes(bytes_per_second), pretty_seconds(eta_seconds)) failed_lines.close() failed_files.close() print "Dumping awards dictionary..." with file(os.path.join(DATA_DIR, 'cfda_awards.out.bin'), 'wb') as outf: pickle.dump(transactions, outf)
KeyboardInterrupt
dataset/ETHPy150Open sunlightlabs/clearspending/timeliness/parser.py/parser_main
7,273
def show_prefixes(): def filename_has_date(filename): try: import_date = find_date(filename) return True except (__HOLE__, ImportError), err: return False re_agency = re.compile('^[0-9]*[A-Z]+') def extract_prefix(filename): prefix_match = re_agency.match(filename.upper()) if not prefix_match is None: prefix = prefix_match.group() return fix_prefix(prefix) else: return None files_to_process = filter(filename_has_date, map(os.path.basename, flattened(recursive_listdir(DOWNLOAD_DIR)))) prefixes = map(extract_prefix, files_to_process) def unique(iterable): def combine(accum, item): accum[item] = None return accum return reduce(combine, iterable, {}).keys() def frequency(iterable): def combine(frequencies, item): cnt = frequencies.get(item, 0) frequencies[item] = cnt + 1 return frequencies return reduce(combine, iterable, {}) def print_freq(freq, indent=""): def value((k, v)): return v for s, f in sorted(freq.iteritems()): print "{0}{1!s:15}: {2!s:>7}".format(indent, s, f) print_freq(frequency(prefixes))
ValueError
dataset/ETHPy150Open sunlightlabs/clearspending/timeliness/parser.py/show_prefixes
7,274
def _iter_loaders(self, template):
    loader = self.app.jinja_loader
    if loader is not None:
        yield loader, template

    # old style module based loaders in case we are dealing with a
    # blueprint that is an old style module
    try:
        module, local_name = posixpath.normpath(template).split('/', 1)
        blueprint = self.app.blueprints[module]
        if blueprint_is_module(blueprint):
            loader = blueprint.jinja_loader
            if loader is not None:
                yield loader, local_name
    except (__HOLE__, KeyError):
        pass

    for blueprint in self.app.blueprints.itervalues():
        if blueprint_is_module(blueprint):
            continue
        loader = blueprint.jinja_loader
        if loader is not None:
            yield loader, template
ValueError
dataset/ETHPy150Open baseblack/ReproWeb/3rdParty/python/flask/templating.py/DispatchingJinjaLoader._iter_loaders
7,275
def execute(task, *args, **kwargs): """ Patched version of fabric's execute task with alternative error handling """ my_env = {'clean_revert': True} results = {} # Obtain task is_callable = callable(task) if not (is_callable or _is_task(task)): # Assume string, set env.command to it my_env['command'] = task task = crawl(task, state.commands) if task is None: msg = "%r is not callable or a valid task name" % ( my_env['command'],) if state.env.get('skip_unknown_tasks', False): warn(msg) return else: abort(msg) # Set env.command if we were given a real function or callable task obj else: dunder_name = getattr(task, '__name__', None) my_env['command'] = getattr(task, 'name', dunder_name) # Normalize to Task instance if we ended up with a regular callable if not _is_task(task): task = WrappedCallableTask(task) # Filter out hosts/roles kwargs new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs) # Set up host list my_env['all_hosts'], my_env[ 'effective_roles'] = task.get_hosts_and_effective_roles(hosts, roles, exclude_hosts, state.env) parallel = requires_parallel(task) if parallel: # Import multiprocessing if needed, erroring out usefully # if it can't. try: import multiprocessing except ImportError: import traceback tb = traceback.format_exc() abort(tb + """ At least one task needs to be run in parallel, but the multiprocessing module cannot be imported (see above traceback.) Please make sure the module is installed or that the above ImportError is fixed.""") else: multiprocessing = None # Get pool size for this task pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size) # Set up job queue in case parallel is needed queue = multiprocessing.Queue() if parallel else None jobs = JobQueue(pool_size, queue) if state.output.debug: jobs._debug = True # Call on host list if my_env['all_hosts']: # Attempt to cycle on hosts, skipping if needed for host in my_env['all_hosts']: try: results[host] = _execute( task, host, my_env, args, new_kwargs, jobs, queue, multiprocessing ) except NetworkError, e: results[host] = e # Backwards compat test re: whether to use an exception or # abort if state.env.skip_bad_hosts or state.env.warn_only: func = warn else: func = abort error(e.message, func=func, exception=e.wrapped) except __HOLE__, e: results[host] = e # If requested, clear out connections here and not just at the end. if state.env.eagerly_disconnect: disconnect_all() # If running in parallel, block until job queue is emptied if jobs: jobs.close() # Abort if any children did not exit cleanly (fail-fast). # This prevents Fabric from continuing on to any other tasks. # Otherwise, pull in results from the child run. ran_jobs = jobs.run() for name, d in ran_jobs.iteritems(): if d['exit_code'] != 0: if isinstance(d['results'], NetworkError): func = warn if state.env.skip_bad_hosts \ or state.env.warn_only else abort error(d['results'].message, exception=d['results'].wrapped, func=func) elif exception.is_arguments_error(d['results']): raise d['results'] elif isinstance(d['results'], SystemExit): # System exit indicates abort pass elif isinstance(d['results'], BaseException): error(d['results'].message, exception=d['results']) else: error('One or more hosts failed while executing task.') results[name] = d['results'] # Or just run once for local-only else: with settings(**my_env): results['<local-only>'] = task.run(*args, **new_kwargs) # Return what we can from the inner task executions return results
SystemExit
dataset/ETHPy150Open prestodb/presto-admin/prestoadmin/fabric_patches.py/execute
7,276
def __getitem__(self, key):
    try:
        return self.data[key]
    except __HOLE__:
        return self.data[key.lower()]

# Stores already-created ORMs.
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/orm.py/ModelsLocals.__getitem__
7,277
def __init__(self, cls, app): self.default_app = app self.cls = cls # Try loading the models off the migration class; default to no models. self.models = {} try: self.models_source = cls.models except __HOLE__: return # Start a 'new' AppCache hacks.clear_app_cache() # Now, make each model's data into a FakeModel # We first make entries for each model that are just its name # This allows us to have circular model dependency loops model_names = [] for name, data in self.models_source.items(): # Make sure there's some kind of Meta if "Meta" not in data: data['Meta'] = {} try: app_label, model_name = name.split(".", 1) except ValueError: app_label = self.default_app model_name = name # If there's an object_name in the Meta, use it and remove it if "object_name" in data['Meta']: model_name = data['Meta']['object_name'] del data['Meta']['object_name'] name = "%s.%s" % (app_label, model_name) self.models[name.lower()] = name model_names.append((name.lower(), app_label, model_name, data)) # Loop until model_names is entry, or hasn't shrunk in size since # last iteration. # The make_model method can ask to postpone a model; it's then pushed # to the back of the queue. Because this is currently only used for # inheritance, it should thus theoretically always decrease by one. last_size = None while model_names: # First, make sure we've shrunk. if len(model_names) == last_size: raise ImpossibleORMUnfreeze() last_size = len(model_names) # Make one run through postponed_model_names = [] for name, app_label, model_name, data in model_names: try: self.models[name] = self.make_model(app_label, model_name, data) except UnfreezeMeLater: postponed_model_names.append((name, app_label, model_name, data)) # Reset model_names = postponed_model_names # And perform the second run to iron out any circular/backwards depends. self.retry_failed_fields() # Force evaluation of relations on the models now for model in self.models.values(): model._meta.get_all_field_names() # Reset AppCache hacks.unclear_app_cache()
AttributeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/orm.py/_FakeORM.__init__
7,278
def __getattr__(self, key):
    fullname = (self.default_app+"."+key).lower()
    try:
        return self.models[fullname]
    except __HOLE__:
        raise AttributeError("The model '%s' from the app '%s' is not available in this migration. (Did you use orm.ModelName, not orm['app.ModelName']?)" % (key, self.default_app))
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/orm.py/_FakeORM.__getattr__
7,279
def __getitem__(self, key):
    # Detect if they asked for a field on a model or not.
    if ":" in key:
        key, fname = key.split(":")
    else:
        fname = None
    # Now, try getting the model
    key = key.lower()
    try:
        model = self.models[key]
    except __HOLE__:
        try:
            app, model = key.split(".", 1)
        except ValueError:
            raise KeyError("The model '%s' is not in appname.modelname format." % key)
        else:
            raise KeyError("The model '%s' from the app '%s' is not available in this migration." % (model, app))
    # If they asked for a field, get it.
    if fname:
        return model._meta.get_field_by_name(fname)[0]
    else:
        return model
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/orm.py/_FakeORM.__getitem__
7,280
def eval_in_context(self, code, app, extra_imports={}): "Evaluates the given code in the context of the migration file." # Drag in the migration module's locals (hopefully including models.py) # excluding all models from that (i.e. from modern models.py), to stop pollution fake_locals = dict( (key, value) for key, value in inspect.getmodule(self.cls).__dict__.items() if not ( isinstance(value, type) and issubclass(value, models.Model) and hasattr(value, "_meta") ) ) # We add our models into the locals for the eval fake_locals.update(dict([ (name.split(".")[-1], model) for name, model in self.models.items() ])) # Make sure the ones for this app override. fake_locals.update(dict([ (name.split(".")[-1], model) for name, model in self.models.items() if name.split(".")[0] == app ])) # Ourselves as orm, to allow non-fail cross-app referencing fake_locals['orm'] = self # And a fake _ function fake_locals['_'] = lambda x: x # Datetime; there should be no datetime direct accesses fake_locals['datetime'] = datetime_utils # Now, go through the requested imports and import them. for name, value in extra_imports.items(): # First, try getting it out of locals. parts = value.split(".") try: obj = fake_locals[parts[0]] for part in parts[1:]: obj = getattr(obj, part) except (__HOLE__, AttributeError): pass else: fake_locals[name] = obj continue # OK, try to import it directly try: fake_locals[name] = ask_for_it_by_name(value) except ImportError: if name == "SouthFieldClass": raise ValueError("Cannot import the required field '%s'" % value) else: print("WARNING: Cannot import '%s'" % value) # Use ModelsLocals to make lookups work right for CapitalisedModels fake_locals = ModelsLocals(fake_locals) return eval(code, globals(), fake_locals)
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/orm.py/_FakeORM.eval_in_context
7,281
def make_meta(self, app, model, data, stub=False):
    "Makes a Meta class out of a dict of eval-able arguments."
    results = {'app_label': app}
    for key, code in data.items():
        # Some things we never want to use.
        if key in ["_bases", "_ormbases"]:
            continue
        # Some things we don't want with stubs.
        if stub and key in ["order_with_respect_to"]:
            continue
        # OK, add it.
        try:
            results[key] = self.eval_in_context(code, app)
        except (__HOLE__, AttributeError) as e:
            raise ValueError("Cannot successfully create meta field '%s' for model '%s.%s': %s." % (
                key, app, model, e
            ))
    return type("Meta", tuple(), results)
NameError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/orm.py/_FakeORM.make_meta
7,282
def make_model(self, app, name, data): "Makes a Model class out of the given app name, model name and pickled data." # Extract any bases out of Meta if "_ormbases" in data['Meta']: # Make sure everything we depend on is done already; otherwise, wait. for key in data['Meta']['_ormbases']: key = key.lower() if key not in self.models: raise ORMBaseNotIncluded("Cannot find ORM base %s" % key) elif isinstance(self.models[key], string_types): # Then the other model hasn't been unfrozen yet. # We postpone ourselves; the situation will eventually resolve. raise UnfreezeMeLater() bases = [self.models[key.lower()] for key in data['Meta']['_ormbases']] # Perhaps the old style? elif "_bases" in data['Meta']: bases = map(ask_for_it_by_name, data['Meta']['_bases']) # Ah, bog standard, then. else: bases = [models.Model] # Turn the Meta dict into a basic class meta = self.make_meta(app, name, data['Meta'], data.get("_stub", False)) failed_fields = {} fields = {} stub = False # Now, make some fields! for fname, params in data.items(): # If it's the stub marker, ignore it. if fname == "_stub": stub = bool(params) continue elif fname == "Meta": continue elif not params: raise ValueError("Field '%s' on model '%s.%s' has no definition." % (fname, app, name)) elif isinstance(params, string_types): # It's a premade definition string! Let's hope it works... code = params extra_imports = {} else: # If there's only one parameter (backwards compat), make it 3. if len(params) == 1: params = (params[0], [], {}) # There should be 3 parameters. Code is a tuple of (code, what-to-import) if len(params) == 3: code = "SouthFieldClass(%s)" % ", ".join( params[1] + ["%s=%s" % (n, v) for n, v in params[2].items()] ) extra_imports = {"SouthFieldClass": params[0]} else: raise ValueError("Field '%s' on model '%s.%s' has a weird definition length (should be 1 or 3 items)." % (fname, app, name)) try: # Execute it in a probably-correct context. field = self.eval_in_context(code, app, extra_imports) except (__HOLE__, AttributeError, AssertionError, KeyError): # It might rely on other models being around. Add it to the # model for the second pass. failed_fields[fname] = (code, extra_imports) else: fields[fname] = field # Find the app in the Django core, and get its module more_kwds = {} try: app_module = models.get_app(app) more_kwds['__module__'] = app_module.__name__ except ImproperlyConfigured: # The app this belonged to has vanished, but thankfully we can still # make a mock model, so ignore the error. more_kwds['__module__'] = '_south_mock' more_kwds['Meta'] = meta # Make our model fields.update(more_kwds) model = type( str(name), tuple(bases), fields, ) # If this is a stub model, change Objects to a whiny class if stub: model.objects = WhinyManager() # Also, make sure they can't instantiate it model.__init__ = whiny_method else: model.objects = NoDryRunManager(model.objects) if failed_fields: model._failed_fields = failed_fields return model
NameError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/orm.py/_FakeORM.make_model
7,283
def retry_failed_fields(self):
    "Tries to re-evaluate the _failed_fields for each model."
    for modelkey, model in self.models.items():
        app, modelname = modelkey.split(".", 1)
        if hasattr(model, "_failed_fields"):
            for fname, (code, extra_imports) in model._failed_fields.items():
                try:
                    field = self.eval_in_context(code, app, extra_imports)
                except (NameError, AttributeError, AssertionError, __HOLE__) as e:
                    # It's failed again. Complain.
                    raise ValueError("Cannot successfully create field '%s' for model '%s': %s." % (
                        fname, modelname, e
                    ))
                else:
                    # Startup that field.
                    model.add_to_class(fname, field)
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/South-1.0.2/south/orm.py/_FakeORM.retry_failed_fields
7,284
def configKey(self, key, val, fileName = "override", lineno = '<No line>'):
    try:
        option = self._cfg_def[key]
    except __HOLE__:
        if self._ignoreErrors:
            return
        raise ParseError("%s:%s: unknown config item '%s'" % (fileName, lineno, key))
    try:
        if self._keyLimiters and option.name not in self._keyLimiters:
            return
        value = self._cow(key)
        value.updateFromString(val, fileName, lineno)
    except ParseError, msg:
        if not self._ignoreErrors:
            raise ParseError, "%s:%s: %s for configuration item '%s'" \
                              % (fileName, lineno, msg, key)
KeyError
dataset/ETHPy150Open sassoftware/conary/conary/lib/cfg.py/ConfigFile.configKey
7,285
def _openUrl(self, url): oldTimeout = socket.getdefaulttimeout() timeout = 2 socket.setdefaulttimeout(timeout) # Extra headers to send up headers = { 'X-Conary-Version' : constants.version or "UNRELEASED", 'X-Conary-Config-Version' : str(configVersion), } opener = self._getOpener() try: for i in range(4): try: return opener.open(url, headers=headers) except socket.timeout: # CNY-1161 # We double the socket time out after each run; this # should allow very slow links to catch up while # providing some feedback to the user. For now, only # on stderr since logging is not enabled yet. sys.stderr.write("Timeout reading configuration " "file %s; retrying...\n" % url) timeout *= 2 socket.setdefaulttimeout(timeout) continue except (__HOLE__, socket.error), err: if len(err.args) > 1: raise CfgEnvironmentError(url, err.args[1]) else: raise CfgEnvironmentError(url, err.args[0]) except EnvironmentError, err: raise CfgEnvironmentError(err.filename, err.msg) else: # for # URL timed out raise CfgEnvironmentError(url, "socket timeout") finally: socket.setdefaulttimeout(oldTimeout)
IOError
dataset/ETHPy150Open sassoftware/conary/conary/lib/cfg.py/ConfigFile._openUrl
7,286
def _file_is_executable(exe_name): """Platform-independent check if file is executable. Args: exe_name: file name to test. Returns: bool, True if exe_name is executable. """ if os.path.isfile(exe_name) and os.access(exe_name, os.X_OK): if not sys.platform.startswith('win'): # This is sufficient for Linux and Mac OS X, but not for Windows. return True # More information about the PE File Structure and MS-DOS Header can be # found here: https://msdn.microsoft.com/en-us/magazine/cc301805.aspx # and here: https://msdn.microsoft.com/en-us/library/ms809762.aspx # TODO: Get rid of this when a better solution is found. try: with open(exe_name, 'rb') as f: s = f.read(2) return s == 'MZ' except __HOLE__: pass return False
OSError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/go_managedvm.py/_file_is_executable
7,287
def _rmtree(directory):
    try:
        shutil.rmtree(directory)
    except __HOLE__:
        pass
OSError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/go_managedvm.py/_rmtree
7,288
def _run_tool(tool, extra_args): """Run external executable tool. Args: tool: string name of the tool to run. extra_args: additional arguments for tool. Returns: A tuple of the (stdout, stderr) from the process. Raises: BuildError: if tool fails. """ args = [tool] if sys.platform.startswith('win'): args = [tool + '.exe'] args.extend(extra_args) logging.debug('Calling: %s', ' '.join(args)) try: process = safe_subprocess.start_process(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = process.communicate() except __HOLE__ as e: msg = '%s not found.' % args[0] raise go_errors.BuildError('%s\n%s' % (msg, e)) if process.returncode: raise go_errors.BuildError( '(Executed command: %s)\n\n%s\n%s' % (' '.join(args), stdout, stderr)) return stdout, stderr
OSError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/go_managedvm.py/_run_tool
7,289
def test_deprecate_option(self): # we can deprecate non-existent options self.cf.deprecate_option('foo') self.assertTrue(self.cf._is_deprecated('foo')) with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') try: self.cf.get_option('foo') except __HOLE__: pass else: self.fail("Nonexistent option didn't raise KeyError") self.assertEqual(len(w), 1) # should have raised one warning self.assertTrue( 'deprecated' in str(w[-1])) # we get the default message self.cf.register_option('a', 1, 'doc', validator=self.cf.is_int) self.cf.register_option('b.c', 'hullo', 'doc2') self.cf.register_option('foo', 'hullo', 'doc2') self.cf.deprecate_option('a', removal_ver='nifty_ver') with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.cf.get_option('a') self.assertEqual(len(w), 1) # should have raised one warning self.assertTrue( 'eprecated' in str(w[-1])) # we get the default message self.assertTrue( 'nifty_ver' in str(w[-1])) # with the removal_ver quoted self.assertRaises( KeyError, self.cf.deprecate_option, 'a') # can't depr. twice self.cf.deprecate_option('b.c', 'zounds!') with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.cf.get_option('b.c') self.assertEqual(len(w), 1) # should have raised one warning self.assertTrue( 'zounds!' in str(w[-1])) # we get the custom message # test rerouting keys self.cf.register_option('d.a', 'foo', 'doc2') self.cf.register_option('d.dep', 'bar', 'doc2') self.assertEqual(self.cf.get_option('d.a'), 'foo') self.assertEqual(self.cf.get_option('d.dep'), 'bar') self.cf.deprecate_option('d.dep', rkey='d.a') # reroute d.dep to d.a with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.assertEqual(self.cf.get_option('d.dep'), 'foo') self.assertEqual(len(w), 1) # should have raised one warning self.assertTrue( 'eprecated' in str(w[-1])) # we get the custom message with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.cf.set_option('d.dep', 'baz') # should overwrite "d.a" self.assertEqual(len(w), 1) # should have raised one warning self.assertTrue( 'eprecated' in str(w[-1])) # we get the custom message with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') self.assertEqual(self.cf.get_option('d.dep'), 'baz') self.assertEqual(len(w), 1) # should have raised one warning self.assertTrue( 'eprecated' in str(w[-1])) # we get the custom message
KeyError
dataset/ETHPy150Open pydata/pandas/pandas/tests/test_config.py/TestConfig.test_deprecate_option
7,290
@synchronizedDeferred(lock) @deferredAsThread #will catch exceptions def getprime(self): pfh = open(config.DRONED_PRIMES) psize = os.stat(config.DRONED_PRIMES)[6] if (psize % 4) != 0 or psize < 4000: pfh.close() raise AssertionError("primes file is corrupt/too small") try: fcntl.fcntl(pfh.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC) except: pass result = 0 while True: try: r = random.randint(0,(psize - 4) / 4) * 4 pfh.seek(r) p = pfh.read(4) prime = struct.unpack("!L",p)[0] #makes a blocking call from this thread to the reactor #this is done for thread safety sync = synchronizedInThread() trp = sync(self._trackPrime) result = trp(prime) break except __HOLE__: continue pfh.close() return result
AssertionError
dataset/ETHPy150Open OrbitzWorldwide/droned/droned/lib/droned/models/server.py/DroneServer.getprime
7,291
def wrap(self, func):
    """ Wrap :func: to perform aggregation on :func: call.

    Should be called with view instance methods.
    """
    @six.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return self.aggregate()
        except __HOLE__:
            return func(*args, **kwargs)
    return wrapper
KeyError
dataset/ETHPy150Open ramses-tech/nefertari/nefertari/view_helpers.py/ESAggregator.wrap
7,292
def condition(etag_func=None, last_modified_func=None):
    """
    Decorator to support conditional retrieval (or change) for a view
    function.

    The parameters are callables to compute the ETag and last modified time
    for the requested resource, respectively. The callables are passed the
    same parameters as the view itself. The Etag function should return a
    string (or None if the resource doesn't exist), whilst the last_modified
    function should return a datetime object (or None if the resource
    doesn't exist).

    If both parameters are provided, all the preconditions must be met before
    the view is processed.

    This decorator will either pass control to the wrapped view function or
    return an HTTP 304 response (unmodified) or 412 response (preconditions
    failed), depending upon the request method.

    Any behavior marked as "undefined" in the HTTP spec (e.g. If-none-match
    plus If-modified-since headers) will result in the view function being
    called.
    """
    def decorator(func):
        @wraps(func, assigned=available_attrs(func))
        def inner(request, *args, **kwargs):
            # Get HTTP request headers
            if_modified_since = request.META.get("HTTP_IF_MODIFIED_SINCE")
            if if_modified_since:
                if_modified_since = parse_http_date_safe(if_modified_since)
            if_none_match = request.META.get("HTTP_IF_NONE_MATCH")
            if_match = request.META.get("HTTP_IF_MATCH")
            if if_none_match or if_match:
                # There can be more than one ETag in the request, so we
                # consider the list of values.
                try:
                    etags = parse_etags(if_none_match or if_match)
                except __HOLE__:
                    # In case of invalid etag ignore all ETag headers.
                    # Apparently Opera sends invalidly quoted headers at times
                    # (we should be returning a 400 response, but that's a
                    # little extreme) -- this is Django bug #10681.
                    if_none_match = None
                    if_match = None

            # Compute values (if any) for the requested resource.
            if etag_func:
                res_etag = etag_func(request, *args, **kwargs)
            else:
                res_etag = None
            if last_modified_func:
                dt = last_modified_func(request, *args, **kwargs)
                if dt:
                    res_last_modified = timegm(dt.utctimetuple())
                else:
                    res_last_modified = None
            else:
                res_last_modified = None

            response = None
            if not ((if_match and (if_modified_since or if_none_match)) or
                    (if_match and if_none_match)):
                # We only get here if no undefined combinations of headers are
                # specified.
                if ((if_none_match and (res_etag in etags or
                        "*" in etags and res_etag)) and
                        (not if_modified_since or
                            (res_last_modified and if_modified_since and
                             res_last_modified <= if_modified_since))):
                    if request.method in ("GET", "HEAD"):
                        response = HttpResponseNotModified()
                    else:
                        logger.warning('Precondition Failed: %s', request.path,
                            extra={
                                'status_code': 412,
                                'request': request
                            }
                        )
                        response = HttpResponse(status=412)
                elif if_match and ((not res_etag and "*" in etags) or
                        (res_etag and res_etag not in etags)):
                    logger.warning('Precondition Failed: %s', request.path,
                        extra={
                            'status_code': 412,
                            'request': request
                        }
                    )
                    response = HttpResponse(status=412)
                elif (not if_none_match and request.method == "GET" and
                        res_last_modified and if_modified_since and
                        res_last_modified <= if_modified_since):
                    response = HttpResponseNotModified()

            if response is None:
                response = func(request, *args, **kwargs)

            # Set relevant headers on the response if they don't already exist.
            if res_last_modified and not response.has_header('Last-Modified'):
                response['Last-Modified'] = http_date(res_last_modified)
            if res_etag and not response.has_header('ETag'):
                response['ETag'] = quote_etag(res_etag)

            return response
        return inner
    return decorator

# Shortcut decorators for common cases based on ETag or Last-Modified only
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/views/decorators/http.py/condition
7,293
def VerifyPermissions(required_permission, user, permission_type):
    """Verifies a valid user is logged in.

    Args:
      required_permission: permission string from permissions.*.
      user: models.User entity; default current user.
      permission_type: string, one of permission.TYPE_* variables.
    Raises:
      models.AccessDeniedError: there was a permissions issue.
    """
    if not permission_type:
        raise models.AccessDeniedError('permission_type not specified')

    try:
        if not user.HasPerm(required_permission, permission_type=permission_type):
            raise models.AccessDeniedError(
                'User lacks %s permission' % required_permission)
    except __HOLE__:
        raise models.AccessDeniedError(
            'unknown permission_type: %s' % permission_type)
ValueError
dataset/ETHPy150Open google/cauliflowervest/src/cauliflowervest/server/handlers/__init__.py/VerifyPermissions
7,294
def _get_method(self, function_name):
    try:
        return self.http_lib_methods(self.authorized_methods[function_name]['method']).value
    except KeyError:
        raise MethodNotFoundInApiException(function_name)  # From dict method
    except __HOLE__:  # From FixedStringWithValue
        raise HttpMethodNotImplementedException("Http method specified for %s" % function_name)
ValueError
dataset/ETHPy150Open biicode/client/rest/rest_api.py/RestApiClient._get_method
7,295
def _get_pattern(self, function_name):
    try:
        return self.authorized_methods[function_name]['pattern']
    except __HOLE__:
        raise MethodNotFoundInApiException(function_name)  # From dict method
KeyError
dataset/ETHPy150Open biicode/client/rest/rest_api.py/RestApiClient._get_pattern
7,296
def parse(local_cache):
    q = sys.stdin.read()
    URL = 'http://en.wikipedia.org/w/api.php?action=query&list=search&srlimit=50&srprop=wordcount&format=json&srsearch='
    URLraw = 'http://en.wikipedia.org/w/index.php?action=raw&title='

    data_json = False
    if local_cache:
        try:
            cached_data = json.loads(open('wikipedia.py.data').read())
            data_json = cached_data.get('data1', {})
        except __HOLE__:
            cached_data = {'data1': {}, 'data2': {}}
    if not data_json:
        data = urllib2.urlopen(URL + urllib.quote_plus(q)).read()
        data_json = json.loads(data)
        if local_cache:
            cached_data['data1'] = data_json

    records = []
    try:
        search_result = data_json.get("query")
        if not search_result:
            search_result = data_json.get("query-continue", {"search": []})
        for x in search_result["search"]:
            if x['wordcount'] > 20:
                quoted_title = urllib.quote_plus(x['title'].encode('utf8'))
                try:
                    title_data = None
                    if local_cache:
                        title_data = cached_data.get('data2', {}).get(quoted_title)
                    if title_data is None:
                        title_data = urllib2.urlopen(URLraw + quoted_title).read()
                        if local_cache:
                            cached_data.setdefault('data2', {})[quoted_title] = title_data
                except httplib.BadStatusLine:
                    sys.stderr.write('Problem reading %s\n' % (URLraw + quoted_title))
                    continue
                citations = wikitext_to_dict(title_data)
                if citations:
                    for c in citations:
                        c['link'] = [{'url': 'http://en.wikipedia.org/wiki/' + quoted_title}]
                    records.extend(citations)
    except:
        sys.stderr.write(traceback.format_exc())

    sys.stdout.write(json.dumps({'records': records, 'metadata': {}}))
    if local_cache:
        open('wikipedia.py.data', 'w').write(json.dumps(cached_data))
IOError
dataset/ETHPy150Open okfn/bibserver/parserscrapers_plugins/wikipedia.py/parse
7,297
def save(self, force_insert=False, force_update=False, *args, **kwargs):
    if not self.id:
        response = urllib.urlopen(self.external_image.url)
        data = StringIO(response.read())
        im = Image.open(data)

        size = im.size
        ratio = size[1] / size[0]

        if self.width >= size[0]:
            resized = im
        else:
            try:
                resized = im.resize((self.width, int(round(self.width * ratio))), Image.ANTIALIAS)
            except IOError, e:
                if e.message == "cannot read interlaced PNG files":
                    # Ain't nothing can be done until you upgrade PIL to 1.1.7
                    resized = im
                else:
                    raise
        self.width, self.height = resized.size

        try:
            resized.save(self.get_filename(), format='jpeg')
            self.content_type = 'image/jpeg'
        except IOError, e:
            try:
                resized.convert('RGB').save(self.get_filename(), format='jpeg')
                self.content_type = 'image/jpeg'
            except __HOLE__:
                open(self.get_filename(), 'wb').write(data.getvalue())
                self.content_type = response.headers['content-type']

        self.external_image.width = size[0]
        self.external_image.height = size[1]

    super(ExternalImageSized, self).save(force_insert=False, force_update=False, **kwargs)
IOError
dataset/ETHPy150Open mollyproject/mollyproject/molly/external_media/models.py/ExternalImageSized.save
7,298
def delete(self):
    try:
        os.unlink(self.get_filename())
    except __HOLE__:
        # Ignore errors where we're trying to delete a file that's already
        # been deleted
        pass
    super(ExternalImageSized, self).delete()
OSError
dataset/ETHPy150Open mollyproject/mollyproject/molly/external_media/models.py/ExternalImageSized.delete
7,299
def lazy_property(f):
    @property
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        lazy = '_lazy_' + f.__name__
        try:
            return getattr(self, lazy)
        except __HOLE__:
            result = f(self, *args, **kwargs)
            setattr(self, lazy, result)
            return result
    return wrapper
AttributeError
dataset/ETHPy150Open chango/inferno/inferno/lib/lazy_property.py/lazy_property