Column schema of this dump:
    Unnamed: 0  -- int64, values 0 to 10k (row index)
    function    -- string, 79 to 138k characters (Python source containing a masked __HOLE__ exception handler)
    label       -- string, one of 20 classes (the exception type that fills __HOLE__)
    info        -- string, 42 to 261 characters (provenance path within the ETHPy150Open corpus)
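For orientation, a minimal sketch of how such a dump could be loaded and inspected. It assumes the four columns above are available as a CSV export; the filename exception_dataset.csv and the use of pandas are assumptions, not part of the original dump.

    import pandas as pd

    # Hypothetical filename for a CSV export of this dump (assumption).
    df = pd.read_csv("exception_dataset.csv")

    # Each record pairs a Python function whose except clause is masked with
    # __HOLE__ against the exception class ("label") that belongs in that slot.
    row = df.iloc[0]
    print(row["label"])            # e.g. "ImportError"
    print(row["function"][:200])   # start of the masked source code
    print(row["info"])             # provenance path inside the ETHPy150Open corpus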
3,000
def _color_palette(cmap, n_colors):
    import matplotlib.pyplot as plt
    from matplotlib.colors import ListedColormap
    colors_i = np.linspace(0, 1., n_colors)
    if isinstance(cmap, (list, tuple)):
        # we have a list of colors
        try:
            # first try to turn it into a palette with seaborn
            from seaborn.apionly import color_palette
            pal = color_palette(cmap, n_colors=n_colors)
        except __HOLE__:
            # if that fails, use matplotlib
            # in this case, is there any difference between mpl and seaborn?
            cmap = ListedColormap(cmap, N=n_colors)
            pal = cmap(colors_i)
    elif isinstance(cmap, basestring):
        # we have some sort of named palette
        try:
            # first try to turn it into a palette with seaborn
            from seaborn.apionly import color_palette
            pal = color_palette(cmap, n_colors=n_colors)
        except (ImportError, ValueError):
            # ValueError is raised when seaborn doesn't like a colormap
            # (e.g. jet). If that fails, use matplotlib
            try:
                # is this a matplotlib cmap?
                cmap = plt.get_cmap(cmap)
            except ValueError:
                # or maybe we just got a single color as a string
                cmap = ListedColormap([cmap], N=n_colors)
            pal = cmap(colors_i)
    else:
        # cmap better be a LinearSegmentedColormap (e.g. viridis)
        pal = cmap(colors_i)
    return pal
ImportError
dataset/ETHPy150Open pydata/xarray/xarray/plot/utils.py/_color_palette
3,001
def info(name):
    '''
    Return user information

    CLI Example:

    .. code-block:: bash

        salt '*' user.info root
    '''
    ret = {}
    try:
        data = pwd.getpwnam(name)
        ret['gid'] = data.pw_gid
        ret['groups'] = list_groups(name)
        ret['home'] = data.pw_dir
        ret['name'] = data.pw_name
        ret['passwd'] = data.pw_passwd
        ret['shell'] = data.pw_shell
        ret['uid'] = data.pw_uid
        # Put GECOS info into a list
        gecos_field = data.pw_gecos.split(',', 3)
        # Assign empty strings for any unspecified GECOS fields
        while len(gecos_field) < 4:
            gecos_field.append('')
        ret['fullname'] = gecos_field[0]
        ret['roomnumber'] = gecos_field[1]
        ret['workphone'] = gecos_field[2]
        ret['homephone'] = gecos_field[3]
    except __HOLE__:
        return {}
    return ret
KeyError
dataset/ETHPy150Open saltstack/salt/salt/modules/solaris_user.py/info
3,002
def varToXML(v, name):
    """ single variable or dictionary to xml representation """
    type, typeName, resolver = getType(v)

    try:
        if hasattr(v, '__class__'):
            try:
                cName = str(v.__class__)
                if cName.find('.') != -1:
                    cName = cName.split('.')[-1]
                elif cName.find("'") != -1:
                    # does not have '.' (could be something like <type 'int'>)
                    cName = cName[cName.index("'") + 1:]
                if cName.endswith("'>"):
                    cName = cName[:-2]
            except:
                cName = str(v.__class__)
            value = '%s: %s' % (cName, v)
        else:
            value = str(v)
    except:
        try:
            value = repr(v)
        except:
            value = 'Unable to get repr for %s' % v.__class__

    xml = '<var name="%s" type="%s"' % (makeValidXmlValue(name), makeValidXmlValue(typeName))

    if value:
        # cannot be too big... communication may not handle it.
        if len(value) > MAXIMUM_VARIABLE_REPRESENTATION_SIZE:
            value = value[0:MAXIMUM_VARIABLE_REPRESENTATION_SIZE]
            value += '...'

        # fix to work with unicode values
        try:
            if not IS_PY3K:
                if isinstance(value, unicode):
                    value = value.encode('utf-8')
            else:
                if isinstance(value, bytes):
                    value = value.encode('utf-8')
        except __HOLE__:
            # in java, unicode is a function
            pass

        xmlValue = ' value="%s"' % (makeValidXmlValue(quote(value, '/>_= \t')))
    else:
        xmlValue = ''

    if resolver is not None:
        xmlCont = ' isContainer="True"'
    else:
        xmlCont = ''

    return ''.join((xml, xmlValue, xmlCont, ' />\n'))
TypeError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/common/diagnostic/pydevDebug/pydevd_vars.py/varToXML
3,003
def parse_args(args):
    usage = ('usage: %prog service [service params] [general params] '
             '[resource|param, ...] method [param, ...]')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('--verbose', '-v', dest='verbose',
                      action='count', default=0)
    parser.add_option('--executor', '-x', dest='executor', action='store')

    if len(args) < 2:
        parser.error('not enough arguments')

    service = args[1]

    # hack to detect if the first argument is -h or --help, since we don't call
    # parse_args before adding the service-specific parameters
    if service in ('-h', '--help'):
        parser.print_help()
        parser.exit()

    try:
        module = __import__('libsaas.services', globals(), locals(), [service])
        module = getattr(module, service)
    except (__HOLE__, AttributeError):
        parser.error('no such service %s' % service)

    members = inspect.getmembers(module, lambda obj: inspect.isclass(obj) and
                                 issubclass(obj, base.Resource))
    if not members:
        parser.error('no such service %s' % service)

    _, klass = members[0]

    # got the service class, inspect its __init__ method to extract the keyword
    # arguments
    argspec = inspect.getargspec(klass.__init__)

    for argname, default in zip(reversed(argspec.args),
                                chain(reversed(argspec.defaults or ()),
                                      repeat(None))):
        if argname == 'self':
            continue
        optargs = {'dest': argname}
        if default is not None:
            optargs.update({'default': default})
        parser.add_option('--%s' % argname, **optargs)

    options, args = parser.parse_args(args)

    if options.executor:
        try:
            module_name = '{0}_executor'.format(options.executor)
            module = __import__('libsaas.executors', globals(), locals(),
                                [module_name])
            module = getattr(module, module_name)
        except ImportError:
            parser.error('no such executor %s' % options.executor)
        module.use()

    level = logging.ERROR
    if options.verbose > 1:
        level = logging.DEBUG
    elif options.verbose > 0:
        level = logging.INFO

    logging.basicConfig(level=level)

    del options.verbose
    del options.executor

    if len(args) < 2:
        parser.error('not enough arguments')

    # instantiate the class, get the resource
    instance = klass(**options.__dict__)

    action, args = extract_action(instance, parser, args[2:])
    pprint.pprint(action(*list(map(try_interpret_arg, args))))
ImportError
dataset/ETHPy150Open ducksboard/libsaas/libsaas/scripts/saas.py/parse_args
3,004
def get_objects(self, data=None, size=1024):
    """ Get sentences but return list of NMEA objects """
    str_data = self._read(data=data, size=size)
    nmea_objects = []
    for nmea_str in str_data:
        try:
            nmea_ob = self._get_type(nmea_str)()
        except __HOLE__:
            # NMEA sentence was not recognised
            continue
        nmea_ob.parse(nmea_str)
        nmea_objects.append(nmea_ob)

    return nmea_objects
TypeError
dataset/ETHPy150Open FishPi/FishPi-POCV---Command---Control/external/pynmea/streamer.py/NMEAStream.get_objects
3,005
def _split(self, data, separator=None):
    """ Take some data and split up based on the notion that a sentence
        looks something like:
        $x,y,z or $x,y,z*ab

        separator is for cases where there is something strange or
        non-standard as a separator between sentences.
        Without this, there is no real way to tell whether:
        $x,y,zSTUFF
        is legal or if STUFF should be stripped.
    """
    sentences = data.split('$')
    clean_sentences = []
    for item in sentences:
        cleaned_item = item.rstrip()
        if separator:
            cleaned_item = cleaned_item.rstrip(separator)
        if '*' in cleaned_item.split(',')[-1]:
            # There must be a checksum. Remove any trailing fluff:
            try:
                first, checksum = cleaned_item.split('*')
            except __HOLE__:
                # Some GPS data recorders have been shown to output
                # run-together sentences (no leading $).
                # In this case, ignore error and continue, discarding the
                # erroneous data.
                # TODO: try and fix the data.
                continue
            cleaned_item = '*'.join([first, checksum[:2]])
        if cleaned_item:
            clean_sentences.append(cleaned_item)

    return clean_sentences
ValueError
dataset/ETHPy150Open FishPi/FishPi-POCV---Command---Control/external/pynmea/streamer.py/NMEAStream._split
3,006
@classmethod
def construct_from_string(cls, string):
    """ attempt to construct this type from a string, raise a TypeError if
    it's not possible """
    try:
        return cls(unit=string)
    except __HOLE__:
        raise TypeError("could not construct DatetimeTZDtype")
ValueError
dataset/ETHPy150Open pydata/pandas/pandas/types/dtypes.py/DatetimeTZDtype.construct_from_string
3,007
def addcause(self, cause):
    try:
        self.score += self.causesdict[cause]
        self.causes.append(cause)
    except __HOLE__:
        print "ERROR: Unknown cause [%s]" % (cause)
KeyError
dataset/ETHPy150Open tatanus/SPF/spf/core/webprofiler.py/indicator.addcause
3,008
def default_blas_ldflags(): global numpy try: if (hasattr(numpy.distutils, '__config__') and numpy.distutils.__config__): # If the old private interface is available use it as it # don't print information to the user. blas_info = numpy.distutils.__config__.blas_opt_info else: # We do this import only here, as in some setup, if we # just import theano and exit, with the import at global # scope, we get this error at exit: "Exception TypeError: # "'NoneType' object is not callable" in <bound method # Popen.__del__ of <subprocess.Popen object at 0x21359d0>> # ignored" # This happen with Python 2.7.3 |EPD 7.3-1 and numpy 1.8.1 import numpy.distutils.system_info # noqa # We need to catch warnings as in some cases NumPy print # stuff that we don't want the user to see. # I'm not able to remove all printed stuff with warnings.catch_warnings(record=True): numpy.distutils.system_info.system_info.verbosity = 0 blas_info = numpy.distutils.system_info.get_info("blas_opt") # If we are in a EPD installation, mkl is available if "EPD" in sys.version: use_unix_epd = True if sys.platform == 'win32': return ' '.join( ['-L%s' % os.path.join(sys.prefix, "Scripts")] + # Why on Windows, the library used are not the # same as what is in # blas_info['libraries']? ['-l%s' % l for l in ["mk2_core", "mk2_intel_thread", "mk2_rt"]]) elif sys.platform == 'darwin': # The env variable is needed to link with mkl new_path = os.path.join(sys.prefix, "lib") v = os.getenv("DYLD_FALLBACK_LIBRARY_PATH", None) if v is not None: # Explicit version could be replaced by a symbolic # link called 'Current' created by EPD installer # This will resolve symbolic links v = os.path.realpath(v) # The python __import__ don't seam to take into account # the new env variable "DYLD_FALLBACK_LIBRARY_PATH" # when we set with os.environ['...'] = X or os.putenv() # So we warn the user and tell him what todo. if v is None or new_path not in v.split(":"): _logger.warning( "The environment variable " "'DYLD_FALLBACK_LIBRARY_PATH' does not contain " "the '%s' path in its value. This will make " "Theano use a slow version of BLAS. Update " "'DYLD_FALLBACK_LIBRARY_PATH' to contain the " "said value, this will disable this warning." % new_path) use_unix_epd = False if use_unix_epd: return ' '.join( ['-L%s' % os.path.join(sys.prefix, "lib")] + ['-l%s' % l for l in blas_info['libraries']]) # Canopy if "Canopy" in sys.prefix: subsub = 'lib' if sys.platform == 'win32': subsub = 'Scripts' lib_path = os.path.join(sys.base_prefix, subsub) if not os.path.exists(lib_path): # Old logic to find the path. I don't think we still # need it, but I don't have the time to test all # installation configuration. So I keep this as a fall # back in case the current expectation don't work. # This old logic don't work when multiple version of # Canopy is installed. 
p = os.path.join(sys.base_prefix, "..", "..", "appdata") assert os.path.exists(p), "Canopy changed the location of MKL" lib_paths = os.listdir(p) # Try to remove subdir that can't contain MKL for sub in lib_paths: if not os.path.exists(os.path.join(p, sub, subsub)): lib_paths.remove(sub) assert len(lib_paths) == 1, ( "Unexpected case when looking for Canopy MKL libraries", p, lib_paths, [os.listdir(os.path.join(p, sub)) for sub in lib_paths]) lib_path = os.path.join(p, lib_paths[0], subsub) assert os.path.exists(lib_path), "Canopy changed the location of MKL" if sys.platform == "linux2" or sys.platform == "darwin": return ' '.join( ['-L%s' % lib_path] + ['-l%s' % l for l in blas_info['libraries']]) elif sys.platform == 'win32': return ' '.join( ['-L%s' % lib_path] + # Why on Windows, the library used are not the # same as what is in blas_info['libraries']? ['-l%s' % l for l in ["mk2_core", "mk2_intel_thread", "mk2_rt"]]) # Anaconda if "Anaconda" in sys.version and sys.platform == "win32": # If the "mkl-service" conda package (available # through Python package "mkl") is installed and # importable, then the libraries (installed by conda # package "mkl-rt") are actually available. Using # "conda install mkl" will install both, as well as # optimized versions of numpy and scipy. try: import mkl # noqa except ImportError as e: _logger.info('Conda mkl is not available: %s', e) else: # This branch is executed if no exception was raised lib_path = os.path.join(sys.prefix, 'DLLs') flags = ['-L%s' % lib_path] flags += ['-l%s' % l for l in ["mkl_core", "mkl_intel_thread", "mkl_rt"]] res = try_blas_flag(flags) if res: return res ret = ( # TODO: the Gemm op below should separate the # -L and -l arguments into the two callbacks # that CLinker uses for that stuff. for now, # we just pass the whole ldflags as the -l # options part. ['-L%s' % l for l in blas_info.get('library_dirs', [])] + ['-l%s' % l for l in blas_info.get('libraries', [])] + blas_info.get('extra_link_args', [])) # For some very strange reason, we need to specify -lm twice # to get mkl to link correctly. I have no idea why. if any('mkl' in fl for fl in ret): ret.extend(['-lm', '-lm']) res = try_blas_flag(ret) if res: return res # Some environment don't have the lib dir in LD_LIBRARY_PATH. # So add it. ret.extend(['-Wl,-rpath,' + l for l in blas_info.get('library_dirs', [])]) res = try_blas_flag(ret) if res: return res # Try to add the anaconda lib directory to runtime loading of lib. # This fix some case with Anaconda 2.3 on Linux. # Newer Anaconda still have this problem but only have # Continuum in sys.version. if (("Anaconda" in sys.version or "Continuum" in sys.version) and "linux" in sys.platform): lib_path = os.path.join(sys.prefix, 'lib') ret.append('-Wl,-rpath,' + lib_path) res = try_blas_flag(ret) if res: return res except __HOLE__: pass # Even if we could not detect what was used for numpy, or if these # libraries are not found, most Linux systems have a libblas.so # readily available. We try to see if that's the case, rather # than disable blas. To test it correctly, we must load a program. # Otherwise, there could be problem in the LD_LIBRARY_PATH. return try_blas_flag(['-lblas'])
KeyError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/configdefaults.py/default_blas_ldflags
3,009
def filter_compiledir(path):
    # Expand '~' in path
    path = os.path.expanduser(path)
    # Turn path into the 'real' path. This ensures that:
    # 1. There is no relative path, which would fail e.g. when trying to
    #    import modules from the compile dir.
    # 2. The path is stable w.r.t. e.g. symlinks (which makes it easier
    #    to re-use compiled modules).
    path = os.path.realpath(path)
    if os.access(path, os.F_OK):  # Do it exist?
        if not os.access(path, os.R_OK | os.W_OK | os.X_OK):
            # If it exist we need read, write and listing access
            raise ValueError(
                "compiledir '%s' exists but you don't have read, write"
                " or listing permissions." % path)
    else:
        try:
            os.makedirs(path, 0o770)  # read-write-execute for user and group
        except OSError as e:
            # Maybe another parallel execution of theano was trying to create
            # the same directory at the same time.
            if e.errno != errno.EEXIST:
                raise ValueError(
                    "Unable to create the compiledir directory"
                    " '%s'. Check the permissions." % path)

    # PROBLEM: sometimes the initial approach based on
    # os.system('touch') returned -1 for an unknown reason; the
    # alternate approach here worked in all cases... it was weird.
    # No error should happen as we checked the permissions.
    init_file = os.path.join(path, '__init__.py')
    if not os.path.exists(init_file):
        try:
            open(init_file, 'w').close()
        except __HOLE__ as e:
            if os.path.exists(init_file):
                pass  # has already been created
            else:
                e.args += ('%s exist? %s' % (path, os.path.exists(path)),)
                raise
    return path
IOError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/configdefaults.py/filter_compiledir
3,010
def __init__(self, url, **config):
    self.url = url
    parsed = urlparse(self.url)
    if parsed.scheme == "bolt":
        self.host = parsed.hostname
        self.port = parsed.port
    else:
        raise ProtocolError("Unsupported URI scheme: '%s' in url: '%s'. Currently only supported 'bolt'." % (parsed.scheme, url))
    self.config = config
    self.max_pool_size = config.get("max_pool_size", DEFAULT_MAX_POOL_SIZE)
    self.session_pool = deque()
    try:
        self.encrypted = encrypted = config["encrypted"]
    except __HOLE__:
        _warn_about_insecure_default()
        self.encrypted = encrypted = ENCRYPTED_DEFAULT
    self.trust = trust = config.get("trust", TRUST_DEFAULT)
    if encrypted:
        if not SSL_AVAILABLE:
            raise RuntimeError("Bolt over TLS is only available in Python 2.7.9+ and Python 3.3+")
        ssl_context = SSLContext(PROTOCOL_SSLv23)
        ssl_context.options |= OP_NO_SSLv2
        if trust >= TRUST_SIGNED_CERTIFICATES:
            ssl_context.verify_mode = CERT_REQUIRED
        ssl_context.set_default_verify_paths()
        self.ssl_context = ssl_context
    else:
        self.ssl_context = None
KeyError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/packages/neo4j/v1/session.py/Driver.__init__
3,011
def session(self):
    """ Create a new session based on the graph database details
    specified within this driver:

        >>> from neo4j.v1 import GraphDatabase
        >>> driver = GraphDatabase.driver("bolt://localhost")
        >>> session = driver.session()
    """
    session = None
    done = False
    while not done:
        try:
            session = self.session_pool.pop()
        except __HOLE__:
            session = Session(self)
            done = True
        else:
            if session.healthy:
                session.connection.reset()
            done = session.healthy
    return session
IndexError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/packages/neo4j/v1/session.py/Driver.session
3,012
def index(self, key):
    """ Return the index of the given key """
    try:
        return self._keys.index(key)
    except __HOLE__:
        raise KeyError(key)
ValueError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/packages/neo4j/v1/session.py/Record.index
3,013
def __eq__(self, other):
    try:
        return self._keys == tuple(other.keys()) and self._values == tuple(other.values())
    except __HOLE__:
        return False
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/packages/neo4j/v1/session.py/Record.__eq__
3,014
def get_url(self, secure=False, longurl=False):
    """
    Return the url
    :param secure: bool - To use https
    :param longurl: bool - On local, reference the local path with the domain
                    ie: http://site.com/files/object.png otherwise /files/object.png
    :return: str
    """
    driver_name = self.driver.name.lower()
    try:
        # Currently only Cloudfiles and Local supports it
        url = self._obj.get_cdn_url()
        if "local" in driver_name:
            url = url_for(SERVER_ENDPOINT,
                          object_name=self.name,
                          _external=longurl)
    except __HOLE__ as e:
        object_path = '%s/%s' % (self.container.name, self.name)
        if 's3' in driver_name:
            base_url = 'http://%s' % self.driver.connection.host
            url = urljoin(base_url, object_path)
        elif 'google' in driver_name:
            url = urljoin('http://storage.googleapis.com', object_path)
        elif 'azure' in driver_name:
            base_url = ('http://%s.blob.core.windows.net' % self.driver.key)
            url = urljoin(base_url, object_path)
        else:
            raise e

    if secure:
        if 'cloudfiles' in driver_name:
            parsed_url = urlparse(url)
            if parsed_url.scheme != 'http':
                return url
            split_netloc = parsed_url.netloc.split('.')
            split_netloc[1] = 'ssl'
            url = urlunparse(
                'https',
                '.'.join(split_netloc),
                parsed_url.path,
                parsed_url.params, parsed_url.query,
                parsed_url.fragment
            )
        if ('s3' in driver_name or
                'google' in driver_name or
                'azure' in driver_name):
            url = url.replace('http://', 'https://')
    return url
NotImplementedError
dataset/ETHPy150Open mardix/flask-cloudy/flask_cloudy.py/Object.get_url
3,015
def authenticate(self, request):
    client = cas.get_client()  # Returns a CAS server client
    try:
        auth_header_field = request.META["HTTP_AUTHORIZATION"]
        auth_token = cas.parse_auth_header(auth_header_field)
    except (cas.CasTokenError, __HOLE__):
        return None  # If no token in header, then this method is not applicable

    # Found a token; query CAS for the associated user id
    try:
        cas_auth_response = client.profile(auth_token)
    except cas.CasHTTPError:
        raise exceptions.NotAuthenticated(_('User provided an invalid OAuth2 access token'))

    if cas_auth_response.authenticated is False:
        raise exceptions.NotAuthenticated(_('CAS server failed to authenticate this token'))

    user_id = cas_auth_response.user
    user = User.load(user_id)
    if user is None:
        raise exceptions.AuthenticationFailed(_('Could not find the user associated with this token'))

    check_user(user)
    return user, cas_auth_response
KeyError
dataset/ETHPy150Open CenterForOpenScience/osf.io/api/base/authentication/drf.py/OSFCASAuthentication.authenticate
3,016
def __getitem__(self, name):
    try:
        return self.relevant[name]
    except __HOLE__:
        return ()
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/core/relevance.py/Relevance.__getitem__
3,017
def is_relevant(self, var_of_interest, varname):
    """ Returns True if a variable is relevant to a particular variable
    of interest.

    Args
    ----
    var_of_interest : str
        Name of a variable of interest (either a parameter or a constraint
        or objective output, depending on mode.)

    varname : str
        Name of some other variable in the model.

    Returns
    -------
    bool: True if varname is in the relevant path of var_of_interest
    """
    try:
        return varname in self.relevant[var_of_interest]
    except __HOLE__:
        return True
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/core/relevance.py/Relevance.is_relevant
3,018
def test_point():
    g = Point(0, 0)
    try:
        assert hash(g)
        return False
    except __HOLE__:
        return True
TypeError
dataset/ETHPy150Open Toblerity/Shapely/tests/test_hash.py/test_point
3,019
def test_multipoint():
    g = MultiPoint([(0, 0)])
    try:
        assert hash(g)
        return False
    except __HOLE__:
        return True
TypeError
dataset/ETHPy150Open Toblerity/Shapely/tests/test_hash.py/test_multipoint
3,020
def test_polygon():
    g = Point(0, 0).buffer(1.0)
    try:
        assert hash(g)
        return False
    except __HOLE__:
        return True
TypeError
dataset/ETHPy150Open Toblerity/Shapely/tests/test_hash.py/test_polygon
3,021
def test_collection():
    g = GeometryCollection([Point(0, 0)])
    try:
        assert hash(g)
        return False
    except __HOLE__:
        return True
TypeError
dataset/ETHPy150Open Toblerity/Shapely/tests/test_hash.py/test_collection
3,022
def check_permissions(permission_type, user, project):
    """
    Here we check permission types, and see if a user has proper perms.
    If a user has "edit" permissions on a project, they pretty much have
    carte blanche to do as they please, so we kick that back as true.
    Otherwise we go more fine grained and check their view and comment
    permissions
    """
    try:
        groups = user.groups.all()
    except __HOLE__:
        groups = None

    if user in project.users_can_edit.all():
        return True
    for x in groups:
        if x in project.groups_can_edit.all():
            return True
    if permission_type == "edit":
        if project.allow_anon_editing:
            return True
    if permission_type == "view":
        if user in project.users_can_view.all():
            return True
        for x in groups:
            if x in project.groups_can_view.all():
                return True
        if project.allow_anon_viewing is True:
            return True
    if permission_type == "comment":
        if user in project.users_can_comment.all():
            return True
        for x in groups:
            if x in project.groups_can_comment.all():
                return True
        if project.allow_anon_comment is True:
            return True

    ## If we make it all the way here, we haven't returned True yet.
    ## Thus we'll kick back false on the fall through
    return False
AttributeError
dataset/ETHPy150Open f4nt/djtracker/djtracker/utils.py/check_permissions
3,023
def check_perms(request, project, user=None):
    """
    Here we check permission types, and see if a user has proper perms.
    If a user has "edit" permissions on a project, they pretty much have
    carte blanche to do as they please, so we kick that back as true.
    Otherwise we go more fine grained and check their view and comment
    permissions
    """
    can_view = False
    can_edit = False
    can_comment = False
    if request is not None:
        user = request.user
    else:
        user = user
    try:
        groups = user.groups.all()
    except AttributeError:
        groups = None
    try:
        if user.is_authenticated():
            if project.allow_authed_viewing:
                can_view = True
            if project.allow_authed_editing:
                can_edit = True
                can_view = True
                can_comment = True
            if project.allow_authed_comment:
                can_comment = True
                can_view = True
        if user in project.users_can_view.all():
            can_view = True
        if user in project.users_can_edit.all():
            can_edit = True
            can_comment = True
            can_view = True
        if user in project.users_can_comment.all():
            can_comment = True
            can_view = True
        for x in groups:
            if x in project.groups_can_view.all():
                can_view = True
            if x in project.groups_can_edit.all():
                can_edit = True
                can_comment = True
                can_view = True
            if x in project.users_can_comment.all():
                can_comment = True
                can_view = True
    except __HOLE__:
        pass

    if project.allow_anon_viewing:
        can_view = True
    if project.allow_anon_editing:
        can_edit = True
    if project.allow_anon_comment:
        can_comment = True

    return can_view, can_edit, can_comment

# ported snipped from another project
AttributeError
dataset/ETHPy150Open f4nt/djtracker/djtracker/utils.py/check_perms
3,024
def render(self, part, context):
    """ render a mail part """
    try:
        return self.mailparts[part].nodelist.render(context)
    except __HOLE__:
        return None
KeyError
dataset/ETHPy150Open f4nt/djtracker/djtracker/utils.py/MailTemplate.render
3,025
@lazy_import
def pickle():
    try:
        import cPickle as pickle
    except __HOLE__:
        import pickle
    return pickle
ImportError
dataset/ETHPy150Open cloudmatrix/esky/esky/sudo/__init__.py/pickle
3,026
@lazy_import
def threading():
    try:
        import threading
    except __HOLE__:
        threading = None
    return threading
ImportError
dataset/ETHPy150Open cloudmatrix/esky/esky/sudo/__init__.py/threading
3,027
@lazy_import
def _impl():
    try:
        from esky.sudo import sudo_osx
        return sudo_osx
    except __HOLE__:
        from esky.sudo import sudo_unix
        return sudo_unix
ImportError
dataset/ETHPy150Open cloudmatrix/esky/esky/sudo/__init__.py/_impl
3,028
def __init__(self,target):
    # Reflect the 'name' attribute if it has one, but don't worry
    # if not.  This helps SudoProxy be re-used on other classes.
    try:
        self.name = target.name
    except __HOLE__:
        pass
    self.target = target
    self.closed = False
    self.pipe = None
AttributeError
dataset/ETHPy150Open cloudmatrix/esky/esky/sudo/__init__.py/SudoProxy.__init__
3,029
def _get_sudo_argtypes(obj,methname):
    """Get the argtypes list for the given method.

    This searches the base classes of obj if the given method is not
    declared allowed_from_sudo, so that people don't have to constantly
    re-apply the decorator.
    """
    for base in _get_mro(obj):
        try:
            argtypes = base.__dict__[methname]._esky_sudo_argtypes
        except (KeyError,__HOLE__):
            pass
        else:
            return argtypes
    return None
AttributeError
dataset/ETHPy150Open cloudmatrix/esky/esky/sudo/__init__.py/_get_sudo_argtypes
3,030
def _get_sudo_iterator(obj,methname):
    """Get the iterator flag for the given method.

    This searches the base classes of obj if the given method is not
    declared allowed_from_sudo, so that people don't have to constantly
    re-apply the decorator.
    """
    for base in _get_mro(obj):
        try:
            iterator = base.__dict__[methname]._esky_sudo_iterator
        except (KeyError,__HOLE__):
            pass
        else:
            return iterator
    return False
AttributeError
dataset/ETHPy150Open cloudmatrix/esky/esky/sudo/__init__.py/_get_sudo_iterator
3,031
def _get_mro(obj):
    """Get the method resolution order for an object.

    In other words, get the list of classes what are used to look up
    methods on the given object, in the order in which they'll be consulted.
    """
    try:
        return obj.__class__.__mro__
    except __HOLE__:
        return _get_oldstyle_mro(obj.__class__,set())
AttributeError
dataset/ETHPy150Open cloudmatrix/esky/esky/sudo/__init__.py/_get_mro
3,032
@base.get('/nodep/<host>/<int:port>')
def node_panel(request, host, port):
    node = models.node.get_by_host_port(host, port)
    if node is None:
        return base.not_found()
    detail = {}
    try:
        detail = file_ipc.read_details()['nodes'][
            '%s:%d' % (node.host, node.port)]
    except (__HOLE__, ValueError, KeyError):
        pass
    return request.render(
        'node/panel.html', node=node, detail=detail,
        max_mem_limit=NODE_MAX_MEM,
        stats_enabled=stats.client is not None)
IOError
dataset/ETHPy150Open HunanTV/redis-ctl/handlers/nodes.py/node_panel
3,033
def render(self, name, value, attrs=None):
    try:
        year_val, month_val, day_val = value.year, value.month, value.day
    except __HOLE__:
        year_val = month_val = day_val = None
        if isinstance(value, basestring):
            match = RE_DATE.match(value)
            if match:
                year_val, month_val, day_val = [int(v) for v in match.groups()]

    output = []

    if 'id' in self.attrs:
        id_ = self.attrs['id']
    else:
        id_ = 'id_%s' % name

    month_choices = MONTHS.items()
    if not (self.required and value):
        month_choices.append(self.none_value)
    month_choices.sort()
    local_attrs = self.build_attrs(id=self.month_field % id_)
    s = Select(choices=month_choices)
    select_html = s.render(self.month_field % name, month_val, local_attrs)
    output.append(select_html)

    day_choices = [(i, i) for i in range(1, 32)]
    if not (self.required and value):
        day_choices.insert(0, self.none_value)
    local_attrs['id'] = self.day_field % id_
    s = Select(choices=day_choices)
    select_html = s.render(self.day_field % name, day_val, local_attrs)
    output.append(select_html)

    year_choices = [(i, i) for i in self.years]
    if not (self.required and value):
        year_choices.insert(0, self.none_value)
    local_attrs['id'] = self.year_field % id_
    s = Select(choices=year_choices)
    select_html = s.render(self.year_field % name, year_val, local_attrs)
    output.append(select_html)

    return mark_safe(u'\n'.join(output))
AttributeError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/forms/extras/widgets.py/SelectDateWidget.render
3,034
def crop(config): """ This function takes an OpenVAS XML with results and return Vulnerability info. :param config: Config instance :type config: Config :raises: TypeError, InvalidFormat """ import copy if not isinstance(config, Config): raise TypeError("Expected Config, got '%s' instead" % type(config)) try: from xml.etree import cElementTree as ET except __HOLE__: from xml.etree import ElementTree as ET output_file = config.output_file file_path = config.input_files scope_hosts = config.scope excluded_hosts = config.excluded # Check format with open(file_path, "rU") as f: first_line = f.readline() if not first_line.startswith("<report ") or \ not all(True for x in ("extension", "format_id", "content_type") if x in first_line): raise IOError("Invalid report format") # Read input file root = ET.parse(file_path).getroot() report = root.getchildren()[0] # -------------------------------------------------------------------------- # Create clone # -------------------------------------------------------------------------- root_clone = ET.Element(root.tag) root_clone.attrib=root.attrib report_clone = ET.SubElement(root_clone, "report") report_clone.attrib = report.attrib # Copy all elements for child in report.getchildren(): # Add only these tags that not contain target information if child.tag not in ("host_start", "host_end", "host", "ports", "results"): tag = copy.deepcopy(child) # Add to clone report_clone.append(tag) # -------------------------------------------------------------------------- # Add results tag # -------------------------------------------------------------------------- results_tag = ET.Element("results") results_tag.attrib = root.find(".//results").attrib for vuln in root.findall(".//results/result"): # -------------------------------------------------------------------------- # Target info # -------------------------------------------------------------------------- vuln_host = vuln.find(".//host").text # Apply filter to include hosts or not if vuln_host in excluded_hosts: continue if scope_hosts is not None: if vuln_host not in scope_hosts: continue # Add to clone results_tag.append(vuln) # Add to clone report_clone.append(results_tag) # -------------------------------------------------------------------------- # Add ports tag # -------------------------------------------------------------------------- ports_tag = ET.Element("ports") ports_tag.attrib = root.find(".//ports").attrib for port in root.findall(".//ports/port"): # -------------------------------------------------------------------------- # Target info # -------------------------------------------------------------------------- vuln_host = port.find(".//host").text # Apply filter to include hosts or not if vuln_host in excluded_hosts: continue if scope_hosts is not None: if vuln_host not in scope_hosts: continue # Add to clone ports_tag.append(port) # Add to clone report_clone.append(ports_tag) # -------------------------------------------------------------------------- # Add host tag # -------------------------------------------------------------------------- for host in root.findall(".//report/host"): # -------------------------------------------------------------------------- # Target info # -------------------------------------------------------------------------- vuln_host = host.find(".//ip").text # Apply filter to include hosts or not if vuln_host in excluded_hosts: continue if scope_hosts is not None: if vuln_host not in scope_hosts: continue #host_tag = ET.Element("host", attrib=host.attrib, 
text=host.text) # Add to clone report_clone.append(host) # -------------------------------------------------------------------------- # Add host_start tag # -------------------------------------------------------------------------- for host_start in root.findall(".//report/host_start"): # -------------------------------------------------------------------------- # Target info # -------------------------------------------------------------------------- vuln_host = host_start.find(".//host").text # Apply filter to include hosts or not if vuln_host in excluded_hosts: continue if scope_hosts is not None: if vuln_host not in scope_hosts: continue # host_tag = ET.Element("host", attrib=host_start.attrib) # Add to clone report_clone.append(host_start) # -------------------------------------------------------------------------- # Add host_end tag # -------------------------------------------------------------------------- for host_end in root.findall(".//report/host_end"): # -------------------------------------------------------------------------- # Target info # -------------------------------------------------------------------------- vuln_host = host_end.find(".//host").text # Apply filter to include hosts or not if vuln_host in excluded_hosts: continue if scope_hosts is not None: if vuln_host not in scope_hosts: continue # host_tag = ET.Element("host", attrib=host_start.attrib) # Add to clone report_clone.append(host_end) # Save to file tree = ET.ElementTree(root_clone) # Un IO Memory and delete first line. tree.write(output_file, encoding="UTF-8", xml_declaration=False) # ----------------------------------------------------------------------
ImportError
dataset/ETHPy150Open cr0hn/openvas_to_report/openvas_to_report/api.py/crop
3,035
def __init__(self, casemode=0, countpos=0, dirsonly=False, exclude="", filesonly=False, hidden=False, ignorecase=False, interactive=False, keepext=False, mediamode=False, noclobber=False, recursive=False, regex=False, remdups=False, remext=False, remnonwords=False, remsymbols=False, simulate=False, spacemode=0, quiet=False, verbosity=1, matchpattern="", replacepattern="", recursivedepth=0): # Universal options: try: self._casemode = int(casemode) # 0=lc, 1=uc, 2=flfw, 3=flew except TypeError: self._casemode = 0 try: self._countpos = int(countpos) # Adds numerical index at position. except TypeError: self._countpos = 0 try: self._spacemode = int(spacemode) # 0=su, 1=sh, 2=sd, 3=ds, 4=hs, 5=us except __HOLE__: self.spacemode = 0 self._dirsonly = dirsonly # Only edit directory names. self._filesonly = False if dirsonly else filesonly # Only file names. self._hidden = hidden # Look at hidden files and directories, too. self._ignorecase = ignorecase # Case sensitivity. self._interactive = interactive # Confirm before overwriting. self._keepext = keepext # Don't modify remext. self._mediamode = mediamode # Mode to sanitize NTFS-filenames/dirnames. self._noclobber = noclobber # Don't overwrite anything. self._recursive = recursive # Look for files recursively self._regex = regex # Use regular expressions instead of glob/fnmatch. self._remdups = remdups # Remove remdups. self._remext = remext # Remove all remext. self._remnonwords = remnonwords # Only allow wordchars (\w) self._remsymbols = remsymbols # Normalize remsymbols (ñé becomes ne). self._simulate = simulate # Simulate renaming and dump result to stdout. # Initialize GUI options. self._recursivedepth = recursivedepth self._excludeedit = "" if not exclude else exclude self._matchedit = "" if not matchpattern else matchpattern self._replaceedit = "" if not replacepattern else replacepattern self._autostop = False # Automatically stop execution on rename error. self._countbase = 1 # Base to start counting from. self._countfill = True # 9->10: 9 becomes 09. 99->100: 99 becomes 099. self._countpreedit = "" # String that is prepended to the counter. self._countstep = 1 # Increment per count iteration. self._countsufedit = "" # String that is appended to the counter. self._deletecheck = False # Whether to delete a specified range. self._deleteend = 1 # End index of deletion sequence. self._deletestart = 0 # Start index of deletion sequence. self._filteredit = "" self._insertcheck = False # Whether to apply an insertion. self._insertedit = "" # The inserted text/string. self._insertpos = 0 # Position/Index to insert at. self._manualmirror = False # Mirror manual rename to all targets. self._matchcheck = True # Whether to apply source/target patterns. self._matchexcludecheck = False self._matchfiltercheck = False self._matchreplacecheck = True self._casecheck = True if isinstance(casemode, str) else False self._countcheck = True if isinstance(countpos, str) else False removelist = [remdups, remext, remnonwords, remsymbols] self._removecheck = True if any(removelist) else False self._spacecheck = True if isinstance(spacemode, str) else False self.stopupdate = False self.stopcommit = False self.includes = set() self.excludes = set() self.recursiveincludes = set() self.recursiveexcludes = set() self.configdir = helpers.get_configdir() # Create the logger. helpers.configure_logger(verbosity, quiet, self.configdir) self.history = [] # History of commited operations, used to undo them. 
# Match everything inside one set of braces: self.bracerx = re.compile("(?<=\{)(.*?)(?=\})")
TypeError
dataset/ETHPy150Open mikar/demimove/demimove/fileops.py/FileOps.__init__
3,036
def undo(self, actions=None):
    if actions is None:
        try:
            actions = self.history.pop()
        except __HOLE__:
            log.error("History list is empty.")
            return

    for i in actions:
        log.debug("{} -> {}.".format(i[1], i[0]))
        if self.simulate:
            continue
        try:
            os.rename(i[1], i[0])
        except Exception as e:
            log.error("Rename Error: {} -> {} ({}).".format(i[1], i[0], e))
            if self.autostop:
                break

    log.info("Undo complete.")
IndexError
dataset/ETHPy150Open mikar/demimove/demimove/fileops.py/FileOps.undo
3,037
def modify_previews(self, previews):
    if self.countcheck:
        lenp, base, step = len(previews), self.countbase, self.countstep
        countlen = len(str(lenp))
        countrange = xrange(base, lenp * step + 1, step)
        if self.countfill:
            count = (str(i).rjust(countlen, "0") for i in countrange)
        else:
            count = (str(i) for i in countrange)

    modified = []
    for preview in previews:
        name = preview[1]
        if not self.remext and not self.keepext:
            name += preview[2]
        if self.casecheck:
            name = self.apply_case(name)
        if self.spacecheck:
            name = self.apply_space(name)
        if self.deletecheck:
            name = self.apply_delete(name)
        if self.removecheck:
            name = self.apply_remove(name)
        if self.insertcheck:
            name = self.apply_insert(name)
        if self.matchcheck:
            name = self.apply_replace(name)
        if self.countcheck:
            try:
                name = self.apply_count(name, count.next())
            except __HOLE__:
                pass
        if self.keepext:
            name += preview[2]

        preview = ((preview[0], preview[1] + preview[2]), name)
        modified.append(preview)

    return modified
StopIteration
dataset/ETHPy150Open mikar/demimove/demimove/fileops.py/FileOps.modify_previews
3,038
def apply_remove(self, s):
    if not self.removecheck:
        return s
    if self.remnonwords:
        s = re.sub("\W", "", s, flags=self.ignorecase)
    if self.remsymbols:
        allowed = string.ascii_letters + string.digits + " .-_+"  # []()
        for i in ["utf-8", "latin1"]:
            try:
                # Convert bytestring to unicode and back.
                s = "".join(c for c in normalize("NFKD", s.decode(i))
                            if c in allowed).encode("utf-8")
                break
            except __HOLE__:
                pass
        else:
            log.debug("Symbols: Could not decode {}.".format(s))

    if self.remdups:
        s = re.sub(r"([-_ .])\1+", r"\1", s, flags=self.ignorecase)
    return s
UnicodeDecodeError
dataset/ETHPy150Open mikar/demimove/demimove/fileops.py/FileOps.apply_remove
3,039
def get_tests(config={}):
    tests = []
    tests += list_test_cases(DSATest)
    try:
        from Crypto.PublicKey import _fastmath
        tests += list_test_cases(DSAFastMathTest)
    except __HOLE__:
        from distutils.sysconfig import get_config_var
        import inspect
        _fm_path = os.path.normpath(os.path.dirname(os.path.abspath(
            inspect.getfile(inspect.currentframe())))
            +"/../../PublicKey/_fastmath"+get_config_var("SO"))
        if os.path.exists(_fm_path):
            raise ImportError("While the _fastmath module exists, importing "+
                "it failed. This may point to the gmp or mpir shared library "+
                "not being in the path. _fastmath was found at "+_fm_path)
    tests += list_test_cases(DSASlowMathTest)
    return tests
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/PublicKey/test_DSA.py/get_tests
3,040
def set_charset(self, charset):
    """Set the charset of the payload to a given character set.

    charset can be a Charset instance, a string naming a character set, or
    None.  If it is a string it will be converted to a Charset instance.
    If charset is None, the charset parameter will be removed from the
    Content-Type field.  Anything else will generate a TypeError.

    The message will be assumed to be of type text/* encoded with
    charset.input_charset.  It will be converted to charset.output_charset
    and encoded properly, if needed, when generating the plain text
    representation of the message.  MIME headers (MIME-Version,
    Content-Type, Content-Transfer-Encoding) will be added as needed.
    """
    if charset is None:
        self.del_param('charset')
        self._charset = None
        return
    if isinstance(charset, basestring):
        charset = email.charset.Charset(charset)
    if not isinstance(charset, email.charset.Charset):
        raise TypeError(charset)
    # BAW: should we accept strings that can serve as arguments to the
    # Charset constructor?
    self._charset = charset
    if not self.has_key('MIME-Version'):
        self.add_header('MIME-Version', '1.0')
    if not self.has_key('Content-Type'):
        self.add_header('Content-Type', 'text/plain',
                        charset=charset.get_output_charset())
    else:
        self.set_param('charset', charset.get_output_charset())
    if str(charset) <> charset.get_output_charset():
        self._payload = charset.body_encode(self._payload)
    if not self.has_key('Content-Transfer-Encoding'):
        cte = charset.get_body_encoding()
        try:
            cte(self)
        except __HOLE__:
            self._payload = charset.body_encode(self._payload)
            self.add_header('Content-Transfer-Encoding', cte)
TypeError
dataset/ETHPy150Open babble/babble/include/jython/Lib/email/message.py/Message.set_charset
3,041
def _get_params_preserve(self, failobj, header):
    # Like get_params() but preserves the quoting of values.  BAW:
    # should this be part of the public interface?
    missing = object()
    value = self.get(header, missing)
    if value is missing:
        return failobj
    params = []
    for p in _parseparam(';' + value):
        try:
            name, val = p.split('=', 1)
            name = name.strip()
            val = val.strip()
        except __HOLE__:
            # Must have been a bare attribute
            name = p.strip()
            val = ''
        params.append((name, val))
    params = utils.decode_params(params)
    return params
ValueError
dataset/ETHPy150Open babble/babble/include/jython/Lib/email/message.py/Message._get_params_preserve
3,042
@cache_readonly
def theoretical_quantiles(self):
    try:
        return self.dist.ppf(self.theoretical_percentiles)
    except __HOLE__:
        msg = '%s requires more parameters to ' \
              'compute ppf'.format(self.dist.name,)
        raise TypeError(msg)
    except:
        msg = 'failed to compute the ppf of {0}'.format(self.dist.name,)
        raise
TypeError
dataset/ETHPy150Open statsmodels/statsmodels/statsmodels/graphics/gofplots.py/ProbPlot.theoretical_quantiles
3,043
def _get_dns_driver():
    global DNS_DRIVER
    if DNS_DRIVER:
        return DNS_DRIVER
    if not cfg.CONF.external_dns_driver:
        return
    try:
        DNS_DRIVER = driver.ExternalDNSService.get_instance()
        LOG.debug("External DNS driver loaded: %s",
                  cfg.CONF.external_dns_driver)
        return DNS_DRIVER
    except __HOLE__:
        LOG.exception(_LE("ImportError exception occurred while loading "
                          "the external DNS service driver"))
        raise dns.ExternalDNSDriverNotFound(
            driver=cfg.CONF.external_dns_driver)
ImportError
dataset/ETHPy150Open openstack/neutron/neutron/plugins/ml2/extensions/dns_integration.py/_get_dns_driver
3,044
def check_custom_authorization_check_importable(app_configs, **kwargs):
    errors = []
    authorization_check = hijack_settings.HIJACK_AUTHORIZATION_CHECK
    try:
        if authorization_check != staff_member_required:
            import_string(authorization_check)
    except __HOLE__:
        errors.append(
            Error(
                'Setting HIJACK_AUTHORIZATION_CHECK cannot be imported',
                hint=None,
                obj=authorization_check,
                id='hijack.E002',
            )
        )
    return errors
ImportError
dataset/ETHPy150Open arteria/django-hijack/hijack/checks.py/check_custom_authorization_check_importable
3,045
def check_hijack_decorator_importable(app_configs, **kwargs):
    errors = []
    decorator = hijack_settings.HIJACK_DECORATOR
    try:
        if decorator != 'django.contrib.admin.views.decorators.staff_member_required':
            import_string(decorator)
    except __HOLE__:
        errors.append(
            Error(
                'Setting HIJACK_DECORATOR cannot be imported',
                hint=None,
                obj=decorator,
                id='hijack.E003',
            )
        )
    return errors
ImportError
dataset/ETHPy150Open arteria/django-hijack/hijack/checks.py/check_hijack_decorator_importable
3,046
def render(self, context):
    try:
        output = self.filter_expression.resolve(context)
        output = localize(output)
        output = force_unicode(output)
    except TemplateSyntaxError, e:
        if not hasattr(e, 'source'):
            e.source = self.source
        raise
    except __HOLE__:
        return ''
    if (context.autoescape and not isinstance(output, SafeData)) or isinstance(output, EscapeData):
        return escape(output)
    else:
        return output
UnicodeDecodeError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/_internal/django/template/debug.py/DebugVariableNode.render
3,047
def configure(factory=None, **kwargs):
    if not factory:
        factory = os.environ.get(ENVIRONMENT_VARIABLE)

    if not factory:
        raise ImportError(
            'Settings could not be imported because configure was called'
            ' without arguments and the environment variable %s is'
            ' undefined.' % ENVIRONMENT_VARIABLE)

    if '.' in factory:
        factory_module, factory_name = factory.rsplit('.', 1)
        try:
            mod = import_module(factory_module)
            factory_obj = getattr(mod, factory_name)
        except (ImportError, __HOLE__) as err:
            raise SettingsFactoryDoesNotExist(
                'The object "%s" could not be found (Is it on sys.path?):'
                ' %s' % (factory, err))

        settings_obj = factory_obj()
        settings_dict = dict((k, getattr(settings_obj, k))
                             for k in dir(settings_obj)
                             if not str(k).startswith('_'))

        if 'SETTINGS_MODULE' not in settings_dict:
            settings_dict['SETTINGS_MODULE'] = (
                '%s_%s_unrolledcbsettings' % (factory_module, factory_name))

        # Create an importer for handling imports of our constructed settings
        # module.
        sys.meta_path.insert(
            0,
            SettingsImporter(settings_dict['SETTINGS_MODULE'], settings_dict)
        )

        os.environ[DJANGO_SETTINGS_MODULE] = settings_dict['SETTINGS_MODULE']

        return mod, settings_obj
    else:
        raise InvalidSettingsFactory(
            '%s is not a valid settings factory. Please provide something of'
            ' the form `path.to.MySettingsFactory`' % factory)
AttributeError
dataset/ETHPy150Open matthewwithanm/django-classbasedsettings/cbsettings/__init__.py/configure
3,048
def loads(self, response):
    try:
        return json.loads(response.content.decode('utf-8'))['data']
    except (UnicodeDecodeError, __HOLE__):
        return response.content
ValueError
dataset/ETHPy150Open swayf/proxmoxer/proxmoxer/backends/https.py/JsonSerializer.loads
3,049
@classmethod def _handle_creation_inputs(cls, *args, **kwargs): """Return the number of rows, cols and flat matrix elements. Examples ======== >>> from sympy import Matrix, I Matrix can be constructed as follows: * from a nested list of iterables >>> Matrix( ((1, 2+I), (3, 4)) ) Matrix([ [1, 2 + I], [3, 4]]) * from un-nested iterable (interpreted as a column) >>> Matrix( [1, 2] ) Matrix([ [1], [2]]) * from un-nested iterable with dimensions >>> Matrix(1, 2, [1, 2] ) Matrix([[1, 2]]) * from no arguments (a 0 x 0 matrix) >>> Matrix() Matrix(0, 0, []) * from a rule >>> Matrix(2, 2, lambda i, j: i/(j + 1) ) Matrix([ [0, 0], [1, 1/2]]) """ from sympy.matrices.sparse import SparseMatrix flat_list = None if len(args) == 1: # Matrix(SparseMatrix(...)) if isinstance(args[0], SparseMatrix): return args[0].rows, args[0].cols, flatten(args[0].tolist()) # Matrix(Matrix(...)) elif isinstance(args[0], MatrixBase): return args[0].rows, args[0].cols, args[0]._mat # Matrix(MatrixSymbol('X', 2, 2)) elif isinstance(args[0], Basic) and args[0].is_Matrix: return args[0].rows, args[0].cols, args[0].as_explicit()._mat # Matrix(numpy.ones((2, 2))) elif hasattr(args[0], "__array__"): # NumPy array or matrix or some other object that implements # __array__. So let's first use this method to get a # numpy.array() and then make a python list out of it. arr = args[0].__array__() if len(arr.shape) == 2: rows, cols = arr.shape[0], arr.shape[1] flat_list = [cls._sympify(i) for i in arr.ravel()] return rows, cols, flat_list elif len(arr.shape) == 1: rows, cols = arr.shape[0], 1 flat_list = [S.Zero]*rows for i in range(len(arr)): flat_list[i] = cls._sympify(arr[i]) return rows, cols, flat_list else: raise NotImplementedError( "SymPy supports just 1D and 2D matrices") # Matrix([1, 2, 3]) or Matrix([[1, 2], [3, 4]]) elif is_sequence(args[0])\ and not isinstance(args[0], DeferredVector): in_mat = [] ncol = set() for row in args[0]: if isinstance(row, MatrixBase): in_mat.extend(row.tolist()) if row.cols or row.rows: # only pay attention if it's not 0x0 ncol.add(row.cols) else: in_mat.append(row) try: ncol.add(len(row)) except __HOLE__: ncol.add(1) if len(ncol) > 1: raise ValueError("Got rows of variable lengths: %s" % sorted(list(ncol))) cols = ncol.pop() if ncol else 0 rows = len(in_mat) if cols else 0 if rows: if not is_sequence(in_mat[0]): cols = 1 flat_list = [cls._sympify(i) for i in in_mat] return rows, cols, flat_list flat_list = [] for j in range(rows): for i in range(cols): flat_list.append(cls._sympify(in_mat[j][i])) elif len(args) == 3: rows = as_int(args[0]) cols = as_int(args[1]) # Matrix(2, 2, lambda i, j: i+j) if len(args) == 3 and isinstance(args[2], collections.Callable): op = args[2] flat_list = [] for i in range(rows): flat_list.extend( [cls._sympify(op(cls._sympify(i), cls._sympify(j))) for j in range(cols)]) # Matrix(2, 2, [1, 2, 3, 4]) elif len(args) == 3 and is_sequence(args[2]): flat_list = args[2] if len(flat_list) != rows*cols: raise ValueError('List length should be equal to rows*columns') flat_list = [cls._sympify(i) for i in flat_list] # Matrix() elif len(args) == 0: # Empty Matrix rows = cols = 0 flat_list = [] if flat_list is None: raise TypeError("Data type not understood") return rows, cols, flat_list
TypeError
dataset/ETHPy150Open sympy/sympy/sympy/matrices/matrices.py/MatrixBase._handle_creation_inputs
3,050
def norm(self, ord=None): """Return the Norm of a Matrix or Vector. In the simplest case this is the geometric size of the vector Other norms can be specified by the ord parameter ===== ============================ ========================== ord norm for matrices norm for vectors ===== ============================ ========================== None Frobenius norm 2-norm 'fro' Frobenius norm - does not exist inf -- max(abs(x)) -inf -- min(abs(x)) 1 -- as below -1 -- as below 2 2-norm (largest sing. value) as below -2 smallest singular value as below other - does not exist sum(abs(x)**ord)**(1./ord) ===== ============================ ========================== Examples ======== >>> from sympy import Matrix, Symbol, trigsimp, cos, sin, oo >>> x = Symbol('x', real=True) >>> v = Matrix([cos(x), sin(x)]) >>> trigsimp( v.norm() ) 1 >>> v.norm(10) (sin(x)**10 + cos(x)**10)**(1/10) >>> A = Matrix([[1, 1], [1, 1]]) >>> A.norm(2)# Spectral norm (max of |Ax|/|x| under 2-vector-norm) 2 >>> A.norm(-2) # Inverse spectral norm (smallest singular value) 0 >>> A.norm() # Frobenius Norm 2 >>> Matrix([1, -2]).norm(oo) 2 >>> Matrix([-1, 2]).norm(-oo) 1 See Also ======== normalized """ # Row or Column Vector Norms vals = list(self.values()) or [0] if self.rows == 1 or self.cols == 1: if ord == 2 or ord is None: # Common case sqrt(<x, x>) return sqrt(Add(*(abs(i)**2 for i in vals))) elif ord == 1: # sum(abs(x)) return Add(*(abs(i) for i in vals)) elif ord == S.Infinity: # max(abs(x)) return Max(*[abs(i) for i in vals]) elif ord == S.NegativeInfinity: # min(abs(x)) return Min(*[abs(i) for i in vals]) # Otherwise generalize the 2-norm, Sum(x_i**ord)**(1/ord) # Note that while useful this is not mathematically a norm try: return Pow(Add(*(abs(i)**ord for i in vals)), S(1) / ord) except (NotImplementedError, __HOLE__): raise ValueError("Expected order to be Number, Symbol, oo") # Matrix Norms else: if ord == 2: # Spectral Norm # Maximum singular value return Max(*self.singular_values()) elif ord == -2: # Minimum singular value return Min(*self.singular_values()) elif (ord is None or isinstance(ord, string_types) and ord.lower() in ['f', 'fro', 'frobenius', 'vector']): # Reshape as vector and send back to norm function return self.vec().norm(ord=2) else: raise NotImplementedError("Matrix Norms under development")
TypeError
dataset/ETHPy150Open sympy/sympy/sympy/matrices/matrices.py/MatrixBase.norm
3,051
def pinv(self):
    """Calculate the Moore-Penrose pseudoinverse of the matrix.

    The Moore-Penrose pseudoinverse exists and is unique for any matrix.
    If the matrix is invertible, the pseudoinverse is the same as the
    inverse.

    Examples
    ========

    >>> from sympy import Matrix
    >>> Matrix([[1, 2, 3], [4, 5, 6]]).pinv()
    Matrix([
    [-17/18,  4/9],
    [  -1/9,  1/9],
    [ 13/18, -2/9]])

    See Also
    ========

    inv
    pinv_solve

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Moore-Penrose_pseudoinverse

    """
    A = self
    AH = self.H
    # Trivial case: pseudoinverse of all-zero matrix is its transpose.
    if A.is_zero:
        return AH
    try:
        if self.rows >= self.cols:
            return (AH * A).inv() * AH
        else:
            return AH * (A * AH).inv()
    except __HOLE__:
        # Matrix is not full rank, so A*AH cannot be inverted.
        raise NotImplementedError('Rank-deficient matrices are not yet '
                                  'supported.')
ValueError
dataset/ETHPy150Open sympy/sympy/sympy/matrices/matrices.py/MatrixBase.pinv
3,052
def a2idx(j, n=None):
    """Return integer after making positive and validating against n."""
    if type(j) is not int:
        try:
            j = j.__index__()
        except __HOLE__:
            raise IndexError("Invalid index a[%r]" % (j, ))
    if n is not None:
        if j < 0:
            j += n
        if not (j >= 0 and j < n):
            raise IndexError("Index out of range: a[%s]" % (j, ))
    return int(j)
AttributeError
dataset/ETHPy150Open sympy/sympy/sympy/matrices/matrices.py/a2idx
3,053
def converter (function, klass=None):
    def _integer (value):
        if klass is None:
            return function(value)
        try:
            return klass(value)
        except __HOLE__:
            return function(value)
    return _integer
ValueError
dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/bgp/message/update/nlri/flow.py/converter
3,054
def __init__(self, settings, key_bindings): super(CaffeVisApp, self).__init__(settings, key_bindings) print 'Got settings', settings self.settings = settings self.bindings = key_bindings self._net_channel_swap = (2,1,0) self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))]) self._range_scale = 1.0 # not needed; image already in [0,255] # Set the mode to CPU or GPU. Note: in the latest Caffe # versions, there is one Caffe object *per thread*, so the # mode must be set per thread! Here we set the mode for the # main thread; it is also separately set in CaffeProcThread. sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python')) import caffe if settings.caffevis_mode_gpu: caffe.set_mode_gpu() print 'CaffeVisApp mode (in main thread): GPU' else: caffe.set_mode_cpu() print 'CaffeVisApp mode (in main thread): CPU' self.net = caffe.Classifier( settings.caffevis_deploy_prototxt, settings.caffevis_network_weights, mean = None, # Set to None for now, assign later # self._data_mean, channel_swap = self._net_channel_swap, raw_scale = self._range_scale, ) if isinstance(settings.caffevis_data_mean, basestring): # If the mean is given as a filename, load the file try: self._data_mean = np.load(settings.caffevis_data_mean) except __HOLE__: print '\n\nCound not load mean file:', settings.caffevis_data_mean print 'Ensure that the values in settings.py point to a valid model weights file, network' print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n' print '$ cd models/caffenet-yos/' print '$ ./fetch.sh\n\n' raise input_shape = self.net.blobs[self.net.inputs[0]].data.shape[-2:] # e.g. 227x227 # Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256) excess_h = self._data_mean.shape[1] - input_shape[0] excess_w = self._data_mean.shape[2] - input_shape[1] assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape) self._data_mean = self._data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]), (excess_w/2):(excess_w/2+input_shape[1])] elif settings.caffevis_data_mean is None: self._data_mean = None else: # The mean has been given as a value or a tuple of values self._data_mean = np.array(settings.caffevis_data_mean) # Promote to shape C,1,1 while len(self._data_mean.shape) < 1: self._data_mean = np.expand_dims(self._data_mean, -1) #if not isinstance(self._data_mean, tuple): # # If given as int/float: promote to tuple # self._data_mean = tuple(self._data_mean) if self._data_mean is not None: self.net.transformer.set_mean(self.net.inputs[0], self._data_mean) check_force_backward_true(settings.caffevis_deploy_prototxt) self.labels = None if self.settings.caffevis_labels: self.labels = read_label_file(self.settings.caffevis_labels) self.proc_thread = None self.jpgvis_thread = None self.handled_frames = 0 if settings.caffevis_jpg_cache_size < 10*1024**2: raise Exception('caffevis_jpg_cache_size must be at least 10MB for normal operation.') self.img_cache = FIFOLimitedArrayCache(settings.caffevis_jpg_cache_size) self._populate_net_layer_info()
IOError
dataset/ETHPy150Open yosinski/deep-visualization-toolbox/caffevis/app.py/CaffeVisApp.__init__
3,055
def _draw_layer_pane(self, pane): '''Returns the data shown in highres format, b01c order.''' if self.state.layers_show_back: layer_dat_3D = self.net.blobs[self.state.layer].diff[0] else: layer_dat_3D = self.net.blobs[self.state.layer].data[0] # Promote FC layers with shape (n) to have shape (n,1,1) if len(layer_dat_3D.shape) == 1: layer_dat_3D = layer_dat_3D[:,np.newaxis,np.newaxis] n_tiles = layer_dat_3D.shape[0] tile_rows,tile_cols = self.net_layer_info[self.state.layer]['tiles_rc'] display_3D_highres = None if self.state.pattern_mode: # Show desired patterns loaded from disk load_layer = self.state.layer if self.settings.caffevis_jpgvis_remap and self.state.layer in self.settings.caffevis_jpgvis_remap: load_layer = self.settings.caffevis_jpgvis_remap[self.state.layer] if self.settings.caffevis_jpgvis_layers and load_layer in self.settings.caffevis_jpgvis_layers: jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir, 'regularized_opt', load_layer, 'whole_layer.jpg') # Get highres version #cache_before = str(self.img_cache) display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None) #else: # display_3D_highres = None if display_3D_highres is None: try: with WithTimer('CaffeVisApp:load_sprite_image', quiet = self.debug_level < 1): display_3D_highres = load_square_sprite_image(jpg_path, n_sprites = n_tiles) except __HOLE__: # File does not exist, so just display disabled. pass else: self.img_cache.set((jpg_path, 'whole'), display_3D_highres) #cache_after = str(self.img_cache) #print 'Cache was / is:\n %s\n %s' % (cache_before, cache_after) if display_3D_highres is not None: # Get lowres version, maybe. Assume we want at least one pixel for selection border. row_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[1]) / (pane.data.shape[0] / tile_rows - 2))) col_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[2]) / (pane.data.shape[1] / tile_cols - 2))) ds = max(row_downsamp_factor, col_downsamp_factor) if ds > 1: #print 'Downsampling by', ds display_3D = display_3D_highres[:,::ds,::ds,:] else: display_3D = display_3D_highres else: display_3D = layer_dat_3D * 0 # nothing to show else: # Show data from network (activations or diffs) if self.state.layers_show_back: back_what_to_disp = self.get_back_what_to_disp() if back_what_to_disp == 'disabled': layer_dat_3D_normalized = np.tile(self.settings.window_background, layer_dat_3D.shape + (1,)) elif back_what_to_disp == 'stale': layer_dat_3D_normalized = np.tile(self.settings.stale_background, layer_dat_3D.shape + (1,)) else: layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D, boost_indiv = self.state.layer_boost_indiv, boost_gamma = self.state.layer_boost_gamma, neg_pos_colors = ((1,0,0), (0,1,0))) else: layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D, boost_indiv = self.state.layer_boost_indiv, boost_gamma = self.state.layer_boost_gamma) #print ' ===layer_dat_3D_normalized.shape', layer_dat_3D_normalized.shape, 'layer_dat_3D_normalized dtype', layer_dat_3D_normalized.dtype, 'range', layer_dat_3D_normalized.min(), layer_dat_3D_normalized.max() display_3D = layer_dat_3D_normalized # Convert to float if necessary: display_3D = ensure_float01(display_3D) # Upsample gray -> color if necessary # e.g. (1000,32,32) -> (1000,32,32,3) if len(display_3D.shape) == 3: display_3D = display_3D[:,:,:,np.newaxis] if display_3D.shape[3] == 1: display_3D = np.tile(display_3D, (1, 1, 1, 3)) # Upsample unit length tiles to give a more sane tile / highlight ratio # e.g. 
(1000,1,1,3) -> (1000,3,3,3) if display_3D.shape[1] == 1: display_3D = np.tile(display_3D, (1, 3, 3, 1)) if self.state.layers_show_back and not self.state.pattern_mode: padval = self.settings.caffevis_layer_clr_back_background else: padval = self.settings.window_background highlights = [None] * n_tiles with self.state.lock: if self.state.cursor_area == 'bottom': highlights[self.state.selected_unit] = self.settings.caffevis_layer_clr_cursor # in [0,1] range if self.state.backprop_selection_frozen and self.state.layer == self.state.backprop_layer: highlights[self.state.backprop_unit] = self.settings.caffevis_layer_clr_back_sel # in [0,1] range _, display_2D = tile_images_make_tiles(display_3D, hw = (tile_rows,tile_cols), padval = padval, highlights = highlights) if display_3D_highres is None: display_3D_highres = display_3D # Display pane based on layers_pane_zoom_mode state_layers_pane_zoom_mode = self.state.layers_pane_zoom_mode assert state_layers_pane_zoom_mode in (0,1,2) if state_layers_pane_zoom_mode == 0: # Mode 0: normal display (activations or patterns) display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape) elif state_layers_pane_zoom_mode == 1: # Mode 1: zoomed selection unit_data = display_3D_highres[self.state.selected_unit] display_2D_resize = ensure_uint255_and_resize_to_fit(unit_data, pane.data.shape) else: # Mode 2: zoomed backprop pane display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape) * 0 pane.data[:] = to_255(self.settings.window_background) pane.data[0:display_2D_resize.shape[0], 0:display_2D_resize.shape[1], :] = display_2D_resize if self.settings.caffevis_label_layers and self.state.layer in self.settings.caffevis_label_layers and self.labels and self.state.cursor_area == 'bottom': # Display label annotation atop layers pane (e.g. for fc8/prob) defaults = {'face': getattr(cv2, self.settings.caffevis_label_face), 'fsize': self.settings.caffevis_label_fsize, 'clr': to_255(self.settings.caffevis_label_clr), 'thick': self.settings.caffevis_label_thick} loc_base = self.settings.caffevis_label_loc[::-1] # Reverse to OpenCV c,r order lines = [FormattedString(self.labels[self.state.selected_unit], defaults)] cv2_typeset_text(pane.data, lines, loc_base) return display_3D_highres
IOError
dataset/ETHPy150Open yosinski/deep-visualization-toolbox/caffevis/app.py/CaffeVisApp._draw_layer_pane
3,056
def test_collectionannot(self): 'Test building an AnnotationDB from file' from pygr import seqdb, cnestedlist, sqlgraph hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18') # BUILD ANNOTATION DATABASE FOR REFSEQ EXONS exon_slices = Collection( filename=os.path.join(self.path, 'refGene_exonAnnot_hg18.cdb'), intKeys=True, mode='cr', writeback=False) exon_db = seqdb.AnnotationDB(exon_slices, hg18, sliceAttrDict=dict(id=0, exon_id=1, orientation=2, gene_id=3, start=4, stop=5)) msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_exonAnnot_hg18'), 'w', pairwiseMode=True, bidirectional=False) for lines in open(os.path.join(testInputDir, 'refGene_exonAnnot%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines(): row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER exon_slices[row[1]] = row exon = exon_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON msa.addAnnotation(exon) # SAVE IT TO GENOME MAPPING exon_db.clear_cache() # not really necessary; cache should autoGC # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS exon_slices.close() msa.build() # FINALIZE GENOME ALIGNMENT INDEXES exon_db.__doc__ = 'Exon Annotation Database for hg18' pygr.Data.addResource('TEST.Annotation.hg18.exons', exon_db) msa.__doc__ = 'NLMSA Exon for hg18' pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.exons', msa) exon_schema = pygr.Data.ManyToManyRelation(hg18, exon_db, bindAttrs=('exon1', )) exon_schema.__doc__ = 'Exon Schema for hg18' pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.exons', exon_schema) # BUILD ANNOTATION DATABASE FOR REFSEQ SPLICES splice_slices = Collection( filename=os.path.join(self.path, 'refGene_spliceAnnot_hg18.cdb'), intKeys=True, mode='cr', writeback=False) splice_db = seqdb.AnnotationDB(splice_slices, hg18, sliceAttrDict=dict(id=0, splice_id=1, orientation=2, gene_id=3, start=4, stop=5)) msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_spliceAnnot_hg18'), 'w', pairwiseMode=True, bidirectional=False) for lines in open(os.path.join(testInputDir, 'refGene_spliceAnnot%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines(): row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER splice_slices[row[1]] = row # GET THE ANNOTATION OBJECT FOR THIS EXON splice = splice_db[row[1]] msa.addAnnotation(splice) # SAVE IT TO GENOME MAPPING splice_db.clear_cache() # not really necessary; cache should autoGC # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS splice_slices.close() msa.build() # FINALIZE GENOME ALIGNMENT INDEXES splice_db.__doc__ = 'Splice Annotation Database for hg18' pygr.Data.addResource('TEST.Annotation.hg18.splices', splice_db) msa.__doc__ = 'NLMSA Splice for hg18' pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.splices', msa) splice_schema = pygr.Data.ManyToManyRelation(hg18, splice_db, bindAttrs=('splice1', )) splice_schema.__doc__ = 'Splice Schema for hg18' pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.splices', splice_schema) # BUILD ANNOTATION DATABASE FOR REFSEQ EXONS cds_slices = Collection( filename=os.path.join(self.path, 'refGene_cdsAnnot_hg18.cdb'), intKeys=True, mode='cr', writeback=False) cds_db = seqdb.AnnotationDB(cds_slices, hg18, sliceAttrDict=dict(id=0, cds_id=1, orientation=2, gene_id=3, start=4, stop=5)) msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_cdsAnnot_hg18'), 'w', pairwiseMode=True, bidirectional=False) for lines in open(os.path.join(testInputDir, 
'refGene_cdsAnnot%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines(): row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER cds_slices[row[1]] = row cds = cds_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON msa.addAnnotation(cds) # SAVE IT TO GENOME MAPPING cds_db.clear_cache() # not really necessary; cache should autoGC # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS cds_slices.close() msa.build() # FINALIZE GENOME ALIGNMENT INDEXES cds_db.__doc__ = 'CDS Annotation Database for hg18' pygr.Data.addResource('TEST.Annotation.hg18.cdss', cds_db) msa.__doc__ = 'NLMSA CDS for hg18' pygr.Data.addResource('TEST.Annotation.NLMSA.hg18.cdss', msa) cds_schema = pygr.Data.ManyToManyRelation(hg18, cds_db, bindAttrs=('cds1', )) cds_schema.__doc__ = 'CDS Schema for hg18' pygr.Data.addSchema('TEST.Annotation.NLMSA.hg18.cdss', cds_schema) # BUILD ANNOTATION DATABASE FOR MOST CONSERVED ELEMENTS FROM UCSC ucsc_slices = Collection( filename=os.path.join(self.path, 'phastConsElements28way_hg18.cdb'), intKeys=True, mode='cr', writeback=False) ucsc_db = seqdb.AnnotationDB(ucsc_slices, hg18, sliceAttrDict=dict(id=0, ucsc_id=1, orientation=2, gene_id=3, start=4, stop=5)) msa = cnestedlist.NLMSA(os.path.join(self.path, 'phastConsElements28way_hg18'), 'w', pairwiseMode=True, bidirectional=False) for lines in open(os.path.join(testInputDir, 'phastConsElements28way%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines(): row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER ucsc_slices[row[1]] = row ucsc = ucsc_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON msa.addAnnotation(ucsc) # SAVE IT TO GENOME MAPPING ucsc_db.clear_cache() # not really necessary; cache should autoGC # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS ucsc_slices.close() msa.build() # FINALIZE GENOME ALIGNMENT INDEXES ucsc_db.__doc__ = 'Most Conserved Elements for hg18' pygr.Data.addResource('TEST.Annotation.UCSC.hg18.mostconserved', ucsc_db) msa.__doc__ = 'NLMSA for Most Conserved Elements for hg18' pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved', msa) ucsc_schema = pygr.Data.ManyToManyRelation(hg18, ucsc_db, bindAttrs=('element1', )) ucsc_schema.__doc__ = \ 'Schema for UCSC Most Conserved Elements for hg18' pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved', ucsc_schema) # BUILD ANNOTATION DATABASE FOR SNP126 FROM UCSC snp_slices = Collection(filename=os.path.join(self.path, 'snp126_hg18.cdb'), intKeys=True, protocol=2, mode='cr', writeback=False) snp_db = seqdb.AnnotationDB(snp_slices, hg18, sliceAttrDict=dict(id=0, snp_id=1, orientation=2, gene_id=3, start=4, stop=5, score=6, ref_NCBI=7, ref_UCSC=8, observed=9, molType=10, myClass=11, myValid=12, avHet=13, avHetSE=14, myFunc=15, locType=16, myWeight=17)) msa = cnestedlist.NLMSA(os.path.join(self.path, 'snp126_hg18'), 'w', pairwiseMode=True, bidirectional=False) for lines in open(os.path.join(testInputDir, 'snp126%s_hg18.txt' % smallSamplePostfix), 'r').xreadlines(): row = [x for x in lines.split('\t')] # CONVERT TO LIST SO MUTABLE row[1] = int(row[1]) # CONVERT FROM STRING TO INTEGER snp_slices[row[1]] = row snp = snp_db[row[1]] # GET THE ANNOTATION OBJECT FOR THIS EXON msa.addAnnotation(snp) # SAVE IT TO GENOME MAPPING snp_db.clear_cache() # not really necessary; cache should autoGC # SHELVE SHOULD BE EXPLICITLY CLOSED IN ORDER TO SAVE CURRENT CONTENTS snp_slices.close() 
msa.build() # FINALIZE GENOME ALIGNMENT INDEXES snp_db.__doc__ = 'SNP126 for hg18' pygr.Data.addResource('TEST.Annotation.UCSC.hg18.snp126', snp_db) msa.__doc__ = 'NLMSA for SNP126 for hg18' pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.hg18.snp126', msa) snp_schema = pygr.Data.ManyToManyRelation(hg18, snp_db, bindAttrs=('snp1', )) snp_schema.__doc__ = 'Schema for UCSC SNP126 for hg18' pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.hg18.snp126', snp_schema) pygr.Data.save() pygr.Data.clear_cache() # QUERY TO EXON AND SPLICES ANNOTATION DATABASE hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18') exonmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.exons') splicemsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.splices') conservedmsa = \ pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.hg18.mostconserved') snpmsa = \ pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.hg18.snp126') cdsmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.hg18.cdss') exons = pygr.Data.getResource('TEST.Annotation.hg18.exons') splices = pygr.Data.getResource('TEST.Annotation.hg18.splices') mostconserved = \ pygr.Data.getResource('TEST.Annotation.UCSC.hg18.mostconserved') snp126 = pygr.Data.getResource('TEST.Annotation.UCSC.hg18.snp126') cdss = pygr.Data.getResource('TEST.Annotation.hg18.cdss') # OPEN hg18_MULTIZ28WAY NLMSA msa = cnestedlist.NLMSA(os.path.join(msaDir, 'hg18_multiz28way'), 'r', trypath=[seqDir]) exonAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Exons%s_hg18.txt' % smallSamplePostfix) intronAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Introns%s_hg18.txt' % smallSamplePostfix) stopAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Stop%s_hg18.txt' % smallSamplePostfix) newexonAnnotFileName = os.path.join(self.path, 'new_Exons_hg18.txt') newintronAnnotFileName = os.path.join(self.path, 'new_Introns_hg18.txt') newstopAnnotFileName = os.path.join(self.path, 'new_stop_hg18.txt') tmpexonAnnotFileName = self.copyFile(exonAnnotFileName) tmpintronAnnotFileName = self.copyFile(intronAnnotFileName) tmpstopAnnotFileName = self.copyFile(stopAnnotFileName) if smallSampleKey: chrList = [smallSampleKey] else: chrList = hg18.seqLenDict.keys() chrList.sort() outfile = open(newexonAnnotFileName, 'w') for chrid in chrList: slice = hg18[chrid] # EXON ANNOTATION DATABASE try: ex1 = exonmsa[slice] except: continue else: exlist1 = [(ix.exon_id, ix) for ix in ex1.keys()] exlist1.sort() for ixx, exon in exlist1: saveList = [] tmp = exon.sequence tmpexon = exons[exon.exon_id] tmpslice = tmpexon.sequence # FOR REAL EXON COORDINATE wlist1 = 'EXON', chrid, tmpexon.exon_id, tmpexon.gene_id, \ tmpslice.start, tmpslice.stop try: out1 = conservedmsa[tmp] except KeyError: pass else: elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()] elementlist.sort() for iyy, element in elementlist: if element.stop - element.start < 100: continue score = int(string.split(element.gene_id, '=')[1]) if score < 100: continue tmp2 = element.sequence tmpelement = mostconserved[element.ucsc_id] # FOR REAL ELEMENT COORDINATE tmpslice2 = tmpelement.sequence wlist2 = wlist1 + (tmpelement.ucsc_id, tmpelement.gene_id, tmpslice2.start, tmpslice2.stop) slicestart, sliceend = max(tmp.start, tmp2.start),\ min(tmp.stop, tmp2.stop) if slicestart < 0 or sliceend < 0: sys.exit('wrong query') tmp1 = msa.seqDict['hg18.' 
+ chrid][slicestart: sliceend] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start < 100: continue palign, pident = e.pAligned(), e.pIdentity() if palign < 0.8 or pident < 0.8: continue palign, pident = '%.2f' % palign, \ '%.2f' % pident wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident) saveList.append('\t'.join(map(str, wlist3)) + '\n') saveList.sort() for saveline in saveList: outfile.write(saveline) outfile.close() md5old = hashlib.md5() md5old.update(open(tmpexonAnnotFileName, 'r').read()) md5new = hashlib.md5() md5new.update(open(newexonAnnotFileName, 'r').read()) assert md5old.digest() == md5new.digest() outfile = open(newintronAnnotFileName, 'w') for chrid in chrList: slice = hg18[chrid] # SPLICE ANNOTATION DATABASE try: sp1 = splicemsa[slice] except: continue else: splist1 = [(ix.splice_id, ix) for ix in sp1.keys()] splist1.sort() for ixx, splice in splist1: saveList = [] tmp = splice.sequence tmpsplice = splices[splice.splice_id] tmpslice = tmpsplice.sequence # FOR REAL EXON COORDINATE wlist1 = 'INTRON', chrid, tmpsplice.splice_id, \ tmpsplice.gene_id, tmpslice.start, tmpslice.stop try: out1 = conservedmsa[tmp] except __HOLE__: pass else: elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()] elementlist.sort() for iyy, element in elementlist: if element.stop - element.start < 100: continue score = int(string.split(element.gene_id, '=')[1]) if score < 100: continue tmp2 = element.sequence tmpelement = mostconserved[element.ucsc_id] # FOR REAL ELEMENT COORDINATE tmpslice2 = tmpelement.sequence wlist2 = wlist1 + (tmpelement.ucsc_id, tmpelement.gene_id, tmpslice2.start, tmpslice2.stop) slicestart, sliceend = max(tmp.start, tmp2.start),\ min(tmp.stop, tmp2.stop) if slicestart < 0 or sliceend < 0: sys.exit('wrong query') tmp1 = msa.seqDict['hg18.' + chrid][slicestart: sliceend] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start < 100: continue palign, pident = e.pAligned(), e.pIdentity() if palign < 0.8 or pident < 0.8: continue palign, pident = '%.2f' % palign, \ '%.2f' % pident wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident) saveList.append('\t'.join(map(str, wlist3)) + '\n') saveList.sort() for saveline in saveList: outfile.write(saveline) # SNP IN SPLICE SITES saveList = [] gt = tmpslice[:2] ag = tmpslice[-2:] try: gtout = snpmsa[gt] agout = snpmsa[ag] except KeyError: pass else: gtlist = gtout.keys() aglist = agout.keys() for snp in gtlist: tmpsnp = snp.sequence annsnp = snp126[snp.snp_id] wlist2 = ('SNP5', chrid, tmpsplice.gene_id, gt.start, gt.stop, str(gt)) + \ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, annsnp.observed, annsnp.molType, annsnp.myClass, annsnp.myValid) tmp1 = msa.seqDict['hg18.' 
+ chrid][abs(gt.start):\ abs(gt.stop)] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start != 2 or \ dest.stop - dest.start != 2: continue palign, pident = e.pAligned(), e.pIdentity() palign, pident = '%.2f' % palign, \ '%.2f' % pident wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident) saveList.append('\t'.join(map(str, wlist3)) + '\n') for snp in aglist: tmpsnp = snp.sequence annsnp = snp126[snp.snp_id] wlist2 = ('SNP3', chrid, tmpsplice.gene_id, ag.start, ag.stop, str(ag)) + \ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, annsnp.observed, annsnp.molType, annsnp.myClass, annsnp.myValid) tmp1 = msa.seqDict['hg18.' + chrid][abs(ag.start):\ abs(ag.stop)] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start != 2 or \ dest.stop - dest.start != 2: continue palign, pident = e.pAligned(), e.pIdentity() palign, pident = '%.2f' % palign, \ '%.2f' % pident wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident) saveList.append('\t'.join(map(str, wlist3)) + '\n') saveList.sort() for saveline in saveList: outfile.write(saveline) outfile.close() md5old = hashlib.md5() md5old.update(open(tmpintronAnnotFileName, 'r').read()) md5new = hashlib.md5() md5new.update(open(newintronAnnotFileName, 'r').read()) assert md5old.digest() == md5new.digest() outfile = open(newstopAnnotFileName, 'w') for chrid in chrList: slice = hg18[chrid] # STOP ANNOTATION DATABASE try: cds1 = cdsmsa[slice] except: continue else: cdslist1 = [(ix.cds_id, ix) for ix in cds1.keys()] cdslist1.sort() for ixx, cds in cdslist1: saveList = [] tmp = cds.sequence tmpcds = cdss[cds.cds_id] tmpslice = tmpcds.sequence # FOR REAL EXON COORDINATE wlist1 = 'STOP', chrid, tmpcds.cds_id, tmpcds.gene_id, \ tmpslice.start, tmpslice.stop if tmpslice.start < 0: stopstart, stopend = -tmpslice.stop, -tmpslice.start stop = -hg18[chrid][stopstart:stopstart+3] else: stopstart, stopend = tmpslice.start, tmpslice.stop stop = hg18[chrid][stopend-3:stopend] if str(stop).upper() not in ('TAA', 'TAG', 'TGA'): continue try: snp1 = snpmsa[stop] except KeyError: pass else: snplist = [(ix.snp_id, ix) for ix in snp1.keys()] snplist.sort() for iyy, snp in snplist: tmpsnp = snp.sequence annsnp = snp126[snp.snp_id] wlist2 = wlist1 + (str(stop), stop.start, stop.stop) + \ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, annsnp.observed, annsnp.molType, annsnp.myClass, annsnp.myValid) if tmpslice.start < 0: tmp1 = -msa.seqDict['hg18.' + chrid]\ [stopstart:stopstart + 3] else: tmp1 = msa.seqDict['hg18.' 
+ chrid]\ [stopend - 3:stopend] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start != 3 or \ dest.stop - dest.start != 3: continue palign, pident = e.pAligned(), e.pIdentity() palign, pident = '%.2f' % palign, \ '%.2f' % pident if str(dest).upper() not in ('TAA', 'TAG', 'TGA'): nonstr = 'NONSENSE' else: nonstr = 'STOP' wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident, nonstr) saveList.append('\t'.join(map(str, wlist3)) + '\n') saveList.sort() for saveline in saveList: outfile.write(saveline) outfile.close() md5old = hashlib.md5() md5old.update(open(tmpstopAnnotFileName, 'r').read()) md5new = hashlib.md5() md5new.update(open(newstopAnnotFileName, 'r').read()) assert md5old.digest() == md5new.digest()
KeyError
dataset/ETHPy150Open cjlee112/pygr/tests/annotation_hg18_megatest.py/Build_Test.test_collectionannot
3,057
def test_mysqlannot(self): 'Test building an AnnotationDB from MySQL' from pygr import seqdb, cnestedlist, sqlgraph hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18') # BUILD ANNOTATION DATABASE FOR REFSEQ EXONS: MYSQL VERSION exon_slices = sqlgraph.SQLTableClustered( '%s.pygr_refGene_exonAnnot%s_hg18' % (testInputDB, smallSamplePostfix), clusterKey='chromosome', maxCache=0) exon_db = seqdb.AnnotationDB(exon_slices, hg18, sliceAttrDict=dict(id='chromosome', gene_id='name', exon_id='exon_id')) msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_exonAnnot_SQL_hg18'), 'w', pairwiseMode=True, bidirectional=False) for id in exon_db: msa.addAnnotation(exon_db[id]) exon_db.clear_cache() # not really necessary; cache should autoGC exon_slices.clear_cache() msa.build() exon_db.__doc__ = 'SQL Exon Annotation Database for hg18' pygr.Data.addResource('TEST.Annotation.SQL.hg18.exons', exon_db) msa.__doc__ = 'SQL NLMSA Exon for hg18' pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.exons', msa) exon_schema = pygr.Data.ManyToManyRelation(hg18, exon_db, bindAttrs=('exon2', )) exon_schema.__doc__ = 'SQL Exon Schema for hg18' pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.exons', exon_schema) # BUILD ANNOTATION DATABASE FOR REFSEQ SPLICES: MYSQL VERSION splice_slices = sqlgraph.SQLTableClustered( '%s.pygr_refGene_spliceAnnot%s_hg18' % (testInputDB, smallSamplePostfix), clusterKey='chromosome', maxCache=0) splice_db = seqdb.AnnotationDB(splice_slices, hg18, sliceAttrDict=dict(id='chromosome', gene_id='name', splice_id='splice_id')) msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_spliceAnnot_SQL_hg18'), 'w', pairwiseMode=True, bidirectional=False) for id in splice_db: msa.addAnnotation(splice_db[id]) splice_db.clear_cache() # not really necessary; cache should autoGC splice_slices.clear_cache() msa.build() splice_db.__doc__ = 'SQL Splice Annotation Database for hg18' pygr.Data.addResource('TEST.Annotation.SQL.hg18.splices', splice_db) msa.__doc__ = 'SQL NLMSA Splice for hg18' pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.splices', msa) splice_schema = pygr.Data.ManyToManyRelation(hg18, splice_db, bindAttrs=('splice2', )) splice_schema.__doc__ = 'SQL Splice Schema for hg18' pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.splices', splice_schema) # BUILD ANNOTATION DATABASE FOR REFSEQ EXONS: MYSQL VERSION cds_slices = sqlgraph.SQLTableClustered( '%s.pygr_refGene_cdsAnnot%s_hg18' % (testInputDB, smallSamplePostfix), clusterKey='chromosome', maxCache=0) cds_db = seqdb.AnnotationDB(cds_slices, hg18, sliceAttrDict=dict(id='chromosome', gene_id='name', cds_id='cds_id')) msa = cnestedlist.NLMSA(os.path.join(self.path, 'refGene_cdsAnnot_SQL_hg18'), 'w', pairwiseMode=True, bidirectional=False) for id in cds_db: msa.addAnnotation(cds_db[id]) cds_db.clear_cache() # not really necessary; cache should autoGC cds_slices.clear_cache() msa.build() cds_db.__doc__ = 'SQL CDS Annotation Database for hg18' pygr.Data.addResource('TEST.Annotation.SQL.hg18.cdss', cds_db) msa.__doc__ = 'SQL NLMSA CDS for hg18' pygr.Data.addResource('TEST.Annotation.NLMSA.SQL.hg18.cdss', msa) cds_schema = pygr.Data.ManyToManyRelation(hg18, cds_db, bindAttrs=('cds2', )) cds_schema.__doc__ = 'SQL CDS Schema for hg18' pygr.Data.addSchema('TEST.Annotation.NLMSA.SQL.hg18.cdss', cds_schema) # BUILD ANNOTATION DATABASE FOR MOST CONSERVED ELEMENTS FROM UCSC: # MYSQL VERSION ucsc_slices = \ sqlgraph.SQLTableClustered('%s.pygr_phastConsElements28way%s_hg18' % (testInputDB, smallSamplePostfix), clusterKey='chromosome', 
maxCache=0) ucsc_db = seqdb.AnnotationDB(ucsc_slices, hg18, sliceAttrDict=dict(id='chromosome', gene_id='name', ucsc_id='ucsc_id')) msa = cnestedlist.NLMSA(os.path.join(self.path, 'phastConsElements28way_SQL_hg18'), 'w', pairwiseMode=True, bidirectional=False) for id in ucsc_db: msa.addAnnotation(ucsc_db[id]) ucsc_db.clear_cache() # not really necessary; cache should autoGC ucsc_slices.clear_cache() msa.build() ucsc_db.__doc__ = 'SQL Most Conserved Elements for hg18' pygr.Data.addResource('TEST.Annotation.UCSC.SQL.hg18.mostconserved', ucsc_db) msa.__doc__ = 'SQL NLMSA for Most Conserved Elements for hg18' pygr.Data.addResource( 'TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved', msa) ucsc_schema = pygr.Data.ManyToManyRelation(hg18, ucsc_db, bindAttrs=('element2', )) ucsc_schema.__doc__ = \ 'SQL Schema for UCSC Most Conserved Elements for hg18' pygr.Data.addSchema( 'TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved', ucsc_schema) # BUILD ANNOTATION DATABASE FOR SNP126 FROM UCSC: MYSQL VERSION snp_slices = sqlgraph.SQLTableClustered('%s.pygr_snp126%s_hg18' % (testInputDB, smallSamplePostfix), clusterKey='clusterKey', maxCache=0) snp_db = seqdb.AnnotationDB(snp_slices, hg18, sliceAttrDict=dict(id='chromosome', gene_id='name', snp_id='snp_id', score='score', ref_NCBI='ref_NCBI', ref_UCSC='ref_UCSC', observed='observed', molType='molType', myClass='myClass', myValid='myValid', avHet='avHet', avHetSE='avHetSE', myFunc='myFunc', locType='locType', myWeight='myWeight')) msa = cnestedlist.NLMSA(os.path.join(self.path, 'snp126_SQL_hg18'), 'w', pairwiseMode=True, bidirectional=False) for id in snp_db: msa.addAnnotation(snp_db[id]) snp_db.clear_cache() # not really necessary; cache should autoGC snp_slices.clear_cache() msa.build() snp_db.__doc__ = 'SQL SNP126 for hg18' pygr.Data.addResource('TEST.Annotation.UCSC.SQL.hg18.snp126', snp_db) msa.__doc__ = 'SQL NLMSA for SNP126 for hg18' pygr.Data.addResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126', msa) snp_schema = pygr.Data.ManyToManyRelation(hg18, snp_db, bindAttrs=('snp2', )) snp_schema.__doc__ = 'SQL Schema for UCSC SNP126 for hg18' pygr.Data.addSchema('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126', snp_schema) pygr.Data.save() pygr.Data.clear_cache() # QUERY TO EXON AND SPLICES ANNOTATION DATABASE hg18 = pygr.Data.getResource('TEST.Seq.Genome.hg18') exonmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.exons') splicemsa = \ pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.splices') conservedmsa = \ pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.mostconserved') snpmsa = \ pygr.Data.getResource('TEST.Annotation.UCSC.NLMSA.SQL.hg18.snp126') cdsmsa = pygr.Data.getResource('TEST.Annotation.NLMSA.SQL.hg18.cdss') exons = pygr.Data.getResource('TEST.Annotation.SQL.hg18.exons') splices = pygr.Data.getResource('TEST.Annotation.SQL.hg18.splices') mostconserved = \ pygr.Data.getResource('TEST.Annotation.UCSC.SQL.hg18.mostconserved') snp126 = pygr.Data.getResource('TEST.Annotation.UCSC.SQL.hg18.snp126') cdss = pygr.Data.getResource('TEST.Annotation.SQL.hg18.cdss') # OPEN hg18_MULTIZ28WAY NLMSA msa = cnestedlist.NLMSA(os.path.join(msaDir, 'hg18_multiz28way'), 'r', trypath=[seqDir]) exonAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Exons%s_hg18.txt' % smallSamplePostfix) intronAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Introns%s_hg18.txt' % smallSamplePostfix) stopAnnotFileName = os.path.join(testInputDir, 'Annotation_ConservedElement_Stop%s_hg18.txt' % smallSamplePostfix) 
newexonAnnotFileName = os.path.join(self.path, 'new_Exons_hg18.txt') newintronAnnotFileName = os.path.join(self.path, 'new_Introns_hg18.txt') newstopAnnotFileName = os.path.join(self.path, 'new_stop_hg18.txt') tmpexonAnnotFileName = self.copyFile(exonAnnotFileName) tmpintronAnnotFileName = self.copyFile(intronAnnotFileName) tmpstopAnnotFileName = self.copyFile(stopAnnotFileName) if smallSampleKey: chrList = [smallSampleKey] else: chrList = hg18.seqLenDict.keys() chrList.sort() outfile = open(newexonAnnotFileName, 'w') for chrid in chrList: slice = hg18[chrid] # EXON ANNOTATION DATABASE try: ex1 = exonmsa[slice] except: continue else: exlist1 = [(ix.exon_id, ix) for ix in ex1.keys()] exlist1.sort() for ixx, exon in exlist1: saveList = [] tmp = exon.sequence tmpexon = exons[exon.exon_id] tmpslice = tmpexon.sequence # FOR REAL EXON COORDINATE wlist1 = 'EXON', chrid, tmpexon.exon_id, tmpexon.gene_id, \ tmpslice.start, tmpslice.stop try: out1 = conservedmsa[tmp] except KeyError: pass else: elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()] elementlist.sort() for iyy, element in elementlist: if element.stop - element.start < 100: continue score = int(string.split(element.gene_id, '=')[1]) if score < 100: continue tmp2 = element.sequence tmpelement = mostconserved[element.ucsc_id] # FOR REAL ELEMENT COORDINATE tmpslice2 = tmpelement.sequence wlist2 = wlist1 + (tmpelement.ucsc_id, tmpelement.gene_id, tmpslice2.start, tmpslice2.stop) slicestart, sliceend = max(tmp.start, tmp2.start),\ min(tmp.stop, tmp2.stop) if slicestart < 0 or sliceend < 0: sys.exit('wrong query') tmp1 = msa.seqDict['hg18.' + chrid][slicestart: sliceend] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start < 100: continue palign, pident = e.pAligned(), e.pIdentity() if palign < 0.8 or pident < 0.8: continue palign, pident = '%.2f' % palign, \ '%.2f' % pident wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident) saveList.append('\t'.join(map(str, wlist3)) + '\n') saveList.sort() for saveline in saveList: outfile.write(saveline) outfile.close() md5old = hashlib.md5() md5old.update(open(tmpexonAnnotFileName, 'r').read()) md5new = hashlib.md5() md5new.update(open(newexonAnnotFileName, 'r').read()) assert md5old.digest() == md5new.digest() outfile = open(newintronAnnotFileName, 'w') for chrid in chrList: slice = hg18[chrid] # SPLICE ANNOTATION DATABASE try: sp1 = splicemsa[slice] except: continue else: splist1 = [(ix.splice_id, ix) for ix in sp1.keys()] splist1.sort() for ixx, splice in splist1: saveList = [] tmp = splice.sequence tmpsplice = splices[splice.splice_id] tmpslice = tmpsplice.sequence # FOR REAL EXON COORDINATE wlist1 = 'INTRON', chrid, tmpsplice.splice_id, \ tmpsplice.gene_id, tmpslice.start, tmpslice.stop try: out1 = conservedmsa[tmp] except __HOLE__: pass else: elementlist = [(ix.ucsc_id, ix) for ix in out1.keys()] elementlist.sort() for iyy, element in elementlist: if element.stop - element.start < 100: continue score = int(string.split(element.gene_id, '=')[1]) if score < 100: continue tmp2 = element.sequence tmpelement = mostconserved[element.ucsc_id] # FOR REAL ELEMENT COORDINATE tmpslice2 = tmpelement.sequence wlist2 = wlist1 + (tmpelement.ucsc_id, tmpelement.gene_id, tmpslice2.start, tmpslice2.stop) slicestart, sliceend = max(tmp.start, tmp2.start),\ min(tmp.stop, tmp2.stop) if slicestart < 0 or sliceend < 0: sys.exit('wrong query') tmp1 = msa.seqDict['hg18.' 
+ chrid][slicestart: sliceend] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start < 100: continue palign, pident = e.pAligned(), e.pIdentity() if palign < 0.8 or pident < 0.8: continue palign, pident = '%.2f' % palign, \ '%.2f' % pident wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident) saveList.append('\t'.join(map(str, wlist3)) + '\n') saveList.sort() for saveline in saveList: outfile.write(saveline) # SNP IN SPLICE SITES saveList = [] gt = tmpslice[:2] ag = tmpslice[-2:] try: gtout = snpmsa[gt] agout = snpmsa[ag] except KeyError: pass else: gtlist = gtout.keys() aglist = agout.keys() for snp in gtlist: tmpsnp = snp.sequence annsnp = snp126[snp.snp_id] wlist2 = ('SNP5', chrid, tmpsplice.gene_id, gt.start, gt.stop, str(gt)) + \ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, annsnp.observed, annsnp.molType, annsnp.myClass, annsnp.myValid) tmp1 = msa.seqDict['hg18.' + chrid][abs(gt.start): abs(gt.stop)] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start != 2 or \ dest.stop - dest.start != 2: continue palign, pident = e.pAligned(), e.pIdentity() palign, pident = '%.2f' % palign, \ '%.2f' % pident wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident) saveList.append('\t'.join(map(str, wlist3)) + '\n') for snp in aglist: tmpsnp = snp.sequence annsnp = snp126[snp.snp_id] wlist2 = ('SNP3', chrid, tmpsplice.gene_id, ag.start, ag.stop, str(ag)) + \ (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, annsnp.observed, annsnp.molType, annsnp.myClass, annsnp.myValid) tmp1 = msa.seqDict['hg18.' 
+ chrid][abs(ag.start): abs(ag.stop)] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start != 2 or \ dest.stop - dest.start != 2: continue palign, pident = e.pAligned(), e.pIdentity() palign, pident = '%.2f' % palign, \ '%.2f' % pident wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident) saveList.append('\t'.join(map(str, wlist3)) + '\n') saveList.sort() for saveline in saveList: outfile.write(saveline) outfile.close() md5old = hashlib.md5() md5old.update(open(tmpintronAnnotFileName, 'r').read()) md5new = hashlib.md5() md5new.update(open(newintronAnnotFileName, 'r').read()) assert md5old.digest() == md5new.digest() outfile = open(newstopAnnotFileName, 'w') for chrid in chrList: slice = hg18[chrid] # STOP ANNOTATION DATABASE try: cds1 = cdsmsa[slice] except: continue else: cdslist1 = [(ix.cds_id, ix) for ix in cds1.keys()] cdslist1.sort() for ixx, cds in cdslist1: saveList = [] tmp = cds.sequence tmpcds = cdss[cds.cds_id] tmpslice = tmpcds.sequence # FOR REAL EXON COORDINATE wlist1 = 'STOP', chrid, tmpcds.cds_id, tmpcds.gene_id, \ tmpslice.start, tmpslice.stop if tmpslice.start < 0: stopstart, stopend = -tmpslice.stop, -tmpslice.start stop = -hg18[chrid][stopstart:stopstart+3] else: stopstart, stopend = tmpslice.start, tmpslice.stop stop = hg18[chrid][stopend-3:stopend] if str(stop).upper() not in ('TAA', 'TAG', 'TGA'): continue try: snp1 = snpmsa[stop] except KeyError: pass else: snplist = [(ix.snp_id, ix) for ix in snp1.keys()] snplist.sort() for iyy, snp in snplist: tmpsnp = snp.sequence annsnp = snp126[snp.snp_id] wlist2 = wlist1 + (str(stop), stop.start, stop.stop) + (annsnp.snp_id, tmpsnp.start, tmpsnp.stop, str(tmpsnp), annsnp.gene_id, annsnp.ref_NCBI, annsnp.ref_UCSC, annsnp.observed, annsnp.molType, annsnp.myClass, annsnp.myValid) if tmpslice.start < 0: tmp1 = -msa.seqDict['hg18.' + chrid]\ [stopstart:stopstart + 3] else: tmp1 = msa.seqDict['hg18.' + chrid]\ [stopend - 3:stopend] edges = msa[tmp1].edges() for src, dest, e in edges: if src.stop - src.start != 3 or \ dest.stop - dest.start != 3: continue palign, pident = e.pAligned(), e.pIdentity() palign, pident = '%.2f' % palign, '%.2f' \ % pident if str(dest).upper() not in ('TAA', 'TAG', 'TGA'): nonstr = 'NONSENSE' else: nonstr = 'STOP' wlist3 = wlist2 + ((~msa.seqDict)[src], str(src), src.start, src.stop, (~msa.seqDict)[dest], str(dest), dest.start, dest.stop, palign, pident, nonstr) saveList.append('\t'.join(map(str, wlist3)) + '\n') saveList.sort() for saveline in saveList: outfile.write(saveline) outfile.close() md5old = hashlib.md5() md5old.update(open(tmpstopAnnotFileName, 'r').read()) md5new = hashlib.md5() md5new.update(open(newstopAnnotFileName, 'r').read()) assert md5old.digest() == md5new.digest()
KeyError
dataset/ETHPy150Open cjlee112/pygr/tests/annotation_hg18_megatest.py/Build_Test.test_mysqlannot
3,058
def run(self, args, opts):
    if len(args) != 1:
        raise UsageError()
    editor = self.settings['EDITOR']
    try:
        spidercls = self.crawler_process.spider_loader.load(args[0])
    except __HOLE__:
        return self._err("Spider not found: %s" % args[0])
    sfile = sys.modules[spidercls.__module__].__file__
    sfile = sfile.replace('.pyc', '.py')
    self.exitcode = os.system('%s "%s"' % (editor, sfile))
KeyError
dataset/ETHPy150Open scrapy/scrapy/scrapy/commands/edit.py/Command.run
3,059
def test_coercion(self):
    f = CFoo()

    # Test coercion from basic built-in types
    f.ints = [1, 2, 3]
    desired = [1, 2, 3]
    self.assertEqual(f.ints, desired)
    f.ints = (1, 2, 3)
    self.assertEqual(f.ints, desired)
    f.strs = ("abc", "def", "ghi")
    self.assertEqual(f.strs, ["abc", "def", "ghi"])
    f.strs = "abcdef"
    self.assertEqual(f.strs, list("abcdef"))

    try:
        from numpy import array
    except __HOLE__:
        pass
    else:
        if sys.version_info[0] < 3:
            f.ints = array([1, 2, 3])
            self.assertEqual(f.ints, [1, 2, 3])
        else:
            # These would fail due to np.int_ being an invalid vallue
            # for the Int-trait.
            pass
        f.strs = array(("abc", "def", "ghi"))
        self.assertEqual(f.strs, ["abc", "def", "ghi"])
ImportError
dataset/ETHPy150Open enthought/traits/traits/tests/test_list.py/ListTestCase.test_coercion
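The masked exception is ImportError: numpy is treated as an optional dependency, and the numpy-specific assertions are simply skipped when it is absent. The same guard in isolation (the function name is illustrative):

def optional_array():
    # Optional-dependency pattern: fall back to a plain list
    # constructor when numpy is not installed.
    try:
        from numpy import array
    except ImportError:
        return list
    return array

make = optional_array()
print(make([1, 2, 3]))   # array([1, 2, 3]) with numpy, [1, 2, 3] without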
3,060
def test_build_request(self):
    url = "http://storj.io/"
    data = "test"
    headers = {"referer": "http://www.google.com/"}
    r = bt.build_request(url, data, headers)
    self.assertTrue(isinstance(r, bt.Request))

    # Invalid URL.
    url = ""
    try:
        r = bt.build_request(url, data, headers)
    except __HOLE__:
        pass
ValueError
dataset/ETHPy150Open Storj/dataserv-client/tests/test_bandwidth_test.py/TestBandwidthTest.test_build_request
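Here the hole is ValueError: a request built from an empty URL has no scheme and is rejected during construction. The standard-library equivalent behaves the same way (build_request below is an assumed stand-in for bt.build_request, not the dataserv-client implementation):

import urllib.request

def build_request(url, data=None, headers=None):
    # Assumed stand-in: wrap urllib.request.Request the way the
    # tested helper presumably does.
    return urllib.request.Request(url, data=data, headers=headers or {})

r = build_request("http://storj.io/",
                  headers={"referer": "http://www.google.com/"})
print(type(r).__name__)      # Request

try:
    build_request("")        # no scheme -> rejected at construction time
except ValueError as exc:
    print("invalid URL:", exc)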
3,061
def test_get_config(self):
    # Invalid URL.
    try:
        bt.getConfig(url="test")
    except __HOLE__:
        pass

    # Valid XML.
    configxml = """<?xml version="1.0" encoding="UTF-8"?>
    <settings>
    <client ip="127.0.0.1" lat="40" lon="-90" isp="Comcast Cable"
            isprating="2.9" rating="0" ispdlavg="30000" ispulavg="60000"
            loggedin="0" />
    <times dl1="5000000" dl2="35000000" dl3="800000000"
           ul1="1000000" ul2="8000000" ul3="35000000" />
    <download testlength="10" initialtest="250K"
              mintestsize="250K" threadsperurl="4" />
    <upload testlength="10" ratio="5" initialtest="0"
            mintestsize="32K" threads="2" maxchunksize="512K"
            maxchunkcount="50" threadsperurl="4" />
    </settings>
    """
    self.assertTrue(type(bt.getConfig(configxml=configxml)) is dict)
ValueError
dataset/ETHPy150Open Storj/dataserv-client/tests/test_bandwidth_test.py/TestBandwidthTest.test_get_config
3,062
def get_field_value_by_dotpath(
    self, folder, field_name, raw=False, formatted=False
):
    fields = folder.get_fields()

    key_dotpath = None
    if '.' in field_name:
        field_name, key_dotpath = field_name.split('.', 1)

    if field_name not in fields:
        raise JirafsError("Field '%s' does not exist." % field_name)

    if raw:
        data = fields[field_name]
    else:
        data = fields.get_transformed(field_name)

    if key_dotpath:
        try:
            for component in key_dotpath.split('.'):
                if not isinstance(data, dict):
                    raise JirafsError(
                        "Key '%s' (of dotpath '%s') is not an object "
                        "in field '%s'." % (
                            component,
                            key_dotpath,
                            field_name,
                        )
                    )
                elif component not in data:
                    data = ''
                    break
                else:
                    data = data[component]
        except (__HOLE__, TypeError):
            raise JirafsError(
                "Field '%s' could not be parsed as JSON for retrieving "
                "dotpath '%s'." % (
                    field_name,
                    key_dotpath,
                )
            )

    return data
ValueError
dataset/ETHPy150Open coddingtonbear/jirafs/jirafs/commands/field.py/Command.get_field_value_by_dotpath
3,063
def test_write_write_rollback_read_first_value(self):
    with local_db.Transaction() as tx:
        tx.execute('INSERT INTO tests VALUES (:k, :v)',
                   k='foo', v='bar')
    try:
        with local_db.Transaction() as tx:
            tx.execute('UPDATE tests SET value=:v WHERE key=:k',
                       k='foo', v='baz')
            raise RuntimeError()
    except __HOLE__:
        pass
    with local_db.Transaction() as tx:
        row = tx.query_one('SELECT value FROM tests WHERE key=:k',
                           k='foo')
        self.assertEqual('bar', row['value'])
RuntimeError
dataset/ETHPy150Open MirantisWorkloadMobility/CloudFerry/tests/lib/utils/test_local_db.py/LocalDbTestCase.test_write_write_rollback_read_first_value
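These local_db tests raise RuntimeError inside the transaction context manager purely to force a rollback, then assert that the first value survived. The sqlite3 connection object from the standard library demonstrates the same commit-on-success, rollback-on-exception contract:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE tests (key TEXT, value TEXT)")

with conn:  # commits on clean exit
    conn.execute("INSERT INTO tests VALUES (?, ?)", ("foo", "bar"))

try:
    with conn:  # rolls back because the block raises
        conn.execute("UPDATE tests SET value = ? WHERE key = ?",
                     ("baz", "foo"))
        raise RuntimeError()
except RuntimeError:
    pass

row = conn.execute("SELECT value FROM tests WHERE key = ?",
                   ("foo",)).fetchone()
print(row)   # ('bar',) -- the update was rolled back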
3,064
def test_nested_tx_rollback_inner(self):
    with local_db.Transaction() as tx1:
        tx1.execute('INSERT INTO tests VALUES (:k, :v)',
                    k='foo', v='bar')
        try:
            with local_db.Transaction() as tx2:
                tx2.execute('UPDATE tests SET value=:v WHERE key=:k',
                            k='foo', v='baz')
                raise RuntimeError()
        except __HOLE__:
            pass
    with local_db.Transaction() as tx:
        row = tx.query_one('SELECT value FROM tests WHERE key=:k',
                           k='foo')
        self.assertEqual('bar', row['value'])
RuntimeError
dataset/ETHPy150Open MirantisWorkloadMobility/CloudFerry/tests/lib/utils/test_local_db.py/LocalDbTestCase.test_nested_tx_rollback_inner
3,065
def test_nested_tx_rollback_outer(self):
    # Prepare state
    with local_db.Transaction() as tx:
        tx.execute('INSERT INTO tests VALUES (:k, :v)',
                   k='foo', v='bar')

    # Run outer rollback from inner tx
    try:
        with local_db.Transaction() as tx1:
            tx1.execute('UPDATE tests SET value=:v WHERE key=:k',
                        k='foo', v='baz')
            with local_db.Transaction() as tx2:
                tx2.execute('UPDATE tests SET value=:v WHERE key=:k',
                            k='foo', v='qux')
                raise RuntimeError()
    except __HOLE__:
        pass

    with local_db.Transaction() as tx:
        row = tx.query_one('SELECT value FROM tests WHERE key=:k',
                           k='foo')
        self.assertEqual('bar', row['value'])
RuntimeError
dataset/ETHPy150Open MirantisWorkloadMobility/CloudFerry/tests/lib/utils/test_local_db.py/LocalDbTestCase.test_nested_tx_rollback_outer
3,066
def is_signed(file_path):
    """Return True if the file has been signed.

    This utility function will help detect if a XPI file has been signed by
    mozilla (if we can't trust the File.is_signed field).

    It will simply check the signature filenames, and assume that if they're
    named "mozilla.*" then the xpi has been signed by us.

    This is in no way a perfect or correct solution, it's just the way we
    do it until we decide to inspect/walk the certificates chain to
    validate it comes from Mozilla.
    """
    try:
        with zipfile.ZipFile(file_path, mode='r') as zf:
            filenames = set(zf.namelist())
    except (zipfile.BadZipfile, __HOLE__):
        filenames = set()
    return set([u'META-INF/mozilla.rsa', u'META-INF/mozilla.sf',
                u'META-INF/manifest.mf']).issubset(filenames)
IOError
dataset/ETHPy150Open mozilla/addons-server/src/olympia/lib/crypto/packaged.py/is_signed
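The second member of the except tuple is IOError: a missing or unreadable XPI should count as unsigned rather than crash, just like a corrupt archive. A small sketch of that defensive read (names_or_empty is an illustrative helper, not from addons-server):

import io
import zipfile

def names_or_empty(source):
    # Corrupt archives raise BadZipfile, missing files raise
    # IOError/OSError; both degrade to "no signature files found".
    try:
        with zipfile.ZipFile(source, mode='r') as zf:
            return set(zf.namelist())
    except (zipfile.BadZipfile, IOError):
        return set()

print(names_or_empty(io.BytesIO(b"not a zip")))   # set()
print(names_or_empty("/no/such/file.xpi"))        # set()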
3,067
def ifind(pred, seq):
    try:
        return next(filter(pred, seq))
    except __HOLE__:
        return None
StopIteration
dataset/ETHPy150Open osrg/ryu/ryu/lib/ovs/vsctl.py/ifind
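next() on an exhausted filter iterator raises StopIteration, which is why that is the masked exception; the helper turns "no match" into None. Filled in and run on its own (Python 3, where filter is lazy):

def ifind(pred, seq):
    try:
        return next(filter(pred, seq))
    except StopIteration:
        return None

print(ifind(lambda x: x > 2, [1, 2, 3, 4]))   # 3
print(ifind(lambda x: x > 9, [1, 2, 3, 4]))   # None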
3,068
def set_queue(self, vsctl_qos, max_rate, min_rate, queue_id):
    ovsrec_qos = vsctl_qos.qos_cfg[0]
    try:
        ovsrec_queue = ovsrec_qos.queues[queue_id]
    except (__HOLE__, KeyError):
        ovsrec_queue = self.txn.insert(
            self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QUEUE])
    if max_rate is not None:
        self.set_column(ovsrec_queue, 'other_config',
                        'max-rate', max_rate)
    if min_rate is not None:
        self.set_column(ovsrec_queue, 'other_config',
                        'min-rate', min_rate)
    self.set_column(ovsrec_qos, 'queues', queue_id,
                    ['uuid', str(ovsrec_queue.uuid)])
    self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
    return ovsrec_queue
AttributeError
dataset/ETHPy150Open osrg/ryu/ryu/lib/ovs/vsctl.py/VSCtlContext.set_queue
3,069
@staticmethod
def _column_delete(ovsrec_row, column, ovsrec_del):
    value = getattr(ovsrec_row, column)
    try:
        value.remove(ovsrec_del)
    except __HOLE__:
        # Datum.to_python() with _uuid_to_row trims down deleted
        # references. If ovsrec_del.delete() is called before
        # _column_delete(), value doesn't include ovsrec_del.
        pass
    VSCtlContext._column_set(ovsrec_row, column, value)
ValueError
dataset/ETHPy150Open osrg/ryu/ryu/lib/ovs/vsctl.py/VSCtlContext._column_delete
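list.remove raises ValueError when the element is not present, and the record treats that as a no-op because the reference may already have been trimmed. The core of the pattern (discard_from is an illustrative name):

def discard_from(values, item):
    # Removing an absent element is not an error here; the column is
    # simply written back unchanged.
    try:
        values.remove(item)
    except ValueError:
        pass
    return values

print(discard_from([1, 2, 3], 2))   # [1, 3]
print(discard_from([1, 3], 2))      # [1, 3] -- already gone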
3,070
def loadUserMapFromCache(self):
    self.users = {}
    self.userMapFromPerforceServer = False
    try:
        cache = open(self.getUserCacheFilename(), "rb")
        lines = cache.readlines()
        cache.close()
        for line in lines:
            entry = line.strip().split("\t")
            self.users[entry[0]] = entry[1]
    except __HOLE__:
        self.getUserMapFromPerforceServer()
IOError
dataset/ETHPy150Open zulip/zulip/api/integrations/perforce/git_p4.py/P4UserMap.loadUserMapFromCache
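A missing cache file raises IOError on open(), and the method falls back to querying the Perforce server. The same shape without the git-p4 plumbing (load_user_map and the refresh callback are illustrative):

def load_user_map(path, refresh=lambda: {}):
    # Absent or unreadable cache -> rebuild from the authoritative source.
    try:
        with open(path, "rb") as cache:
            lines = cache.readlines()
    except IOError:
        return refresh()
    users = {}
    for line in lines:
        entry = line.strip().split(b"\t")
        if len(entry) >= 2:
            users[entry[0]] = entry[1]
    return users

print(load_user_map("/tmp/no-such-user-cache"))   # {} via refresh()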
3,071
def importP4Labels(self, stream, p4Labels): if verbose: print("import p4 labels: " + ' '.join(p4Labels)) ignoredP4Labels = gitConfigList("git-p4.ignoredP4Labels") validLabelRegexp = gitConfig("git-p4.labelImportRegexp") if len(validLabelRegexp) == 0: validLabelRegexp = defaultLabelRegexp m = re.compile(validLabelRegexp) for name in p4Labels: commitFound = False if not m.match(name): if verbose: print("label %s does not match regexp %s" % (name, validLabelRegexp)) continue if name in ignoredP4Labels: continue labelDetails = p4CmdList(['label', "-o", name])[0] # get the most recent changelist for each file in this label change = p4Cmd(["changes", "-m", "1"] + ["%s...@%s" % (p, name) for p in self.depotPaths]) if 'change' in change: # find the corresponding git commit; take the oldest commit changelist = int(change['change']) gitCommit = read_pipe(["git", "rev-list", "--max-count=1", "--reverse", ":/\[git-p4:.*change = %d\]" % changelist]) if len(gitCommit) == 0: print("could not find git commit for changelist %d" % changelist) else: gitCommit = gitCommit.strip() commitFound = True # Convert from p4 time format try: tmwhen = time.strptime(labelDetails['Update'], "%Y/%m/%d %H:%M:%S") except __HOLE__: print("Could not convert label time %s" % labelDetails['Update']) tmwhen = 1 when = int(time.mktime(tmwhen)) self.streamTag(stream, name, labelDetails, gitCommit, when) if verbose: print("p4 label %s mapped to git commit %s" % (name, gitCommit)) else: if verbose: print("Label %s has no changelists - possibly deleted?" % name) if not commitFound: # We can't import this label; don't try again as it will get very # expensive repeatedly fetching all the files for labels that will # never be imported. If the label is moved in the future, the # ignore will need to be removed manually. system(["git", "config", "--add", "git-p4.ignoredP4Labels", name])
ValueError
dataset/ETHPy150Open zulip/zulip/api/integrations/perforce/git_p4.py/P4Sync.importP4Labels
3,072
def importChanges(self, changes): cnt = 1 for change in changes: description = p4_describe(change) self.updateOptionDict(description) if not self.silent: sys.stdout.write("\rImporting revision %s (%s%%)" % (change, cnt * 100 / len(changes))) sys.stdout.flush() cnt = cnt + 1 try: if self.detectBranches: branches = self.splitFilesIntoBranches(description) for branch in branches.keys(): ## HACK --hwn branchPrefix = self.depotPaths[0] + branch + "/" self.branchPrefixes = [ branchPrefix ] parent = "" filesForCommit = branches[branch] if self.verbose: print("branch is %s" % branch) self.updatedBranches.add(branch) if branch not in self.createdBranches: self.createdBranches.add(branch) parent = self.knownBranches[branch] if parent == branch: parent = "" else: fullBranch = self.projectName + branch if fullBranch not in self.p4BranchesInGit: if not self.silent: print("\n Importing new branch %s" % fullBranch); if self.importNewBranch(branch, change - 1): parent = "" self.p4BranchesInGit.append(fullBranch) if not self.silent: print("\n Resuming with change %s" % change); if self.verbose: print("parent determined through known branches: %s" % parent) branch = self.gitRefForBranch(branch) parent = self.gitRefForBranch(parent) if self.verbose: print("looking for initial parent for %s; current parent is %s" % (branch, parent)) if len(parent) == 0 and branch in self.initialParents: parent = self.initialParents[branch] del self.initialParents[branch] blob = None if len(parent) > 0: tempBranch = "%s/%d" % (self.tempBranchLocation, change) if self.verbose: print("Creating temporary branch: " + tempBranch) self.commit(description, filesForCommit, tempBranch) self.tempBranches.append(tempBranch) self.checkpoint() blob = self.searchParent(parent, branch, tempBranch) if blob: self.commit(description, filesForCommit, branch, blob) else: if self.verbose: print("Parent of %s not found. Committing into head of %s" % (branch, parent)) self.commit(description, filesForCommit, branch, parent) else: files = self.extractFilesFromCommit(description) self.commit(description, files, self.branch, self.initialParent) # only needed once, to connect to the previous commit self.initialParent = "" except __HOLE__: print(self.gitError.read()) sys.exit(1)
IOError
dataset/ETHPy150Open zulip/zulip/api/integrations/perforce/git_p4.py/P4Sync.importChanges
3,073
def importHeadRevision(self, revision): print("Doing initial import of %s from revision %s into %s" % (' '.join(self.depotPaths), revision, self.branch)) details = {} details["user"] = "git perforce import user" details["desc"] = ("Initial import of %s from the state at revision %s\n" % (' '.join(self.depotPaths), revision)) details["change"] = revision newestRevision = 0 fileCnt = 0 fileArgs = ["%s...%s" % (p, revision) for p in self.depotPaths] for info in p4CmdList(["files"] + fileArgs): if 'code' in info and info['code'] == 'error': sys.stderr.write("p4 returned an error: %s\n" % info['data']) if info['data'].find("must refer to client") >= 0: sys.stderr.write("This particular p4 error is misleading.\n") sys.stderr.write("Perhaps the depot path was misspelled.\n"); sys.stderr.write("Depot path: %s\n" % " ".join(self.depotPaths)) sys.exit(1) if 'p4ExitCode' in info: sys.stderr.write("p4 exitcode: %s\n" % info['p4ExitCode']) sys.exit(1) change = int(info["change"]) if change > newestRevision: newestRevision = change if info["action"] in self.delete_actions: # don't increase the file cnt, otherwise details["depotFile123"] will have gaps! #fileCnt = fileCnt + 1 continue for prop in ["depotFile", "rev", "action", "type" ]: details["%s%s" % (prop, fileCnt)] = info[prop] fileCnt = fileCnt + 1 details["change"] = newestRevision # Use time from top-most change so that all git p4 clones of # the same p4 repo have the same commit SHA1s. res = p4_describe(newestRevision) details["time"] = res["time"] self.updateOptionDict(details) try: self.commit(details, self.extractFilesFromCommit(details), self.branch) except __HOLE__: print("IO error with git fast-import. Is your git version recent enough?") print(self.gitError.read())
IOError
dataset/ETHPy150Open zulip/zulip/api/integrations/perforce/git_p4.py/P4Sync.importHeadRevision
3,074
def main():
    if len(sys.argv[1:]) == 0:
        printUsage(list(commands.keys()))
        sys.exit(2)

    cmdName = sys.argv[1]
    try:
        klass = commands[cmdName]
        cmd = klass()
    except __HOLE__:
        print("unknown command %s" % cmdName)
        print("")
        printUsage(list(commands.keys()))
        sys.exit(2)

    options = cmd.options
    cmd.gitdir = os.environ.get("GIT_DIR", None)

    args = sys.argv[2:]

    options.append(optparse.make_option("--verbose", "-v", dest="verbose", action="store_true"))
    if cmd.needsGit:
        options.append(optparse.make_option("--git-dir", dest="gitdir"))

    parser = optparse.OptionParser(cmd.usage.replace("%prog", "%prog " + cmdName),
                                   options,
                                   description = cmd.description,
                                   formatter = HelpFormatter())

    (cmd, args) = parser.parse_args(sys.argv[2:], cmd);
    global verbose
    verbose = cmd.verbose
    if cmd.needsGit:
        if cmd.gitdir == None:
            cmd.gitdir = os.path.abspath(".git")
            if not isValidGitDir(cmd.gitdir):
                cmd.gitdir = read_pipe("git rev-parse --git-dir").strip()
                if os.path.exists(cmd.gitdir):
                    cdup = read_pipe("git rev-parse --show-cdup").strip()
                    if len(cdup) > 0:
                        chdir(cdup);

        if not isValidGitDir(cmd.gitdir):
            if isValidGitDir(cmd.gitdir + "/.git"):
                cmd.gitdir += "/.git"
            else:
                die("fatal: cannot locate git repository at %s" % cmd.gitdir)

        os.environ["GIT_DIR"] = cmd.gitdir

    if not cmd.run(args):
        parser.print_help()
        sys.exit(2)
KeyError
dataset/ETHPy150Open zulip/zulip/api/integrations/perforce/git_p4.py/main
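commands is a plain dict mapping sub-command names to classes, so an unknown name surfaces as KeyError and is turned into a usage message. Reduced to its essentials (the command table below is made up):

commands = {
    "sync":  lambda: "running sync",
    "clone": lambda: "running clone",
}

def dispatch(name):
    try:
        cmd = commands[name]
    except KeyError:
        return "unknown command %s" % name
    return cmd()

print(dispatch("sync"))     # running sync
print(dispatch("rebase"))   # unknown command rebase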
3,075
def show(self, callback): self.callback = callback options = dict( index=self.index ) try: response = self.client.indices.get_settings(**options) except Exception as e: return sublime.error_message("Error: {}".format(e)) self.choices = DEFAULT_ANALYZERS.copy() analyzers = [] try: analyzers = response[self.index]["settings"]["index"]["analysis"]["analyzer"].keys() except __HOLE__: pass for analyzer in analyzers: self.choices.append([ analyzer, "Custom Analyzer: {}".format(analyzer) ]) self.choices += DEFAULT_ANALYZERS self.choices.sort() self.window.show_quick_panel(self.choices, self.on_done)
KeyError
dataset/ETHPy150Open KunihikoKido/sublime-elasticsearch-client/panel/analyzer_list_panel.py/AnalyzerListPanel.show
3,076
@classmethod
def validate_template(cls):
    y = re.findall(cls._MATCH_YEAR, cls._template)
    m = re.findall(cls._MATCH_MONTH, cls._template)
    c = re.findall(cls._MATCH_COUNTER, cls._template)

    if len(y) > 1:
        raise ValidationError('{year} can only be used once')

    if len(m) > 1:
        raise ValidationError('{month} can only be used once')

    if len(m) == 1 and len(y) == 0:
        raise ValidationError('{month} can only be used while {year} is present')

    if len(c) > 1:
        raise ValidationError('{counter:0Nd} can only be used once')

    if len(c) == 0:
        raise ValidationError('{counter:0Nd} must be used once')

    try:
        cls._template.format(year=1999, month=11, counter=1)
    except __HOLE__:
        raise ValidationError('The string has the wrong format')
KeyError
dataset/ETHPy150Open django-bmf/django-bmf/djangobmf/core/numberrange.py/NumberRange.validate_template
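str.format raises KeyError for a placeholder that has no matching keyword argument, so a template with a misspelled field fails the final check. A compact demonstration (check_template is illustrative; ValidationError is replaced by a plain message):

def check_template(template):
    try:
        template.format(year=1999, month=11, counter=1)
    except KeyError as exc:
        return "wrong format, unknown placeholder %s" % exc
    return "ok"

print(check_template("INV{year}/{counter:04d}"))   # ok
print(check_template("INV{yaer}/{counter:04d}"))   # wrong format, unknown placeholder 'yaer'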
3,077
@not_implemented_for('directed')
def find_cliques(G):
    """Search for all maximal cliques in a graph.

    Maximal cliques are the largest complete subgraph containing
    a given node. The largest maximal clique is sometimes called
    the maximum clique.

    Returns
    -------
    generator of lists: genetor of member list for each maximal clique

    See Also
    --------
    find_cliques_recursive :
    A recursive version of the same algorithm

    Notes
    -----
    To obtain a list of cliques, use list(find_cliques(G)).

    Based on the algorithm published by Bron & Kerbosch (1973) [1]_
    as adapated by Tomita, Tanaka and Takahashi (2006) [2]_
    and discussed in Cazals and Karande (2008) [3]_.

    The method essentially unrolls the recursion used in
    the references to avoid issues of recursion stack depth.

    This algorithm is not suitable for directed graphs.

    This algorithm ignores self-loops and parallel edges as
    clique is not conventionally defined with such edges.

    There are often many cliques in graphs.  This algorithm can
    run out of memory for large graphs.

    References
    ----------
    .. [1] Bron, C. and Kerbosch, J. 1973.
       Algorithm 457: finding all cliques of an undirected graph.
       Commun. ACM 16, 9 (Sep. 1973), 575-577.
       http://portal.acm.org/citation.cfm?doid=362342.362367

    .. [2] Etsuji Tomita, Akira Tanaka, Haruhisa Takahashi,
       The worst-case time complexity for generating all maximal
       cliques and computational experiments,
       Theoretical Computer Science, Volume 363, Issue 1,
       Computing and Combinatorics,
       10th Annual International Conference on
       Computing and Combinatorics (COCOON 2004), 25 October 2006, Pages 28-42
       http://dx.doi.org/10.1016/j.tcs.2006.06.015

    .. [3] F. Cazals, C. Karande,
       A note on the problem of reporting maximal cliques,
       Theoretical Computer Science,
       Volume 407, Issues 1-3, 6 November 2008, Pages 564-568,
       http://dx.doi.org/10.1016/j.tcs.2008.05.010
    """
    # Cache nbrs and find first pivot (highest degree)
    maxconn=-1
    nnbrs={}
    pivotnbrs=set() # handle empty graph
    for n,nbrs in G.adjacency_iter():
        nbrs=set(nbrs)
        nbrs.discard(n)
        conn = len(nbrs)
        if conn > maxconn:
            nnbrs[n] = pivotnbrs = nbrs
            maxconn = conn
        else:
            nnbrs[n] = nbrs
    # Initial setup
    cand=set(nnbrs)
    smallcand = set(cand - pivotnbrs)
    done=set()
    stack=[]
    clique_so_far=[]
    # Start main loop
    while smallcand or stack:
        try:
            # Any nodes left to check?
            n=smallcand.pop()
        except __HOLE__:
            # back out clique_so_far
            cand,done,smallcand = stack.pop()
            clique_so_far.pop()
            continue
        # Add next node to clique
        clique_so_far.append(n)
        cand.remove(n)
        done.add(n)
        nn=nnbrs[n]
        new_cand = cand & nn
        new_done = done & nn
        # check if we have more to search
        if not new_cand:
            if not new_done:    # Found a clique!
                yield clique_so_far[:]
            clique_so_far.pop()
            continue
        # Shortcut--only one node left!
        if not new_done and len(new_cand)==1:
            yield clique_so_far + list(new_cand)
            clique_so_far.pop()
            continue
        # find pivot node (max connected in cand)
        # look in done nodes first
        numb_cand=len(new_cand)
        maxconndone=-1
        for n in new_done:
            cn = new_cand & nnbrs[n]
            conn=len(cn)
            if conn > maxconndone:
                pivotdonenbrs=cn
                maxconndone=conn
                if maxconndone==numb_cand:
                    break
        # Shortcut--this part of tree already searched
        if maxconndone == numb_cand:
            clique_so_far.pop()
            continue
        # still finding pivot node
        # look in cand nodes second
        maxconn=-1
        for n in new_cand:
            cn = new_cand & nnbrs[n]
            conn=len(cn)
            if conn > maxconn:
                pivotnbrs=cn
                maxconn=conn
                if maxconn == numb_cand-1:
                    break
        # pivot node is max connected in cand from done or cand
        if maxconndone > maxconn:
            pivotnbrs = pivotdonenbrs
        # save search status for later backout
        stack.append( (cand, done, smallcand) )
        cand=new_cand
        done=new_done
        smallcand = cand - pivotnbrs
KeyError
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/clique.py/find_cliques
3,078
def __descend_sections(self, key, create=False):
    """Traverse the nested mappings down to the last layer
    """
    if self.base is None:
        raise KeyError("Cannot access key in empty mapping")
    try:
        split_name = key.split(self.SECTION_SEPARATOR)
    except __HOLE__ as err:
        raise TypeError("Key must be a string ('%s')" % err)
    level = 0
    section = self.base
    # Iterate through the sections
    while level < len(split_name) - 1:
        try:
            section = section[split_name[level]]
            level += 1
        except KeyError as err:
            if not create:
                raise KeyError(
                    "Section '%s' does not exist in '%s'" % (
                        split_name[level],
                        self.SECTION_SEPARATOR.join(split_name[:level])))
            else:
                section[split_name[level]] = self._new_section(section, level)
                section = section[split_name[level]]
                level += 1
    subkey = split_name[level]
    return section, subkey
AttributeError
dataset/ETHPy150Open duerrp/pyexperiment/pyexperiment/utils/HierarchicalMapping.py/HierarchicalMapping.__descend_sections
3,079
def __getitem__(self, key):
    """Get an item
    """
    section, subkey = self.__descend_sections(key)
    # At the last section, get the value
    try:
        value = section[subkey]
    except __HOLE__ as _err:
        raise KeyError(
            "Key '%s' does not exist" % key)
    return value
KeyError
dataset/ETHPy150Open duerrp/pyexperiment/pyexperiment/utils/HierarchicalMapping.py/HierarchicalMapping.__getitem__
3,080
def __delitem__(self, key):
    """Delete an item
    """
    section, subkey = self.__descend_sections(key)
    # At the last section, set the value
    try:
        del section[subkey]
    except __HOLE__ as _err:
        raise KeyError(
            "Key does not exist '%s'" % key)
KeyError
dataset/ETHPy150Open duerrp/pyexperiment/pyexperiment/utils/HierarchicalMapping.py/HierarchicalMapping.__delitem__
3,081
def __iter__(self):
    """Need to define __iter__ to make it a MutableMapping
    """
    iterator_list = [(iteritems(self.base or {}), '')]
    while iterator_list:
        iterator, prefix = iterator_list.pop()
        try:
            key, value = next(iterator)
            if len(prefix) > 0:
                key = prefix + '.' + key
        except __HOLE__:
            continue
        iterator_list.append((iterator, prefix))
        if self._is_section(value):
            iterator_list.append((iteritems(value), key))
        else:
            yield key
StopIteration
dataset/ETHPy150Open duerrp/pyexperiment/pyexperiment/utils/HierarchicalMapping.py/HierarchicalMapping.__iter__
3,082
def get(self, key, default=None):
    """Get the key or return the default value if provided
    """
    try:
        return self[key]
    except __HOLE__:
        if default is not None:
            return default
        else:
            raise
KeyError
dataset/ETHPy150Open duerrp/pyexperiment/pyexperiment/utils/HierarchicalMapping.py/HierarchicalMapping.get
3,083
def get_or_set(self, key, value):
    """Either gets the value associated with key or set it

    This can be useful as an easy way of
    """
    try:
        return self[key]
    except __HOLE__:
        self[key] = value
        return value
KeyError
dataset/ETHPy150Open duerrp/pyexperiment/pyexperiment/utils/HierarchicalMapping.py/HierarchicalMapping.get_or_set
3,084
def asShape(context):
    """Adapts the context to a geometry interface. The coordinates remain
    stored in the context.
    """
    if hasattr(context, "__geo_interface__"):
        ob = context.__geo_interface__
    else:
        ob = context

    try:
        geom_type = ob.get("type").lower()
    except __HOLE__:
        raise ValueError("Context does not provide geo interface")

    if geom_type == "point":
        return asPoint(ob["coordinates"])
    elif geom_type == "linestring":
        return asLineString(ob["coordinates"])
    elif geom_type == "polygon":
        return asPolygon(ob["coordinates"][0], ob["coordinates"][1:])
    elif geom_type == "multipoint":
        return asMultiPoint(ob["coordinates"])
    elif geom_type == "multilinestring":
        return asMultiLineString(ob["coordinates"])
    elif geom_type == "multipolygon":
        return MultiPolygonAdapter(ob["coordinates"], context_type='geojson')
    else:
        raise ValueError("Unknown geometry type: %s" % geom_type)
AttributeError
dataset/ETHPy150Open Toblerity/Shapely/shapely/geometry/geo.py/asShape
3,085
def __init__(self, filename, version=ID3V2_DEFAULT_VERSION):
    """
    @param filename: the file to open or write to.
    @type filename: string

    @param version: if header doesn't exists, we need this to tell us what version \
    header to use
    @type version: float

    @raise ID3Exception: if file does not have an ID3v2 but is specified
    to be in read or modify mode.
    """
    if str(version) not in self.supported:
        raise ID3ParameterException("version %s not valid" % str(version))
    if not os.path.exists(filename):
        raise ID3ParameterException("filename %s not valid" % filename)

    try:
        self.f = open(filename, 'rb+')
        self.read_only = False
    except __HOLE__, (errno, strerror):
        if errno == 13:  # permission denied
            self.f = open(filename, 'rb')
            self.read_only = True

    self.filename = filename
    if self.tag_exists():
        self.parse_header()
        self.parse_frames()
    else:
        self.new_header(str(version))
IOError
dataset/ETHPy150Open Ciantic/pytagger/tagger/id3v2.py/ID3v2.__init__
3,086
def __hash__(self):
    try:
        return self._hash
    except __HOLE__:
        self._hash = hash(self._comparison_key)
        return self._hash
AttributeError
dataset/ETHPy150Open nltk/nltk/nltk/parse/chart.py/EdgeI.__hash__
3,087
def to_internal_value(self, data):
    if data is None:
        return None
    try:
        return next(
            user for user in self.get_queryset()
            if user._id == data
        )
    except __HOLE__:
        self.fail('invalid_data')
StopIteration
dataset/ETHPy150Open CenterForOpenScience/osf.io/api/files/serializers.py/CheckoutField.to_internal_value
3,088
def is_uuid_like(val):
    """Returns validation of a value as a UUID.

    For our purposes, a UUID is a canonical form string:
    aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
    """
    try:
        return str(uuid.UUID(val)) == val
    except (TypeError, __HOLE__, AttributeError):
        return False
ValueError
dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/openstack/common/uuidutils.py/is_uuid_like
3,089
def __lt__(self, other):
    try:
        return self.nums < other.nums
    except __HOLE__:
        return self.nums[0] < other
AttributeError
dataset/ETHPy150Open tabatkins/bikeshed/bikeshed/config.py/HierarchicalNumber.__lt__
3,090
def __eq__(self, other):
    try:
        return self.nums == other.nums
    except __HOLE__:
        return self.nums[0] == other
AttributeError
dataset/ETHPy150Open tabatkins/bikeshed/bikeshed/config.py/HierarchicalNumber.__eq__
3,091
def retrieveDataFile(filename, quiet=False, str=False):
    cacheLocation = scriptPath + "/spec-data/" + filename
    fallbackLocation = scriptPath + "/spec-data/readonly/" + filename
    try:
        fh = open(cacheLocation, 'r')
    except __HOLE__:
        try:
            fh = open(fallbackLocation, 'r')
        except IOError:
            die("Couldn't retrieve the file '{0}' from cache. Something's wrong, please report this.", filename)
            return
        import shutil
        try:
            if not quiet:
                say("Attempting to save the {0} file to cache...", type)
            if not dryRun:
                shutil.copy(fallbackLocation, cacheLocation)
            if not quiet:
                say("Successfully saved the {0} file to cache.", type)
        except:
            if not quiet:
                warn("Couldn't save the {0} file to cache. Proceeding...", type)
    if str:
        return unicode(fh.read(), encoding="utf-8")
    else:
        return fh
IOError
dataset/ETHPy150Open tabatkins/bikeshed/bikeshed/config.py/retrieveDataFile
3,092
def retrieveBoilerplateFile(self, name, group=None, status=None, error=True):
    # Looks in three locations, in order:
    # the folder the spec source is in, the group's boilerplate folder, and the generic boilerplate folder.
    # In each location, it first looks for the file specialized on status, and then for the generic file.
    # Filenames must be of the format NAME.include or NAME-STATUS.include
    if group is None and self.md.group is not None:
        group = self.md.group.lower()
    if status is None:
        status = self.md.status

    localFolder = os.path.dirname(os.path.abspath(self.inputSource))
    includeFolder = os.path.join(config.scriptPath, "include")
    statusFile = "{0}-{1}.include".format(name, status)
    genericFile = "{0}.include".format(name)
    filenames = []
    filenames.append(os.path.join(localFolder, statusFile))
    filenames.append(os.path.join(localFolder, genericFile))
    if group:
        filenames.append(os.path.join(includeFolder, group, statusFile))
        filenames.append(os.path.join(includeFolder, group, genericFile))
    filenames.append(os.path.join(includeFolder, statusFile))
    filenames.append(os.path.join(includeFolder, genericFile))

    for filename in filenames:
        if os.path.isfile(filename):
            try:
                with io.open(filename, 'r', encoding="utf-8") as fh:
                    return fh.read()
            except __HOLE__:
                if error:
                    die("The include file for {0} disappeared underneath me.", name)
                return ""
            break
    else:
        if error:
            die("Couldn't find an appropriate include file for the {0} inclusion, given group='{1}' and status='{2}'.", name, group, status)
        return ""
IOError
dataset/ETHPy150Open tabatkins/bikeshed/bikeshed/config.py/retrieveBoilerplateFile
3,093
def runAnalysis(self):
    """ Compute all the features that are currently selected, for the nodule and/or for
    the surrounding spheres
    """
    # build list of features and feature classes based on what is checked by the user
    self.selectedMainFeaturesKeys = set()
    self.selectedFeatureKeys = set()
    self.analysisResults = dict()
    self.analysisResultsTiming = dict()
    self.__analyzedSpheres__ = set()

    for featureClass in self.featureWidgets:
        for widget in self.featureWidgets[featureClass]:
            if widget.checked:
                self.selectedMainFeaturesKeys.add(featureClass)
                self.selectedFeatureKeys.add(str(widget.text))

    # Preconditions
    if self.inputVolumeSelector.currentNode() is None:
        # TODO: disable the button until segmentation is done
        qt.QMessageBox.warning(slicer.util.mainWindow(), "Select a volume", "Please select and segment an input volume")
        return
    if self.logic.currentLabelmap is None:
        qt.QMessageBox.warning(slicer.util.mainWindow(), "Segment a labelmap", "Please select and segment a labelmap volume")
        return
    if len(self.selectedFeatureKeys) == 0:
        qt.QMessageBox.information(slicer.util.mainWindow(), "Select a feature", "Please select at least one feature from the menu to calculate")
        return
    if "Parenchymal Volume" in self.selectedMainFeaturesKeys and self.parenchymaLabelmapSelector.currentNode() is None:
        qt.QMessageBox.warning(slicer.util.mainWindow(), "Select a labelmap", "Please select a segmented emphysema labelmap in the Parenchymal Volume tab")
        return
    if self.otherRadiusCheckbox.checked and int(self.otherRadiusTextbox.text) > self.logic.MAX_TUMOR_RADIUS:
        qt.QMessageBox.warning(slicer.util.mainWindow(), "Invalid value", "The radius of the sphere must have a maximum value of {0}".format(self.logic.MAX_TUMOR_RADIUS))
        return

    try:
        # Analysis for the volume and the nodule:
        keyName = self.inputVolumeSelector.currentNode().GetName()
        start = time.time()
        if self.noduleCheckbox.checked:
            logic = FeatureExtractionLogic(self.logic.currentVolume, self.logic.currentVolumeArray,
                                           self.logic.currentLabelmapArray,
                                           self.selectedMainFeaturesKeys.difference(["Parenchymal Volume"]),
                                           self.selectedFeatureKeys.difference(self.featureClasses["Parenchymal Volume"]))

            print("******** Nodule analysis results...")
            t1 = start
            t2 = time.time()

            self.analysisResults[keyName] = collections.OrderedDict()
            self.analysisResultsTiming[keyName] = collections.OrderedDict()
            logic.run(self.analysisResults[keyName], self.logic.printTiming, self.analysisResultsTiming[keyName])

            # Print analysis results
            print(self.analysisResults[keyName])
            if self.logic.printTiming:
                print("Elapsed time for the nodule analysis (TOTAL={0} seconds:".format(t2 - t1))
                print(self.analysisResultsTiming[keyName])

        # Check in any sphere has been selected for the analysis, because otherwise it's not necessary to calculate the distance map
        anySphereChecked = False
        for r in self.logic.spheresDict[self.workingMode]:
            if self.spheresButtonGroup.button(r*10).isChecked():
                anySphereChecked = True
                break
        if self.otherRadiusCheckbox.checked and self.otherRadiusTextbox.text != "":
            anySphereChecked = True

        # if self.r15Checkbox.checked or self.r20Checkbox.checked or self.r25Checkbox.checked \
        #         or (self.rOtherCheckbox.checked and self.otherRadiusTextbox.text != ""):
        if anySphereChecked:
            if "Parenchymal Volume" in self.selectedMainFeaturesKeys:
                # If the parenchymal volume analysis is required, we need the numpy array represeting the whole
                # emphysema segmentation labelmap
                labelmapWholeVolumeArray = slicer.util.array(self.parenchymaLabelmapSelector.currentNode().GetName())
            else:
                labelmapWholeVolumeArray = None

            # print("DEBUG: analyzing spheres...")
            t1 = time.time()
            self.logic.getCurrentDistanceMap()
            if self.logic.printTiming:
                print("Time to get the current distance map: {0} seconds".format(time.time() - t1))
            for r in self.logic.spheresDict[self.workingMode]:
                if self.spheresButtonGroup.button(r*10).isChecked():
                    self.runAnalysisSphere(r, labelmapWholeVolumeArray)
                    self.__analyzedSpheres__.add(r)
            # if self.r15Checkbox.checked:
            #     self.runAnalysisSphere(15, labelmapWholeVolumeArray)
            #     self.__analyzedSpheres__.add(15)
            # if self.r20Checkbox.checked:
            #     self.runAnalysisSphere(20, labelmapWholeVolumeArray)
            #     self.__analyzedSpheres__.add(20)
            # if self.r25Checkbox.checked:
            #     self.runAnalysisSphere(25, labelmapWholeVolumeArray)
            #     self.__analyzedSpheres__.add(25)
            if self.otherRadiusCheckbox.checked:
                r = int(self.otherRadiusTextbox.text)
                self.runAnalysisSphere(r, labelmapWholeVolumeArray)
                self.__analyzedSpheres__.add(r)

        t = time.time() - start
        if self.logic.printTiming:
            print("********* TOTAL ANALYSIS TIME: {0} SECONDS".format(t))

        # Save the results in the report widget
        qt.QMessageBox.information(slicer.util.mainWindow(), "Process finished",
                                   "Analysis finished. Total time: {0} seconds. Click the \"Open\" button to see the results".format(t))
        self.refreshUI()
    except __HOLE__:
        qt.QMessageBox.warning(slicer.util.mainWindow(), "Process cancelled", "The process has been cancelled by the user")
    finally:
        self.saveReport(showConfirmation=False)
StopIteration
dataset/ETHPy150Open acil-bwh/SlicerCIP/Scripted/CIP_LesionModel/CIP_LesionModel.py/CIP_LesionModelWidget.runAnalysis
3,094
def kill_kernel(self):
    """ Kill the running kernel. """
    if self.has_kernel:
        # Pause the heart beat channel if it exists.
        if self._hb_channel is not None:
            self._hb_channel.pause()

        # Attempt to kill the kernel.
        try:
            self.kernel.kill()
        except __HOLE__ as e:
            # In Windows, we will get an Access Denied error if the process
            # has already terminated. Ignore it.
            if sys.platform == 'win32':
                if e.winerror != 5:
                    raise
            # On Unix, we may get an ESRCH error if the process has already
            # terminated. Ignore it.
            else:
                from errno import ESRCH
                if e.errno != ESRCH:
                    raise
        self.kernel = None
    else:
        raise RuntimeError("Cannot kill kernel. No kernel is running!")
OSError
dataset/ETHPy150Open ipython/ipython-py3k/IPython/zmq/kernelmanager.py/KernelManager.kill_kernel
3,095
def mkdirsp(path):
    try:
        os.makedirs(path)
    except __HOLE__ as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
OSError
dataset/ETHPy150Open openaddresses/machine/openaddr/cache.py/mkdirsp
3,096
def download(self, source_urls, workdir, conform=None):
    output_files = []
    download_path = os.path.join(workdir, 'esri')
    mkdirsp(download_path)

    query_fields = self.field_names_to_request(conform)

    for source_url in source_urls:
        size = 0
        file_path = self.get_file_path(source_url, download_path)

        if os.path.exists(file_path):
            output_files.append(file_path)
            _L.debug("File exists %s", file_path)
            continue

        metadata = self.get_layer_metadata(source_url)
        if query_fields is None:
            field_names = [f['name'] for f in metadata['fields']]
        else:
            field_names = query_fields[:]

        if X_FIELDNAME not in field_names:
            field_names.append(X_FIELDNAME)
        if Y_FIELDNAME not in field_names:
            field_names.append(Y_FIELDNAME)
        if GEOM_FIELDNAME not in field_names:
            field_names.append(GEOM_FIELDNAME)

        query_url = source_url + '/query'

        # Get the count of rows in the layer
        count_json = self.get_layer_feature_count(query_url)
        row_count = count_json.get('count')
        page_size = metadata.get('maxRecordCount', 500)
        if page_size > 1000:
            page_size = 1000

        _L.info("Source has {} rows".format(row_count))

        page_args = []

        if metadata.get('supportsPagination') or \
           (metadata.get('advancedQueryCapabilities') and metadata['advancedQueryCapabilities']['supportsPagination']):
            # If the layer supports pagination, we can use resultOffset/resultRecordCount to paginate

            # There's a bug where some servers won't handle these queries in combination with a list of
            # fields specified. We'll make a single, 1 row query here to check if the server supports this
            # and switch to querying for all fields if specifying the fields fails.
            if query_fields and not self.can_handle_pagination(query_url, query_fields):
                _L.info("Source does not support pagination with fields specified, so querying for all fields.")
                query_fields = None

            for offset in range(0, row_count, page_size):
                page_args.append({
                    'resultOffset': offset,
                    'resultRecordCount': page_size,
                    'where': '1=1',
                    'geometryPrecision': 7,
                    'returnGeometry': 'true',
                    'outSR': 4326,
                    'outFields': ','.join(query_fields or ['*']),
                    'f': 'json',
                })
            _L.info("Built {} requests using resultOffset method".format(len(page_args)))
        else:
            # If not, we can still use the `where` argument to paginate

            use_oids = True
            if metadata.get('supportsStatistics'):
                # If the layer supports statistics, we can request maximum and minimum object ID
                # to help build the pages
                oid_field_name = self.find_oid_field_name(metadata)

                try:
                    (oid_min, oid_max) = self.get_layer_min_max(query_url, oid_field_name)

                    for page_min in range(oid_min - 1, oid_max, page_size):
                        page_max = min(page_min + page_size, oid_max)
                        page_args.append({
                            'where': '{} > {} AND {} <= {}'.format(
                                oid_field_name,
                                page_min,
                                oid_field_name,
                                page_max,
                            ),
                            'geometryPrecision': 7,
                            'returnGeometry': 'true',
                            'outSR': 4326,
                            'outFields': ','.join(query_fields or ['*']),
                            'f': 'json',
                        })
                    _L.info("Built {} requests using OID where clause method".format(len(page_args)))

                    # If we reach this point we don't need to fall through to enumerating all object IDs
                    # because the statistics method worked
                    use_oids = False
                except DownloadError:
                    _L.exception("Finding max/min from statistics failed. Trying OID enumeration.")

            if use_oids:
                # If the layer does not support statistics, we can request
                # all the individual IDs and page through them one chunk at
                # a time.
                oid_data = self.get_layer_oids(query_url)
                oids = oid_data['objectIds']

                for i in range(0, len(oids), 100):
                    oid_chunk = map(long if PY2 else int, oids[i:i+100])
                    page_args.append({
                        'objectIds': ','.join(map(str, oid_chunk)),
                        'geometryPrecision': 7,
                        'returnGeometry': 'true',
                        'outSR': 4326,
                        'outFields': ','.join(query_fields or ['*']),
                        'f': 'json',
                    })
                _L.info("Built {} requests using OID enumeration method".format(len(page_args)))

        with csvopen(file_path, 'w', encoding='utf-8') as f:
            writer = csvDictWriter(f, fieldnames=field_names, encoding='utf-8')
            writer.writeheader()

            for query_args in page_args:
                try:
                    response = request('POST', query_url, headers=self.headers, data=query_args)
                    data = self.handle_esri_errors(response, "Could not retrieve this chunk of objects from ESRI source")
                except socket.timeout as e:
                    raise DownloadError("Timeout when connecting to URL", e)
                except __HOLE__ as e:
                    raise DownloadError("Could not parse JSON", e)
                except Exception as e:
                    raise DownloadError("Could not connect to URL", e)
                finally:
                    # Wipe out whatever we had written out so far
                    f.truncate()

                error = data.get('error')
                if error:
                    raise DownloadError("Problem querying ESRI dataset with args {}. Server said: {}".format(query_args, error['message']))

                geometry_type = data.get('geometryType')
                features = data.get('features')

                for feature in features:
                    try:
                        ogr_geom = self.build_ogr_geometry(geometry_type, feature)
                        row = feature.get('attributes', {})
                        row[GEOM_FIELDNAME] = ogr_geom.ExportToWkt()
                        try:
                            centroid = ogr_geom.Centroid()
                        except RuntimeError as e:
                            if 'Invalid number of points in LinearRing found' not in str(e):
                                raise
                            xmin, xmax, ymin, ymax = ogr_geom.GetEnvelope()
                            row[X_FIELDNAME] = round(xmin/2 + xmax/2, 7)
                            row[Y_FIELDNAME] = round(ymin/2 + ymax/2, 7)
                        else:
                            row[X_FIELDNAME] = round(centroid.GetX(), 7)
                            row[Y_FIELDNAME] = round(centroid.GetY(), 7)

                        writer.writerow({fn: row.get(fn) for fn in field_names})
                        size += 1
                    except TypeError:
                        _L.debug("Skipping a geometry", exc_info=True)

        _L.info("Downloaded %s ESRI features for file %s", size, file_path)
        output_files.append(file_path)
    return output_files
ValueError
dataset/ETHPy150Open openaddresses/machine/openaddr/cache.py/EsriRestDownloadTask.download
3,097
def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name.  In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    prof = Profile()
    try:
        prof = prof.run(statement)
    except __HOLE__:
        pass
    if filename is not None:
        prof.dump_stats(filename)
    else:
        return prof.print_stats(sort)
SystemExit
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/profile.py/run
3,098
def runctx(statement, globals, locals, filename=None, sort=-1):
    """Run statement under profiler, supplying your own globals and locals,
    optionally saving results in filename.

    statement and filename have the same semantics as profile.run
    """
    prof = Profile()
    try:
        prof = prof.runctx(statement, globals, locals)
    except __HOLE__:
        pass

    if filename is not None:
        prof.dump_stats(filename)
    else:
        return prof.print_stats(sort)

# Backwards compatibility.
SystemExit
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/profile.py/runctx
3,099
def __init__(self, timer=None, bias=None):
    self.timings = {}
    self.cur = None
    self.cmd = ""
    self.c_func_name = ""

    if bias is None:
        bias = self.bias
    self.bias = bias  # Materialize in local dict for lookup speed.

    if not timer:
        if _has_res:
            self.timer = resgetrusage
            self.dispatcher = self.trace_dispatch
            self.get_time = _get_time_resource
        elif hasattr(time, 'clock'):
            self.timer = self.get_time = time.clock
            self.dispatcher = self.trace_dispatch_i
        elif hasattr(os, 'times'):
            self.timer = os.times
            self.dispatcher = self.trace_dispatch
            self.get_time = _get_time_times
        else:
            self.timer = self.get_time = time.time
            self.dispatcher = self.trace_dispatch_i
    else:
        self.timer = timer
        t = self.timer()  # test out timer function
        try:
            length = len(t)
        except __HOLE__:
            self.get_time = timer
            self.dispatcher = self.trace_dispatch_i
        else:
            if length == 2:
                self.dispatcher = self.trace_dispatch
            else:
                self.dispatcher = self.trace_dispatch_l
            # This get_time() implementation needs to be defined
            # here to capture the passed-in timer in the parameter
            # list (for performance).  Note that we can't assume
            # the timer() result contains two values in all
            # cases.
            def get_time_timer(timer=timer, sum=sum):
                return sum(timer())
            self.get_time = get_time_timer

    self.t = self.get_time()
    self.simulate_call('profiler')

# Heavily optimized dispatch routine for os.times() timer
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/profile.py/Profile.__init__