Columns: Unnamed: 0 (int64, 0 to 10k); function (string, lengths 79 to 138k); label (string, 20 classes); info (string, lengths 42 to 261)
7,800
def get_from_dictionary(self, dictionary, key): """Returns a value from the given `dictionary` based on the given `key`. If the given `key` cannot be found from the `dictionary`, this keyword fails. The given dictionary is never altered by this keyword. Example: | ${value} = | Get From Dictionary | ${D3} | b | => - ${value} = 2 """ try: return dictionary[key] except __HOLE__: raise RuntimeError("Dictionary does not contain key '%s'" % key)
KeyError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_Dictionary.get_from_dictionary
7,801
def _yield_dict_diffs(self, keys, dict1, dict2): for key in keys: try: assert_equals(dict1[key], dict2[key], msg='Key %s' % (key,)) except __HOLE__, err: yield unic(err)
AssertionError
dataset/ETHPy150Open shellderp/sublime-robot-plugin/lib/robot/libraries/Collections.py/_Dictionary._yield_dict_diffs
7,802
def get_output(self, variation): # We import this here, so App Engine Helper users don't get import # errors. from subprocess import Popen, PIPE for input in self.get_input(variation): args = ['uglifyjs'] try: args = args + settings.UGLIFIER_OPTIONS except __HOLE__: pass try: cmd = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True) output, error = cmd.communicate(smart_str(input)) assert cmd.wait() == 0, 'Command returned bad result:\n%s' % error yield output.decode('utf-8') except Exception, e: raise ValueError("Failed to run UglifyJs. " "Please make sure you have Node.js and UglifyJS installed " "and that it's in your PATH.\n" "Error was: %s" % e)
AttributeError
dataset/ETHPy150Open adieu/django-mediagenerator/mediagenerator/filters/uglifier.py/Uglifier.get_output
7,803
def _auto_fn(name): """default dialect importer. plugs into the :class:`.PluginLoader` as a first-hit system. """ if "." in name: dialect, driver = name.split(".") else: dialect = name driver = "base" try: module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects except __HOLE__: return None module = getattr(module, dialect) if hasattr(module, driver): module = getattr(module, driver) return lambda: module.dialect else: return None
ImportError
dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/dialects/__init__.py/_auto_fn
7,804
@login_required def inspect(request, id=None, tube_prefix='', tube=''): if request.method == 'POST': id = request.POST['id'] try: id = int(id) except (__HOLE__, TypeError): id = None try: client = Client(request) except ConnectionError: return render_unavailable() if id: job = client.peek(id) if job is None: request.flash.put(notice='no job found with id #%d' % id) stats = [] buried = False else: buried = job.stats()['state'] == 'buried' stats = job.stats().items() else: job = None stats = [] buried = False tubes = client.tubes() return render_to_response('beanstalk/inspect.html', {'job': job, 'stats': stats, 'buried': buried, 'tubes': tubes, 'tube_prefix': tube_prefix, 'current_tube': tube}, context_instance=RequestContext(request))
ValueError
dataset/ETHPy150Open andreisavu/django-jack/jack/beanstalk/views.py/inspect
7,805
def _redirect_to_referer_or(request, dest): referer = request.META.get('HTTP_REFERER', None) if referer is None: return redirect(dest) try: redirect_to = urlsplit(referer, 'http', False)[2] except __HOLE__: redirect_to = dest return redirect(redirect_to)
IndexError
dataset/ETHPy150Open andreisavu/django-jack/jack/beanstalk/views.py/_redirect_to_referer_or
7,806
def test_create_raises_exception_with_bad_keys(self): try: Subscription.create({"bad_key": "value"}) self.assertTrue(False) except __HOLE__ as e: self.assertEquals("'Invalid keys: bad_key'", str(e))
KeyError
dataset/ETHPy150Open braintree/braintree_python/tests/unit/test_subscription.py/TestSubscription.test_create_raises_exception_with_bad_keys
7,807
def test_update_raises_exception_with_bad_keys(self): try: Subscription.update("id", {"bad_key": "value"}) self.assertTrue(False) except __HOLE__ as e: self.assertEquals("'Invalid keys: bad_key'", str(e))
KeyError
dataset/ETHPy150Open braintree/braintree_python/tests/unit/test_subscription.py/TestSubscription.test_update_raises_exception_with_bad_keys
7,808
def test_make_json_error_response(self): response = self.provider._make_json_error_response('some_error') self.assertEquals(400, response.status_code) try: response_json = response.json() except __HOLE__: response_json = response.json self.assertEquals({'error': 'some_error'}, response_json)
TypeError
dataset/ETHPy150Open NateFerrero/oauth2lib/oauth2lib/tests/test_provider.py/AuthorizationProviderTest.test_make_json_error_response
7,809
def IsPythonFile(filename): """Return True if filename is a Python file.""" if os.path.splitext(filename)[1] == '.py': return True try: with open(filename, 'rb') as fd: encoding = tokenize.detect_encoding(fd.readline)[0] # Check for correctness of encoding. with py3compat.open_with_encoding(filename, encoding=encoding) as fd: fd.read() except UnicodeDecodeError: encoding = 'latin-1' except (IOError, SyntaxError): # If we fail to detect encoding (or the encoding cookie is incorrect - which # will make detect_encoding raise SyntaxError), assume it's not a Python # file. return False try: with py3compat.open_with_encoding(filename, mode='r', encoding=encoding) as fd: first_line = fd.readlines()[0] except (__HOLE__, IndexError): return False return re.match(r'^#!.*\bpython[23]?\b', first_line)
IOError
dataset/ETHPy150Open google/yapf/yapf/yapflib/file_resources.py/IsPythonFile
7,810
def read_pid(pidfile, logger): """Returns the pid in `pidfile` or None if the file doesn't exist.""" _using_pidfile(pidfile, logger) try: return int(readfile(pidfile)) except __HOLE__ as e: if e.errno == errno.ENOENT: logger.info("Daemon not running (no lockfile)") return None raise
IOError
dataset/ETHPy150Open facebook/sparts/sparts/daemon.py/read_pid
7,811
def kill(pidfile, logger, signum=signal.SIGTERM): """Sends `signum` to the pid specified by `pidfile`. Logs messages to `logger`. Returns True if the process is not running, or signal was sent successfully. Returns False if the process for the pidfile was running and there was an error sending the signal.""" daemon_pid = read_pid(pidfile, logger) if daemon_pid is None: return True try: send_signal(daemon_pid, signum, logger) return True except __HOLE__ as e: if e.errno == errno.ESRCH: logger.warning("Daemon not running (Stale lockfile)") os.remove(pidfile) return True elif e.errno == errno.EPERM: logger.error("Unable to kill %d (EPERM)", daemon_pid) return False raise
OSError
dataset/ETHPy150Open facebook/sparts/sparts/daemon.py/kill
7,812
def status(pidfile, logger): """Checks to see if the process for the pid in `pidfile` is running. Logs messages to `logger`. Returns True if there is a program for the running pid. Returns False if not or if there was an error polling the pid.""" daemon_pid = read_pid(pidfile, logger) if daemon_pid is None: return False try: # Sending signal 0 simply checks if the pid can be sent a signal # and makes sure the pid exist. It doesn't interrupt the running # program in any way. send_signal(daemon_pid, 0, logger) logger.info("Daemon is alive") return True except __HOLE__ as e: if e.errno == errno.ESRCH: logger.warning("Daemon not running (Stale lockfile)") os.remove(pidfile) return False elif e.errno == errno.EPERM: logger.error("Unable to poll %d (EPERM)", daemon_pid) return False raise
OSError
dataset/ETHPy150Open facebook/sparts/sparts/daemon.py/status
7,813
def daemonize (self): ''' Do the UNIX double-fork magic, see Stevens' 'Advanced Programming in the UNIX Environment' for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 ''' try: pid = os.fork() if pid > 0: # Exit first parent sys.exit(0) except OSError, e: sys.stderr.write( 'fork #1 failed: %d (%s)\n' % (e.errno, e.strerror)) sys.exit(1) # Decouple from parent environment os.chdir(self.home_dir) os.setsid() os.umask(self.umask) # Do second fork try: pid = os.fork() if pid > 0: # Exit from second parent sys.exit(0) except __HOLE__, e: sys.stderr.write( 'fork #2 failed: %d (%s)\n' % (e.errno, e.strerror)) sys.exit(1) # This block breaks on OS X if sys.platform != 'darwin': # Redirect standard file descriptors sys.stdout.flush() sys.stderr.flush() si = file(self.stdin, 'r') so = file(self.stdout, 'a+') if self.stderr: se = file(self.stderr, 'a+', 0) else: se = so os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) def sigtermhandler (signum, frame): self.daemon_alive = False sys.exit() if self.use_gevent: import gevent gevent.reinit() gevent.signal(signal.SIGTERM, sigtermhandler, signal.SIGTERM, None) gevent.signal(signal.SIGINT, sigtermhandler, signal.SIGINT, None) else: signal.signal(signal.SIGTERM, sigtermhandler) signal.signal(signal.SIGINT, sigtermhandler) if self.verbose >= 1: print 'Started' # Make sure pid file is removed if we quit atexit.register(self.delpid) # Write pidfile pid = str(os.getpid()) file(self.pidfile, 'w+').write('%s\n' % pid)
OSError
dataset/ETHPy150Open leonjza/hogar/hogar/Utils/Daemon.py/Daemon.daemonize
7,814
def start (self, *args, **kwargs): ''' Start the daemon ''' if self.verbose >= 1: print 'Starting...' # Check for a pidfile to see if the daemon already runs try: pf = file(self.pidfile, 'r') pid = int(pf.read().strip()) pf.close() except IOError: pid = None except __HOLE__: pid = None if pid: message = 'pidfile %s already exists. Is it already running?\n' sys.stderr.write(message % self.pidfile) sys.exit(1) # Start the daemon self.daemonize() self.run(*args, **kwargs)
SystemExit
dataset/ETHPy150Open leonjza/hogar/hogar/Utils/Daemon.py/Daemon.start
7,815
def stop (self): ''' Stop the daemon ''' if self.verbose >= 1: print 'Stopping...' # Get the pid from the pidfile pid = self.get_pid() if not pid: message = 'pidfile %s does not exist. Not running?\n' sys.stderr.write(message % self.pidfile) # Just to be sure. A ValueError might occur if the PID file is # empty but does actually exist if os.path.exists(self.pidfile): os.remove(self.pidfile) return # Not an error in a restart # Try killing the daemon process try: i = 0 while 1: os.kill(pid, signal.SIGTERM) time.sleep(0.1) i += 1 if i % 10 == 0: os.kill(pid, signal.SIGHUP) except __HOLE__, err: err = str(err) if err.find('No such process') > 0: if os.path.exists(self.pidfile): os.remove(self.pidfile) else: print str(err) sys.exit(1) if self.verbose >= 1: print 'Stopped'
OSError
dataset/ETHPy150Open leonjza/hogar/hogar/Utils/Daemon.py/Daemon.stop
7,816
def get_pid (self): try: pf = file(self.pidfile, 'r') pid = int(pf.read().strip()) pf.close() except IOError: pid = None except __HOLE__: pid = None return pid
SystemExit
dataset/ETHPy150Open leonjza/hogar/hogar/Utils/Daemon.py/Daemon.get_pid
7,817
def _set_initial(self, initial): self._value = None try: self.value = initial[0] except __HOLE__: pass
IndexError
dataset/ETHPy150Open jmcarp/robobrowser/robobrowser/forms/fields.py/MultiOptionField._set_initial
7,818
def load_records(context, database, table, xml_dirpath, recursive=False, force_update=False): """Load metadata records from directory of files to database""" repo = repository.Repository(database, context, table=table) file_list = [] if os.path.isfile(xml_dirpath): file_list.append(xml_dirpath) elif recursive: for root, dirs, files in os.walk(xml_dirpath): for mfile in files: if mfile.endswith('.xml'): file_list.append(os.path.join(root, mfile)) else: for rec in glob(os.path.join(xml_dirpath, '*.xml')): file_list.append(rec) total = len(file_list) counter = 0 for recfile in sorted(file_list): counter += 1 LOGGER.info('Processing file %s (%d of %d)', recfile, counter, total) # read document try: exml = etree.parse(recfile, context.parser) except Exception as err: LOGGER.warn('XML document is not well-formed: %s', str(err)) continue record = metadata.parse_record(context, exml, repo) for rec in record: LOGGER.info('Inserting %s %s into database %s, table %s ....', rec.typename, rec.identifier, database, table) # TODO: do this as CSW Harvest try: repo.insert(rec, 'local', util.get_today_and_now()) LOGGER.info('Inserted') except __HOLE__ as err: if force_update: LOGGER.info('Record exists. Updating.') repo.update(rec) LOGGER.info('Updated') else: LOGGER.warn('ERROR: not inserted %s', err)
RuntimeError
dataset/ETHPy150Open geopython/pycsw/pycsw/core/admin.py/load_records
7,819
def export_records(context, database, table, xml_dirpath): """Export metadata records from database to directory of files""" repo = repository.Repository(database, context, table=table) LOGGER.info('Querying database %s, table %s ....', database, table) records = repo.session.query(repo.dataset) LOGGER.info('Found %d records\n', records.count()) LOGGER.info('Exporting records\n') dirpath = os.path.abspath(xml_dirpath) if not os.path.exists(dirpath): LOGGER.info('Directory %s does not exist. Creating...', dirpath) try: os.makedirs(dirpath) except __HOLE__ as err: raise RuntimeError('Could not create %s %s' % (dirpath, err)) for record in records.all(): identifier = \ getattr(record, context.md_core_model['mappings']['pycsw:Identifier']) LOGGER.info('Processing %s', identifier) if identifier.find(':') != -1: # it's a URN # sanitize identifier LOGGER.info(' Sanitizing identifier') identifier = identifier.split(':')[-1] # write to XML document filename = os.path.join(dirpath, '%s.xml' % identifier) try: LOGGER.info('Writing to file %s', filename) with open(filename, 'w') as xml: xml.write('<?xml version="1.0" encoding="UTF-8"?>\n') xml.write(record.xml) except Exception as err: raise RuntimeError("Error writing to %s" % filename, err)
OSError
dataset/ETHPy150Open geopython/pycsw/pycsw/core/admin.py/export_records
7,820
def get_sysprof(): """Get versions of dependencies""" none = 'Module not found' try: import sqlalchemy vsqlalchemy = sqlalchemy.__version__ except ImportError: vsqlalchemy = none try: import pyproj vpyproj = pyproj.__version__ except __HOLE__: vpyproj = none try: import shapely try: vshapely = shapely.__version__ except AttributeError: import shapely.geos vshapely = shapely.geos.geos_capi_version except ImportError: vshapely = none try: import owslib try: vowslib = owslib.__version__ except AttributeError: vowslib = 'Module found, version not specified' except ImportError: vowslib = none return '''pycsw system profile -------------------- Python version: %s os: %s SQLAlchemy: %s Shapely: %s lxml: %s libxml2: %s pyproj: %s OWSLib: %s''' % (sys.version_info, sys.platform, vsqlalchemy, vshapely, etree.__version__, etree.LIBXML_VERSION, vpyproj, vowslib)
ImportError
dataset/ETHPy150Open geopython/pycsw/pycsw/core/admin.py/get_sysprof
7,821
def __init__(self, bindaddr, sinkspecs, interval, percent, debug=0, key_prefix=''): _, host, port = parse_addr(bindaddr) if port is None: self.exit(E_BADADDR % bindaddr) self._bindaddr = (host, port) # TODO: generalize to support more than one sink type. currently # only the graphite backend is present, but we may want to write # stats to hbase, redis, etc. - ph # construct the sink and add hosts to it if not sinkspecs: self.exit(E_NOSINKS) self._sink = sink.GraphiteSink() errors = [] for spec in sinkspecs: try: self._sink.add(spec) except __HOLE__, ex: errors.append(ex) if errors: for err in errors: self.error(str(err)) self.exit('exiting.') self._percent = float(percent) self._interval = float(interval) self._debug = debug self._sock = None self._flush_task = None self._key_prefix = key_prefix self._reset_stats()
ValueError
dataset/ETHPy150Open phensley/gstatsd/gstatsd/service.py/StatsDaemon.__init__
7,822
def grid_one(request, year=datetime.now().year, month=datetime.now().month, day=None, calendar_slug=None, page=1): """ Shows a grid (similar in appearance to a physical calendar) of upcoming events for either a specific calendar or no calendar at all. """ try: page = int(page) if year: year = int(year) if month: month = int(month) except __HOLE__: raise Http404 calendar = None calendar_list = Calendar.objects.all().order_by('name') events = Event.objects.all() if calendar_slug: calendar = get_object_or_404(Calendar, slug=calendar_slug) events = events.filter(calendars=calendar) month_formatted = pycal.monthcalendar(year, month) month_minus = month - 1 month_plus = month + 1 month_name = pycal.month_name[month] weekday_header = pycal.weekheader(3).strip().split(" ") year_minus = year - 1 year_plus = year + 1 today = datetime.now().day this_month = datetime.now().month this_year = datetime.now().year event_list = events.filter(Q(start__year=year, start__month=month) | Q(end__year=year, end__month=month)) page_name = "This is a test of the calendaring system." page = { 'calendar': calendar, 'calendar_list': calendar_list, 'event_list': event_list, 'month': month, 'month_formatted': month_formatted, 'month_minus': month_minus, 'month_name': month_name, 'month_plus': month_plus, 'page_name': page_name, 'this_month': this_month, 'this_year': this_year, 'today': today, 'weekday_header': weekday_header, 'year': year, 'year_minus': year_minus, 'year_plus': year_plus, } return render_to_response('events/gridiron.html', page, context_instance=RequestContext(request))
ValueError
dataset/ETHPy150Open albatrossandco/brubeck_cms/brubeck/events/views.py/grid_one
7,823
def event_list(request, year=None, month=None, day=None, calendar_slug=None, page=1): """ Shows a list of upcoming events for either a specific calendar or no calendar at all. """ try: page = int(page) if year: year = int(year) if month: month = int(month) if day: day = int(day) except __HOLE__: raise Http404 calendar = None events = Event.not_past.all() if calendar_slug: calendar = get_object_or_404(Calendar, slug=calendar_slug) events = events.filter(calendars=calendar) if not year: events = events page_name = "Upcoming events" elif not month: events = events.filter(Q(start__year=year) | Q(end__year=year)) page_name = "Events in %s" % year elif not day: events = events.filter(Q(start__year=year, start__month=month) | Q(end__year=year, end__month=month)) page_name = "Events in %s" % date(year, month, 1).strftime("%B %Y") else: events = events.filter(Q(start__year=year, start__month=month, start__day=day) | Q(end__year=year, end__month=month, end__day=day)) #ymd = datetime.date(year, month, day) #events = events.filter(Q(start__gte=ymd) & Q(end__lte=ymd)) page_name = "Events on %s" % date(year, month, day).strftime("%B %d, %Y") if calendar: page_name += " from %s" % calendar paginator = Paginator(events, 10) try: event_page = paginator.page(page) except (EmptyPage, InvalidPage): raise Http404 page = { 'calendar': calendar, 'calendar_slug': calendar_slug, 'event_page': event_page, 'page_name': page_name } return render_to_response('events/event_list.html', page, context_instance=RequestContext(request))
ValueError
dataset/ETHPy150Open albatrossandco/brubeck_cms/brubeck/events/views.py/event_list
7,824
def event_detail(request, year=None, month=None, day=None, event_slug=None): """ Shows a specific event. """ try: if year: year = int(year) if month: month = int(month) if day: day = int(day) except __HOLE__: raise Http404 events = Event.objects.filter(start__year=year, start__month=month, start__day=day) try: event = events.get(slug=event_slug) except Event.DoesNotExist: raise Http404 # if calendar_slug: # return HttpResponsePermanentRedirect(event.get_absolute_url()) page = { 'event': event } return render_to_response('events/event_detail.html', page, context_instance=RequestContext(request))
ValueError
dataset/ETHPy150Open albatrossandco/brubeck_cms/brubeck/events/views.py/event_detail
7,825
def _diff_to_hdf(self, difflines, tabwidth): """ Translate a diff file into something suitable for inclusion in HDF. The result is [(filename, revname_old, revname_new, changes)], where changes has the same format as the result of `trac.versioncontrol.diff.hdf_diff`. If the diff cannot be parsed, this method returns None. """ def _markup_intraline_change(fromlines, tolines): from trac.versioncontrol.diff import get_change_extent for i in xrange(len(fromlines)): fr, to = fromlines[i], tolines[i] (start, end) = get_change_extent(fr, to) if start != 0 or end != 0: last = end+len(fr) fromlines[i] = fr[:start] + '\0' + fr[start:last] + \ '\1' + fr[last:] last = end+len(to) tolines[i] = to[:start] + '\0' + to[start:last] + \ '\1' + to[last:] import re space_re = re.compile(' ( +)|^ ') def htmlify(match): div, mod = divmod(len(match.group(0)), 2) return div * '&nbsp; ' + mod * '&nbsp;' comments = [] changes = [] lines = iter(difflines) try: line = lines.next() while True: oldpath = oldrev = newpath = newrev = '' oldinfo = newinfo = [] binary = False # consume preample, storing free lines in comments # (also detect the special case of git binary patches) if not line.startswith('--- '): if not line.startswith('Index: ') and line != '=' * 67: comments.append(line) if line == "GIT binary patch": binary = True diffcmd_line = comments[0] # diff --git a/... b/,,, oldpath, newpath = diffcmd_line.split()[-2:] if any(c.startswith('new file') for c in comments): oldpath = '/dev/null' if any(c.startswith('deleted file') for c in comments): newpath = '/dev/null' oldinfo = ['', oldpath] newinfo = ['', newpath] index = [c for c in comments if c.startswith('index ')] if index: # index 8f****78..1e****5c oldrev, newrev = index[0].split()[-1].split('..') oldinfo.append(oldrev) newinfo.append(newrev) line = lines.next() while line: comments.append(line) line = lines.next() else: line = lines.next() continue if not oldinfo and not newinfo: # Base filename/version from '--- <file> [rev]' oldinfo = line.split(None, 2) if len(oldinfo) > 1: oldpath = oldinfo[1] if len(oldinfo) > 2: oldrev = oldinfo[2] # Changed filename/version from '+++ <file> [rev]' line = lines.next() if not line.startswith('+++ '): self.log.debug('expected +++ after ---, got %s', line) return None newinfo = line.split(None, 2) if len(newinfo) > 1: newpath = newinfo[1] if len(newinfo) > 2: newrev = newinfo[2] shortrev = ('old', 'new') if oldpath or newpath: sep = re.compile(r'([/.~\\])') commonprefix = ''.join(os.path.commonprefix( [sep.split(newpath), sep.split(oldpath)])) commonsuffix = ''.join(os.path.commonprefix( [sep.split(newpath)[::-1], sep.split(oldpath)[::-1]])[::-1]) if len(commonprefix) > len(commonsuffix): common = commonprefix elif commonsuffix: common = commonsuffix.lstrip('/') a = oldpath[:-len(commonsuffix)] b = newpath[:-len(commonsuffix)] if len(a) < 4 and len(b) < 4: shortrev = (a, b) elif oldpath == '/dev/null': common = _("new file %(new)s", new=newpath.lstrip('b/')) shortrev = ('-', '+') elif newpath == '/dev/null': common = _("deleted file %(deleted)s", deleted=oldpath.lstrip('a/')) shortrev = ('+', '-') else: common = '(a) %s vs. (b) %s' % (oldpath, newpath) shortrev = ('a', 'b') else: common = '' groups = [] groups_title = [] changes.append({'change': 'edit', 'props': [], 'comments': '\n'.join(comments), 'binary': binary, 'diffs': groups, 'diffs_title': groups_title, 'old': {'path': common, 'rev': ' '.join(oldinfo[1:]), 'shortrev': shortrev[0]}, 'new': {'path': common, 'rev': ' '.join(newinfo[1:]), 'shortrev': shortrev[1]}}) comments = [] line = lines.next() while line: # "@@ -333,10 +329,8 @@" or "@@ -1 +1 @@ [... title ...]" r = re.match(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@' '(.*)', line) if not r: break blocks = [] groups.append(blocks) fromline, fromend, toline, toend = \ [int(x or 1) for x in r.groups()[:4]] groups_title.append(r.group(5)) last_type = extra = None fromend += fromline toend += toline line = lines.next() while fromline < fromend or toline < toend or extra: # First character is the command command = ' ' if line: command, line = line[0], line[1:] # Make a new block? if (command == ' ') != last_type: last_type = command == ' ' kind = 'unmod' if last_type else 'mod' block = {'type': kind, 'base': {'offset': fromline - 1, 'lines': []}, 'changed': {'offset': toline - 1, 'lines': []}} blocks.append(block) else: block = blocks[-1] if command == ' ': sides = ['base', 'changed'] elif command == '+': last_side = 'changed' sides = [last_side] elif command == '-': last_side = 'base' sides = [last_side] elif command == '\\' and last_side: meta = block[last_side].setdefault('meta', {}) meta[len(block[last_side]['lines'])] = True sides = [last_side] elif command == '@': # ill-formed patch groups_title[-1] = "%s (%s)" % ( groups_title[-1], _("this hunk was shorter than expected")) line = '@'+line break else: self.log.debug('expected +, - or \\, got %s', command) return None for side in sides: if side == 'base': fromline += 1 else: toline += 1 block[side]['lines'].append(line) line = lines.next() extra = line and line[0] == '\\' except __HOLE__: pass # Go through all groups/blocks and mark up intraline changes, and # convert to html for o in changes: for group in o['diffs']: for b in group: base, changed = b['base'], b['changed'] f, t = base['lines'], changed['lines'] if b['type'] == 'mod': if len(f) == 0: b['type'] = 'add' elif len(t) == 0: b['type'] = 'rem' elif len(f) == len(t): _markup_intraline_change(f, t) for i in xrange(len(f)): line = expandtabs(f[i], tabwidth, '\0\1') line = escape(line, quotes=False) line = '<del>'.join([space_re.sub(htmlify, seg) for seg in line.split('\0')]) line = line.replace('\1', '</del>') f[i] = Markup(line) if 'meta' in base and i in base['meta']: f[i] = Markup('<em>%s</em>') % f[i] for i in xrange(len(t)): line = expandtabs(t[i], tabwidth, '\0\1') line = escape(line, quotes=False) line = '<ins>'.join([space_re.sub(htmlify, seg) for seg in line.split('\0')]) line = line.replace('\1', '</ins>') t[i] = Markup(line) if 'meta' in changed and i in changed['meta']: t[i] = Markup('<em>%s</em>') % t[i] return changes
StopIteration
dataset/ETHPy150Open edgewall/trac/trac/mimeview/patch.py/PatchRenderer._diff_to_hdf
7,826
def _assert_saved(self, subj): try: node = subj.__node__ if node is None: raise NotSaved(subj) except __HOLE__: raise NotSaved(subj)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/ext/ogm/store.py/Store._assert_saved
7,827
def authenticate(self, force=False): reqbody = json.dumps({'auth': { 'passwordCredentials': { 'username': self.user_id, 'password': self.key }, 'tenantId': self._ex_tenant_id }}) resp = self.request('/tokens', data=reqbody, headers={}, method='POST') if resp.status == httplib.UNAUTHORIZED: # HTTP UNAUTHORIZED (401): auth failed raise InvalidCredsError() elif resp.status != httplib.OK: body = 'code: %s body:%s' % (resp.status, resp.body) raise MalformedResponseError('Malformed response', body=body, driver=self.driver) else: try: body = json.loads(resp.body) except Exception: e = sys.exc_info()[1] raise MalformedResponseError('Failed to parse JSON', e) try: expires = body['access']['token']['expires'] self.auth_token = body['access']['token']['id'] self.auth_token_expires = parse_date(expires) self.urls = body['access']['serviceCatalog'] self.auth_user_info = None except __HOLE__: e = sys.exc_info()[1] raise MalformedResponseError('Auth JSON response is \ missing required elements', e) return self
KeyError
dataset/ETHPy150Open apache/libcloud/libcloud/compute/drivers/cloudwatt.py/CloudwattAuthConnection.authenticate
7,828
def parse_pajek(lines): """Parse Pajek format graph from string or iterable. Parameters ---------- lines : string or iterable Data in Pajek format. Returns ------- G : NetworkX graph See Also -------- read_pajek() """ import shlex # multigraph=False if is_string_like(lines): lines = iter(lines.split('\n')) lines = iter([line.rstrip('\n') for line in lines]) G = nx.MultiDiGraph() # are multiedges allowed in Pajek? assume yes while lines: try: l = next(lines) except: # EOF break if l.lower().startswith("*network"): try: label, name = l.split(None, 1) except __HOLE__: # Line was not of the form: *network NAME pass else: G.graph['name'] = name elif l.lower().startswith("*vertices"): nodelabels = {} l, nnodes = l.split() for i in range(int(nnodes)): l = next(lines) try: splitline = [x.decode('utf-8') for x in shlex.split(make_str(l).encode('utf-8'))] except AttributeError: splitline = shlex.split(str(l)) id, label = splitline[0:2] G.add_node(label) nodelabels[id] = label G.node[label] = {'id': id} try: x, y, shape = splitline[2:5] G.node[label].update({'x': float(x), 'y': float(y), 'shape': shape}) except: pass extra_attr = zip(splitline[5::2], splitline[6::2]) G.node[label].update(extra_attr) elif l.lower().startswith("*edges") or l.lower().startswith("*arcs"): if l.lower().startswith("*edge"): # switch from multidigraph to multigraph G = nx.MultiGraph(G) if l.lower().startswith("*arcs"): # switch to directed with multiple arcs for each existing edge G = G.to_directed() for l in lines: try: splitline = [x.decode('utf-8') for x in shlex.split(make_str(l).encode('utf-8'))] except AttributeError: splitline = shlex.split(str(l)) if len(splitline) < 2: continue ui, vi = splitline[0:2] u = nodelabels.get(ui, ui) v = nodelabels.get(vi, vi) # parse the data attached to this edge and put in a dictionary edge_data = {} try: # there should always be a single value on the edge? w = splitline[2:3] edge_data.update({'weight': float(w[0])}) except: pass # if there isn't, just assign a 1 # edge_data.update({'value':1}) extra_attr = zip(splitline[3::2], splitline[4::2]) edge_data.update(extra_attr) # if G.has_edge(u,v): # multigraph=True G.add_edge(u, v, **edge_data) return G
ValueError
dataset/ETHPy150Open networkx/networkx/networkx/readwrite/pajek.py/parse_pajek
7,829
def CreateFromPackage(self, filename, description, display_name, catalogs): """Create package info from a live package stored at filename. Args: filename: str description: str, like "Security update for Foo Software" display_name: str, like "Munki Client" catalogs: list of str catalog names. """ self.VerifyMunkiInstall() args = [self._GetMunkiPath(MAKEPKGINFO), filename] args.append('--description=%s' % description) if display_name: args.append('--displayname=%s' % display_name) for catalog in catalogs: args.append('--catalog=%s' % catalog) if 'PKGS_MUNKI_MAKEPKGINFO' in os.environ: args[0] = os.environ['PKGS_MUNKI_MAKEPKGINFO'] try: p = subprocess.Popen( args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, shell=False) except __HOLE__, e: raise MunkiInstallError('Cannot execute %s: %s' % (' '.join(args), e)) (stdout, stderr) = p.communicate(None) status = p.poll() if status == 0 and stdout and not stderr: self.filename = filename else: raise MunkiError( 'makepkginfo: exit status %d, stderr=%s' % (status, stderr)) self.plist = plist.MunkiPackageInfoPlist(stdout) try: self.plist.Parse() except plist.Error, e: raise Error(str(e))
OSError
dataset/ETHPy150Open google/simian/src/simian/mac/munki/pkgs.py/MunkiPackageInfo.CreateFromPackage
7,830
def run(self, result=None): if result is None: result = self.defaultTestResult() result.startTest(self) test_method = getattr(self, self._testMethodName) self._test_method = test_method try: ok = False self._set_up() try: self.setUp() except KeyboardInterrupt: raise except: result.addError(self, sys.exc_info()) # setUp can fail because of app in some state returns internal #+ server error. It's good to know about it - it can say more #+ than that some element couldn't be found. try: self.driver.check_expected_errors(test_method) self.driver.check_expected_infos(test_method) except: result.addError(self, sys.exc_info()) return try: test_method() ok = True except self.failureException: self.make_screenshot() result.addFailure(self, sys.exc_info()) except __HOLE__: raise except: self.make_screenshot() result.addError(self, sys.exc_info()) try: self.driver.check_expected_errors(test_method) self.driver.check_expected_infos(test_method) except: ok = False result.addError(self, sys.exc_info()) try: self.tearDown() except KeyboardInterrupt: raise except: result.addError(self, sys.exc_info()) ok = False if ok: result.addSuccess(self) finally: if not ok: self.make_screenshot() result.stopTest(self) # Is nice to see at break point if test passed or not. # So this call have to be after stopTest which print result of test. self._tear_down()
KeyboardInterrupt
dataset/ETHPy150Open horejsek/python-webdriverwrapper/webdriverwrapper/unittest/testcase.py/WebdriverTestCase.run
7,831
@csv.DictReader.fieldnames.getter def fieldnames(self): if self._fieldnames is None: try: self._fieldnames = self.reader.next() except __HOLE__: pass self.line_num = self.reader.line_num self.__mv_fieldnames = [] self.__fieldnames = [] for name in self._fieldnames: if name.startswith('__mv_'): # Store this pair: <fieldname>, __mv_<fieldname> self.__mv_fieldnames.append((name[len('__mv_'):], name)) else: self.__fieldnames.append(name) return self.__fieldnames
StopIteration
dataset/ETHPy150Open hvandenb/splunk-elasticsearch/search-elasticsearch/bin/splunklib/searchcommands/splunk_csv/dict_reader.py/DictReader.fieldnames
7,832
def _hack_at_distutils(): # Windows-only workaround for some configurations: see # https://bugs.python.org/issue23246 (Python 2.7 with # a specific MS compiler suite download) if sys.platform == "win32": try: import setuptools # for side-effects, patches distutils except __HOLE__: pass
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/cffi-1.5.2/cffi/verifier.py/_hack_at_distutils
7,833
def _locate_module(self): if not os.path.isfile(self.modulefilename): if self.ext_package: try: pkg = __import__(self.ext_package, None, None, ['__doc__']) except __HOLE__: return # cannot import the package itself, give up # (e.g. it might be called differently before installation) path = pkg.__path__ else: path = None filename = self._vengine.find_module(self.get_module_name(), path, _get_so_suffixes()) if filename is None: return self.modulefilename = filename self._vengine.collect_types() self._has_module = True
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/cffi-1.5.2/cffi/verifier.py/Verifier._locate_module
7,834
def _compile_module(self): # compile this C source tmpdir = os.path.dirname(self.sourcefilename) outputfilename = ffiplatform.compile(tmpdir, self.get_extension()) try: same = ffiplatform.samefile(outputfilename, self.modulefilename) except __HOLE__: same = False if not same: _ensure_dir(self.modulefilename) shutil.move(outputfilename, self.modulefilename) self._has_module = True
OSError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/cffi-1.5.2/cffi/verifier.py/Verifier._compile_module
7,835
def _locate_engine_class(ffi, force_generic_engine): if _FORCE_GENERIC_ENGINE: force_generic_engine = True if not force_generic_engine: if '__pypy__' in sys.builtin_module_names: force_generic_engine = True else: try: import _cffi_backend except __HOLE__: _cffi_backend = '?' if ffi._backend is not _cffi_backend: force_generic_engine = True if force_generic_engine: from . import vengine_gen return vengine_gen.VGenericEngine else: from . import vengine_cpy return vengine_cpy.VCPythonEngine # ____________________________________________________________
ImportError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/cffi-1.5.2/cffi/verifier.py/_locate_engine_class
7,836
def cleanup_tmpdir(tmpdir=None, keep_so=False): """Clean up the temporary directory by removing all files in it called `_cffi_*.{c,so}` as well as the `build` subdirectory.""" tmpdir = tmpdir or _caller_dir_pycache() try: filelist = os.listdir(tmpdir) except __HOLE__: return if keep_so: suffix = '.c' # only remove .c files else: suffix = _get_so_suffixes()[0].lower() for fn in filelist: if fn.lower().startswith('_cffi_') and ( fn.lower().endswith(suffix) or fn.lower().endswith('.c')): try: os.unlink(os.path.join(tmpdir, fn)) except OSError: pass clean_dir = [os.path.join(tmpdir, 'build')] for dir in clean_dir: try: for fn in os.listdir(dir): fn = os.path.join(dir, fn) if os.path.isdir(fn): clean_dir.append(fn) else: os.unlink(fn) except OSError: pass
OSError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/cffi-1.5.2/cffi/verifier.py/cleanup_tmpdir
7,837
def _ensure_dir(filename): try: os.makedirs(os.path.dirname(filename)) except __HOLE__: pass
OSError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/cffi-1.5.2/cffi/verifier.py/_ensure_dir
7,838
def _fork(self, fid): ''' fid - fork id ''' try: pid = os.fork() except __HOLE__, e: self.logger.error( "service._fork(), fork #%d failed: %d (%s)\n" % (fid, e.errno, e.strerror)) raise OSError(e) return pid
OSError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/Service._fork
7,839
def daemonize(self): ''' do the UNIX double-fork magic, see Stevens' "Advanced Programming in the UNIX Environment" for details (ISBN 0201563177) http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 ''' def _maxfd(limit=1024): ''' Use the getrlimit method to retrieve the maximum file descriptor number that can be opened by this process. If there is not limit on the resource, use the default value limit - default maximum for the number of available file descriptors. ''' maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] if maxfd == resource.RLIM_INFINITY: return limit else: return maxfd def _devnull(default="/dev/null"): # The standard I/O file descriptors are redirected to /dev/null by default. if hasattr(os, "devnull"): return os.devnull else: return default def _close_fds(preserve=None): preserve = preserve or [] for fd in xrange(0, _maxfd()): if fd not in preserve: try: os.close(fd) except __HOLE__: # fd wasn't open to begin with (ignored) pass pid = self._fork(1) # first fork if pid == 0: # the first child os.setsid() pid = self._fork(2) if pid == 0: # the second child os.chdir("/") os.umask(0) else: os._exit(0) _close_fds(logging_file_descriptors()) else: os._exit(0) os.open(_devnull(), os.O_RDWR) os.dup2(0, 1) # standard output (1) os.dup2(0, 2) # standard error (2) return True
OSError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/Service.daemonize
7,840
def start(self): ''' Start the service ''' # Check for a pidfile to see if the service already runs current_pid = self.pidfile.validate() if current_pid: message = "pidfile %s exists. Service is running already" self.logger.error(message % current_pid) print >> sys.stderr, message % current_pid return # Start the service if self.daemonize(): # create pid file try: self.pidfile.link() except __HOLE__, err: self.logger.error('Error during service start, %s' % str(err)) print >> sys.stderr, 'Error during service start, %s' % str(err) return # activate handler for stop the process atexit.register(self.remove_pid) try: self.logger.info('process [%s] starting' % self.process.__name__) user_process = self.process() user_process.logger = self.logger if getattr(user_process, 'do_start'): user_process.do_start() if getattr(user_process, 'run'): user_process.run() else: msg = 'Method run() is not defined for the process: %s' % self.process self.logger.error(msg) raise RuntimeError(msg) except Exception, err: self.logger.error(err) print err return
RuntimeError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/Service.start
7,841
def stop(self): ''' Stop the service ''' pid = self.pidfile.validate() if not pid: message = "pidfile %s does not exist. Service is not running" self.logger.error(message % self.pidfile.fname) return # not an error in a restart # Try killing the service process try: while 1: os.kill(pid, signal.SIGTERM) time.sleep(0.1) except __HOLE__, err: err = str(err) if err.find("No such process") > 0: self.pidfile.unlink() else: self.logger.error('Error during service stop, %s' % str(err)) raise OSError(err) self.logger.info('service [%s] was stopped by SIGTERM signal' % pid)
OSError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/Service.stop
7,842
def status(self): srv = Service(self.process) pid = srv.pidfile.validate() if pid: try: os.kill(pid, 0) print 'Process {} is running, pid: {}'.format(srv.process.__name__, pid) return except (__HOLE__, TypeError): pass print "Process is not running".format(srv.process.__name__) # # Pidfile #
OSError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/ServiceControl.status
7,843
def validate(self): """ Validate pidfile and make it stale if needed""" if not self.fname: return try: with open(self.fname, "r") as f: wpid = int(f.read() or 0) if wpid <= 0: return try: os.kill(wpid, 0) return wpid except __HOLE__, e: if e[0] == errno.ESRCH: return raise e except IOError, e: if e[0] == errno.ENOENT: return raise e # --------------------------------------------------- # Utils # --------------------------------------------------- # # Logging #
OSError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/Pidfile.validate
7,844
def load_process(process_path): ''' load process PEP 338 - Executing modules as scripts http://www.python.org/dev/peps/pep-0338 ''' if '.' not in process_path: raise RuntimeError("Invalid process path: {}".format(process_path)) module_name, process_name = process_path.rsplit('.', 1) try: try: module = runpy.run_module(module_name) except ImportError: module = runpy.run_module(module_name + ".__init__") except ImportError, e: import traceback, pkgutil tb_tups = traceback.extract_tb(sys.exc_info()[2]) if pkgutil.__file__.startswith(tb_tups[-1][0]): # If the bottom most frame in our stack was in pkgutil, # then we can safely say that this ImportError occurred # because the top level class path was not found. raise RuntimeError("Unable to load process path: {}:\n{}".format(process_path, e)) else: # If the ImportError occurred further down, # raise original exception. raise try: return module[process_name] except __HOLE__, e: raise RuntimeError("Unable to find process in module: {}".format(process_path))
KeyError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/load_process
7,845
def service(process=None, action=None): ''' control service ''' try: getattr(ServiceControl(process), action)() except __HOLE__, e: print >> sys.stderr, e
RuntimeError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/service
7,846
def main(): import argparse parser = argparse.ArgumentParser(prog="pyservice", add_help=False) parser.add_argument("-v", "--version", action="version", version="%(prog)s, v.{}".format(__version__)) parser.add_argument("-h", "--help", action="store_true", help="show program's help text and exit") parser.add_argument("process", nargs='?', help=""" process class path to run (modulename.ProcessClass) or configuration file path to use (/path/to/config.py) """.strip()) parser.add_argument("action", nargs='?', choices="start stop restart status".split()) try: args = parser.parse_args() except __HOLE__: parser.print_help() return if args.help: parser.print_help() return if args.process and args.action in "start stop restart status".split(): if not args.process: parser.error("You need to specify a process for {}".format(args.action)) service(args.process, args.action) else: parser.print_help()
TypeError
dataset/ETHPy150Open ownport/pyservice/pyservice.py/main
7,847
def parse_hours_flag(args): """ If the `--hours` flag was used, get the time, and remove the flag and the time from the args list. Return FALSE if the flag was not present. """ try: pos = args.index('--hours') except __HOLE__: return False else: hours = args[pos + 1] try: hours = float(hours) except ValueError: fprintf("GITIME ERROR: %s is not a valid amount of hours. Your " "commit was NOT made. Try again." %hours, file=sys.stderr) sys.exit() else: del args[pos + 1] del args[pos] return hours
ValueError
dataset/ETHPy150Open jongoodnow/gitime/gitime/commit.py/parse_hours_flag
7,848
def update(self): """Get the Efergy monitor data from the web service.""" try: if self.type == 'instant_readings': url_string = _RESOURCE + 'getInstant?token=' + self.app_token response = get(url_string) self._state = response.json()['reading'] / 1000 elif self.type == 'budget': url_string = _RESOURCE + 'getBudget?token=' + self.app_token response = get(url_string) self._state = response.json()['status'] elif self.type == 'cost': url_string = _RESOURCE + 'getCost?token=' + self.app_token \ + '&offset=' + self.utc_offset + '&period=' \ + self.period response = get(url_string) self._state = response.json()['sum'] else: self._state = 'Unknown' except (RequestException, __HOLE__, KeyError): _LOGGER.warning('Could not update status for %s', self.name)
ValueError
dataset/ETHPy150Open home-assistant/home-assistant/homeassistant/components/sensor/efergy.py/EfergySensor.update
7,849
@staticmethod def _read_arg(args, key, default): try: return args[key] except __HOLE__: return default
KeyError
dataset/ETHPy150Open CacheBrowser/cachebrowser/cachebrowser/settings.py/CacheBrowserSettings._read_arg
7,850
def walk_all_tags(): for namespace_path in store.list_directory(store.repositories): for repos_path in store.list_directory(namespace_path): try: for tag in store.list_directory(repos_path): fname = tag.split('/').pop() if not fname.startswith('tag_'): continue (namespace, repos) = repos_path.split('/')[-2:] yield (namespace, repos, store.get_content(tag)) except __HOLE__: pass
OSError
dataset/ETHPy150Open docker/docker-registry/scripts/dump_repos_data.py/walk_all_tags
7,851
def __getattr__(self, name): try: return self[name] except __HOLE__: raise AttributeError("Attribute '%s' doesn't exists. " % name)
KeyError
dataset/ETHPy150Open klen/muffin/muffin/utils.py/Struct.__getattr__
7,852
def __init__(self, params, offset=0): agents.Agent.__init__(self, params, offset) try: self.maxprice = self.args[0] except (__HOLE__, IndexError): raise MissingParameter, 'maxprice' try: self.maxbuy = self.args[1] except IndexError: raise MissingParameter, 'maxbuy' del self.args # Successes self.sellhist = list() self.buyhist = list() # Bids self.buybids = list() self.sellbids = list()
AttributeError
dataset/ETHPy150Open jcbagneris/fms/fms/contrib/coleman/agents/avgbuyselltrader.py/AvgBuySellTrader.__init__
7,853
def is_password_usable(pw): # like Django's is_password_usable, but only checks for unusable # passwords, not invalidly encoded passwords too. try: # 1.5 from django.contrib.auth.hashers import UNUSABLE_PASSWORD return pw != UNUSABLE_PASSWORD except __HOLE__: # 1.6 from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX return not pw.startswith(UNUSABLE_PASSWORD_PREFIX)
ImportError
dataset/ETHPy150Open fusionbox/django-authtools/authtools/forms.py/is_password_usable
7,854
def render(self, name, value, attrs): try: from django.forms.utils import flatatt except ImportError: from django.forms.util import flatatt # Django < 1.7 final_attrs = flatatt(self.build_attrs(attrs)) if not value or not is_password_usable(value): summary = ugettext("No password set.") else: try: identify_hasher(value) except __HOLE__: summary = ugettext("Invalid password format or unknown" " hashing algorithm.") else: summary = ugettext('*************') return format_html('<div{attrs}><strong>{summary}</strong></div>', attrs=final_attrs, summary=summary)
ValueError
dataset/ETHPy150Open fusionbox/django-authtools/authtools/forms.py/BetterReadOnlyPasswordHashWidget.render
7,855
def iter_json(self, _path=None, requests_params=None, **apiparams): """Reliably iterate through all data as json strings""" requests_params = dict(requests_params or {}) requests_params.setdefault('method', 'GET') requests_params.setdefault('stream', True) lastexc = None line = None offset = 0 for attempt in xrange(self.MAX_RETRIES): self._add_resume_param(line, offset, apiparams) try: for line in self._iter_lines(_path=_path, params=apiparams, **requests_params): yield line offset += 1 break except (__HOLE__, socket.error, rexc.RequestException) as exc: # catch requests exceptions other than HTTPError if isinstance(exc, rexc.HTTPError): raise lastexc = exc url = urlpathjoin(self.url, _path) msg = "Retrying read of %s in %ds: attempt=%d/%d error=%s" args = url, self.RETRY_INTERVAL, attempt, self.MAX_RETRIES, exc logger.debug(msg, *args) time.sleep(self.RETRY_INTERVAL) else: url = urlpathjoin(self.url, _path) logger.error("Failed %d times reading items from %s, params %s, " "last error was: %s", self.MAX_RETRIES, url, apiparams, lastexc)
ValueError
dataset/ETHPy150Open scrapinghub/python-hubstorage/hubstorage/resourcetype.py/DownloadableResource.iter_json
7,856
@property def _data(self): if self._cached is None: r = self.apiget() try: self._cached = r.next() except __HOLE__: self._cached = {} return self._cached
StopIteration
dataset/ETHPy150Open scrapinghub/python-hubstorage/hubstorage/resourcetype.py/MappingResourceType._data
7,857
def start(self): """ Start the server. """ s = socket.socket() s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) s.bind(('', config.socketport)) s.listen(1) try: while 1: conn, addr = s.accept() print('Connected by', addr) newClient = websocketclient.WebSocketClient(conn, addr, self) self.clients.append(newClient) newClient.start() except __HOLE__: [client.close() for client in self.clients] s.close()
KeyboardInterrupt
dataset/ETHPy150Open adngdb/python-websocket-server/websocketserver.py/WebSocketServer.start
7,858
def _get_explicit_connections(self): """ Returns ------- dict Explicit connections in this `Group`, represented as a mapping from the pathname of the target to the pathname of the source. """ connections = {} for sub in self.subgroups(): connections.update(sub._get_explicit_connections()) to_abs_uname = self._sysdata.to_abs_uname to_abs_pnames = self._sysdata.to_abs_pnames for tgt, srcs in iteritems(self._src): for src, idxs in srcs: try: src_pathnames = [to_abs_uname[src]] except __HOLE__ as error: try: src_pathnames = to_abs_pnames[src] except KeyError as error: raise NameError("Source '%s' cannot be connected to " "target '%s': '%s' does not exist." % (src, tgt, src)) try: for tgt_pathname in to_abs_pnames[tgt]: for src_pathname in src_pathnames: connections.setdefault(tgt_pathname, []).append((src_pathname, idxs)) except KeyError as error: try: to_abs_uname[tgt] except KeyError as error: raise NameError("Source '%s' cannot be connected to " "target '%s': '%s' does not exist." % (src, tgt, tgt)) else: raise NameError("Source '%s' cannot be connected to " "target '%s': Target must be a " "parameter but '%s' is an unknown." % (src, tgt, tgt)) return connections
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/openmdao/core/group.py/Group._get_explicit_connections
7,859
def _build(self, *parameters): paths_to_add = self.prepare_build_path() with CustomEnvPath(paths_to_add=paths_to_add): paths = self.bii.bii_paths if len(parameters) == 1: # It's tuple, first element is list with actual parameters parameters = parameters[0] self._handle_parallel_build(parameters) build_options = ' '.join(parameters) # Necessary for building in windows (cygwin in path) cmd = '"%s" --build . %s' % (cmake_command(paths), build_options) self.bii.user_io.out.write('Building: %s\n' % cmd) retcode = simple_exe(cmd, cwd=paths.build) if 'Eclipse' in self.bii.hive_disk_image.settings.cmake.generator: ide = Eclipse(paths) try: ide.configure_project() except __HOLE__: pass if retcode != 0: raise BiiException('Build failed')
IOError
dataset/ETHPy150Open biicode/client/dev/cmake/cmake_tool_chain.py/CMakeToolChain._build
7,860
def keep_alive(self): """ """ self._p0 = False self.handle_signals() with self.setup_output() as (stdout, stderr): self._terminating = False startretries = self.data.get('startretries', 3) initial_startretries = self.data.get('startretries', 3) while startretries: start = time.time() if startretries != initial_startretries: log.info('Retry command (%i retries remain)' % startretries) env = os.environ.copy() update_env = {k:str(v) for (k, v) in self.data.get('env', {}).items()} env.update(update_env) cwd = self.data.get('cwd') or os.path.abspath(os.curdir) env_str = '\n'.join('\t%s: %r' % item for item in update_env.items()) if env_str: log.info("Setting Environment: \n%s" % env_str) if cwd: log.info("Setting Working Directory: %s" % cwd) log.info("Running Command: %s" % ' '.join(self.data['command'])) try: self._p0 = Popen(self.data['command'], stdout=stdout, stderr=stderr, env=env, cwd=cwd, bufsize=self.data.get('bufsize', 0), preexec_fn=self.preexec_fn) except __HOLE__ as err: log.exception('Program %s could not be started with popen' % self.name) self.state.update(child_pid=None, exit_status=1, reason='OSError running command "%s"' % self.data['command'][0]) return except: log.exception('Exception in keep_alive') self.steate.update(child_pid=None, exit_status=1, reason='There was an unknown exception opening command (check logs)') return log.info('Program started with pid %s' % self._p0.pid) self.state.update(child_pid=self._p0.pid, reason=None, exit_status=None, start_time=time.time()) try: status = self._p0.wait() except KeyboardInterrupt: log.error('Program %s was interrupted by user' % self.name) kill_tree(self._p0.pid) self.state.update(child_pid=None, exit_status=None, reason='Interrupted by user') raise except BaseException as err: log.error('Program %s was interrupted' % self.name) kill_tree(self._p0.pid) self.state.update(child_pid=None, exit_status=None, reason='Python BaseException') log.exception(err) raise self._p0 = False uptime = time.time() - start log.info('Command Exited with status %s' % status) log.info(' + Uptime %s' % uptime) if self._terminating: reason = "Terminated at user request" status = None elif status in self.data['exitcodes']: reason = "Program exited gracefully" elif uptime < self.data['startsecs']: reason = 'Program did not successfully start' startretries -= 1 else: reason = "Program exited unexpectedly with code %s" % (status) startretries = initial_startretries self.state.update(child_pid=None, exit_status=status, reason=reason) if self._terminating: break if status in self.data['exitcodes']: break log.debug("Exiting keep alive function")
OSError
dataset/ETHPy150Open Anaconda-Platform/chalmers/chalmers/program/base.py/ProgramBase.keep_alive
7,861
def stop_daemonlog(self): logger = logging.getLogger('chalmers') logger.removeHandler(self._daemonlog_hdlr) try: self._log_stream.close() except __HOLE__: # Ignore: # close() called during concurrent operation on the same file object. pass
IOError
dataset/ETHPy150Open Anaconda-Platform/chalmers/chalmers/program/base.py/ProgramBase.stop_daemonlog
7,862
def push_to_device(device_or_devices): # get an iterable list of devices even if one wasn't specified try: iter(device_or_devices) except __HOLE__: devices = (device_or_devices, ) else: devices = device_or_devices # keyed access to topics for which we'll have an APNs connection for each topic_frames = {} for device in devices: if device.topic in apns_cxns: if device.topic not in topic_frames: # create our keyed topic reference if it doesn't exist topic_frames[device.topic] = Frame() # decode from as-stored base64 into hex encoding for apns library token_hex = device.token.decode('base64').encode('hex') mdm_payload = MDMPayload(device.push_magic) # add a frame for this topic topic_frames[device.topic].add_item(token_hex, mdm_payload, random.getrandbits(32), 0, 10) else: # TODO: configure and use real logging print 'Cannot send APNs to device: no APNs connection found (by device topic)' # loop through our by-topic APNs Frames and send away for topic in topic_frames.keys(): apns_cxns[topic].gateway_server.send_notification_multiple(topic_frames[topic])
TypeError
dataset/ETHPy150Open jessepeterson/commandment/commandment/push.py/push_to_device
7,863
def _dispatch(self, request): try: try: method = self.get_method(request.method) except __HOLE__ as e: return request.error_respond(MethodNotFoundError(e)) # we found the method try: result = method(*request.args, **request.kwargs) except Exception as e: # an error occured within the method, return it return request.error_respond(e) # respond with result return request.respond(result) except Exception as e: # unexpected error, do not let client know what happened return request.error_respond(ServerError())
KeyError
dataset/ETHPy150Open mbr/tinyrpc/tinyrpc/dispatch/__init__.py/RPCDispatcher._dispatch
7,864
def get_method(self, name): """Retrieve a previously registered method. Checks if a method matching ``name`` has been registered. If :py:func:`get_method` cannot find a method, every subdispatcher with a prefix matching the method name is checked as well. If a method isn't found, a :py:class:`KeyError` is thrown. :param name: Callable to find. :param return: The callable. """ if name in self.method_map: return self.method_map[name] for prefix, subdispatchers in six.iteritems(self.subdispatchers): if name.startswith(prefix): for sd in subdispatchers: try: return sd.get_method(name[len(prefix):]) except __HOLE__: pass raise KeyError(name)
KeyError
dataset/ETHPy150Open mbr/tinyrpc/tinyrpc/dispatch/__init__.py/RPCDispatcher.get_method
7,865
def captureException(self, *args, **kwargs):
        assert self.client, 'captureException called before application configured'
        data = kwargs.get('data')
        if data is None:
            try:
                kwargs['data'] = get_data_from_request(request)
            except __HOLE__:
                # app is probably not configured yet
                pass
        return self.client.captureException(*args, **kwargs)
RuntimeError
dataset/ETHPy150Open getsentry/raven-python/raven/contrib/bottle/__init__.py/Sentry.captureException
7,866
def captureMessage(self, *args, **kwargs):
        assert self.client, 'captureMessage called before application configured'
        data = kwargs.get('data')
        if data is None:
            try:
                kwargs['data'] = get_data_from_request(request)
            except __HOLE__:
                # app is probably not configured yet
                pass
        return self.client.captureMessage(*args, **kwargs)
RuntimeError
dataset/ETHPy150Open getsentry/raven-python/raven/contrib/bottle/__init__.py/Sentry.captureMessage
7,867
def do_update_translations(target=("t", ""), lang=("l", ""),
                           statistics=("s", False), i18n_dir=("i", ""),
                           all=("a", False)):
    """
    Update existing translations with updated pot files.
    """
    if not target and not all:
        print_status('Please specify target.')
        sys.exit(1)
    elif target == 'kay':
        print_status('Updating core strings')
        root = path.join(kay.KAY_DIR, 'i18n')
    elif all:
        targets = get_user_apps()
        for target in targets:
            do_update_translations(target=target, lang=lang,
                                   statistics=statistics, i18n_dir=None,
                                   all=False)
        do_update_translations(target=kay.PROJECT_DIR, lang=lang,
                               statistics=statistics, i18n_dir=None,
                               all=False)
        sys.exit(0)
    else:
        if i18n_dir:
            root = i18n_dir
        else:
            root = path.join(target, 'i18n')
        if not path.isdir(root):
            print_status('source folder missing')
            sys.exit(1)
    print_status('Updating %s' % root)
    for domain in domains:
        if lang:
            filepath = path.join(root, lang, 'LC_MESSAGES', domain+'.po')
            if not path.exists(filepath):
                print_status("%s not found, skipped." % filepath)
                continue
        try:
            f = file(path.join(root, domain+'.pot'))
        except __HOLE__:
            print_status('Can not open file: %s, skipped.' %
                         path.join(root, domain+'.pot'))
            continue
        try:
            template = read_po(f)
        finally:
            f.close()
        po_files = []
        for lang_dir in listdir(root):
            filename = path.join(root, lang_dir, 'LC_MESSAGES', domain+'.po')
            if lang and filename != \
                    path.join(root, lang, 'LC_MESSAGES', domain+'.po'):
                continue
            if path.exists(filename):
                print_status('Updating %s' % filename, nl=False)
                locale = Locale.parse(lang_dir)
                f = file(filename)
                try:
                    catalog = read_po(f, locale=locale, domain=domain)
                finally:
                    f.close()
                catalog.update(template)
                # XXX: this is kinda dangerous, but as we are using a
                # revision control system anyways that shouldn't make
                # too many problems
                f = file(filename, 'w')
                try:
                    write_po(f, catalog, ignore_obsolete=True,
                             include_previous=False, width=79)
                finally:
                    if statistics:
                        translated = fuzzy = percentage = 0
                        for message in list(catalog)[1:]:
                            if message.string:
                                translated += 1
                            if 'fuzzy' in message.flags:
                                fuzzy += 1
                        if len(catalog):
                            percentage = translated * 100 // len(catalog)
                        print_status("-> %d of %d messages (%d%%) translated" % (
                                     translated, len(catalog), percentage),
                                     nl=False)
                        if fuzzy:
                            if fuzzy == 1:
                                print_status("%d of which is fuzzy" % fuzzy,
                                             nl=False)
                            else:
                                print_status("%d of which are fuzzy" % fuzzy,
                                             nl=False)
                            print_status()
                        else:
                            print_status()
                    f.close()
    print_status('All done.')
IOError
dataset/ETHPy150Open IanLewis/kay/kay/management/update_translations.py/do_update_translations
7,868
def _setargs(self):
        """Copy func parameter names to obj attributes."""
        try:
            for arg in _getargs(self.callable):
                setattr(self, arg, None)
        except (TypeError, AttributeError):
            if hasattr(self.callable, "__call__"):
                for arg in _getargs(self.callable.__call__):
                    setattr(self, arg, None)
        # IronPython 1.0 raises NotImplementedError because
        # inspect.getargspec tries to access Python bytecode
        # in co_code attribute.
        except NotImplementedError:
            pass
        # IronPython 1B1 may raise IndexError in some cases,
        # but if we trap it here it doesn't prevent CP from working.
        except __HOLE__:
            pass
IndexError
dataset/ETHPy150Open clips/pattern/pattern/server/cherrypy/cherrypy/_cptools.py/Tool._setargs
7,869
def _configure(self, config_filename):
        """ Configure manager instance. """
        self._logger.debug('Configuring from %r', config_filename)
        with open(config_filename, 'r') as inp:
            cfg = ConfigParser.ConfigParser()
            cfg.readfp(inp)
            for name in cfg.sections():
                self._logger.debug(' name: %s', name)
                for allocator in self._allocators:
                    if allocator.name == name:
                        self._logger.debug(' existing allocator')
                        allocator.configure(cfg)
                        break
                else:
                    if not cfg.has_option(name, 'classname'):
                        self._logger.debug(' skipping %s', name)
                        continue
                    classname = cfg.get(name, 'classname')
                    self._logger.debug(' classname: %s', classname)
                    mod_name, _, cls_name = classname.rpartition('.')
                    try:
                        __import__(mod_name)
                    except __HOLE__ as exc:
                        raise RuntimeError("RAM configure %s: can't import %r: %s"
                                           % (name, mod_name, exc))
                    module = sys.modules[mod_name]
                    if not hasattr(module, cls_name):
                        raise RuntimeError('RAM configure %s: no class %r in %s'
                                           % (name, cls_name, mod_name))
                    cls = getattr(module, cls_name)
                    allocator = cls(name)
                    allocator.configure(cfg)
                    self._allocators.append(allocator)
ImportError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/ResourceAllocationManager._configure
7,870
def _release(self, server):
        """ Release a server (proxy). """
        with ResourceAllocationManager._lock:
            try:
                allocator, server, server_info = self._deployed_servers[id(server)]
            # Just being defensive.
            except __HOLE__:  #pragma no cover
                self._logger.error('server %r not found', server)
                return
            del self._deployed_servers[id(server)]

        self._logger.info('release %r pid %d on %s', server_info['name'],
                          server_info['pid'], server_info['host'])
        try:
            allocator.release(server)
        # Just being defensive.
        except Exception as exc:  #pragma no cover
            self._logger.error("Can't release %r: %r",
                               server_info['name'], exc)
        server._close.cancel()
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/ResourceAllocationManager._release
7,871
@staticmethod
    def validate_resources(resource_desc):
        """
        Validate that `resource_desc` is legal.

        resource_desc: dict
            Description of required resources.
        """
        for key, value in resource_desc.items():
            try:
                if not _VALIDATORS[key](value):
                    raise ValueError('Invalid resource value for %r: %r'
                                     % (key, value))
            except __HOLE__:
                raise KeyError('Invalid resource key %r' % key)

        if 'max_cpus' in resource_desc:
            if 'min_cpus' not in resource_desc:
                raise KeyError('min_cpus required if max_cpus specified')
            min_cpus = resource_desc['min_cpus']
            max_cpus = resource_desc['max_cpus']
            if max_cpus < min_cpus:
                raise ValueError('max_cpus %d < min_cpus %d'
                                 % (max_cpus, min_cpus))
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/ResourceAllocationManager.validate_resources
7,872
def check_orphan_modules(self, resource_value):
        """
        Returns a list of 'orphan' modules that are not available.

        resource_value: list
            List of 'orphan' module names.
        """
        #FIXME: shouldn't pollute the environment like this does.
        not_found = []
        for module in sorted(resource_value):
            if module:
                try:
                    __import__(module)
                except __HOLE__:
                    not_found.append(module)
        return not_found

    # To be implemented by real allocator.
ImportError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/ResourceAllocator.check_orphan_modules
7,873
def __init__(self, name='LocalAllocator', total_cpus=0, max_load=1.0,
                 authkey=None, allow_shell=False, server_limit=0):
        super(LocalAllocator, self).__init__(name, authkey, allow_shell)
        if total_cpus > 0:
            self.total_cpus = total_cpus
        else:
            try:
                self.total_cpus = multiprocessing.cpu_count()
            # Just being defensive (according to docs this could happen).
            except __HOLE__:  # pragma no cover
                self.total_cpus = 1
        if max_load > 0.:
            self.max_load = max_load
        else:
            raise ValueError('%s: max_load must be > 0, got %g'
                             % (name, max_load))
        self.server_limit = server_limit
NotImplementedError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/LocalAllocator.__init__
7,874
@rbac('*')
    def max_servers(self, resource_desc, load_adjusted=False):
        """
        Returns `total_cpus` * `max_load` if `resource_desc` is supported;
        otherwise, zero. The return value may be limited by `server_limit`.

        resource_desc: dict
            Description of required resources.

        load_adjusted: bool
            If True, then the returned number of servers is adjusted for
            current host loading.
        """
        retcode, info = self.check_compatibility(resource_desc)
        if retcode != 0:
            return (0, info)

        avail_cpus = self.total_cpus * self.max_load
        if load_adjusted:  # Check system load.
            try:
                loadavgs = os.getloadavg()
            # Not available on Windows.
            except __HOLE__:  #pragma no cover
                pass
            else:
                self._logger.debug('loadavgs %.2f, %.2f, %.2f, max_load %.2f',
                                   loadavgs[0], loadavgs[1], loadavgs[2],
                                   self.max_load * self.total_cpus)
                avail_cpus -= int(loadavgs[0])
        avail_cpus = max(int(avail_cpus), 1)

        if self.server_limit == 0:
            avail_cpus = min(avail_cpus, self.total_cpus)
        elif self.server_limit > 0:
            avail_cpus = min(avail_cpus, self.server_limit)
        # else no special limiting.

        if 'min_cpus' in resource_desc:
            req_cpus = resource_desc['min_cpus']
            if req_cpus > avail_cpus:
                return (0, {'min_cpus': 'want %s, available %s'
                                        % (req_cpus, avail_cpus)})
            else:
                return (avail_cpus / req_cpus, {})
        else:
            return (avail_cpus, {})
AttributeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/LocalAllocator.max_servers
7,875
@rbac('*')
    def time_estimate(self, resource_desc):
        """
        Returns ``(estimate, criteria)`` indicating how well this allocator
        can satisfy the `resource_desc` request. The estimate will be:

        - >0 for an estimate of walltime (seconds).
        -  0 for no estimate.
        - -1 for no resource at this time.
        - -2 for no support for `resource_desc`.

        The returned criteria is a dictionary containing information related
        to the estimate, such as hostnames, load averages, unsupported
        resources, etc.

        resource_desc: dict
            Description of required resources.
        """
        retcode, info = self.check_compatibility(resource_desc)
        if retcode != 0:
            return (retcode, info)

        # Check system load.
        try:
            loadavgs = os.getloadavg()
        # Not available on Windows.
        except __HOLE__:  #pragma no cover
            criteria = {
                'hostnames'  : [socket.gethostname()],
                'total_cpus' : self.total_cpus,
            }
            return (0, criteria)

        self._logger.debug('loadavgs %.2f, %.2f, %.2f, max_load %.2f',
                           loadavgs[0], loadavgs[1], loadavgs[2],
                           self.max_load * self.total_cpus)
        criteria = {
            'hostnames'  : [socket.gethostname()],
            'loadavgs'   : loadavgs,
            'total_cpus' : self.total_cpus,
            'max_load'   : self.max_load
        }
        if (loadavgs[0] / self.total_cpus) < self.max_load:
            return (0, criteria)
        elif len(self._deployed_servers) == 0:
            # Ensure progress by always allowing 1 server.
            return (0, criteria)
        # Tests force max_load high to avoid other issues.
        else:  #pragma no cover
            return (-1, criteria)  # Try again later.
AttributeError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/LocalAllocator.time_estimate
7,876
def max_servers(self, resource_desc):
        """
        Returns the total of :meth:`max_servers` across all
        :class:`LocalAllocator` in the cluster.

        resource_desc: dict
            Description of required resources.
        """
        credentials = get_credentials()

        rdesc, info = self._check_local(resource_desc)
        if rdesc is None:
            return (0, info[1])

        with self._lock:
            # Drain _reply_q.
            while True:
                try:
                    self._reply_q.get_nowait()
                except Queue.Empty:
                    break

            # Get counts via worker threads.
            todo = []
            max_workers = 10
            for i, host in enumerate(self.cluster):
                allocator = host.allocator
                if i < max_workers:
                    worker_q = WorkerPool.get()
                    worker_q.put((self._get_count,
                                  (allocator, rdesc, credentials),
                                  {}, self._reply_q))
                else:
                    todo.append(allocator)

            # Process counts.
            total = 0
            for i in range(len(self.cluster)):
                worker_q, retval, exc, trace = self._reply_q.get()
                if exc:
                    self._logger.error(trace)
                    raise exc

                try:
                    next_allocator = todo.pop(0)
                except __HOLE__:
                    WorkerPool.release(worker_q)
                else:
                    worker_q.put((self._get_count,
                                  (next_allocator, rdesc, credentials),
                                  {}, self._reply_q))
                count = retval
                if count:
                    total += count

            if 'min_cpus' in resource_desc:
                req_cpus = resource_desc['min_cpus']
                if req_cpus > total:
                    return (0, {'min_cpus': 'want %s, total %s'
                                            % (req_cpus, total)})
                else:
                    return (total / req_cpus, {})
            else:
                return (total, {})
IndexError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/ClusterAllocator.max_servers
7,877
def _load_average(self, rdesc):
        """ Time estimate using load averages. """
        credentials = get_credentials()
        min_cpus = rdesc.get('min_cpus', 0)
        if min_cpus:
            # Spread across LocalAllocators.
            rdesc['min_cpus'] = 1

        avail_cpus = 0
        with self._lock:
            best_host = None
            best_estimate = -2
            best_criteria = {'': 'No LocalAllocator results'}

            # Prefer not to repeat use of just-used allocator.
            prev_host = self._last_deployed
            prev_estimate = -2
            prev_criteria = None
            self._last_deployed = None

            # Drain _reply_q.
            while True:
                try:
                    self._reply_q.get_nowait()
                except Queue.Empty:
                    break

            # Get estimates via worker threads.
            todo = []
            max_workers = 10
            for i, host in enumerate(self.cluster):
                if i < max_workers:
                    worker_q = WorkerPool.get()
                    worker_q.put((self._get_estimate,
                                  (host, rdesc, credentials),
                                  {}, self._reply_q))
                else:
                    todo.append(host)

            # Process estimates.
            host_loads = []  # Sorted list of (load, criteria)
            for i in range(len(self.cluster)):
                worker_q, retval, exc, trace = self._reply_q.get()
                if exc:
                    self._logger.error(trace)
                    retval = None

                try:
                    next_host = todo.pop(0)
                except __HOLE__:
                    WorkerPool.release(worker_q)
                else:
                    worker_q.put((self._get_estimate,
                                  (next_host, rdesc, credentials),
                                  {}, self._reply_q))

                if retval is None:
                    continue
                host, estimate, criteria = retval
                if estimate is None or estimate < -1:
                    continue

                # Accumulate available cpus in cluster.
                avail_cpus += criteria['total_cpus']

                # CPU-adjusted load (if available).
                if 'loadavgs' in criteria:
                    load = criteria['loadavgs'][0] / criteria['total_cpus']
                else:  # Windows
                    load = 0.

                # Insertion sort of host_loads.
                if estimate >= 0 and min_cpus:
                    new_info = (load, criteria)
                    if host_loads:
                        for i, info in enumerate(host_loads):
                            if load < info[0]:
                                host_loads.insert(i, new_info)
                                break
                        else:
                            host_loads.append(new_info)
                    else:
                        host_loads.append(new_info)

                # Update best estimate.
                if host is prev_host:
                    prev_estimate = estimate
                    prev_criteria = criteria
                elif (best_estimate <= 0 and estimate > best_estimate) or \
                     (best_estimate > 0 and estimate < best_estimate):
                    best_host = host
                    best_estimate = estimate
                    best_criteria = criteria
                elif best_estimate == 0 and estimate == 0:
                    if 'loadavgs' in best_criteria:
                        best_load = best_criteria['loadavgs'][0]
                        if load < best_load:
                            best_host = host
                            best_estimate = estimate
                            best_criteria = criteria
                    else:
                        if 'loadavgs' in criteria:
                            # Prefer non-Windows.
                            best_host = host
                            best_estimate = estimate
                            best_criteria = criteria

            # If no alternative, repeat use of previous host.
            if best_estimate < 0 and prev_estimate >= 0:
                best_host = prev_host
                best_estimate = prev_estimate
                best_criteria = prev_criteria

            if avail_cpus < min_cpus:
                return (-2, {'min_cpus': 'want %d, available %d'
                                         % (min_cpus, avail_cpus)})

            # Save best host in criteria in case we're asked to deploy.
            if best_host is not None:
                best_criteria['host'] = best_host
                if min_cpus:
                    # Save min_cpus hostnames in criteria.
                    hostnames = []
                    for load, criteria in host_loads:
                        hostname = criteria['hostnames'][0]
                        hostnames.append(hostname)
                        if len(hostnames) >= min_cpus:
                            break
                        total_cpus = criteria['total_cpus']
                        max_load = criteria.get('max_load', 1)
                        load *= total_cpus  # Restore from cpu-adjusted value.
                        max_load *= total_cpus
                        load += 1
                        while load < max_load and len(hostnames) < min_cpus:
                            hostnames.append(hostname)
                            load += 1
                        if len(hostnames) >= min_cpus:
                            break
                    if len(hostnames) < min_cpus:
                        return (-1, {'min_cpus': 'want %d, idle %d'
                                                 % (min_cpus, len(hostnames))})
                    best_criteria['hostnames'] = hostnames

            return (best_estimate, best_criteria)
IndexError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/ClusterAllocator._load_average
7,878
def release(self, server):
        """
        Release a server (proxy).

        server: :class:`OpenMDAO_Proxy`
            Server to be released.
        """
        with self._lock:
            try:
                host = self._deployed_servers[id(server)][0]
            except __HOLE__:
                self._logger.error('server %r not found', server)
                return
            del self._deployed_servers[id(server)]

        host.allocated_cpus -= 1
        try:
            host.allocator.release(server)
        except Exception as exc:
            self._logger.error("Can't release %r: %r", server, exc)
        server._close.cancel()
KeyError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/resource.py/ClusterAllocator.release
7,879
def _make_bound_method(func, self):
    """
    Magic for creating bound methods (used for _unload).
    """
    class Foo(object):
        def meth(self):
            pass
    f = Foo()
    bound_method = type(f.meth)
    try:
        return bound_method(func, self, self.__class__)
    except __HOLE__:
        # python3
        return bound_method(func, self)
TypeError
dataset/ETHPy150Open nltk/nltk/nltk/corpus/util.py/_make_bound_method
7,880
def __call__(self, env, start_response):
        try:
            # /v1/a/c or /v1/a/c/o
            split_path(env['PATH_INFO'], 3, 4, True)
            is_container_or_object_req = True
        except __HOLE__:
            is_container_or_object_req = False

        headers = [('Content-Type', 'text/plain'),
                   ('Content-Length', str(sum(map(len, self.body))))]
        if is_container_or_object_req and self.policy_idx is not None:
            headers.append(('X-Backend-Storage-Policy-Index',
                            str(self.policy_idx)))
        start_response(self.response_str, headers)
        while env['wsgi.input'].read(5):
            pass
        return self.body
ValueError
dataset/ETHPy150Open openstack/swift/test/unit/common/middleware/test_proxy_logging.py/FakeApp.__call__
7,881
def test_disconnect_on_readline(self):
        app = proxy_logging.ProxyLoggingMiddleware(FakeAppReadline(), {})
        app.access_logger = FakeLogger()
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                          'wsgi.input': FileLikeExceptor()})
        try:
            resp = app(req.environ, start_response)
            # read body
            ''.join(resp)
        except __HOLE__:
            pass
        log_parts = self._log_parts(app)
        self.assertEqual(log_parts[6], '499')
        self.assertEqual(log_parts[10], '-')  # read length
IOError
dataset/ETHPy150Open openstack/swift/test/unit/common/middleware/test_proxy_logging.py/TestProxyLogging.test_disconnect_on_readline
7,882
def test_disconnect_on_read(self):
        app = proxy_logging.ProxyLoggingMiddleware(
            FakeApp(['some', 'stuff']), {})
        app.access_logger = FakeLogger()
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET',
                                          'wsgi.input': FileLikeExceptor()})
        try:
            resp = app(req.environ, start_response)
            # read body
            ''.join(resp)
        except __HOLE__:
            pass
        log_parts = self._log_parts(app)
        self.assertEqual(log_parts[6], '499')
        self.assertEqual(log_parts[10], '-')  # read length
IOError
dataset/ETHPy150Open openstack/swift/test/unit/common/middleware/test_proxy_logging.py/TestProxyLogging.test_disconnect_on_read
7,883
@expose('/add/', methods=('GET', 'POST'))
    def add(self):
        if not self.can_create:
            abort(403)
        Form = self.get_add_form()
        if request.method == 'POST':
            form = Form()
            if form.validate_on_submit():
                try:
                    instance = self.save_model(self.model(), form, adding=True)
                    flash(gettext('New %(model)s saved successfully',
                                  model=self.get_display_name()), 'success')
                    return self.dispatch_save_redirect(instance)
                except Exception, ex:
                    print traceback.format_exc()
                    if hasattr(self, 'session'):
                        self.session.rollback()
                    flash(gettext('Failed to add model. %(error)s',
                                  error=str(ex)), 'error')
        else:
            try:
                form = Form(obj=self.model())
            except __HOLE__:
                raise Exception('The database model for %r should have an '
                                '__init__ with all arguments set to defaults.'
                                % self.model.__name__)
        return self.render(self.add_template, model=self.model, form=form)
TypeError
dataset/ETHPy150Open syrusakbary/Flask-SuperAdmin/flask_superadmin/model/base.py/BaseModelAdmin.add
7,884
def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW
        """

        # Check for denied User-Agents
        if 'HTTP_USER_AGENT' in request.META:
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    logger.warning('Forbidden (User agent): %s', request.path,
                        extra={
                            'status_code': 403,
                            'request': request
                        }
                    )
                    return http.HttpResponseForbidden('<h1>Forbidden</h1>')

        # Check for a redirect based on settings.APPEND_SLASH
        # and settings.PREPEND_WWW
        host = request.get_host()
        old_url = [host, request.path]
        new_url = old_url[:]

        if (settings.PREPEND_WWW and old_url[0] and
                not old_url[0].startswith('www.')):
            new_url[0] = 'www.' + old_url[0]

        # Append a slash if APPEND_SLASH is set and the URL doesn't have a
        # trailing slash and there is no pattern for the current path
        if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
            urlconf = getattr(request, 'urlconf', None)
            if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
                    urlresolvers.is_valid_path("%s/" % request.path_info,
                                               urlconf)):
                new_url[1] = new_url[1] + '/'
                if settings.DEBUG and request.method == 'POST':
                    raise RuntimeError((""
                        "You called this URL via POST, but the URL doesn't end "
                        "in a slash and you have APPEND_SLASH set. Django can't "
                        "redirect to the slash URL while maintaining POST data. "
                        "Change your form to point to %s%s (note the trailing "
                        "slash), or set APPEND_SLASH=False in your Django "
                        "settings.") % (new_url[0], new_url[1]))

        if new_url == old_url:
            # No redirects required.
            return
        if new_url[0]:
            newurl = "%s://%s%s" % (
                'https' if request.is_secure() else 'http',
                new_url[0], urlquote(new_url[1]))
        else:
            newurl = urlquote(new_url[1])
        if request.META.get('QUERY_STRING', ''):
            if six.PY3:
                newurl += '?' + request.META['QUERY_STRING']
            else:
                # `query_string` is a bytestring. Appending it to the unicode
                # string `newurl` will fail if it isn't ASCII-only. This isn't
                # allowed; only broken software generates such query strings.
                # Better drop the invalid query string than crash (#15152).
                try:
                    newurl += '?' + request.META['QUERY_STRING'].decode()
                except __HOLE__:
                    pass
        return http.HttpResponsePermanentRedirect(newurl)
UnicodeDecodeError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Django-1.6.10/django/middleware/common.py/CommonMiddleware.process_request
7,885
def FindOutgoingInterface(source):
    """XXX Describe the strategy that is used..."""
    # If the COM object implements IProvideClassInfo2, it is easy to
    # find the default outgoing interface.
    try:
        pci = source.QueryInterface(comtypes.typeinfo.IProvideClassInfo2)
        guid = pci.GetGUID(1)
    except comtypes.COMError:
        pass
    else:
        # another try: block needed?
        try:
            interface = comtypes.com_interface_registry[str(guid)]
        except __HOLE__:
            tinfo = pci.GetClassInfo()
            tlib, index = tinfo.GetContainingTypeLib()
            GetModule(tlib)
            interface = comtypes.com_interface_registry[str(guid)]
        logger.debug("%s using sinkinterface %s", source, interface)
        return interface

    # If we can find the CLSID of the COM object, we can look for a
    # registered outgoing interface (__clsid has been set by
    # comtypes.client):
    clsid = source.__dict__.get('__clsid')
    try:
        interface = comtypes.com_coclass_registry[clsid]._outgoing_interfaces_[0]
    except KeyError:
        pass
    else:
        logger.debug("%s using sinkinterface from clsid %s", source, interface)
        return interface

##    interface = find_single_connection_interface(source)
##    if interface:
##        return interface

    raise TypeError("cannot determine source interface")
KeyError
dataset/ETHPy150Open enthought/comtypes/comtypes/client/_events.py/FindOutgoingInterface
7,886
def find_single_connection_interface(source):
    # Enumerate the connection interfaces. If we find a single one,
    # return it, if there are more, we give up since we cannot
    # determine which one to use.
    cpc = source.QueryInterface(comtypes.connectionpoints.IConnectionPointContainer)
    enum = cpc.EnumConnectionPoints()
    iid = enum.next().GetConnectionInterface()
    try:
        enum.next()
    except __HOLE__:
        try:
            interface = comtypes.com_interface_registry[str(iid)]
        except KeyError:
            return None
        else:
            logger.debug("%s using sinkinterface from iid %s",
                         source, interface)
            return interface
    else:
        logger.debug("%s has more than one connection point", source)

    return None
StopIteration
dataset/ETHPy150Open enthought/comtypes/comtypes/client/_events.py/find_single_connection_interface
7,887
def find_method(self, fq_name, mthname):
        impl = self._find_method(fq_name, mthname)
        # Caller of this method catches AttributeError,
        # so we need to be careful in the following code
        # not to raise one...
        try:
            # impl is a bound method, dissect it...
            im_self, im_func = impl.im_self, impl.im_func
            # decorate it with an error printer...
            method = report_errors(im_func)
            # and make a new bound method from it again.
            return comtypes.instancemethod(method, im_self, type(im_self))
        except __HOLE__, details:
            raise RuntimeError(details)
AttributeError
dataset/ETHPy150Open enthought/comtypes/comtypes/client/_events.py/_SinkMethodFinder.find_method
7,888
def _find_method(self, fq_name, mthname):
        try:
            return super(_SinkMethodFinder, self).find_method(fq_name, mthname)
        except __HOLE__:
            try:
                return getattr(self.sink, fq_name)
            except AttributeError:
                return getattr(self.sink, mthname)
AttributeError
dataset/ETHPy150Open enthought/comtypes/comtypes/client/_events.py/_SinkMethodFinder._find_method
7,889
def test_odict(self):
        o = util.OrderedDict()
        o['a'] = 1
        o['b'] = 2
        o['snack'] = 'attack'
        o['c'] = 3

        eq_(list(o.keys()), ['a', 'b', 'snack', 'c'])
        eq_(list(o.values()), [1, 2, 'attack', 3])

        o.pop('snack')
        eq_(list(o.keys()), ['a', 'b', 'c'])
        eq_(list(o.values()), [1, 2, 3])

        try:
            o.pop('eep')
            assert False
        except __HOLE__:
            pass

        eq_(o.pop('eep', 'woot'), 'woot')

        try:
            o.pop('whiff', 'bang', 'pow')
            assert False
        except TypeError:
            pass

        eq_(list(o.keys()), ['a', 'b', 'c'])
        eq_(list(o.values()), [1, 2, 3])

        o2 = util.OrderedDict(d=4)
        o2['e'] = 5

        eq_(list(o2.keys()), ['d', 'e'])
        eq_(list(o2.values()), [4, 5])

        o.update(o2)
        eq_(list(o.keys()), ['a', 'b', 'c', 'd', 'e'])
        eq_(list(o.values()), [1, 2, 3, 4, 5])

        o.setdefault('c', 'zzz')
        o.setdefault('f', 6)
        eq_(list(o.keys()), ['a', 'b', 'c', 'd', 'e', 'f'])
        eq_(list(o.values()), [1, 2, 3, 4, 5, 6])
KeyError
dataset/ETHPy150Open zzzeek/sqlalchemy/test/base/test_utils.py/OrderedDictTest.test_odict
7,890
def test_basic_sanity(self):
        IdentitySet = util.IdentitySet

        o1, o2, o3 = object(), object(), object()
        ids = IdentitySet([o1])
        ids.discard(o1)
        ids.discard(o1)
        ids.add(o1)
        ids.remove(o1)
        assert_raises(KeyError, ids.remove, o1)

        eq_(ids.copy(), ids)

        # explicit __eq__ and __ne__ tests
        assert ids != None
        assert not(ids == None)

        ne_(ids, IdentitySet([o1, o2, o3]))
        ids.clear()
        assert o1 not in ids
        ids.add(o2)
        assert o2 in ids
        eq_(ids.pop(), o2)
        ids.add(o1)
        eq_(len(ids), 1)

        isuper = IdentitySet([o1, o2])
        assert ids < isuper
        assert ids.issubset(isuper)
        assert isuper.issuperset(ids)
        assert isuper > ids

        eq_(ids.union(isuper), isuper)
        eq_(ids | isuper, isuper)
        eq_(isuper - ids, IdentitySet([o2]))
        eq_(isuper.difference(ids), IdentitySet([o2]))
        eq_(ids.intersection(isuper), IdentitySet([o1]))
        eq_(ids & isuper, IdentitySet([o1]))
        eq_(ids.symmetric_difference(isuper), IdentitySet([o2]))
        eq_(ids ^ isuper, IdentitySet([o2]))

        ids.update(isuper)
        ids |= isuper
        ids.difference_update(isuper)
        ids -= isuper
        ids.intersection_update(isuper)
        ids &= isuper
        ids.symmetric_difference_update(isuper)
        ids ^= isuper

        ids.update('foobar')
        try:
            ids |= 'foobar'
            assert False
        except TypeError:
            assert True

        try:
            s = set([o1, o2])
            s |= ids
            assert False
        except __HOLE__:
            assert True

        assert_raises(TypeError, util.cmp, ids)
        assert_raises(TypeError, hash, ids)
TypeError
dataset/ETHPy150Open zzzeek/sqlalchemy/test/base/test_utils.py/IdentitySetTest.test_basic_sanity
7,891
def test_is_produced(self):
        """ test is_produced function """
        class ChildNotOkContext(base.Context):
            __swagger_ref_object__ = ChildObj

            @classmethod
            def is_produced(kls, obj):
                return False

        class TestOkContext(base.Context):
            __swagger_ref_object__ = TestObj
            __swagger_child__ = {
                'a': (None, ChildContext)
            }

        class TestNotOkContext(base.Context):
            __swagger_ref_object__ = TestObj
            __swagger_child__ = {
                'a': (None, ChildNotOkContext)
            }

        tmp = {'t': {}}
        obj = {'a': {}}
        with TestOkContext(tmp, 't') as ctx:
            # should not raise
            ctx.parse(obj)

        ctx = TestNotOkContext(tmp, 't')
        try:
            # simulate what ContextManager does
            ctx.parse(obj)
            ctx.__exit__(None, None, None)
        except __HOLE__ as e:
            self.failUnlessEqual(e.args,
                                 ('Object is not instance of ChildObj but ChildObj',))
        else:
            self.fail('ValueError not raised')
ValueError
dataset/ETHPy150Open mission-liao/pyswagger/pyswagger/tests/test_base.py/SwaggerBaseTestCase.test_is_produced
7,892
def run(command, data=None, timeout=None, kill_timeout=None, env=None, cwd=None):
    """Executes a given command and returns Response.

    Blocks until process is complete, or timeout is reached.
    """

    command = expand_args(command)
    history = []
    for c in command:

        if len(history):
            # due to broken pipe problems pass only first 10 KiB
            data = history[-1].std_out[0:10*1024]

        cmd = Command(c)
        try:
            out, err = cmd.run(data, timeout, kill_timeout, env, cwd)
            status_code = cmd.returncode
        except __HOLE__ as e:
            out, err = '', u"\n".join([e.strerror, traceback.format_exc()])
            status_code = 127

        r = Response(process=cmd)

        r.command = c
        r.std_out = out
        r.std_err = err
        r.status_code = status_code

        history.append(r)

    r = history.pop()
    r.history = history

    return r
OSError
dataset/ETHPy150Open kennethreitz/envoy/envoy/core.py/run
7,893
def compute(self):
        """ compute() -> None
        Translate input ports into (row, column) location
        """
        loc = CellLocation.Location()

        def set_row_col(row, col):
            try:
                loc.col = ord(col) - ord('A')
                loc.row = int(row) - 1
            except (TypeError, __HOLE__):
                raise ModuleError(self, 'ColumnRowAddress format error')

        ref = self.force_get_input("SheetReference")
        if ref:
            loc.sheetReference = ref

        loc.rowSpan = self.force_get_input("RowSpan", -1)
        loc.colSpan = self.force_get_input("ColumnSpan", -1)
        if self.has_input("Row") and self.has_input("Column"):
            loc.row = self.get_input("Row")-1
            loc.col = self.get_input("Column")-1
        elif self.has_input("ColumnRowAddress"):
            address = self.get_input("ColumnRowAddress")
            address = address.replace(' ', '').upper()
            if len(address) > 1:
                if address[0] >= 'A' and address[0] <= 'Z':
                    set_row_col(address[1:], address[0])
                else:
                    set_row_col(address[:-1], address[-1])

        self.set_output('value', loc)
ValueError
dataset/ETHPy150Open VisTrails/VisTrails/vistrails/packages/spreadsheet/basic_widgets.py/CellLocation.compute
7,894
def verify_challenge(self, entity, response):
        try:
            token = response.decode('utf-8')
        except __HOLE__ as exc:
            return self.state(False, entity, None)
        try:
            result = self.verify(token)
            if result:
                entity = entity or self.auth.username()
            return self.state(result, entity, None)
        except auth.PasswordError as exc:
            return self.state(False, entity, None)

## Client
ValueError
dataset/ETHPy150Open HenryHu/pybbs/bbsauth.py/BBSAuth.verify_challenge
7,895
def set_value(self, value):
        """The value has to be in the form '10px' or '10%':
        a numeric value plus a measure unit
        """
        v = 0
        measure_unit = 'px'
        try:
            v = int(float(value.replace('px', '')))
        except ValueError:
            try:
                v = int(float(value.replace('%', '')))
                measure_unit = '%'
            except __HOLE__:
                pass
        self.numInput.set_value(v)
        self.dropMeasureUnit.set_value(measure_unit)
ValueError
dataset/ETHPy150Open dddomodossola/remi/editor/editor_widgets.py/CssSizeInput.set_value
7,896
@register.simple_tag(takes_context=True)
def associated(context, backend):
    user = context.get('user')
    context['association'] = None
    if user and user.is_authenticated():
        try:
            context['association'] = user.social_auth.filter(
                provider=backend.name
            )[0]
        except __HOLE__:
            pass
    return ''
IndexError
dataset/ETHPy150Open omab/python-social-auth/examples/django_example/example/app/templatetags/backend_utils.py/associated
7,897
def do_parallel_inference(args):
    """Perform inference in parallel on several observation matrices with
    joint parameters
    """
    from treehmm import random_params, do_inference, plot_params, plot_energy, load_params
    from treehmm.vb_mf import normalize_trans
    from treehmm.static import float_type

    _x = sp.load(args.observe_matrix[0])
    args.continuous_observations = _x.dtype != sp.int8
    args.I, _, args.L = _x.shape
    I = args.I
    K = args.K
    L = args.L
    args.T = 'all'
    args.free_energy = []
    args.observe = 'all.npy'
    args.last_free_energy = 0
    args.emit_sum = sp.zeros((K, L), dtype=float_type)

    args.out_dir = args.out_dir.format(timestamp=time.strftime('%x_%X').replace('/', '-'),
                                       **args.__dict__)
    try:
        print 'making', args.out_dir
        os.makedirs(args.out_dir)
    except OSError:
        pass

    if args.warm_start:
        # args.last_free_energy, args.theta, args.alpha, args.beta, args.gamma, args.emit_probs, args.emit_sum = load_params(args)
        # args.warm_start = False
        print '# loading previous params for warm start from %s' % args.warm_start
        tmpargs = copy.deepcopy(args)
        tmpargs.out_dir = args.warm_start
        tmpargs.observe = 'all.npy'
        args.free_energy, args.theta, args.alpha, args.beta, args.gamma, args.emit_probs, args.emit_sum = load_params(tmpargs)
        try:
            args.free_energy = list(args.free_energy)
        except TypeError:
            # no previous free energy
            args.free_energy = []
        print 'done'
        args.warm_start = False
    else:
        (args.theta, args.alpha, args.beta, args.gamma, args.emit_probs) = \
            random_params(args.I, args.K, args.L, args.separate_theta)
    for p in ['free_energy', 'theta', 'alpha', 'beta', 'gamma', 'emit_probs',
              'last_free_energy', 'emit_sum']:
        sp.save(os.path.join(args.out_dir,
                             args.out_params.format(param=p, **args.__dict__)),
                args.__dict__[p])
    args.iteration = 0
    plot_params(args)

    print '# setting up job arguments'
    # set up new versions of args for other jobs
    job_args = [copy.copy(args) for i in range(len(args.observe_matrix))]
    for j, a in enumerate(job_args):
        a.observe_matrix = args.observe_matrix[j]
        a.observe = os.path.split(args.observe_matrix[j])[1]
        a.subtask = True
        a.func = None
        a.iteration = 0
        a.max_iterations = 1
        a.quiet_mode = True
        if j % 1000 == 0:
            print j

    if args.run_local:
        pool = multiprocessing.Pool()
    else:
        pool = sge.SGEPool()
    #job_handle = pool.imap_unordered(do_inference, job_args)

    converged = False
    for args.iteration in range(args.max_iterations):
        # import ipdb; ipdb.set_trace()
        # fresh parameters-- to be aggregated after jobs are run
        print 'iteration', args.iteration
        total_free = 0
        if args.separate_theta:
            args.theta = sp.zeros((I - 1, K, K, K), dtype=float_type)
        else:
            args.theta = sp.zeros((K, K, K), dtype=float_type)
        args.alpha = sp.zeros((K, K), dtype=float_type)
        args.beta = sp.zeros((K, K), dtype=float_type)
        args.gamma = sp.zeros((K), dtype=float_type)
        args.emit_probs = sp.zeros((K, L), dtype=float_type)
        if True:  # args.approx == 'clique':
            args.emit_sum = sp.zeros_like(args.emit_probs, dtype=float_type)
        else:
            args.emit_sum = sp.zeros((K, L), dtype=float_type)

        if args.run_local:
            iterator = pool.imap_unordered(do_inference, job_args,
                                           chunksize=args.chunksize)
            # wait for jobs to finish
            for result in iterator:
                pass
        else:
            jobs_handle = pool.map_async(do_inference, job_args,
                                         chunksize=args.chunksize)
            # wait for all jobs to finish
            for j in jobs_handle:
                j.wait()

        # sum free energies and parameters from jobs
        for a in job_args:
            # print '# loading from %s' % a.observe
            free_energy, theta, alpha, beta, gamma, emit_probs, emit_sum = load_params(a)
            # print 'free energy for this part:', free_energy
            if len(free_energy) > 0:
                last_free_energy = free_energy[-1]
            else:
                last_free_energy = 0
            total_free += last_free_energy
            args.theta += theta
            args.alpha += alpha
            args.beta += beta
            args.gamma += gamma
            args.emit_probs += emit_probs
            args.emit_sum += emit_sum

        # renormalize and plot
        print 'normalize aggregation... total free energy is:', total_free
        args.free_energy.append(total_free)
        if len(args.free_energy) > 1 and args.free_energy[-1] != 0 and args.free_energy[-2] != 0 \
                and abs((args.free_energy[-2] - args.free_energy[-1]) / args.free_energy[-2]) < args.epsilon:
            print 'converged. free energy diff:', args.free_energy, abs(args.free_energy[-2] - args.free_energy[-1]) / args.free_energy[-2]
            converged = True
        normalize_trans(args.theta, args.alpha, args.beta, args.gamma)
        # if True:  # args.approx == 'clique':
        #     # print 'clique emit renorm'
        #     args.emit_probs[:] = args.emit_probs / args.emit_sum
        # else:
        #     args.emit_probs[:] = sp.dot(sp.diag(1./args.emit_sum), args.emit_probs)
        args.emit_probs[:] = sp.dot(sp.diag(1. / args.emit_sum), args.emit_probs)
        for a in job_args:
            a.theta, a.alpha, a.beta, a.gamma, a.emit_probs, a.emit_sum = \
                args.theta, args.alpha, args.beta, args.gamma, args.emit_probs, args.emit_sum

        for p in ['free_energy', 'theta', 'alpha', 'beta', 'gamma',
                  'emit_probs', 'lmd', 'tau']:
            try:
                sp.save(os.path.join(args.out_dir,
                                     args.out_params.format(param=p, **args.__dict__)),
                        args.__dict__[p])
            except __HOLE__:
                pass
        plot_params(args)
        plot_energy(args)

        if args.save_Q >= 3:
            print '# reconstructing chromosomes from *chunk*',
            in_order = {}
            # Q_chr16_all.trimmed.chunk*.npy => Q_chr16_all.trimmed.npy
            all_chunks = glob.glob(os.path.join(args.out_dir, '*_Q_*chunk*.npy'))
            for chunk in all_chunks:
                print chunk
                chunk_num = int(re.search(r'chunk(\d+)', chunk).groups()[0])
                chrom_out = re.sub('chunk(\d+)\.', '', chunk)
                if chrom_out not in in_order:
                    in_order[chrom_out] = {}
                in_order[chrom_out][chunk_num] = sp.load(chunk)
            for chrom_out in in_order:
                print 'reconstructing chromosomes from', in_order[chrom_out]
                if len(in_order[chrom_out]) > 1:
                    final_array = sp.concatenate((in_order[chrom_out][0],
                                                  in_order[chrom_out][1]), axis=1)
                    for i in range(2, max(in_order[chrom_out])):
                        final_array = sp.concatenate((final_array,
                                                      in_order[chrom_out][i]), axis=1)
                else:
                    final_array = in_order[chrom_out][0]
                sp.save(chrom_out, final_array)

        if converged:
            break
KeyError
dataset/ETHPy150Open uci-cbcl/tree-hmm/treehmm/do_parallel.py/do_parallel_inference
7,898
def __length(self, collection):
        try:
            return len(collection)
        except __HOLE__:
            return sum(1 for i in collection)
TypeError
dataset/ETHPy150Open jaimegildesagredo/expects/expects/matchers/built_in/have_len.py/have_length.__length
7,899
def geocode(self, address, restriction):
        try:
            locs = self._geocode(address, restriction)
            rlocs = self._geocode(restriction)
            if locs:
                if rlocs:
                    restriction_latlon = rlocs[0][1]
                    locs.sort(key=lambda loc: distance_func(loc[1], restriction_latlon))
                description, (lat, lon) = locs[0]
                query = self.get_format_string(restriction) % address
                return description, (lat, lon), query
        except __HOLE__:
            return None
        except Exception as e:
            raise
        return None
KeyboardInterrupt
dataset/ETHPy150Open sirrice/dbtruck/dbtruck/analyze/geocode.py/DBTruckGeocoder.geocode