Dataset columns:
  Unnamed: 0: int64 (values 0 to 10k)
  function: string (lengths 79 to 138k)
  label: string (20 classes)
  info: string (lengths 42 to 261)
6,300
def init_device_api(self):
    """
    Initialize the protocols used by this instance based on the
    protocol capabilities array which is returned from A000.
    """
    product_data = self.get_product_data()
    try:
        self.device_id = product_data.by_pid[L000.PID_PRODUCT_DATA][0].data
        self.protocol_array = product_data.by_pid[L000.PID_PROTOCOL_ARRAY][0].data.protocol_array
        _log.debug("init_device_api: product_id=%d, software_version=%0.2f, description=%s",
                   self.device_id.product_id,
                   self.device_id.software_version/100.,
                   self.device_id.description)
        _log.debug("init_device_api: protocol_array=%s", self.protocol_array)
    except (__HOLE__, TypeError):
        raise DeviceNotSupportedError("Product data not returned by device.")
    self.data_types_by_protocol = data_types_by_protocol(self.protocol_array)
    # The tuples in this section define an ordered collection
    # of protocols which are candidates to provide each specific
    # function. Each protocol will be selected based on the first one
    # which exists in this device's capabilities.
    # This section needs to be updated whenever a new protocol
    # needs to be supported.
    self.link_proto = self._find_core_protocol("link", (L000, L001))
    self.cmd_proto = self._find_core_protocol("command", (A010,))
    self.trk_proto = self._find_app_protocol("get_trks", (A301, A302))
    self.lap_proto = self._find_app_protocol("get_laps", (A906,))
    self.run_proto = self._find_app_protocol("get_runs", (A1000,))
IndexError
dataset/ETHPy150Open braiden/python-ant-downloader/antd/garmin.py/Device.init_device_api
6,301
def read(self):
    try:
        return self.reader.next()
    except __HOLE__:
        return ""
StopIteration
dataset/ETHPy150Open braiden/python-ant-downloader/antd/garmin.py/MockHost.read
6,302
def encode(payload, key, algorithm='HS256'):
    segments = []
    header = {"typ": "JWT", "alg": algorithm}
    segments.append(base64url_encode(binary(json.dumps(header))))
    segments.append(base64url_encode(binary(json.dumps(payload))))
    sign_input = '.'.join(segments)
    try:
        signature = signing_methods[algorithm](binary(sign_input), binary(key))
    except __HOLE__:
        raise NotImplementedError("Algorithm not supported")
    segments.append(base64url_encode(signature))
    return '.'.join(segments)
KeyError
dataset/ETHPy150Open awslabs/lambda-apigateway-twilio-tutorial/twilio/jwt/__init__.py/encode
6,303
def decode(jwt, key='', verify=True):
    try:
        signing_input, crypto_segment = jwt.rsplit('.', 1)
        header_segment, payload_segment = signing_input.split('.', 1)
    except ValueError:
        raise DecodeError("Not enough segments")
    try:
        header_raw = base64url_decode(binary(header_segment)).decode('utf-8')
        payload_raw = base64url_decode(binary(payload_segment)).decode('utf-8')
        header = json.loads(header_raw)
        payload = json.loads(payload_raw)
        signature = base64url_decode(binary(crypto_segment))
    except (ValueError, TypeError):
        raise DecodeError("Invalid segment encoding")
    if verify:
        try:
            method = signing_methods[header['alg']]
            if not signature == method(binary(signing_input), binary(key)):
                raise DecodeError("Signature verification failed")
        except __HOLE__:
            raise DecodeError("Algorithm not supported")
    return payload
KeyError
dataset/ETHPy150Open awslabs/lambda-apigateway-twilio-tutorial/twilio/jwt/__init__.py/decode
6,304
def run(self):
    set_process_title("Cron scheduler process")
    self.log = logging.getLogger()
    for x in self.log.handlers:
        self.log.removeHandler(x)
    configure_logging(
        level=self.log_level,
        format='%(asctime)-15s %(process)d cron_scheduler %(levelname).1s: '
               '%(message)s',
        filename=self.log_filename,
    )
    self.log.debug("Starting")
    backend = get_backend()
    self.log.info("Loaded backend %s", backend)
    while self.running.value:
        try:
            self.tick(backend)
            # Sleep until the next second boundary. This corrects for skew
            # caused by the accumulation of tick() runtime.
            time.sleep((1 - time.time() % 1))
        except __HOLE__:
            sys.exit(1)
    self.log.info("Exiting")
KeyboardInterrupt
dataset/ETHPy150Open thread/django-lightweight-queue/django_lightweight_queue/cron_scheduler.py/CronScheduler.run
6,305
def get_config():
    config = []

    def get_matcher(minval, maxval, t):
        if t == '*':
            return lambda x: True
        parts = re.split(r'\s*,\s*', t)
        if not parts:
            return t
        t_parts = [int(x) for x in parts]
        for num in t_parts:
            assert num >= minval and num <= maxval, \
                "Invalid time specified in cron config. " \
                "Specified: %s, minval: %s, maxval: %s" % (
                    num, minval, maxval,
                )
        return lambda x: x in t_parts

    for app_config in apps.get_app_configs():
        app = app_config.name
        try:
            app_path = __import__(app, {}, {}, [app.split('.')[-1]]).__path__
        except AttributeError:
            continue
        try:
            imp.find_module('cron', app_path)
        except __HOLE__:
            continue
        mod = __import__('%s.cron' % app, fromlist=(app,))
        for row in mod.CONFIG:
            row['min_matcher'] = get_matcher(0, 59, row.get('minutes'))
            row['hour_matcher'] = get_matcher(0, 23, row.get('hours'))
            row['day_matcher'] = get_matcher(1, 7, row.get('days', '*'))
            row['queue'] = row.get('queue', 'cron')
            row['timeout'] = row.get('timeout', None)
            row['sigkill_on_stop'] = row.get('sigkill_on_stop', False)
            config.append(row)
            # We must ensure we have at least one worker for this queue.
            app_settings.WORKERS.setdefault(row['queue'], 1)
    return config
ImportError
dataset/ETHPy150Open thread/django-lightweight-queue/django_lightweight_queue/cron_scheduler.py/get_config
6,306
def separateExampleCache():
    examples = {}
    succ = fail = 0
    for cmdName, cmdInfo in factories.cmdlist.iteritems():
        try:
            examples[cmdName] = cmdInfo.pop('example')
            succ += 1
        except __HOLE__:
            fail += 1
            pass
    print "succeeded", succ
    print "failed ", fail
    mayautils.writeCache((factories.cmdlist,
                          factories.nodeHierarchy,
                          factories.uiClassList,
                          factories.nodeCommandList,
                          factories.moduleCmds),
                         'mayaCmdsList', 'the list of Maya commands', compressed=False)
    mayautils.writeCache(examples,
                         'mayaCmdsExamples', 'the list of Maya command examples', compressed=False)
KeyError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/maintenance/cachetools.py/separateExampleCache
6,307
def upgradeCmdCaches():
    import pymel.internal.cmdcache as cmdcache
    data = list(mayautils.loadCache('mayaCmdsList', compressed=False))
    cmdlist = data[0]
    nodeHierarchy = data[1]
    cmdDocList = {}
    examples = {}
    succ = fail = 0
    for cmdName, cmdInfo in cmdlist.iteritems():
        flags = cmdcache.getCallbackFlags(cmdInfo)
        if flags:
            cmdlist[cmdName]['callbackFlags'] = flags
        try:
            examples[cmdName] = cmdInfo.pop('example')
        except __HOLE__:
            pass
        newCmdInfo = {}
        if 'description' in cmdInfo:
            newCmdInfo['description'] = cmdInfo.pop('description')
        newFlagInfo = {}
        if 'flags' in cmdInfo:
            for flag, flagInfo in cmdInfo['flags'].iteritems():
                newFlagInfo[flag] = {'docstring': flagInfo.pop('docstring')}
            newCmdInfo['flags'] = newFlagInfo
        if newCmdInfo:
            cmdDocList[cmdName] = newCmdInfo
        if 'shortFlags' in cmdInfo:
            d = {}
            #print cmdName
            for flag, flagInfo in cmdInfo['shortFlags'].iteritems():
                if isinstance(flagInfo, dict):
                    d[flag] = flagInfo['longname']
                elif isinstance(flagInfo, basestring):
                    d[flag] = flagInfo
                else:
                    raise TypeError
            cmdInfo['shortFlags'] = d
    hierarchy = [(x.key,
                  tuple([y.key for y in x.parents()]),
                  tuple([y.key for y in x.childs()]))
                 for x in nodeHierarchy.preorder()]
    data[0] = cmdlist
    data[1] = hierarchy
    mayautils.writeCache(tuple(data),
                         'mayaCmdsList', 'the list of Maya commands', compressed=True)
    mayautils.writeCache(cmdDocList,
                         'mayaCmdsDocs', 'the Maya command documentation', compressed=True)
    mayautils.writeCache(examples,
                         'mayaCmdsExamples', 'the list of Maya command examples', compressed=True)
    # for cache, useVersion in [ ('mayaApiMelBridge',False), ('mayaApi',True) ]:
    #     data = mayautils.loadCache(cache, useVersion=useVersion, compressed=False)
    #     mayautils.writeCache(data, cache, useVersion=useVersion, compressed=True)
KeyError
dataset/ETHPy150Open CountZer0/PipelineConstructionSet/python/maya/site-packages/pymel-1.0.3/maintenance/cachetools.py/upgradeCmdCaches
6,308
def export_to_csv(search_keys, sec_keys_names, documents):
    """Helper to produce proper CSV file for search results export"""
    dman = DocumentTypeRuleManager()
    # Cleaning Up documents
    docs = {}
    for document in documents:
        docs[document.id] = document._doc
    # Init response CSV file
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=search_results.csv'
    writer = csv.writer(response)
    # Writing table headers
    counter = ['File', ] + ['Date'] + ['Username'] + ['Description', ] + sec_keys_names + ['Type', ]
    writer.writerow(counter)
    # Writing each file's data into appropriate rows
    for name, doc in docs.iteritems():
        doc_sec_keys = []
        for sec_key in sec_keys_names:
            try:
                # Adding secondary key's value for doc
                doc_sec_keys.append(unicode(doc['mdt_indexes'][sec_key]).encode('utf8'))
            except __HOLE__:
                # No value exists
                doc_sec_keys.append('Not given',)
        # Converting date to Y-m-d format
        cr_date = date_standardized(re.sub("T\d{2}:\d{2}:\d{2}Z", "", doc['metadata_created_date']))
        # Catching Document's type rule to name it in export
        # This way should not produce SQL DB requests (Uses DocumentTypeRuleManagerInstance for this)
        docrule = dman.get_docrule_by_id(doc['metadata_doc_type_rule_id'])
        docrule_name = docrule.get_title()
        # Final row adding
        doc_row = [unicode(name).encode('utf8'), ] + [cr_date, ] + \
                  [unicode(doc['metadata_user_name']).encode('utf8'), ] + \
                  [unicode(doc['metadata_description']).encode('utf8')] + \
                  doc_sec_keys + [unicode(docrule_name).encode('utf8'), ]
        writer.writerow(doc_row)
    # Appending search request parameters into CSV
    writer.writerow('\r\n')
    writer.writerow(['Search with Keys:'])
    writer.writerow(['Key', 'Value'])
    for item, value in search_keys.iteritems():
        if item == u'date':
            item = u'Indexing Date'
        if item == u'end_date':
            item = u'Indexing Date to'
        if item == u'description':
            item = u'Description'
        if item == u'docrule_id':
            item = u'Document Type'
            id = value
            docrule = dman.get_docrule_by_id(id)
            value = docrule.get_title()
        if not value.__class__.__name__ == 'tuple':
            writer.writerow([unicode(item).encode('utf8'), unicode(value).encode('utf8')])
        else:
            writer.writerow(unicode(item).encode('utf8') + u': (from: ' +
                            unicode(value[0]).encode('utf8') + u' to: ' +
                            unicode(value[1]).encode('utf8') + u')')
    return response
KeyError
dataset/ETHPy150Open adlibre/Adlibre-DMS/adlibre_dms/apps/mdtui/data_exporter.py/export_to_csv
6,309
def get_instance(self, name=None):
    try:
        return registry[name] if name else registry
    except __HOLE__:
        raise ResourceException("The resource [%s] does not exist" % name)
KeyError
dataset/ETHPy150Open comodit/synapse-agent/synapse/resource_locator.py/ResourceLocator.get_instance
6,310
def test_attach_class_hint(self):
    try:
        connection = OrientSocket("localhost", 2424)
        tx = TxCommitMessage(connection)
        tx.begin()
        tx.attach([1, 2, 3])
        assert False  # should not happens
    except __HOLE__ as e:
        assert 'A subclass of BaseMessage was expected' == str(e)
        assert True
AssertionError
dataset/ETHPy150Open mogui/pyorient/tests/test_raw_messages_3.py/RawMessages_5_TestCase.test_attach_class_hint
6,311
def test_private_prepare(self):
    try:
        connection = OrientSocket("localhost", 2424)
        DbOpenMessage(connection)\
            .prepare(("GratefulDeadConcerts", "admin", "admin", DB_TYPE_DOCUMENT, ""))\
            .send().fetch_response()
        tx = TxCommitMessage(connection)
        tx.begin()
        tx.prepare()
        assert False
    except __HOLE__ as e:
        print(str(e))
        assert True
AttributeError
dataset/ETHPy150Open mogui/pyorient/tests/test_raw_messages_3.py/RawMessages_5_TestCase.test_private_prepare
6,312
def test_private_send(self):
    try:
        connection = OrientSocket("localhost", 2424)
        DbOpenMessage(connection)\
            .prepare(("GratefulDeadConcerts", "admin", "admin", DB_TYPE_DOCUMENT, ""))\
            .send().fetch_response()
        tx = TxCommitMessage(connection)
        tx.begin()
        tx.send()
        assert False
    except __HOLE__ as e:
        print(str(e))
        assert True
AttributeError
dataset/ETHPy150Open mogui/pyorient/tests/test_raw_messages_3.py/RawMessages_5_TestCase.test_private_send
6,313
def test_private_fetch(self):
    try:
        connection = OrientSocket("localhost", 2424)
        DbOpenMessage(connection)\
            .prepare(("GratefulDeadConcerts", "admin", "admin", DB_TYPE_DOCUMENT, ""))\
            .send().fetch_response()
        tx = TxCommitMessage(connection)
        tx.begin()
        tx.fetch_response()
        assert False
    except __HOLE__ as e:
        print(str(e))
        assert True
AttributeError
dataset/ETHPy150Open mogui/pyorient/tests/test_raw_messages_3.py/RawMessages_5_TestCase.test_private_fetch
6,314
def __exit__(self, exc_type, exc_value, tb):
    if exc_type is None:
        try:
            exc_name = self.expected.__name__
        except __HOLE__:
            exc_name = str(self.expected)
        raise self.failureException(
            "{0} not raised".format(exc_name))
    if not issubclass(exc_type, self.expected):
        # let unexpected exceptions pass through
        return False
    self.exception = exc_value  # store for later retrieval
    return True
AttributeError
dataset/ETHPy150Open ducksboard/libsaas/libsaas/port.py/_AssertRaisesContext.__exit__
6,315
def _load(self, path):
    try:
        import adblockparser
    except __HOLE__:
        log.msg('WARNING: https://github.com/scrapinghub/adblockparser '
                'library is not available, filters are not loaded.')
        return
    for fname in os.listdir(path):
        if not fname.endswith('.txt'):
            continue
        fpath = os.path.join(path, fname)
        name = fname[:-len('.txt')]
        if not os.path.isfile(fpath):
            continue
        if self.verbosity >= 1:
            log.msg("Loading filter %s" % name)
        with open(fpath, 'rb') as f:
            lines = [line.decode('utf8').strip() for line in f]
        rules = adblockparser.AdblockRules(
            lines,
            supported_options=self.supported_options,
            skip_unsupported_rules=False,
            max_mem=512*1024*1024,  # this doesn't actually use 512M
        )
        filters_num = len(rules.rules)
        if self.verbosity >= 2:
            log.msg("%d rule(s) loaded for filter %s" % (filters_num, name))
        if not rules.uses_re2 and filters_num > self.RE2_WARN_THRESHOLD:
            log.msg('WARNING: a filter %s with %d rules loaded, but '
                    'pyre2 library is not installed. Matching may become '
                    'slow; installing https://github.com/axiak/pyre2 is '
                    'highly recommended.' % (name, filters_num))
        self.filters[name] = rules
ImportError
dataset/ETHPy150Open scrapinghub/splash/splash/request_middleware.py/AdblockRulesRegistry._load
6,316
@check_job_permission
def job_attempt_logs_json(request, job, attempt_index=0, name='syslog', offset=0):
    """For async log retrieval as Yarn servers are very slow"""
    try:
        attempt_index = int(attempt_index)
        attempt = job.job_attempts['jobAttempt'][attempt_index]
        log_link = attempt['logsLink']
    except (__HOLE__, RestException), e:
        raise KeyError(_("Cannot find job attempt '%(id)s'.") % {'id': job.jobId}, e)
    link = '/%s/' % name
    params = {}
    if offset and int(offset) >= 0:
        params['start'] = offset
    root = Resource(get_log_client(log_link), urlparse.urlsplit(log_link)[2], urlencode=False)
    debug_info = ''
    try:
        response = root.get(link, params=params)
        log = html.fromstring(response, parser=html.HTMLParser()).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()
    except Exception, e:
        log = _('Failed to retrieve log: %s' % e)
        try:
            debug_info = '\nLog Link: %s' % log_link
            debug_info += '\nHTML Response: %s' % response
            LOG.error(debug_info)
        except:
            LOG.exception('failed to create debug info')
    response = {'log': LinkJobLogs._make_hdfs_links(log), 'debug': debug_info}
    return JsonResponse(response)
KeyError
dataset/ETHPy150Open cloudera/hue/apps/jobbrowser/src/jobbrowser/views.py/job_attempt_logs_json
6,317
@check_job_permission
def single_task_attempt(request, job, taskid, attemptid):
    jt = get_api(request.user, request.jt)
    job_link = jt.get_job_link(job.jobId)
    task = job_link.get_task(taskid)
    try:
        attempt = task.get_attempt(attemptid)
    except (__HOLE__, RestException), e:
        raise PopupException(_("Cannot find attempt '%(id)s' in task") % {'id': attemptid}, e)
    return render("attempt.mako", request, {
        "attempt": attempt,
        "taskid": taskid,
        "joblnk": job_link,
        "task": task
    })
KeyError
dataset/ETHPy150Open cloudera/hue/apps/jobbrowser/src/jobbrowser/views.py/single_task_attempt
6,318
@check_job_permission
def single_task_attempt_logs(request, job, taskid, attemptid):
    jt = get_api(request.user, request.jt)
    job_link = jt.get_job_link(job.jobId)
    task = job_link.get_task(taskid)
    try:
        attempt = task.get_attempt(attemptid)
    except (__HOLE__, RestException), e:
        raise KeyError(_("Cannot find attempt '%(id)s' in task") % {'id': attemptid}, e)
    first_log_tab = 0
    try:
        # Add a diagnostic log
        if job_link.is_mr2:
            diagnostic_log = attempt.diagnostics
        else:
            diagnostic_log = ", ".join(task.diagnosticMap[attempt.attemptId])
        logs = [diagnostic_log]
        # Add remaining logs
        logs += [section.strip() for section in attempt.get_task_log()]
        log_tab = [i for i, log in enumerate(logs) if log]
        if log_tab:
            first_log_tab = log_tab[0]
    except TaskTrackerNotFoundException:
        # Four entries,
        # for diagnostic, stdout, stderr and syslog
        logs = [_("Failed to retrieve log. TaskTracker not found.")] * 4
    except urllib2.URLError:
        logs = [_("Failed to retrieve log. TaskTracker not ready.")] * 4
    context = {
        "attempt": attempt,
        "taskid": taskid,
        "joblnk": job_link,
        "task": task,
        "logs": logs,
        "first_log_tab": first_log_tab,
    }
    if request.GET.get('format') == 'python':
        return context
    else:
        context['logs'] = [LinkJobLogs._make_links(log) for i, log in enumerate(logs)]
    if request.GET.get('format') == 'json':
        response = {
            "logs": context['logs'],
            "isRunning": job.status.lower() in ('running', 'pending', 'prep')
        }
        return JsonResponse(response)
    else:
        return render("attempt_logs.mako", request, context)
KeyError
dataset/ETHPy150Open cloudera/hue/apps/jobbrowser/src/jobbrowser/views.py/single_task_attempt_logs
6,319
def do_toggle(self):
    try:
        if not self.sensorEnabled:
            accelerometer.enable()
            Clock.schedule_interval(self.get_acceleration, 1 / 20.)
            self.sensorEnabled = True
            self.ids.toggle_button.text = "Stop Accelerometer"
        else:
            accelerometer.disable()
            Clock.unschedule(self.get_acceleration)
            self.sensorEnabled = False
            self.ids.toggle_button.text = "Start Accelerometer"
    except __HOLE__:
        import traceback
        traceback.print_exc()
        status = "Accelerometer is not implemented for your platform"
        self.ids.accel_status.text = status
NotImplementedError
dataset/ETHPy150Open kivy/plyer/examples/accelerometer/basic/main.py/AccelerometerTest.do_toggle
6,320
def Start(db, port=0, is_master=False, server_cls=ThreadedHTTPServer,
          reqhandler_cls=DataServerHandler):
    """Start the data server."""
    # This is the service that will handle requests to the data store.
    if reqhandler_cls.MASTER or reqhandler_cls.DATA_SERVER:
        logging.fatal("Attempt to start server with duplicate request handler.")
    if not reqhandler_cls.SERVICE:
        reqhandler_cls.SERVICE = store.DataStoreService(db)
    # Create the command table for faster execution of remote calls.
    # Along with a method, each command has the required permissions.
    cmd = rdf_data_server.DataStoreCommand.Command
    reqhandler_cls.CMDTABLE = {
        cmd.DELETE_ATTRIBUTES: (reqhandler_cls.SERVICE.DeleteAttributes, "w"),
        cmd.DELETE_SUBJECT: (reqhandler_cls.SERVICE.DeleteSubject, "w"),
        cmd.MULTI_SET: (reqhandler_cls.SERVICE.MultiSet, "w"),
        cmd.MULTI_RESOLVE_PREFIX: (reqhandler_cls.SERVICE.MultiResolvePrefix, "r"),
        cmd.RESOLVE_MULTI: (reqhandler_cls.SERVICE.ResolveMulti, "r"),
        cmd.LOCK_SUBJECT: (reqhandler_cls.SERVICE.LockSubject, "w"),
        cmd.EXTEND_SUBJECT: (reqhandler_cls.SERVICE.ExtendSubject, "w"),
        cmd.UNLOCK_SUBJECT: (reqhandler_cls.SERVICE.UnlockSubject, "w"),
        cmd.SCAN_ATTRIBUTES: (reqhandler_cls.SERVICE.ScanAttributes, "r")
    }
    # Initialize nonce store for authentication.
    if not reqhandler_cls.NONCE_STORE:
        reqhandler_cls.NONCE_STORE = auth.NonceStore()
    if port == 0 or port is None:
        logging.debug("No port was specified as a parameter. Expecting to find "
                      "port in configuration file.")
    else:
        logging.debug("Port specified was '%i'. Ignoring configuration directive "
                      "Dataserver.port.", port)
    server_port = port or config_lib.CONFIG["Dataserver.port"]
    if is_master:
        logging.debug("Master server running on port '%i'", server_port)
        reqhandler_cls.InitMasterServer(server_port)
    else:
        logging.debug("Non-master data server running on port '%i'", server_port)
        reqhandler_cls.InitDataServer(server_port)
    reqhandler_cls.InitHandlerTables()
    logging.info("Starting! master: " + str(is_master) + " with handler " +
                 reqhandler_cls.__name__)
    try:
        server = server_cls(("", server_port), reqhandler_cls)
        server.serve_forever()
    except __HOLE__:
        print ("Caught keyboard interrupt, stopping server at port %s" %
               server_port)
    except socket.error:
        print "Service already running at port %s" % server_port
    finally:
        if reqhandler_cls.MASTER:
            reqhandler_cls.MASTER.Stop()
        else:
            reqhandler_cls.DATA_SERVER.Stop()
KeyboardInterrupt
dataset/ETHPy150Open google/grr/grr/server/data_server/data_server.py/Start
6,321
def check_nondiff_rop(self, y):
    """ If your op is not differentiable(so you can't define Rop)
    test that an error is raised."""
    raised = False
    try:
        tmp = tensor.Rop(y, self.x, self.v)
    except __HOLE__:
        raised = True
    if not raised:
        self.fail((
            'Op did not raise an error even though the function'
            ' is not differentiable'))
ValueError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tests/test_rop.py/RopLop_checker.check_nondiff_rop
6,322
def check_rop_lop(self, y, out_shape):
    """ As check_mat_rop_lop, except the input is self.x which is a
    vector. The output is still a vector. """
    # TEST ROP
    vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
                       theano.config.floatX)
    vv = numpy.asarray(self.rng.uniform(size=self.in_shape),
                       theano.config.floatX)
    yv = tensor.Rop(y, self.x, self.v)
    rop_f = function([self.x, self.v], yv, on_unused_input='ignore')
    J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
                       sequences=tensor.arange(y.shape[0]),
                       non_sequences=[y, self.x])
    sy = tensor.dot(J, self.v)
    scan_f = function([self.x, self.v], sy, on_unused_input='ignore')
    v1 = rop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert numpy.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
    known_fail = False
    try:
        self.check_nondiff_rop(theano.clone(y, replace={self.x: break_op(self.x)}))
    except __HOLE__:
        known_fail = True
    # TEST LOP
    vx = numpy.asarray(self.rng.uniform(size=self.in_shape),
                       theano.config.floatX)
    vv = numpy.asarray(self.rng.uniform(size=out_shape),
                       theano.config.floatX)
    yv = tensor.Lop(y, self.x, self.v)
    lop_f = function([self.x, self.v], yv, on_unused_input='ignore')
    J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
                       sequences=tensor.arange(y.shape[0]),
                       non_sequences=[y, self.x])
    sy = tensor.dot(self.v, J)
    scan_f = function([self.x, self.v], sy)
    v1 = lop_f(vx, vv)
    v2 = scan_f(vx, vv)
    assert numpy.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
    if known_fail:
        raise SkipTest('Rop does not handle non-differentiable inputs '
                       'correctly. Bug exposed by fixing Add.grad method.')
AssertionError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tests/test_rop.py/RopLop_checker.check_rop_lop
6,323
def test_invalid_input(self):
    success = False
    try:
        tensor.Rop(0., [tensor.matrix()], [tensor.vector()])
        success = True
    except __HOLE__:
        pass
    assert not success
ValueError
dataset/ETHPy150Open rizar/attention-lvcsr/libs/Theano/theano/tests/test_rop.py/test_RopLop.test_invalid_input
6,324
def _has_changed(self, initial, data):
    if initial is None:
        initial = ['' for x in range(0, len(data))]
    else:
        if not isinstance(initial, list):
            initial = self.decompress(initial)
    amount_widget, currency_widget = self.widgets
    amount_initial, currency_initial = initial
    try:
        amount_data = data[0]
    except IndexError:
        amount_data = None
    if amount_widget._has_changed(amount_initial, amount_data):
        return True
    try:
        currency_data = data[1]
    except __HOLE__:
        currency_data = None
    if currency_widget._has_changed(currency_initial, currency_data) and amount_data:
        return True
    return False
IndexError
dataset/ETHPy150Open django-money/django-money/djmoney/forms/widgets.py/MoneyWidget._has_changed
6,325
def process_locale_dir(self, locale_dir, files):
    """
    Extract translatable literals from the specified files, creating or
    updating the POT file for a given locale directory.

    Uses the xgettext GNU gettext utility.
    """
    build_files = []
    for translatable in files:
        if self.verbosity > 1:
            self.stdout.write('processing file %s in %s\n' % (
                translatable.file, translatable.dirpath
            ))
        if self.domain not in ('djangojs', 'django'):
            continue
        build_file = self.build_file_class(self, self.domain, translatable)
        try:
            build_file.preprocess()
        except __HOLE__ as e:
            self.stdout.write(
                'UnicodeDecodeError: skipped file %s in %s (reason: %s)' % (
                    translatable.file, translatable.dirpath, e,
                )
            )
            continue
        build_files.append(build_file)
    if self.domain == 'djangojs':
        is_templatized = build_file.is_templatized
        args = [
            'xgettext',
            '-d', self.domain,
            '--language=%s' % ('C' if is_templatized else 'JavaScript',),
            '--keyword=gettext_noop',
            '--keyword=gettext_lazy',
            '--keyword=ngettext_lazy:1,2',
            '--keyword=pgettext:1c,2',
            '--keyword=npgettext:1c,2,3',
            '--output=-',
        ]
    elif self.domain == 'django':
        args = [
            'xgettext',
            '-d', self.domain,
            '--language=Python',
            '--keyword=gettext_noop',
            '--keyword=gettext_lazy',
            '--keyword=ngettext_lazy:1,2',
            '--keyword=ugettext_noop',
            '--keyword=ugettext_lazy',
            '--keyword=ungettext_lazy:1,2',
            '--keyword=pgettext:1c,2',
            '--keyword=npgettext:1c,2,3',
            '--keyword=pgettext_lazy:1c,2',
            '--keyword=npgettext_lazy:1c,2,3',
            '--output=-',
        ]
    else:
        return
    input_files = [bf.work_path for bf in build_files]
    with NamedTemporaryFile(mode='w+') as input_files_list:
        input_files_list.write('\n'.join(input_files))
        input_files_list.flush()
        args.extend(['--files-from', input_files_list.name])
        args.extend(self.xgettext_options)
        msgs, errors, status = popen_wrapper(args)
    if errors:
        if status != STATUS_OK:
            for build_file in build_files:
                build_file.cleanup()
            raise CommandError(
                'errors happened while running xgettext on %s\n%s' %
                ('\n'.join(input_files), errors)
            )
        elif self.verbosity > 0:
            # Print warnings
            self.stdout.write(errors)
    if msgs:
        if locale_dir is NO_LOCALE_DIR:
            file_path = os.path.normpath(build_files[0].path)
            raise CommandError(
                'Unable to find a locale path to store translations for '
                'file %s' % file_path
            )
        for build_file in build_files:
            msgs = build_file.postprocess_messages(msgs)
        potfile = os.path.join(locale_dir, '%s.pot' % str(self.domain))
        write_pot_file(potfile, msgs)
    for build_file in build_files:
        build_file.cleanup()
UnicodeDecodeError
dataset/ETHPy150Open django/django/django/core/management/commands/makemessages.py/Command.process_locale_dir
6,326
def validate_and_simplify(geom, meter_units=False):
    if SHAPELY_SUPPORTS_BUFFER:
        try:
            # buffer(0) is nearly fast as is_valid
            return geom.buffer(0)
        except __HOLE__:
            # shapely raises ValueError if buffer(0) result is empty
            raise InvalidGeometryError('geometry is empty')
    orig_geom = geom
    if not geom.is_valid:
        tolerance = TOLERANCE_METERS if meter_units else TOLERANCE_DEEGREES
        try:
            geom = geom.simplify(tolerance, False)
        except ValueError:
            # shapely raises ValueError if buffer(0) result is empty
            raise InvalidGeometryError('geometry is empty')
        if not geom.is_valid:
            raise InvalidGeometryError('geometry is invalid, could not simplify: %s' %
                                       orig_geom)
    return geom
ValueError
dataset/ETHPy150Open omniscale/imposm/imposm/geom.py/validate_and_simplify
6,327
def intersection(self, geom):
    intersection_ids = list(self.index.intersection(geom.bounds))
    if not intersection_ids:
        raise EmtpyGeometryError('No intersection or empty geometry')
    intersections = []
    for i in intersection_ids:
        polygon = self.polygons[i]
        if polygon.contains(geom):
            return geom
        if polygon.intersects(geom):
            try:
                new_geom_part = polygon.intersection(geom)
                new_geom_part = filter_geometry_by_type(new_geom_part, geom.type)
                if new_geom_part:
                    if isinstance(new_geom_part, list):
                        intersections.extend(new_geom_part)
                    else:
                        intersections.append(new_geom_part)
            except TopologicalError:
                pass
    if not intersections:
        raise EmtpyGeometryError('No intersection or empty geometry')
    # intersections from multiple sub-polygons
    # try to merge them back to a single geometry
    try:
        if geom.type.endswith('Polygon'):
            union = cascaded_union(list(flatten_polygons(intersections)))
        elif geom.type.endswith('LineString'):
            linestrings = flatten_linestrings(intersections)
            linestrings = list(filter_invalid_linestrings(linestrings))
            if not linestrings:
                raise EmtpyGeometryError()
            union = linemerge(linestrings)
            if union.type == 'MultiLineString':
                union = list(union.geoms)
        elif geom.type == 'Point':
            union = intersections[0]
        else:
            log.warn('unexpected geometry type %s', geom.type)
            raise EmtpyGeometryError()
    except __HOLE__, ex:
        # likely an 'No Shapely geometry can be created from null value' error
        log.warn('could not create union: %s', ex)
        raise EmtpyGeometryError()
    return union
ValueError
dataset/ETHPy150Open omniscale/imposm/imposm/geom.py/LimitRTreeGeometry.intersection
6,328
def _color_name_to_rgb(self, color):
    " Turn 'ffffff', into (0xff, 0xff, 0xff). "
    try:
        rgb = int(color, 16)
    except __HOLE__:
        raise
    else:
        r = (rgb >> 16) & 0xff
        g = (rgb >> 8) & 0xff
        b = rgb & 0xff
        return r, g, b
ValueError
dataset/ETHPy150Open jonathanslenders/python-prompt-toolkit/prompt_toolkit/terminal/vt100_output.py/_EscapeCodeCache._color_name_to_rgb
6,329
def _color_to_code(self, color, bg=False):
    " Return a tuple with the vt100 values that represent this color. "
    table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS
    # 16 ANSI colors. (Given by name.)
    if color in table:
        result = (table[color], )
    # RGB colors. (Defined as 'ffffff'.)
    else:
        try:
            rgb = self._color_name_to_rgb(color)
        except __HOLE__:
            return ()
        # When only 16 colors are supported, use that.
        if self._supports_only_16_colors():
            if bg:
                result = (_16_bg_colors[rgb], )
            else:
                result = (_16_fg_colors[rgb], )
        # True colors. (Only when this feature is enabled.)
        elif self.true_color:
            r, g, b = rgb
            result = (48 if bg else 38, 2, r, g, b)
        # 256 RGB colors.
        else:
            result = (48 if bg else 38, 5, _256_colors[rgb])
    return map(six.text_type, result)
ValueError
dataset/ETHPy150Open jonathanslenders/python-prompt-toolkit/prompt_toolkit/terminal/vt100_output.py/_EscapeCodeCache._color_to_code
6,330
def flush(self):
    """
    Write to output stream and flush.
    """
    if not self._buffer:
        return
    data = ''.join(self._buffer)
    try:
        # (We try to encode ourself, because that way we can replace
        # characters that don't exist in the character set, avoiding
        # UnicodeEncodeError crashes. E.g. u'\xb7' does not appear in 'ascii'.)
        # My Arch Linux installation of july 2015 reported 'ANSI_X3.4-1968'
        # for sys.stdout.encoding in xterm.
        if hasattr(self.stdout, 'encoding'):
            out = self.stdout if six.PY2 else self.stdout.buffer
            out.write(data.encode(self.stdout.encoding or 'utf-8', 'replace'))
        else:
            self.stdout.write(data)
        self.stdout.flush()
    except __HOLE__ as e:
        if e.args and e.args[0] == errno.EINTR:
            # Interrupted system call. Can happen in case of a window
            # resize signal. (Just ignore. The resize handler will render
            # again anyway.)
            pass
        elif e.args and e.args[0] == 0:
            # This can happen when there is a lot of output and the user
            # sends a KeyboardInterrupt by pressing Control-C. E.g. in
            # a Python REPL when we execute "while True: print('test')".
            # (The `ptpython` REPL uses this `Output` class instead of
            # `stdout` directly -- in order to be network transparent.)
            # So, just ignore.
            pass
        else:
            raise
    self._buffer = []
IOError
dataset/ETHPy150Open jonathanslenders/python-prompt-toolkit/prompt_toolkit/terminal/vt100_output.py/Vt100_Output.flush
6,331
def parse_fields(r):
    query_time = datetime.datetime.utcnow()
    params = r.args.copy()
    for s in PARAMS_EXCLUDE:
        if s in params:
            del params[s]
    if params.get('q', None):
        query = json.loads(params['q'])
        del params['q']
    else:
        query = dict()
    if g.get('customer', None):
        query['customer'] = g.get('customer')
    page = params.get('page', 1)
    if 'page' in params:
        del params['page']
    page = int(page)
    if params.get('from-date', None):
        try:
            from_date = datetime.datetime.strptime(params['from-date'], '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError as e:
            LOG.warning('Could not parse from-date query parameter: %s', e)
            raise
        from_date = from_date.replace(tzinfo=pytz.utc)
        del params['from-date']
    else:
        from_date = None
    if params.get('to-date', None):
        try:
            to_date = datetime.datetime.strptime(params['to-date'], '%Y-%m-%dT%H:%M:%S.%fZ')
        except __HOLE__ as e:
            LOG.warning('Could not parse to-date query parameter: %s', e)
            raise
        to_date = to_date.replace(tzinfo=pytz.utc)
        del params['to-date']
    else:
        to_date = query_time
        to_date = to_date.replace(tzinfo=pytz.utc)
    if from_date and to_date:
        query['lastReceiveTime'] = {'$gt': from_date, '$lte': to_date}
    elif to_date:
        query['lastReceiveTime'] = {'$lte': to_date}
    if params.get('duplicateCount', None):
        query['duplicateCount'] = int(params.get('duplicateCount'))
        del params['duplicateCount']
    if params.get('repeat', None):
        query['repeat'] = True if params.get('repeat', 'true') == 'true' else False
        del params['repeat']
    sort = list()
    direction = 1
    if params.get('reverse', None):
        direction = -1
        del params['reverse']
    if params.get('sort-by', None):
        for sort_by in params.getlist('sort-by'):
            if sort_by in ['createTime', 'receiveTime', 'lastReceiveTime']:
                sort.append((sort_by, -direction))  # reverse chronological
            else:
                sort.append((sort_by, direction))
        del params['sort-by']
    else:
        sort.append(('lastReceiveTime', -direction))
    group = list()
    if 'group-by' in params:
        group = params.get('group-by')
        del params['group-by']
    if 'limit' in params:
        limit = params.get('limit')
        del params['limit']
    else:
        limit = app.config['QUERY_LIMIT']
    limit = int(limit)
    ids = params.getlist('id')
    if len(ids) == 1:
        query['$or'] = [{'_id': {'$regex': '^' + ids[0]}},
                        {'lastReceiveId': {'$regex': '^' + ids[0]}}]
        del params['id']
    elif ids:
        query['$or'] = [{'_id': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}},
                        {'lastReceiveId': {'$regex': re.compile('|'.join(['^' + i for i in ids]))}}]
        del params['id']
    if 'fields' in params:
        fields = dict([(field, True) for field in params.get('fields').split(',')])
        fields.update({'resource': True, 'event': True, 'environment': True,
                       'createTime': True, 'receiveTime': True, 'lastReceiveTime': True})
        del params['fields']
    elif 'fields!' in params:
        fields = dict([(field, False) for field in params.get('fields!').split(',')])
        del params['fields!']
    else:
        fields = dict()
    for field in params:
        value = params.getlist(field)
        if len(value) == 1:
            value = value[0]
            if field.endswith('!'):
                if value.startswith('~'):
                    query[field[:-1]] = dict()
                    query[field[:-1]]['$not'] = re.compile(value[1:], re.IGNORECASE)
                else:
                    query[field[:-1]] = dict()
                    query[field[:-1]]['$ne'] = value
            else:
                if value.startswith('~'):
                    query[field] = dict()
                    query[field]['$regex'] = re.compile(value[1:], re.IGNORECASE)
                else:
                    query[field] = value
        else:
            if field.endswith('!'):
                if '~' in [v[0] for v in value]:
                    value = '|'.join([v.lstrip('~') for v in value])
                    query[field[:-1]] = dict()
                    query[field[:-1]]['$not'] = re.compile(value, re.IGNORECASE)
                else:
                    query[field[:-1]] = dict()
                    query[field[:-1]]['$nin'] = value
            else:
                if '~' in [v[0] for v in value]:
                    value = '|'.join([v.lstrip('~') for v in value])
                    query[field] = dict()
                    query[field]['$regex'] = re.compile(value, re.IGNORECASE)
                else:
                    query[field] = dict()
                    query[field]['$in'] = value
    return query, fields, sort, group, page, limit, query_time
ValueError
dataset/ETHPy150Open guardian/alerta/alerta/app/utils.py/parse_fields
6,332
def _transform_object(self, obj):
    wrapped = Transformable(obj)
    error_component = wrapped['error_component'].resolve()
    if error_component is not None and error_component == 'connect':
        raise errors.IgnoreObject("Error connecting")
    banner = wrapped['data']['banner'].resolve()
    ehlo = wrapped['data']['ehlo'].resolve()
    starttls = wrapped['data']['starttls'].resolve()
    zout = ZMapTransformOutput()
    try:
        tls_handshake = obj['data']['tls']
        out, certificates = https.HTTPSTransform.make_tls_obj(tls_handshake)
        zout.transformed['tls'] = out
        zout.certificates = certificates
    except (__HOLE__, TypeError, IndexError):
        pass
    if banner is not None:
        zout.transformed['banner'] = self.clean_banner(banner)
    if ehlo is not None:
        zout.transformed['ehlo'] = self.clean_banner(ehlo)
    if starttls is not None:
        zout.transformed['starttls'] = self.clean_banner(starttls)
    if len(zout.transformed) == 0:
        raise errors.IgnoreObject("Empty output dict")
    return zout
KeyError
dataset/ETHPy150Open zmap/ztag/ztag/transforms/smtp.py/SMTPStartTLSTransform._transform_object
6,333
@classmethod
def factory(cls, data):
    try:
        cm = CLASS_MAP[cls.__name__]
        for key in data:
            if key in cm and isinstance(data[key], list):
                l = []
                for d in data[key]:
                    l.append(JsonObjectFactory.create(cm[key], d))
                data[key] = l
            elif key in cm:
                data[key] = JsonObjectFactory.create(cm[key], data[key])
    except __HOLE__:
        pass
    return cls(**data)
KeyError
dataset/ETHPy150Open dave-tucker/hp-sdn-client/hpsdnclient/datatypes.py/JsonObject.factory
6,334
def __eq__(self, other):
    attributes = [attr for attr in dir(self)
                  if not callable(getattr(self, attr)) and not attr.startswith("__")]
    for attr in attributes:
        try:
            if self.__getattribute__(attr) == other.__getattribute__(attr):
                continue
            else:
                return False
        except __HOLE__:
            return False
    else:
        return True

# OpenFlow #
AttributeError
dataset/ETHPy150Open dave-tucker/hp-sdn-client/hpsdnclient/datatypes.py/JsonObject.__eq__
6,335
@classmethod
def factory(cls, data):
    """ Override factory in the base class to create a single
    instance of the Match class for the 'match' key. We do this
    as each match field may only exist once.

    Actions are trickier as keys here are not unique. When
    multiple values are present, """
    try:
        cm = CLASS_MAP[cls.__name__]
        for key in data:
            if key == 'match':
                new_match = {}
                for d in data[key]:
                    for k in d:
                        new_match[k] = d[k]
                data[key] = JsonObjectFactory.create('Match', new_match)
            elif key == 'actions':
                new_action = {}
                keys = []
                for d in data[key]:
                    keys.extend([(k, v) for k, v in d.items()])
                num_keys = range(len(keys))
                duplicates = {}
                for i in num_keys:
                    key_name = keys[i][0]
                    if key_name in duplicates:
                        duplicates[key_name].append(i)
                    else:
                        duplicates[key_name] = [i]
                for k, v in duplicates.items():
                    if len(v) > 1:
                        new_action[k] = [keys[i][1] for i in v]
                    else:
                        new_action[k] = keys[i][1]
                data[key] = JsonObjectFactory.create('Action', new_action)
            elif key in cm and isinstance(data[key], list):
                l = []
                for d in data[key]:
                    l.append(JsonObjectFactory.create(cm[key], d))
                data[key] = l
            elif key in cm:
                data[key] = JsonObjectFactory.create(cm[key], data[key])
    except __HOLE__:
        pass
    return cls(**data)
KeyError
dataset/ETHPy150Open dave-tucker/hp-sdn-client/hpsdnclient/datatypes.py/Flow.factory
6,336
def _load_rsa_private_key(pem):
    """PEM encoded PKCS#8 private key -> rsa.PrivateKey."""
    # ADB uses private RSA keys in pkcs#8 format. 'rsa' library doesn't support
    # them natively. Do some ASN unwrapping to extract naked RSA key
    # (in der-encoded form). See https://www.ietf.org/rfc/rfc2313.txt.
    # Also http://superuser.com/a/606266.
    try:
        der = rsa.pem.load_pem(pem, 'PRIVATE KEY')
        keyinfo, _ = decoder.decode(der)
        if keyinfo[1][0] != univ.ObjectIdentifier(
                '1.2.840.113549.1.1.1'):  # pragma: no cover
            raise ValueError('Not a DER-encoded OpenSSL private RSA key')
        private_key_der = keyinfo[2].asOctets()
    except __HOLE__:  # pragma: no cover
        raise ValueError('Not a DER-encoded OpenSSL private RSA key')
    return rsa.PrivateKey.load_pkcs1(private_key_der, format='DER')
IndexError
dataset/ETHPy150Open google/python-adb/adb/sign_pythonrsa.py/_load_rsa_private_key
6,337
def _get_geom_value(self, col, doc):
    """For a given geospatial column name, return the appropriate
    representation given a document"""
    # TODO: This is information model dependent. Could extract somewhere
    res = None
    try:
        if col == "geom":  # Resource center point (POINT type)
            if "geospatial_point_center" in doc:
                lat, lon = doc["geospatial_point_center"].get("lat", 0), doc["geospatial_point_center"].get("lon", 0)
                if lat != lon != 0:
                    res = "POINT(%s %s)" % (lon, lat)  # x,y
        elif col == "geom_loc":  # Resource bounding box (POLYGON shape, 2D)
            geoc = None
            if "geospatial_bounds" in doc:
                geoc = doc["geospatial_bounds"]
            elif "constraint_list" in doc:
                # Find the first one - alternatively could expand a bbox
                for cons in doc["constraint_list"]:
                    if isinstance(cons, dict) and cons.get("type_", None) == "GeospatialBounds":
                        geoc = cons
                        break
            if geoc:
                try:
                    geovals = dict(x1=float(geoc["geospatial_longitude_limit_west"]),
                                   y1=float(geoc["geospatial_latitude_limit_south"]),
                                   x2=float(geoc["geospatial_longitude_limit_east"]),
                                   y2=float(geoc["geospatial_latitude_limit_north"]))
                    if any((geovals["x1"], geovals["x2"], geovals["y1"], geovals["y2"])):
                        res = ("POLYGON((%(x1)s %(y1)s, %(x2)s %(y1)s, %(x2)s %(y2)s, "
                               "%(x1)s %(y2)s, %(x1)s %(y1)s))") % geovals
                except __HOLE__ as ve:
                    log.warn("GeospatialBounds location values not parseable %s: %s", geoc, ve)
        if res:
            log.debug("Geospatial column %s value: %s", col, res)
    except Exception as ex:
        log.warn("Could not compute value for geospatial column %s: %s", col, ex)
    return res
ValueError
dataset/ETHPy150Open ooici/pyon/pyon/datastore/postgresql/base_store.py/PostgresDataStore._get_geom_value
6,338
def _get_range_value(self, col, doc):
    """For a given range column name, return the appropriate
    representation given a document"""
    # TODO: This is information model dependent. Could extract somewhere
    res = None
    try:
        if col == "vertical_range":  # Resource vertical intent (NUMRANGE)
            geoc = None
            if "geospatial_bounds" in doc:
                geoc = doc["geospatial_bounds"]
            elif "constraint_list" in doc:
                # Find the first one - alternatively could expand a bbox
                for cons in doc["constraint_list"]:
                    if isinstance(cons, dict) and cons.get("type_", None) == "GeospatialBounds":
                        geoc = cons
                        break
            if geoc:
                try:
                    geovals = dict(z1=float(geoc["geospatial_vertical_min"]),
                                   z2=float(geoc["geospatial_vertical_max"]))
                    if any((geovals["z1"], geovals["z2"])):
                        res = "[%s, %s]" % (geovals["z1"], geovals["z2"])
                except __HOLE__ as ve:
                    log.warn("GeospatialBounds vertical values not parseable %s: %s", geoc, ve)
        elif col == "temporal_range":  # Resource temporal intent (NUMRANGE)
            tempc = None
            if "nominal_datetime" in doc:
                # Case for DataProduct resources
                tempc = doc["nominal_datetime"]
            elif "constraint_list" in doc:
                # Case for Deployment resources
                # Find the first one - alternatively could expand a bbox
                for cons in doc["constraint_list"]:
                    if isinstance(cons, dict) and cons.get("type_", None) == "TemporalBounds":
                        tempc = cons
                        break
            #elif "ts_created" in doc and "ts_updated" in doc:
            #    # All other resources.
            #    # Values are in seconds float since epoch
            #    tempc = dict(start_datetime=parse_ion_ts(doc["ts_created"]),
            #                 end_datetime=parse_ion_ts(doc["ts_updated"]))
            if tempc and tempc["start_datetime"] and tempc["end_datetime"]:
                try:
                    geovals = dict(t1=float(tempc["start_datetime"]),
                                   t2=float(tempc["end_datetime"]))
                    if any((geovals["t1"], geovals["t2"])):
                        res = "[%s, %s]" % (geovals["t1"], geovals["t2"])
                except ValueError as ve:
                    log.warn("TemporalBounds values not parseable %s: %s", tempc, ve)
        if res:
            log.debug("Numrange column %s value: %s", col, res)
    except Exception as ex:
        log.warn("Could not compute value for numrange column %s: %s", col, ex)
    return res
ValueError
dataset/ETHPy150Open ooici/pyon/pyon/datastore/postgresql/base_store.py/PostgresDataStore._get_range_value
6,339
def test_illegal_directory(self):
    logging.debug('')
    logging.debug('test_illegal_directory')
    try:
        # Set an illegal execution directory, verify error.
        comp = Component()
        comp.directory = '/illegal'
        comp.cpath_updated()
    except __HOLE__, exc:
        msg = ": Illegal path '/illegal', not a descendant of"
        self.assertEqual(str(exc)[:len(msg)], msg)
    else:
        self.fail('Expected ValueError')
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_component.py/TestCase.test_illegal_directory
6,340
def test_protected_directory(self):
    if sys.platform == 'win32':
        raise SkipTest("Windows box has permission problems with this test")
    logging.debug('')
    logging.debug('test_protected_directory')
    # Create a protected directory.
    directory = 'protected'
    if os.path.exists(directory):
        os.rmdir(directory)
    os.mkdir(directory)
    os.chmod(directory, 0)
    exe_dir = os.path.join(directory, 'xyzzy')
    try:
        # Attempt auto-creation of execution directory in protected area.
        comp = Component()
        comp.directory = exe_dir
        comp.cpath_updated()
    except __HOLE__, exc:
        msg = ": Can't create execution directory"
        self.assertEqual(str(exc)[:len(msg)], msg)
    else:
        self.fail('Expected OSError')
    finally:
        os.chmod(directory, stat.S_IWUSR | stat.S_IWRITE | stat.S_IREAD)
        os.rmdir(directory)
OSError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_component.py/TestCase.test_protected_directory
6,341
def test_file_in_place_of_directory(self):
    logging.debug('')
    logging.debug('test_file_in_place_of_directory')
    # Create a plain file.
    directory = 'plain_file'
    if os.path.exists(directory):
        os.remove(directory)
    out = open(directory, 'w')
    out.write('Hello world!\n')
    out.close()
    try:
        # Set execution directory to plain file.
        comp = Component()
        comp.directory = directory
        comp.cpath_updated()
    except __HOLE__, exc:
        path = os.path.join(os.getcwd(), directory)
        if sys.platform == 'win32':
            path = path.lower()
        self.assertEqual(str(exc),
                         ": Execution directory path '%s' is not a directory." % path)
    else:
        self.fail('Expected ValueError')
    finally:
        os.remove(directory)
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_component.py/TestCase.test_file_in_place_of_directory
6,342
def test_bad_new_directory(self):
    logging.debug('')
    logging.debug('test_bad_new_directory')
    comp = Component()
    comp.directory = '/illegal'
    try:
        comp.run()
    except __HOLE__, exc:
        msg = ": Illegal path '/illegal', not a descendant of"
        self.assertEqual(str(exc)[:len(msg)], msg)
    else:
        self.fail('Expected ValueError')
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_component.py/TestCase.test_bad_new_directory
6,343
def test_execute(self):
    comp = Component()
    try:
        comp.execute()
    except __HOLE__ as err:
        self.assertEqual(str(err), ".execute")
    else:
        self.fail('expected NotImplementedError')
NotImplementedError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_component.py/TestCase.test_execute
6,344
def test_run(self):
    comp = Component()
    try:
        comp.run()
    except __HOLE__ as err:
        self.assertEqual(str(err), ".execute")
    else:
        self.fail('expected NotImplementedError')
NotImplementedError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_component.py/TestCase.test_run
6,345
def _set(request, name, value):
    profile = getProfile(request)
    try:
        variable = profile.variable_set.get(name=name)
        variable.value = value
    except __HOLE__:
        variable = Variable(profile=profile, name=name, value=value)
    variable.save()
    return ''
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_set
6,346
def _unset(request, name):
    profile = getProfile(request)
    try:
        variable = profile.variable_set.get(name=name)
        variable.delete()
    except __HOLE__:
        return stderr("Unknown variable %s" % name)
    return ''
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_unset
6,347
def _dosave(request, viewName):
    profile = getProfile(request)
    # First find our View
    log.info("Saving view '%s' under profile '%s'" % (viewName, profile.user.username))
    try:
        view = profile.view_set.get(name=viewName)
    except __HOLE__:
        view = View(profile=profile, name=viewName)
        view.save()
    # Now re-associate the view with the correct Windows
    view.window_set.all().delete()
    for windowName, encodedString in request.GET.items():
        try:
            if windowName in ('_', 'commandInput'):
                continue
            paramString = urllib.unquote_plus(encodedString)
            queryParams = cgi.parse_qs(paramString)
            modelParams = {}
            for key, value in queryParams.items():
                # Clean up the window params
                key = str(key)
                value = str(value[0])
                if key in ('top', 'left'):
                    value = int(float(value.replace('px', '')))
                if key in ('width', 'height', 'interval'):
                    value = int(float(value))
                modelParams[key] = value
            if 'interval' not in modelParams:
                modelParams['interval'] = None
            win = Window(view=view, name=windowName, **modelParams)
            win.save()
        except:
            log.exception("Failed to process parameters for window '%s'" % windowName)
    return stdout('Saved view %s' % viewName)
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_dosave
6,348
def _load(request, viewName, above=None):
    if above:
        out = stdout("Loading view %s above the current view" % viewName)
    else:
        out = stdout("Loading view %s" % viewName)
    profile = getProfile(request)
    try:
        view = profile.view_set.get(name=viewName)
    except __HOLE__:
        return stderr("Unknown view %s" % viewName)
    if not above:
        out += "Windows.windows.each( function(w) {w.destroy();} );"
    for window in view.window_set.all():
        out += _create(request, window.name)
        out += "win = %s_win;" % window.name
        out += "$('%s_img').src = '%s';" % (window.name, window.url)
        out += "win.show();"
        out += "win.setLocation(%d,%d);" % (window.top, window.left)
        out += "win.setSize(%d,%d);" % (window.width, window.height)
        if window.interval:
            out += "window.%s_interval = %d;" % (window.name, window.interval)
            out += "window.%s_timer = setTimeout('window.%s_redraw()', window.%s_interval);" % ((window.name,) * 3)
    return out
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_load
6,349
def _dogsave(request, graphName):
    profile = getProfile(request, allowDefault=False)
    if not profile:
        return stderr("You must be logged in to save graphs")
    url = request.GET.get('url')
    if not url:
        return stderr("No url specified!")
    try:
        existingGraph = profile.mygraph_set.get(name=graphName)
        existingGraph.url = url
        existingGraph.save()
    except __HOLE__:
        try:
            newGraph = MyGraph(profile=profile, name=graphName, url=url)
            newGraph.save()
        except:
            log.exception("Failed to create new MyGraph in _dogsave(), graphName=%s" % graphName)
            return stderr("Failed to save graph %s" % graphName)
    return stdout("Saved graph %s" % graphName)
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_dogsave
6,350
def _gload(request, user=None, graphName=None):
    if not user:
        profile = getProfile(request, allowDefault=False)
        if not profile:
            return stderr("You are not logged in so you must specify a username")
    else:
        try:
            profile = getProfileByUsername(user)
        except ObjectDoesNotExist:
            return stderr("User does not exist")
    try:
        myGraph = profile.mygraph_set.get(name=graphName)
    except __HOLE__:
        return stderr("Graph does not exist")
    out = _create(request, myGraph.name)
    out += "changeImage(%s_win,'%s');\n" % (myGraph.name.replace('.', '_'), myGraph.url)
    return out
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_gload
6,351
def _graphs(request, user=None):
    if not user:
        profile = getProfile(request, allowDefault=False)
        if not profile:
            return stderr("You are not logged in so you must specify a username")
    else:
        try:
            profile = getProfileByUsername(user)
        except __HOLE__:
            return stderr("User does not exist")
    out = ""
    if user:
        prefix = "~%s/" % user
    else:
        prefix = ""
    for graph in profile.mygraph_set.all():
        out += stdout(prefix + graph.name)
    return out
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_graphs
6,352
def _rmview(request, viewName):
    profile = getProfile(request)
    try:
        view = profile.view_set.get(name=viewName)
    except __HOLE__:
        return stderr("No such view '%s'" % viewName)
    view.delete()
    return stdout("Deleted view %s" % viewName)
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_rmview
6,353
def _rmgraph(request, graphName):
    profile = getProfile(request, allowDefault=False)
    try:
        graph = profile.mygraph_set.get(name=graphName)
    except __HOLE__:
        return stderr("No such graph %s" % graphName)
    graph.delete()
    return stdout("Deleted graph %s" % graphName)
ObjectDoesNotExist
dataset/ETHPy150Open tmm1/graphite/webapp/graphite/cli/commands.py/_rmgraph
6,354
def delete_current(self, *_args):
    """Deletes the currently set wallpaper."""
    if self.image_file != '':
        jpg_file = self.image_file
        inf_file = self.info_file
        self.next_photo()
        try:
            os.remove(jpg_file)
            os.remove(inf_file)
            banned = open(os.path.expanduser('~/.webilder/banned_photos'), 'a')
            banned.write(os.path.basename(jpg_file) + '\n')
            banned.close()
        except __HOLE__:
            pass
    else:
        self.next_photo()
IOError
dataset/ETHPy150Open thesamet/webilder/src/webilder/base_applet.py/BaseApplet.delete_current
6,355
def pick_in_list(list_name, obj_list):
    """Generic function to ask the user to choose from a list."""
    print('\n%ss available' % list_name)
    for num, i in enumerate(obj_list):
        print('\t%d) %s' % (num + 1, i.name))
    try:
        selected_num = raw_input('\nPlease select a %s number [1]: ' % list_name.lower())
        selected_num = int(selected_num) - 1
        selected = obj_list[selected_num]
    except (ValueError, __HOLE__):
        selected = obj_list[0]
    print('Using %s %s.' % (list_name.lower(), selected.name))
    return selected

# Check if the user has application credentials
IndexError
dataset/ETHPy150Open runabove/python-runabove/examples/walkthrough/walkthrough.py/pick_in_list
6,356
@access.user
@describeRoute(
    Description('Start a new upload or create an empty or link file.')
    .responseClass('Upload')
    .param('parentType', 'Type being uploaded into (folder or item).')
    .param('parentId', 'The ID of the parent.')
    .param('name', 'Name of the file being created.')
    .param('size', 'Size in bytes of the file.', dataType='integer',
           required=False)
    .param('mimeType', 'The MIME type of the file.', required=False)
    .param('linkUrl', 'If this is a link file, pass its URL instead '
           'of size and mimeType using this parameter.', required=False)
    .param('reference', 'If included, this information is passed to the '
           'data.process event when the upload is complete.', required=False)
    .errorResponse()
    .errorResponse('Write access was denied on the parent folder.', 403)
    .errorResponse('Failed to create upload.', 500)
)
def initUpload(self, params):
    """
    Before any bytes of the actual file are sent, a request should be made
    to initialize the upload. This creates the temporary record of the
    forthcoming upload that will be passed in chunks to the readChunk
    method. If you pass a "linkUrl" parameter, it will make a link file
    in the designated parent.
    """
    self.requireParams(('name', 'parentId', 'parentType'), params)
    user = self.getCurrentUser()
    mimeType = params.get('mimeType', 'application/octet-stream')
    parentType = params['parentType'].lower()
    if parentType not in ('folder', 'item'):
        raise RestException('The parentType must be "folder" or "item".')
    parent = self.model(parentType).load(id=params['parentId'], user=user,
                                         level=AccessType.WRITE, exc=True)
    if 'linkUrl' in params:
        return self.model('file').filter(
            self.model('file').createLinkFile(
                url=params['linkUrl'], parent=parent, name=params['name'],
                parentType=parentType, creator=user), user)
    else:
        self.requireParams('size', params)
        try:
            upload = self.model('upload').createUpload(
                user=user, name=params['name'], parentType=parentType,
                parent=parent, size=int(params['size']), mimeType=mimeType,
                reference=params.get('reference'))
        except __HOLE__ as exc:
            if exc.errno == errno.EACCES:
                raise GirderException(
                    'Failed to create upload.',
                    'girder.api.v1.file.create-upload-failed')
            raise
        if upload['size'] > 0:
            return upload
        else:
            return self.model('file').filter(
                self.model('upload').finalizeUpload(upload), user)
OSError
dataset/ETHPy150Open girder/girder/girder/api/v1/file.py/File.initUpload
6,357
@access.user
@describeRoute(
    Description('Upload a chunk of a file with multipart/form-data.')
    .consumes('multipart/form-data')
    .param('uploadId', 'The ID of the upload record.', paramType='form')
    .param('offset', 'Offset of the chunk in the file.', dataType='integer',
           paramType='form')
    .param('chunk', 'The actual bytes of the chunk. For external upload '
           'behaviors, this may be set to an opaque string that will be '
           'handled by the assetstore adapter.', dataType='File',
           paramType='body')
    .errorResponse('ID was invalid.')
    .errorResponse('Received too many bytes.')
    .errorResponse('Chunk is smaller than the minimum size.')
    .errorResponse('You are not the user who initiated the upload.', 403)
    .errorResponse('Failed to store upload.', 500)
)
def readChunk(self, params):
    """
    After the temporary upload record has been created (see initUpload),
    the bytes themselves should be passed up in ordered chunks. The user
    must remain logged in when passing each chunk, to authenticate that
    the writer of the chunk is the same as the person who initiated the
    upload. The passed offset is a verification mechanism for ensuring the
    server and client agree on the number of bytes sent/received.
    """
    self.requireParams(('offset', 'uploadId', 'chunk'), params)
    user = self.getCurrentUser()
    upload = self.model('upload').load(params['uploadId'], exc=True)
    offset = int(params['offset'])
    chunk = params['chunk']
    if upload['userId'] != user['_id']:
        raise AccessException('You did not initiate this upload.')
    if upload['received'] != offset:
        raise RestException(
            'Server has received %s bytes, but client sent offset %s.' % (
                upload['received'], offset))
    try:
        if isinstance(chunk, cherrypy._cpreqbody.Part):
            return self.model('upload').handleChunk(upload, chunk.file)
        else:
            return self.model('upload').handleChunk(upload, chunk)
    except __HOLE__ as exc:
        if exc.errno == errno.EACCES:
            raise Exception('Failed to store upload.')
        raise
IOError
dataset/ETHPy150Open girder/girder/girder/api/v1/file.py/File.readChunk
6,358
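A client that continues the flow above would send the file in ordered chunks, advancing its offset by exactly the bytes written so the server's received == offset check holds. A sketch, again with an assumed route path and chunk size:

import requests  # assumed available

CHUNK_SIZE = 1024 * 1024  # 1 MiB per request; purely illustrative

def send_chunks(session, api_root, upload_id, path):
    # Stream a local file as ordered chunks; the offset lets the server
    # verify that client and server agree on the bytes sent so far.
    offset = 0
    with open(path, "rb") as fh:
        while True:
            chunk = fh.read(CHUNK_SIZE)
            if not chunk:
                break
            resp = session.post(api_root + "/file/chunk",  # assumed route
                                params={"uploadId": upload_id, "offset": offset},
                                data=chunk)
            resp.raise_for_status()
            offset += len(chunk)
    return offset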
def setUp(self): import boto from boto.exception import NoAuthHandlerFound from boto.s3.key import Key keys = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY'] try: for k in keys: os.environ[k] self.bucket_name = os.environ.get('AWS_TEST_BUCKET', 'drf-to-s3-test') except __HOLE__: self.skipTest('To test s3, set %s in .env' % ' and '.join(keys)) conn = boto.connect_s3() bucket = conn.get_bucket(self.bucket_name) k = Key(bucket) k.key = "%s%s.txt" % (str(uuid.uuid4()), self.prefix) k.set_contents_from_string('This is a test of S3') self.existing_key = k.key self.existing_key_etag = k.etag self.bucket = bucket self.nonexisting_key = "%s%s.txt" % (str(uuid.uuid4()), self.prefix) self.new_key = None
KeyError
dataset/ETHPy150Open bodylabs/drf-to-s3/drf_to_s3/tests/test_s3.py/S3Test.setUp
6,359
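The setUp above gates a live-S3 test by indexing os.environ and turning the resulting KeyError into a skip rather than a failure. Stripped of boto, the pattern looks like this self-contained sketch (the class and variable names are examples):

import os
import unittest

class LiveServiceTest(unittest.TestCase):
    REQUIRED_KEYS = ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY')

    def setUp(self):
        try:
            # Indexing (not .get()) raises KeyError for a missing variable,
            # which we translate into a skipped test.
            self.creds = {k: os.environ[k] for k in self.REQUIRED_KEYS}
        except KeyError as missing:
            self.skipTest('set %s to run this test' % missing)

    def test_placeholder(self):
        self.assertTrue(self.creds)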
def value_to_string(self, obj): value = super(MultiStorageFileField, self).value_to_string(obj) if not filer_settings.FILER_DUMP_PAYLOAD: return value try: payload_file = BytesIO(self.storage.open(value).read()) sha = hashlib.sha1() sha.update(payload_file.read()) if sha.hexdigest() != obj.sha1: warnings.warn('The checksum for "%s" diverges. Check for file consistency!' % obj.original_filename) payload_file.seek(0) encoded_string = base64.b64encode(payload_file.read()).decode('utf-8') return value, encoded_string except __HOLE__: warnings.warn('The payload for "%s" is missing. No such file on disk: %s!' % (obj.original_filename, self.storage.location)) return value
IOError
dataset/ETHPy150Open divio/django-filer/filer/fields/multistorage_file.py/MultiStorageFileField.value_to_string
6,360
def to_python(self, value): if isinstance(value, list) and len(value) == 2 and isinstance(value[0], six.text_type): filename, payload = value try: payload = base64.b64decode(payload) except __HOLE__: pass else: if self.storage.exists(filename): self.storage.delete(filename) self.storage.save(filename, ContentFile(payload)) return filename return value
TypeError
dataset/ETHPy150Open divio/django-filer/filer/fields/multistorage_file.py/MultiStorageFileField.to_python
6,361
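to_python above speculatively base64-decodes the payload and treats a decode failure as "value is already a filename". The masked TypeError is the Python 2 failure mode of base64.b64decode; Python 3 raises binascii.Error instead. A Python 3 sketch of the probe (the except clause lists the Python 2 name for completeness):

import base64
import binascii

def maybe_decode_payload(payload):
    # Return decoded bytes when `payload` is valid base64, else None.
    try:
        # validate=True makes Python 3 reject non-alphabet characters
        # instead of silently dropping them.
        return base64.b64decode(payload, validate=True)
    except (TypeError, binascii.Error, ValueError):
        # TypeError: Python 2; binascii.Error (a ValueError): Python 3.
        return None

assert maybe_decode_payload('aGVsbG8=') == b'hello'
assert maybe_decode_payload('not base64!') is None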
def draw_content_line(self, line, row, window, highlight): if highlight: commit_color = self.INV_YELLOW code_color = self.INV_GREEN if line.current else self.INV_WHITE search_result_color = 0 else: commit_color = self.YELLOW code_color = self.GREEN if line.current else 0 search_result_color = self.INV_WHITE window.addstr(row, 0, line.sha[:7], commit_color) window.addstr(row, 7, '+ ' if line.current else ' ', code_color) cols = curses.COLS - 9 padded_line = line.line[:cols].rstrip().ljust(cols, ' ') window.addstr(row, 9, padded_line, code_color) if self.search_term: search_start = 0 try: while True: index = line.line.index(self.search_term, search_start) search_start = index + len(self.search_term) window.addstr(row, 9+index, self.search_term, search_result_color) except __HOLE__: pass
ValueError
dataset/ETHPy150Open georgebrock/git-browse/gitbrowse/browser.py/GitBrowser.draw_content_line
6,362
def get_encoding(name): """ Returns an Encoder object for the named encoding """ try: return __encodings[name.lower()]() except __HOLE__: try: msg = "Invalid encoding: '{}'".format(name) except: msg = "Invalid encoding" raise EncodingException(msg)
KeyError
dataset/ETHPy150Open ipfs/py-ipfs-api/ipfsApi/encoding.py/get_encoding
6,363
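get_encoding above is a registry lookup: a module-level dict maps lowercase names to encoder classes, and the masked KeyError becomes a domain-specific EncodingException. The idiom in miniature, with a placeholder encoder class standing in for the real ones:

class EncodingException(Exception):
    """Raised for unknown encoding names."""

class JsonEncoding(object):  # placeholder stand-in for a real encoder
    name = 'json'

_encodings = {'json': JsonEncoding}  # name -> encoder class

def get_encoding(name):
    try:
        return _encodings[name.lower()]()  # KeyError on unknown names
    except KeyError:
        raise EncodingException("Invalid encoding: '{}'".format(name))

assert isinstance(get_encoding('JSON'), JsonEncoding)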
def _validate_host_route(self, route, ip_version): try: netaddr.IPNetwork(route['destination']) netaddr.IPAddress(route['nexthop']) except netaddr.core.AddrFormatError: err_msg = _("Invalid route: %s") % route raise exc.InvalidInput(error_message=err_msg) except __HOLE__: # netaddr.IPAddress would raise this err_msg = _("Invalid route: %s") % route raise exc.InvalidInput(error_message=err_msg) self._validate_ip_version(ip_version, route['nexthop'], 'nexthop') self._validate_ip_version(ip_version, route['destination'], 'destination')
ValueError
dataset/ETHPy150Open openstack/neutron/neutron/db/db_base_plugin_v2.py/NeutronDbPluginV2._validate_host_route
6,364
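The validator above uses netaddr to reject malformed destinations (CIDRs) and next hops (bare addresses). The standard-library ipaddress module supports an equivalent check; the sketch below is an approximation of the logic, not Neutron's code:

import ipaddress

def validate_host_route(route):
    # Both constructors raise ValueError on malformed input, mirroring
    # the error paths handled above.
    ipaddress.ip_network(route['destination'], strict=False)  # CIDR check
    ipaddress.ip_address(route['nexthop'])                    # address check

validate_host_route({'destination': '10.0.0.0/24', 'nexthop': '10.0.0.1'})
# validate_host_route({'destination': '10.0.0.0/24', 'nexthop': 'bogus'})
# -> ValueError: 'bogus' does not appear to be an IPv4 or IPv6 address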
def _UploadMenu(self): """Prompts that enable a user to upload a file to the Document List feed.""" file_path = '' file_path = raw_input('Enter path to file: ') if not file_path: return elif not os.path.isfile(file_path): print 'Not a valid file.' return file_name = os.path.basename(file_path) ext = self._GetFileExtension(file_name) if not ext or ext not in gdata.docs.service.SUPPORTED_FILETYPES: print 'File type not supported. Check the file extension.' return else: content_type = gdata.docs.service.SUPPORTED_FILETYPES[ext] title = '' while not title: title = raw_input('Enter name for document: ') try: ms = gdata.MediaSource(file_path=file_path, content_type=content_type) except __HOLE__: print 'Problems reading file. Check permissions.' return if ext in ['CSV', 'ODS', 'XLS', 'XLSX']: print 'Uploading spreadsheet...' elif ext in ['PPT', 'PPS']: print 'Uploading presentation...' else: print 'Uploading word processor document...' entry = self.gd_client.Upload(ms, title) if entry: print 'Upload successful!' print 'Document now accessible at:', entry.GetAlternateLink().href else: print 'Upload error.'
IOError
dataset/ETHPy150Open kuri65536/python-for-android/python-build/python-libs/gdata/samples/docs/docs_example.py/DocsSample._UploadMenu
6,365
def _GetMenuChoice(self, max): """Retrieves the menu selection from the user. Args: max: [int] The maximum number of allowed choices (inclusive) Returns: The integer of the menu item chosen by the user. """ while True: input = raw_input('> ') try: num = int(input) except __HOLE__: print 'Invalid choice. Please choose a value between 1 and', max continue if num > max or num < 1: print 'Invalid choice. Please choose a value between 1 and', max else: return num
ValueError
dataset/ETHPy150Open kuri65536/python-for-android/python-build/python-libs/gdata/samples/docs/docs_example.py/DocsSample._GetMenuChoice
6,366
def Run(self):
    """Prompts the user to choose functionality to be demonstrated."""
    try:
      while True:
        self._PrintMenu()
        choice = self._GetMenuChoice(7)
        if choice == 1:
          self._ListDocuments()
        elif choice == 2:
          self._FullTextSearch()
        elif choice == 3:
          self._UploadMenu()
        elif choice == 4:
          self._DownloadMenu()
        elif choice == 5:
          self._ListAclPermissions()
        elif choice == 6:
          self._ModifyAclPermissions()
        elif choice == 7:
          print '\nGoodbye.'
          return
    except __HOLE__:
      print '\nGoodbye.'
      return
KeyboardInterrupt
dataset/ETHPy150Open kuri65536/python-for-android/python-build/python-libs/gdata/samples/docs/docs_example.py/DocsSample.Run
6,367
def force_text(s, encoding='utf-8', strings_only=False, errors='strict'): """ Similar to smart_text, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects. """ # Handle the common case first for performance reasons. if issubclass(type(s), six.text_type): return s if strings_only and is_protected_type(s): return s try: if not issubclass(type(s), six.string_types): if six.PY3: if isinstance(s, bytes): s = six.text_type(s, encoding, errors) else: s = six.text_type(s) elif hasattr(s, '__unicode__'): s = six.text_type(s) else: s = six.text_type(bytes(s), encoding, errors) else: # Note: We use .decode() here, instead of six.text_type(s, encoding, # errors), so that if s is a SafeBytes, it ends up being a # SafeText at the end. s = s.decode(encoding, errors) except __HOLE__ as e: if not isinstance(s, Exception): raise ValueError(s, *e.args) else: # If we get to here, the caller has passed in an Exception # subclass populated with non-ASCII bytestring data without a # working unicode method. Try to handle this without raising a # further exception by individually forcing the exception args # to unicode. s = ' '.join(force_text(arg, encoding, strings_only, errors) for arg in s) return s
UnicodeDecodeError
dataset/ETHPy150Open joke2k/faker/faker/utils/text.py/force_text
6,368
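Most of force_text above exists to bridge the Python 2 and 3 string models and to survive exception objects whose args contain undecodable bytes. On Python 3 alone, the core coercion reduces to a few lines; this sketch is a simplification, not a drop-in replacement:

def force_text_py3(value, encoding='utf-8', errors='strict'):
    # Coerce `value` to str: pass str through, decode bytes,
    # and stringify everything else.
    if isinstance(value, str):
        return value
    if isinstance(value, bytes):
        return value.decode(encoding, errors)  # may raise UnicodeDecodeError
    return str(value)

assert force_text_py3(b'caf\xc3\xa9') == 'café'
assert force_text_py3(42) == '42'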
def runtests(*test_args): if not settings.configured: settings.configure(**DEFAULT_SETTINGS) django.setup() parent = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, parent) try: from django.test.runner import DiscoverRunner runner_class = DiscoverRunner test_args = ["pinax.models.tests"] except __HOLE__: from django.test.simple import DjangoTestSuiteRunner runner_class = DjangoTestSuiteRunner test_args = ["tests"] failures = runner_class(verbosity=1, interactive=True, failfast=False).run_tests(test_args) sys.exit(failures)
ImportError
dataset/ETHPy150Open pinax/pinax-models/runtests.py/runtests
6,369
@mark.parametrize('test_case', [ [['rerun'], True], [['rerun', '-s', 'a_service'], True], [['rerun', '-s', 'a_service', '-i', 'an_instance'], False], [['rerun', '-s', 'a_service', '-i', 'an_instance', '-d', _user_supplied_execution_date], False], [['rerun', '-s', 'a_service', '-i', 'an_instance', '-d', 'not_a_date'], True], [['rerun', '-v', '-v', '-s', 'a_service', '-i', 'an_instance', '-d', _user_supplied_execution_date], False], ]) def test_rerun_argparse(test_case): argv, should_exit = test_case parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() add_subparser(subparsers) exited = False rc = None args = None try: args = parser.parse_args(argv) except __HOLE__ as sys_exit: exited = True rc = sys_exit.code assert exited == should_exit if should_exit: assert rc == 2 if args: if args.verbose: assert args.verbose == 2 # '-v' yields a verbosity level... if args.execution_date: assert isinstance(args.execution_date, datetime.datetime)
SystemExit
dataset/ETHPy150Open Yelp/paasta/tests/cli/test_cmds_rerun.py/test_rerun_argparse
6,370
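The parametrized test above leans on argparse calling sys.exit(2) for invalid command lines, which surfaces as SystemExit with code 2. A standalone demonstration of catching it:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-s', '--service', required=True)

def parse_or_rc(argv):
    # Return (namespace, None) on success or (None, exit_code) on failure.
    try:
        return parser.parse_args(argv), None
    except SystemExit as sys_exit:
        # argparse prints usage to stderr and exits with code 2 on bad input.
        return None, sys_exit.code

assert parse_or_rc(['-s', 'a_service'])[1] is None
assert parse_or_rc([])[1] == 2  # missing required argument -> exit code 2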
@override_settings() def test_get_helpers__no_setting(self): try: del settings.DAGUERRE_PREADJUSTMENTS except __HOLE__: pass preadjust = Preadjust() self.assertRaisesMessage(CommandError, NO_ADJUSTMENTS, preadjust._get_helpers)
AttributeError
dataset/ETHPy150Open littleweaver/django-daguerre/daguerre/tests/unit/test_management.py/PreadjustTestCase.test_get_helpers__no_setting
6,371
def _get_streams(self): match = _url_re.match(self.url) channel = match.group("channel") res = http.get(STREAM_INFO_URL.format(channel)) urls = http.xml(res, schema=_livestream_schema) streams = {} for (name, parser), url in urls.items(): try: streams.update(parser(self.session, url)) except __HOLE__ as err: self.logger.warning("Unable to extract {0} streams: {1}", name, err) return streams
IOError
dataset/ETHPy150Open chrippa/livestreamer/src/livestreamer/plugins/ard_live.py/ard_live._get_streams
6,372
def deserialize(self, raw_value): try: value = float(raw_value) except __HOLE__: raise DeserializationError("Unable to deserialize \"{}\" into float for \"{}\"!".format(raw_value, self.name), raw_value, self.name) else: return value
ValueError
dataset/ETHPy150Open GreatFruitOmsk/nativeconfig/nativeconfig/options/float_option.py/FloatOption.deserialize
6,373
def deserialize_json(self, json_value): try: value = json.loads(json_value) except __HOLE__: raise DeserializationError("Invalid JSON value for \"{}\": \"{}\"!".format(self.name, json_value), json_value, self.name) else: if value is not None: if not isinstance(value, float): raise DeserializationError("\"{}\" is not a JSON float!".format(json_value), json_value, self.name) else: return float(value) else: return None
ValueError
dataset/ETHPy150Open GreatFruitOmsk/nativeconfig/nativeconfig/options/float_option.py/FloatOption.deserialize_json
6,374
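Both FloatOption deserializers follow the same shape: attempt the parse, translate the library's ValueError into a domain DeserializationError carrying the raw value and option name, then type-check the result. Condensed into one self-contained sketch:

import json

class DeserializationError(Exception):
    def __init__(self, message, raw_value, name):
        super(DeserializationError, self).__init__(message)
        self.raw_value = raw_value
        self.name = name

def deserialize_json_float(name, json_value):
    # Parse a JSON document that must contain a float (or null).
    try:
        value = json.loads(json_value)
    except ValueError:  # json.JSONDecodeError subclasses ValueError
        raise DeserializationError(
            'Invalid JSON value for "{}": "{}"!'.format(name, json_value),
            json_value, name)
    if value is None:
        return None
    if not isinstance(value, float):
        raise DeserializationError(
            '"{}" is not a JSON float!'.format(json_value), json_value, name)
    return value

assert deserialize_json_float('ratio', '0.5') == 0.5
assert deserialize_json_float('ratio', 'null') is None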
def get_cpu_cores_per_run(coreLimit, num_of_threads, my_cgroups):
    """
    Calculate an assignment of the available CPU cores to a number
    of parallel benchmark executions such that each run gets its own cores
    without overlapping of cores between runs.
    In case the machine has hyper-threading, this method tries to avoid
    putting two different runs on the same physical core
    (but it does not guarantee this if the number of parallel runs is too high to avoid it).
    In case the machine has multiple CPUs, this method avoids
    splitting a run across multiple CPUs if the number of cores per run
    is lower than the number of cores per CPU
    (splitting a run over multiple CPUs provides worse performance).
    It will also try to split the runs evenly across all available CPUs.

    A few theoretically-possible cases are not implemented,
    for example assigning three 10-core runs on a machine
    with two 16-core CPUs (this would have unfair core assignment
    and thus undesirable performance characteristics anyway).

    The list of available cores is read from the cgroup file system,
    such that the assigned cores are a subset of the cores
    that the current process is allowed to use.
    This script does not currently support situations
    where the available cores are asymmetrically split over CPUs,
    e.g. 3 cores on one CPU and 5 on another.

    @param coreLimit: the number of cores for each run
    @param num_of_threads: the number of parallel benchmark executions
    @return a list of lists, where each inner list contains the cores for one run
    """
    try:
        # read list of available CPU cores
        allCpus = util.parse_int_list(my_cgroups.get_value(cgroups.CPUSET, 'cpus'))
        logging.debug("List of available CPU cores is %s.", allCpus)

        # read mapping of core to CPU ("physical package")
        physical_packages = [int(util.read_file('/sys/devices/system/cpu/cpu{0}/topology/physical_package_id'.format(core)))
                             for core in allCpus]
        cores_of_package = collections.defaultdict(list)
        for core, package in zip(allCpus, physical_packages):
            cores_of_package[package].append(core)
        logging.debug("Physical packages of cores are %s.", cores_of_package)

        # read hyper-threading information (sibling cores sharing the same physical core)
        siblings_of_core = {}
        for core in allCpus:
            siblings = util.parse_int_list(util.read_file('/sys/devices/system/cpu/cpu{0}/topology/thread_siblings_list'.format(core)))
            siblings_of_core[core] = siblings
        logging.debug("Siblings of cores are %s.", siblings_of_core)
    except __HOLE__ as e:
        sys.exit("Could not read CPU information from kernel: {0}".format(e))

    return _get_cpu_cores_per_run0(coreLimit, num_of_threads, allCpus, cores_of_package, siblings_of_core)
ValueError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/resources.py/get_cpu_cores_per_run
6,375
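util.parse_int_list above expands the kernel's cpuset list syntax (e.g. '0-3,8,10-11' as read from /sys or cgroup files) into individual core numbers. A stand-alone equivalent of that helper might look like this sketch:

def parse_int_list(text):
    # Expand a cpuset-style list such as '0-3,8,10-11' into ints;
    # ranges are inclusive on both ends.
    result = []
    for part in text.strip().split(','):
        if '-' in part:
            lo, hi = part.split('-', 1)
            result.extend(range(int(lo), int(hi) + 1))
        elif part:  # skip empty fields (e.g. from a trailing newline)
            result.append(int(part))
    return result

assert parse_int_list('0-3,8,10-11\n') == [0, 1, 2, 3, 8, 10, 11]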
def get_memory_banks_per_run(coreAssignment, cgroups): """Get an assignment of memory banks to runs that fits to the given coreAssignment, i.e., no run is allowed to use memory that is not local (on the same NUMA node) to one of its CPU cores.""" try: # read list of available memory banks allMems = set(cgroups.read_allowed_memory_banks()) result = [] for cores in coreAssignment: mems = set() for core in cores: coreDir = '/sys/devices/system/cpu/cpu{0}/'.format(core) mems.update(_get_memory_banks_listed_in_dir(coreDir)) allowedMems = sorted(mems.intersection(allMems)) logging.debug("Memory banks for cores %s are %s, of which we can use %s.", cores, list(mems), allowedMems) result.append(allowedMems) assert len(result) == len(coreAssignment) if any(result) and os.path.isdir('/sys/devices/system/node/'): return result else: # All runs get the empty list of memory regions # because this system has no NUMA support return None except __HOLE__ as e: sys.exit("Could not read memory information from kernel: {0}".format(e))
ValueError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/resources.py/get_memory_banks_per_run
6,376
def check_memory_size(memLimit, num_of_threads, memoryAssignment, my_cgroups): """Check whether the desired amount of parallel benchmarks fits in the memory. Implemented are checks for memory limits via cgroup controller "memory" and memory bank restrictions via cgroup controller "cpuset", as well as whether the system actually has enough memory installed. @param memLimit: the memory limit in bytes per run @param num_of_threads: the number of parallel benchmark executions @param memoryAssignment: the allocation of memory banks to runs (if not present, all banks are assigned to all runs) """ try: # Check amount of memory allowed via cgroups. def check_limit(actualLimit): if actualLimit < memLimit: sys.exit("Cgroups allow only {} bytes of memory to be used, cannot execute runs with {} bytes of memory.".format(actualLimit, memLimit)) elif actualLimit < memLimit * num_of_threads: sys.exit("Cgroups allow only {} bytes of memory to be used, not enough for {} benchmarks with {} bytes each. Please reduce the number of threads".format(actualLimit, num_of_threads, memLimit)) if not os.path.isdir('/sys/devices/system/node/'): logging.debug("System without NUMA support in Linux kernel, ignoring memory assignment.") return if cgroups.MEMORY in my_cgroups: # We use the entries hierarchical_*_limit in memory.stat and not memory.*limit_in_bytes # because the former may be lower if memory.use_hierarchy is enabled. for key, value in my_cgroups.get_key_value_pairs(cgroups.MEMORY, 'stat'): if key == 'hierarchical_memory_limit' or key == 'hierarchical_memsw_limit': check_limit(int(value)) # Get list of all memory banks, either from memory assignment or from system. if not memoryAssignment: if cgroups.CPUSET in my_cgroups: allMems = my_cgroups.read_allowed_memory_banks() else: allMems = _get_memory_banks_listed_in_dir('/sys/devices/system/node/') memoryAssignment = [allMems] * num_of_threads # "fake" memory assignment: all threads on all banks else: allMems = set(itertools.chain(*memoryAssignment)) memSizes = dict((mem, _get_memory_bank_size(mem)) for mem in allMems) except __HOLE__ as e: sys.exit("Could not read memory information from kernel: {0}".format(e)) # Check whether enough memory is allocatable on the assigned memory banks. # As the sum of the sizes of the memory banks is at most the total size of memory in the system, # and we do this check always even if the banks are not restricted, # this also checks whether the system has actually enough memory installed. usedMem = collections.Counter() for mems_of_run in memoryAssignment: totalSize = sum(memSizes[mem] for mem in mems_of_run) if totalSize < memLimit: sys.exit("Memory banks {} do not have enough memory for one run, only {} bytes available.".format(mems_of_run, totalSize)) usedMem[tuple(mems_of_run)] += memLimit if usedMem[tuple(mems_of_run)] > totalSize: sys.exit("Memory banks {} do not have enough memory for all runs, only {} bytes available. Please reduce the number of threads.".format(mems_of_run, totalSize))
ValueError
dataset/ETHPy150Open sosy-lab/benchexec/benchexec/resources.py/check_memory_size
6,377
def setup_module(module): from nose import SkipTest try: import numpy except __HOLE__: raise SkipTest("numpy is required for nltk.metrics.segmentation")
ImportError
dataset/ETHPy150Open nltk/nltk/nltk/metrics/segmentation.py/setup_module
6,378
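setup_module above is the nose hook for skipping an entire test module when an optional dependency is missing. pytest provides the same behaviour through importorskip; a sketch of the pytest equivalent:

import pytest  # assumed test runner

# Skips every test in this module unless numpy is importable.
numpy = pytest.importorskip('numpy')

def test_uses_numpy():
    assert numpy.zeros(3).sum() == 0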
def test_fork(self): # Test using a client before and after a fork. if sys.platform == "win32": raise SkipTest("Can't fork on windows") try: import multiprocessing except __HOLE__: raise SkipTest("No multiprocessing module") db = self.client.pymongo_test # Ensure a socket is opened before the fork. db.test.find_one() def f(pipe): try: kill_cursors_executor = self.client._kill_cursors_executor servers = self.client._topology.select_servers( any_server_selector) # In child, only the thread that called fork() is alive. # The first operation should revive the rest. db.test.find_one() wait_until( lambda: all(s._monitor._executor._thread.is_alive() for s in servers), "restart monitor threads") wait_until(lambda: kill_cursors_executor._thread.is_alive(), "restart kill-cursors executor") except: traceback.print_exc() # Aid debugging. pipe.send(True) parent_pipe, child_pipe = multiprocessing.Pipe() p = multiprocessing.Process(target=f, args=(child_pipe,)) p.start() p.join(10) child_pipe.close() # Pipe will only have data if the child process failed. try: parent_pipe.recv() self.fail() except EOFError: pass
ImportError
dataset/ETHPy150Open mongodb/mongo-python-driver/test/test_client.py/TestClient.test_fork
6,379
def test_interrupt_signal(self): if sys.platform.startswith('java'): # We can't figure out how to raise an exception on a thread that's # blocked on a socket, whether that's the main thread or a worker, # without simply killing the whole thread in Jython. This suggests # PYTHON-294 can't actually occur in Jython. raise SkipTest("Can't test interrupts in Jython") # Test fix for PYTHON-294 -- make sure MongoClient closes its # socket if it gets an interrupt while waiting to recv() from it. db = self.client.pymongo_test # A $where clause which takes 1.5 sec to execute where = delay(1.5) # Need exactly 1 document so find() will execute its $where clause once db.drop_collection('foo') db.foo.insert_one({'_id': 1}) def interrupter(): # Raises KeyboardInterrupt in the main thread time.sleep(0.25) thread.interrupt_main() thread.start_new_thread(interrupter, ()) raised = False try: # Will be interrupted by a KeyboardInterrupt. next(db.foo.find({'$where': where})) except __HOLE__: raised = True # Can't use self.assertRaises() because it doesn't catch system # exceptions self.assertTrue(raised, "Didn't raise expected KeyboardInterrupt") # Raises AssertionError due to PYTHON-294 -- Mongo's response to the # previous find() is still waiting to be read on the socket, so the # request id's don't match. self.assertEqual( {'_id': 1}, next(db.foo.find()) )
KeyboardInterrupt
dataset/ETHPy150Open mongodb/mongo-python-driver/test/test_client.py/TestClient.test_interrupt_signal
6,380
def backwards(self, orm): # Convert all fields back to the single-language table. for entry in orm['fluent_blogs.Entry'].objects.all(): translations = orm['fluent_blogs.Entry_Translation'].objects.filter(master_id=entry.id) try: # Try default translation translation = translations.get(language_code=appsettings.FLUENT_BLOGS_DEFAULT_LANGUAGE_CODE) except __HOLE__: try: # Try internal fallback translation = translations.get(language_code__in=('en-us', 'en')) except ObjectDoesNotExist: # Hope there is a single translation translation = translations.get() entry.title = translation.title entry.slug = translation.slug entry.intro = translation.intro entry.save() # As intended: doesn't call Entry.save() but Model.save() only.
ObjectDoesNotExist
dataset/ETHPy150Open edoburu/django-fluent-blogs/fluent_blogs/south_migrations/0004_migrate_translated_fields.py/Migration.backwards
6,381
def take_action(self, options):
    if not options.settings:
        raise RuntimeError("To start the server, the --settings parameter"
                           " is mandatory!")

    try:
        settings_cls = load_class(options.settings)
    except __HOLE__:
        raise RuntimeError("Cannot load settings class: {0}".format(options.settings))

    from webtools.application import Application
    app = Application(settings_cls())
    app.listen(8888)  # TODO: parametrize this

    import tornado.ioloop
    print("Listening on :{0}".format(8888))
    tornado.ioloop.IOLoop.instance().start()
ImportError
dataset/ETHPy150Open niwinz/tornado-webtools/webtools/management/commands/runserver.py/RunserverCommand.take_action
6,382
def construct_pipeline(cfg):
    """Construct the pipeline steps for sklearn.pipeline.Pipeline.

    Construct the pipeline steps for sklearn.pipeline.Pipeline
    and return the steps and the parameter to optimize
    (e.g. "logit__C" for the parameter "C" of the step named "logit").

    Parameters:
    -----------
    cfg : dict,
        method configuration describing the steps of a pipelined estimator
    """
    steps = cfg["steps"]
    try:
        est_param = cfg["est_param"]
    except __HOLE__:
        est_param = None
    pipe = []
    for step in steps:
        pipe.append(_get_step(step))
    return {'steps': pipe}, est_param
KeyError
dataset/ETHPy150Open BenoitDamota/mempamal/mempamal/dynamic.py/construct_pipeline
6,383
def __init__(self, attrs=None): defaults = {'style': 'width: 75px; text-align: right'} try: self.currency_code = attrs.pop('currency_code') defaults.update(attrs) except (KeyError, __HOLE__): raise ValueError("MoneyFieldWidget must be instantiated with a currency_code.") super(MoneyFieldWidget, self).__init__(defaults)
TypeError
dataset/ETHPy150Open awesto/django-shop/shop/money/fields.py/MoneyFieldWidget.__init__
6,384
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
    """
    Compute the bi-dimensional histogram of two data samples.

    Parameters
    ----------
    x : array_like, shape(N,)
        A sequence of values to be histogrammed along the first dimension.
    y : array_like, shape(M,)
        A sequence of values to be histogrammed along the second dimension.
    bins : int or [int, int] or array_like or [array, array], optional
        The bin specification:

          * If int, the number of bins for the two dimensions (nx=ny=bins).
          * If [int, int], the number of bins in each dimension
            (nx, ny = bins).
          * If array_like, the bin edges for the two dimensions
            (x_edges=y_edges=bins).
          * If [array, array], the bin edges in each dimension
            (x_edges, y_edges = bins).

    range : array_like, shape(2,2), optional
        The leftmost and rightmost edges of the bins along each dimension
        (if not specified explicitly in the `bins` parameters):
        ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
        will be considered outliers and not tallied in the histogram.
    normed : bool, optional
        If False, returns the number of samples in each bin. If True,
        returns the bin density, i.e. the bin count divided by the bin area.
    weights : array_like, shape(N,), optional
        An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
        Weights are normalized to 1 if `normed` is True. If `normed` is
        False, the values of the returned histogram are equal to the sum of
        the weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray, shape(nx, ny)
        The bi-dimensional histogram of samples `x` and `y`. Values in `x`
        are histogrammed along the first dimension and values in `y` are
        histogrammed along the second dimension.
    xedges : ndarray, shape(nx,)
        The bin edges along the first dimension.
    yedges : ndarray, shape(ny,)
        The bin edges along the second dimension.

    See Also
    --------
    histogram: 1D histogram
    histogramdd: Multidimensional histogram

    Notes
    -----
    When `normed` is True, then the returned histogram is the sample
    density, defined such that:

    .. math:: \\sum_{i=0}^{nx-1} \\sum_{j=0}^{ny-1} H_{i,j} \\Delta x_i \\Delta y_j = 1

    where `H` is the histogram array and :math:`\\Delta x_i \\Delta y_j`
    the area of bin `{i,j}`.

    Please note that the histogram does not follow the Cartesian convention
    where `x` values are on the abscissa and `y` values on the ordinate
    axis. Rather, `x` is histogrammed along the first dimension of the
    array (vertical), and `y` along the second dimension of the array
    (horizontal). This ensures compatibility with `histogramdd`.

    Examples
    --------
    >>> x, y = np.random.randn(2, 100)
    >>> H, xedges, yedges = np.histogram2d(x, y, bins=(5, 8))
    >>> H.shape, xedges.shape, yedges.shape
    ((5, 8), (6,), (9,))

    """
    from numpy import histogramdd

    try:
        N = len(bins)
    except __HOLE__:
        N = 1

    if N != 1 and N != 2:
        xedges = yedges = asarray(bins, float)
        bins = [xedges, yedges]
    hist, edges = histogramdd([x, y], bins, range, normed, weights)
    return hist, edges[0], edges[1]
TypeError
dataset/ETHPy150Open nipy/nitime/nitime/index_utils.py/histogram2d
6,385
def _setup_config(self, dist, filename, section, vars, verbosity): """ Called to setup an application, given its configuration file/directory. The default implementation calls ``package.websetup.setup_config(command, filename, section, vars)`` or ``package.websetup.setup_app(command, config, vars)`` With ``setup_app`` the ``config`` object is a dictionary with the extra attributes ``global_conf``, ``local_conf`` and ``filename`` """ modules = [line.strip() for line in dist.get_metadata_lines('top_level.txt') if line.strip() and not line.strip().startswith('#')] if not modules: print('No modules are listed in top_level.txt') print('Try running python setup.py egg_info to regenerate that file') for mod_name in modules: mod_name = mod_name + '.websetup' try: mod = self._import_module(mod_name) except __HOLE__ as e: print(e) desc = getattr(e, 'args', ['No module named websetup'])[0] if not desc.startswith('No module named websetup'): raise mod = None if mod is None: continue if hasattr(mod, 'setup_app'): if verbosity: print('Running setup_app() from %s' % mod_name) self._call_setup_app(mod.setup_app, filename, section, vars) elif hasattr(mod, 'setup_config'): if verbosity: print('Running setup_config() from %s' % mod_name) mod.setup_config(None, filename, section, vars) else: print('No setup_app() or setup_config() function in %s (%s)' % (mod.__name__, mod.__file__))
ImportError
dataset/ETHPy150Open TurboGears/gearbox/gearbox/commands/setup_app.py/SetupAppCommand._setup_config
6,386
def __init__(self, conf): super(SettingsDialog, self).__init__() self.ui = Ui_SettingsDialog() self.ui.setupUi(self) self.conf = conf self.setWindowTitle('Settings') # try: # self.ui.conf_notesLocation.setText(self.conf['conf_notesLocation']) # except KeyError: # pass try: self.ui.tbl_notesLocations.blockSignals(True) for k, v in self.conf['conf_notesLocations'].iteritems(): self._insert_noteslocation_row(v, k) self.ui.tbl_notesLocations.blockSignals(False) except __HOLE__: pass try: self.ui.conf_author.setText(self.conf['conf_author']) except KeyError: pass if 'conf_checkbox_recordonsave' in self.conf.keys(): if int(self.conf['conf_checkbox_recordonsave']) == 0: self.ui.conf_checkbox_recordonsave.setChecked(False) else: self.ui.conf_checkbox_recordonsave.setChecked(True) if 'conf_checkbox_recordonexit' in self.conf.keys(): if int(self.conf['conf_checkbox_recordonexit']) == 0: self.ui.conf_checkbox_recordonexit.setChecked(False) else: self.ui.conf_checkbox_recordonexit.setChecked(True) if 'conf_checkbox_recordonswitch' in self.conf.keys(): if int(self.conf['conf_checkbox_recordonswitch']) == 0: self.ui.conf_checkbox_recordonswitch.setChecked(False) else: self.ui.conf_checkbox_recordonswitch.setChecked(True) if 'conf_checkbox_firstlinetitle' in self.conf.keys(): if int(self.conf['conf_checkbox_firstlinetitle']) == 0: self.ui.conf_checkbox_firstlinetitle.setChecked(False) else: self.ui.conf_checkbox_firstlinetitle.setChecked(True) # def load_folder_location(self): # if 'conf_notesLocation' in self.conf.keys(): # dirpath = self.conf['conf_notesLocation'] # else: # dirpath = None # # savedir = QtGui.QFileDialog.getExistingDirectory(self, 'Notes Directory', dirpath) # self.ui.conf_notesLocation.setText(savedir)
KeyError
dataset/ETHPy150Open akehrer/Motome/Motome/Controllers/SettingsDialog.py/SettingsDialog.__init__
6,387
def add_folder_location(self): notesdir = QtGui.QFileDialog.getExistingDirectory(self, 'Notes Directory', os.path.expanduser('~')) try: if notesdir not in self.conf['conf_notesLocations'].keys(): self._add_noteslocation_row(notesdir) except __HOLE__: self.conf['conf_notesLocations'] = {} self._add_noteslocation_row(notesdir)
KeyError
dataset/ETHPy150Open akehrer/Motome/Motome/Controllers/SettingsDialog.py/SettingsDialog.add_folder_location
6,388
def restore(self): try: self.row, self.col = self._saved.pop() except __HOLE__: # no prior save self.row, self.col = 0,0
IndexError
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/terminal.py/Cursor.restore
6,389
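Cursor.restore above pops a stack of saved positions and falls back to the origin when the stack is empty, so an unbalanced restore (the masked IndexError) is tolerated rather than fatal. The save/restore discipline in a self-contained sketch:

class Cursor(object):
    def __init__(self):
        self.row, self.col = 0, 0
        self._saved = []  # stack of (row, col) tuples

    def save(self):
        self._saved.append((self.row, self.col))

    def restore(self):
        try:
            self.row, self.col = self._saved.pop()
        except IndexError:  # restore without a prior save
            self.row, self.col = 0, 0

c = Cursor()
c.row, c.col = 5, 10
c.save()
c.row, c.col = 20, 40
c.restore()
assert (c.row, c.col) == (5, 10)
c.restore()  # unbalanced restore falls back to the origin
assert (c.row, c.col) == (0, 0)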
def clear_tab(self): # <ESC>[g '''Clears tab at the current position.''' r, c = self.cursor() try: i = self._tabs.index(c) except __HOLE__: pass else: del self._tabs[i]
ValueError
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/terminal.py/Screen.clear_tab
6,390
def _read(self, N=100): while 1: try: raw = self._pty.read(N) except __HOLE__, why: if why[0] == EAGAIN: continue else: raise except EOFError: self._pty = None raise else: break if self.printer: self.printer.write(raw) return raw
OSError
dataset/ETHPy150Open kdart/pycopia/QA/pycopia/terminal.py/Terminal._read
6,391
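Terminal._read above uses the Python 2 idiom of indexing the exception (why[0] == EAGAIN) to retry a non-blocking read. In Python 3, EAGAIN on a non-blocking descriptor surfaces as BlockingIOError, so the loop becomes the sketch below; a real implementation would select() or poll() before retrying rather than busy-wait.

import errno

def read_retrying(stream, n=100):
    # Retry a non-blocking read until data (or EOF) arrives.
    while True:
        try:
            return stream.read(n)
        except BlockingIOError:  # Python 3 name for EAGAIN/EWOULDBLOCK
            continue
        except OSError as exc:
            # Defensive: some wrappers raise a bare OSError with EAGAIN.
            if exc.errno == errno.EAGAIN:
                continue
            raise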
def _delete_objects(self, context, resource, objects): delete_op = getattr(self, 'delete_%s' % resource) for obj in objects: try: delete_op(context, obj['result']['id']) except __HOLE__: LOG.exception(_LE("Could not find %s to delete."), resource) except Exception: LOG.exception(_LE("Could not delete %(res)s %(id)s."), {'res': resource, 'id': obj['result']['id']})
KeyError
dataset/ETHPy150Open openstack/neutron/neutron/plugins/ml2/plugin.py/Ml2Plugin._delete_objects
6,392
def render(self, name, value, attrs=None): # Update the template parameters with any attributes passed in. if attrs: self.params.update(attrs) # Defaulting the WKT value to a blank string -- this # will be tested in the JavaScript and the appropriate # interface will be constructed. self.params['wkt'] = '' # If a string reaches here (via a validation error on another # field) then just reconstruct the Geometry. if isinstance(value, six.string_types): try: value = GEOSGeometry(value) except (GEOSException, __HOLE__) as err: logger.error( "Error creating geometry from value '%s' (%s)" % ( value, err) ) value = None if value and value.geom_type.upper() != self.geom_type: value = None # Constructing the dictionary of the map options. self.params['map_options'] = self.map_options() # Constructing the JavaScript module name using the name of # the GeometryField (passed in via the `attrs` keyword). # Use the 'name' attr for the field name (rather than 'field') self.params['name'] = name # note: we must switch out dashes for underscores since js # functions are created using the module variable js_safe_name = self.params['name'].replace('-','_') self.params['module'] = 'geodjango_%s' % js_safe_name if value: # Transforming the geometry to the projection used on the # OpenLayers map. srid = self.params['srid'] if value.srid != srid: try: ogr = value.ogr ogr.transform(srid) wkt = ogr.wkt except OGRException as err: logger.error( "Error transforming geometry from srid '%s' to srid '%s' (%s)" % ( value.srid, srid, err) ) wkt = '' else: wkt = value.wkt # Setting the parameter WKT with that of the transformed # geometry. self.params['wkt'] = wkt return loader.render_to_string(self.template, self.params, context_instance=geo_context)
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/admin/widgets.py/OpenLayersWidget.render
6,393
def _has_changed(self, initial, data): """ Compare geographic value of data with its initial value. """ # Ensure we are dealing with a geographic object if isinstance(initial, six.string_types): try: initial = GEOSGeometry(initial) except (GEOSException, __HOLE__): initial = None # Only do a geographic comparison if both values are available if initial and data: data = fromstr(data) data.transform(initial.srid) # If the initial value was not added by the browser, the geometry # provided may be slightly different, the first time it is saved. # The comparison is done with a very low tolerance. return not initial.equals_exact(data, tolerance=0.000001) else: # Check for change of state of existence return bool(initial) != bool(data)
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/contrib/gis/admin/widgets.py/OpenLayersWidget._has_changed
6,394
@misc.cachedproperty def banner(self): """A banner that can be useful to display before running.""" connection_details = self._server.connection_details transport = connection_details.transport if transport.driver_version: transport_driver = "%s v%s" % (transport.driver_name, transport.driver_version) else: transport_driver = transport.driver_name try: hostname = socket.getfqdn() except socket.error: hostname = "???" try: pid = os.getpid() except __HOLE__: pid = "???" chapters = { 'Connection details': { 'Driver': transport_driver, 'Exchange': self._exchange, 'Topic': self._topic, 'Transport': transport.driver_type, 'Uri': connection_details.uri, }, 'Powered by': { 'Executor': reflection.get_class_name(self._executor), 'Thread count': getattr(self._executor, 'max_workers', "???"), }, 'Supported endpoints': [str(ep) for ep in self._endpoints], 'System details': { 'Hostname': hostname, 'Pid': pid, 'Platform': platform.platform(), 'Python': sys.version.split("\n", 1)[0].strip(), 'Thread id': tu.get_ident(), }, } return banner.make_banner('WBE worker', chapters)
OSError
dataset/ETHPy150Open openstack/taskflow/taskflow/engines/worker_based/worker.py/Worker.banner
6,395
def test_error_handling(self): try: self.h121.raise_exception("bad value", ValueError) except __HOLE__, err: self.assertEqual(str(err), "h1.h12.h121: bad value") else: self.fail('ValueError expected') self.h121._logger.error("can't start server") self.h121._logger.warning("I wouldn't recommend that") self.h121._logger.info("fyi") self.h121._logger.debug("dump value = 3")
ValueError
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/test/test_hierarchy.py/HierarchyTestCase.test_error_handling
6,396
def unknown_vals_mention_value_given_in_error(self): value = "penguinmints" try: self._run(_, hide=value) except __HOLE__ as e: msg = "Error from run(hide=xxx) did not tell user what the bad value was!" # noqa msg += "\nException msg: {0}".format(e) ok_(value in str(e), msg) else: assert False, "run() did not raise ValueError for bad hide= value" # noqa
ValueError
dataset/ETHPy150Open pyinvoke/invoke/tests/runners.py/Runner_.output_hiding.unknown_vals_mention_value_given_in_error
6,397
@skip_if_windows @patch('invoke.platform.tty') # stub @patch('invoke.platform.termios') def tty_stdins_have_settings_restored_on_KeyboardInterrupt( self, mock_termios, mock_tty ): # This test is re: GH issue #303 # tcgetattr returning some arbitrary value sentinel = [1, 7, 3, 27] mock_termios.tcgetattr.return_value = sentinel # Don't actually bubble up the KeyboardInterrupt... try: self._run(_, klass=_KeyboardInterruptingRunner) except __HOLE__: pass # Did we restore settings?! mock_termios.tcsetattr.assert_called_once_with( sys.stdin, mock_termios.TCSADRAIN, sentinel )
KeyboardInterrupt
dataset/ETHPy150Open pyinvoke/invoke/tests/runners.py/Runner_.character_buffered_stdin.tty_stdins_have_settings_restored_on_KeyboardInterrupt
6,398
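The test above checks that terminal settings captured with tcgetattr are restored with tcsetattr even when a KeyboardInterrupt escapes. Outside a mocked test, the usual way to guarantee that is a try/finally or a context manager, as in this POSIX-only sketch:

import sys
import termios
import tty
from contextlib import contextmanager

@contextmanager
def cbreak_mode(stream=sys.stdin):
    # Put a tty into cbreak mode and always restore the saved settings,
    # even if the body raises (e.g. KeyboardInterrupt).
    old_settings = termios.tcgetattr(stream)
    try:
        tty.setcbreak(stream.fileno())
        yield
    finally:
        # TCSADRAIN waits for pending output before applying the restore.
        termios.tcsetattr(stream, termios.TCSADRAIN, old_settings)

# usage sketch:
# with cbreak_mode():
#     run_interactive_loop()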
def KeyboardInterrupt_is_still_raised(self): raised = None try: self._run(_, klass=_KeyboardInterruptingRunner) except __HOLE__ as e: raised = e assert raised is not None
KeyboardInterrupt
dataset/ETHPy150Open pyinvoke/invoke/tests/runners.py/Runner_.keyboard_interrupts_act_transparently.KeyboardInterrupt_is_still_raised
6,399
def _run(self, pty): runner = _KeyboardInterruptingFastLocal(Context(config=Config())) try: runner.run(_, pty=pty) except __HOLE__: pass return runner
KeyboardInterrupt
dataset/ETHPy150Open pyinvoke/invoke/tests/runners.py/Local_.send_interrupt._run