Dataset schema (recovered from the dataset-viewer header):
  Unnamed: 0  — int64, values 0 to 10k (row index)
  function    — string, lengths 79 to 138k (a Python function with the handled exception type masked as __HOLE__)
  label       — string, 20 classes (the exception type behind the mask)
  info        — string, lengths 42 to 261 (source path within the ETHPy150Open corpus)
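A minimal sketch of how rows with this schema might be loaded and sanity-checked (the pandas calls are standard; the file name exceptions.csv is a hypothetical export, not part of the dataset):

import pandas as pd

# Load the exported rows; column names follow the schema listed above.
df = pd.read_csv("exceptions.csv")  # hypothetical export of this dataset

# Each row pairs a function containing a masked `__HOLE__` exception
# handler with the exception class that belongs in the mask.
for _, row in df.head(3).iterrows():
    print(row["label"], "<-", row["info"])
    assert "__HOLE__" in row["function"]  # the mask the label resolves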
7,100
def handle(self, *args, **options):
    # bind host and port
    if len(args) > 0:
        host_port = args[0]
    else:
        host_port = None
    if host_port:
        host, _port = host_port.split(':', 1)
        port = int(_port)
    else:
        host = utils.get_settings_value('FTPSERVER_HOST') or '127.0.0.1'
        port = utils.get_settings_value('FTPSERVER_PORT') or 21
    timeout = options['timeout'] \
        or utils.get_settings_value('FTPSERVER_TIMEOUT')
    # passive ports
    _passive_ports = options['passive-ports'] \
        or utils.get_settings_value('FTPSERVER_PASSIVE_PORTS')
    if _passive_ports:
        try:
            passive_ports = utils.parse_ports(_passive_ports)
        except (TypeError, __HOLE__):
            raise CommandError("Invalid passive ports: {}".format(
                options['passive-ports']))
    else:
        passive_ports = None
    # masquerade address
    masquerade_address = options['masquerade-address'] \
        or utils.get_settings_value('FTPSERVER_MASQUERADE_ADDRESS')
    # file access user
    file_access_user = options['file-access-user'] \
        or utils.get_settings_value('FTPSERVER_FILE_ACCESS_USER')
    # certfile
    certfile = options['certfile'] \
        or utils.get_settings_value('FTPSERVER_CERTFILE')
    # keyfile
    keyfile = options['keyfile'] \
        or utils.get_settings_value('FTPSERVER_KEYFILE')
    # sendfile
    sendfile = options['sendfile'] \
        or utils.get_settings_value('FTPSERVER_SENDFILE')
    # daemonize
    daemonize = options['daemonize'] \
        or utils.get_settings_value('FTPSERVER_DAEMONIZE')
    if daemonize:
        daemonize_options = utils.get_settings_value(
            'FTPSERVER_DAEMONIZE_OPTIONS') or {}
        become_daemon(**daemonize_options)
    # write pid to file
    pidfile = options['pidfile'] \
        or utils.get_settings_value('FTPSERVER_PIDFILE')
    if pidfile:
        with open(pidfile, 'w') as f:
            f.write(str(os.getpid()))
    # select handler class
    if certfile or keyfile:
        if hasattr(handlers, 'TLS_FTPHandler'):
            handler_class = (
                utils.get_settings_value('FTPSERVER_TLSHANDLER')
            ) or handlers.TLS_FTPHandler
        else:
            # unsupported
            raise CommandError(
                "Can't import OpenSSL. Please install pyOpenSSL.")
    else:
        handler_class = (
            utils.get_settings_value('FTPSERVER_HANDLER')
        ) or handlers.FTPHandler
    authorizer_class = utils.get_settings_value('FTPSERVER_AUTHORIZER') \
        or FTPAccountAuthorizer
    # setup server
    server = self.make_server(
        server_class=FTPServer,
        handler_class=handler_class,
        authorizer_class=authorizer_class,
        host_port=(host, port),
        file_access_user=file_access_user,
        timeout=timeout,
        passive_ports=passive_ports,
        masquerade_address=masquerade_address,
        certfile=certfile,
        keyfile=keyfile,
        sendfile=sendfile)
    # start server
    quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
    sys.stdout.write((
        "Django version {version_dj}, using settings '{settings}'\n"
        "pyftpdlib version {version_ftp}\n"
        "Quit the server with {quit_command}.\n").format(
            version_dj=get_version(),
            version_ftp=pyftpdlib.__ver__,
            settings=settings.SETTINGS_MODULE,
            quit_command=quit_command))
    server.serve_forever()
ValueError
dataset/ETHPy150Open tokibito/django-ftpserver/src/django_ftpserver/management/commands/ftpserver.py/Command.handle
7,101
def test_project_information(self):
    project = self.projects['base_test_project']
    response = self.api_client.post(
        '/orchestra/api/project/project_information/',
        {'project_id': project.id},
        format='json')
    self.assertEquals(response.status_code, 200)
    returned = json.loads(response.content.decode('utf-8'))
    unimportant_keys = (
        'id',
        'task',
        'short_description',
        'start_datetime',
        'end_datetime'
    )

    def delete_keys(obj):
        if isinstance(obj, list):
            for item in obj:
                delete_keys(item)
        elif isinstance(obj, dict):
            for key in unimportant_keys:
                try:
                    del obj[key]
                except __HOLE__:
                    pass
            for value in obj.values():
                delete_keys(value)

    delete_keys(returned)
    del returned['tasks']['step1']['project']
    del (returned['tasks']['step1']['assignments'][0]
         ['iterations'][0]['assignment'])

    expected = {
        'project': {
            'task_class': 1,
            'workflow_slug': 'w1',
            'workflow_version_slug': 'test_workflow',
            'project_data': {},
            'team_messages_url': None,
            'priority': 0,
        },
        'tasks': {
            'step1': {
                'assignments': [{
                    'status': 'Submitted',
                    'in_progress_task_data': {'test_key': 'test_value'},
                    'worker': {
                        'username': self.workers[0].user.username,
                        'first_name': self.workers[0].user.first_name,
                        'last_name': self.workers[0].user.last_name,
                    },
                    'iterations': [{
                        'status': 'Requested Review',
                        'submitted_data': {'test_key': 'test_value'},
                    }],
                }],
                'latest_data': {
                    'test_key': 'test_value'
                },
                'status': 'Pending Review',
                'step_slug': 'step1',
            }
        },
        'steps': [
            {'slug': 'step1',
             'description': 'The longer description of the first step',
             'is_human': True},
            {'slug': 'step2',
             'description': 'The longer description of the second step',
             'is_human': True},
            {'slug': 'step3',
             'description': 'The longer description of the third step',
             'is_human': True}
        ]
    }
    self.assertEquals(returned, expected)

    response = self.api_client.post(
        '/orchestra/api/project/project_information/',
        {'project_id': -1},
        format='json')
    self.ensure_response(response,
                         {'error': 400,
                          'message': 'No project for given id'},
                         400)

    # Getting project info without a project_id should fail.
    response = self.api_client.post(
        '/orchestra/api/project/project_information/',
        {'projetc_id': project.id},  # Typo.
        format='json')
    self.ensure_response(response,
                         {'error': 400,
                          'message': 'project_id is required'},
                         400)

    # Retrieve the third project, which has no task assignments.
    response = self.api_client.post(
        '/orchestra/api/project/project_information/',
        {'project_id': self.projects['no_task_assignments'].id},
        format='json')
    returned = json.loads(response.content.decode('utf-8'))
    for key in ('id', 'project', 'start_datetime'):
        del returned['tasks']['step1'][key]
    self.assertEquals(response.status_code, 200)
    self.assertEquals(returned['tasks'], {
        'step1': {
            'assignments': [],
            'latest_data': None,
            'status': 'Awaiting Processing',
            'step_slug': 'step1'
        }
    })
KeyError
dataset/ETHPy150Open unlimitedlabs/orchestra/orchestra/tests/test_project_api.py/ProjectAPITestCase.test_project_information
7,102
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
    try:
        klass = lookup[synchronize_session]
    except __HOLE__:
        raise sa_exc.ArgumentError(
            "Valid strategies for session synchronization "
            "are %s" % (", ".join(sorted(repr(x) for x in lookup))))
    else:
        return klass(*arg)
KeyError
dataset/ETHPy150Open zzzeek/sqlalchemy/lib/sqlalchemy/orm/persistence.py/BulkUD._factory
7,103
def test_defaultFailure(self):
    """
    Test that log.failure() emits the right data.
    """
    log = TestLogger()
    try:
        raise RuntimeError("baloney!")
    except __HOLE__:
        log.failure("Whoops")

    errors = self.flushLoggedErrors(RuntimeError)
    self.assertEqual(len(errors), 1)

    self.assertEqual(log.emitted["level"], LogLevel.critical)
    self.assertEqual(log.emitted["format"], "Whoops")
RuntimeError
dataset/ETHPy150Open twisted/twisted/twisted/logger/test/test_logger.py/LoggerTests.test_defaultFailure
7,104
def _fill_form(el, values):
    counts = {}
    if hasattr(values, 'mixed'):
        # For Paste request parameters
        values = values.mixed()
    inputs = _input_xpath(el)
    for input in inputs:
        name = input.get('name')
        if not name:
            continue
        if _takes_multiple(input):
            value = values.get(name, [])
            if not isinstance(value, (list, tuple)):
                value = [value]
            _fill_multiple(input, value)
        elif name not in values:
            continue
        else:
            index = counts.get(name, 0)
            counts[name] = index + 1
            value = values[name]
            if isinstance(value, (list, tuple)):
                try:
                    value = value[index]
                except __HOLE__:
                    continue
            elif index > 0:
                continue
            _fill_single(input, value)
IndexError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/lxml-3.3.6/src/lxml/html/formfill.py/_fill_form
7,105
def _find_form(el, form_id=None, form_index=None):
    if form_id is None and form_index is None:
        forms = _forms_xpath(el)
        for form in forms:
            return form
        raise FormNotFound(
            "No forms in page")
    if form_id is not None:
        form = el.get_element_by_id(form_id)
        if form is not None:
            return form
        forms = _form_name_xpath(el, name=form_id)
        if forms:
            return forms[0]
        else:
            raise FormNotFound(
                "No form with the name or id of %r (forms: %s)"
                % (id, ', '.join(_find_form_ids(el))))
    if form_index is not None:
        forms = _forms_xpath(el)
        try:
            return forms[form_index]
        except __HOLE__:
            raise FormNotFound(
                "There is no form with the index %r (%i forms found)"
                % (form_index, len(forms)))
IndexError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/lxml-3.3.6/src/lxml/html/formfill.py/_find_form
7,106
def get_data_from_bits(self, **kwargs):
    result_dict = {}
    for bit_name, bit_instance in self.bits.items():
        if bit_name in self.params:
            params = self.params[bit_name]
        else:
            try:
                params = bit_instance.params
            except __HOLE__:
                params = None
        result_dict[bit_name] = bit_instance.get_data(params=params, **kwargs)
    return result_dict
AttributeError
dataset/ETHPy150Open chibisov/drf-extensions/rest_framework_extensions/key_constructor/constructors.py/KeyConstructor.get_data_from_bits
7,107
def bfs_edges(G, source, reverse=False):
    """Produce edges in a breadth-first-search starting at source."""
    # Based on http://www.ics.uci.edu/~eppstein/PADS/BFS.py
    # by D. Eppstein, July 2004.
    if reverse and isinstance(G, nx.DiGraph):
        neighbors = G.predecessors_iter
    else:
        neighbors = G.neighbors_iter
    visited = set([source])
    queue = deque([(source, neighbors(source))])
    while queue:
        parent, children = queue[0]
        try:
            child = next(children)
            if child not in visited:
                yield parent, child
                visited.add(child)
                queue.append((child, neighbors(child)))
        except __HOLE__:
            queue.popleft()
StopIteration
dataset/ETHPy150Open gkno/gkno_launcher/src/networkx/algorithms/traversal/breadth_first_search.py/bfs_edges
7,108
def catkin_success(args, env={}):
    orig_environ = dict(os.environ)
    try:
        os.environ.update(env)
        catkin_main(args)
    except __HOLE__ as exc:
        ret = exc.code
        if ret != 0:
            import traceback
            traceback.print_exc()
    finally:
        os.environ = orig_environ
    return ret == 0
SystemExit
dataset/ETHPy150Open catkin/catkin_tools/tests/utils.py/catkin_success
7,109
def catkin_failure(args, env={}):
    orig_environ = dict(os.environ)
    try:
        os.environ.update(env)
        catkin_main(args)
    except __HOLE__ as exc:
        ret = exc.code
    finally:
        os.environ = orig_environ
    return ret != 0
SystemExit
dataset/ETHPy150Open catkin/catkin_tools/tests/utils.py/catkin_failure
7,110
def __exit__(self, exc_type, exc_value, tb):
    if self.expected is None:
        if exc_type is None:
            return True
        else:
            raise
    if exc_type is None:
        try:
            exc_name = self.expected.__name__
        except __HOLE__:
            exc_name = str(self.expected)
        raise AssertionError("{0} not raised".format(exc_name))
    if not issubclass(exc_type, self.expected):
        raise
    if self.expected_regex is None:
        return True
    expected_regex = self.expected_regex
    expected_regex = re.compile(expected_regex)
    if not expected_regex.search(str(exc_value)):
        raise AssertionError("'{0}' does not match '{1}'".format(
            expected_regex.pattern, str(exc_value)))
    return True
AttributeError
dataset/ETHPy150Open catkin/catkin_tools/tests/utils.py/AssertRaisesContext.__exit__
7,111
def __init__(self, servers, debug=0, pickleProtocol=0,
             pickler=pickle.Pickler, unpickler=pickle.Unpickler,
             pload=None, pid=None,
             server_max_key_length=SERVER_MAX_KEY_LENGTH,
             server_max_value_length=SERVER_MAX_VALUE_LENGTH,
             dead_retry=_DEAD_RETRY, socket_timeout=_SOCKET_TIMEOUT,
             cache_cas=False):
    """
    Create a new Client object with the given list of servers.

    @param servers: C{servers} is passed to L{set_servers}.
    @param debug: whether to display error messages when a server
    can't be contacted.
    @param pickleProtocol: number to mandate protocol used by (c)Pickle.
    @param pickler: optional override of default Pickler to allow
    subclassing.
    @param unpickler: optional override of default Unpickler to allow
    subclassing.
    @param pload: optional persistent_load function to call on pickle
    loading.  Useful for cPickle since subclassing isn't allowed.
    @param pid: optional persistent_id function to call on pickle
    storing.  Useful for cPickle since subclassing isn't allowed.
    @param dead_retry: number of seconds before retrying a blacklisted
    server. Default to 30 s.
    @param socket_timeout: timeout in seconds for all calls to a server.
    Defaults to 3 seconds.
    @param cache_cas: (default False) If true, cas operations will be
    cached.  WARNING: This cache is not expired internally, if you have
    a long-running process you will need to expire it manually via
    "client.reset_cas()", or the cache can grow unlimited.
    @param server_max_key_length: (default SERVER_MAX_KEY_LENGTH)
    Data that is larger than this will not be sent to the server.
    @param server_max_value_length: (default SERVER_MAX_VALUE_LENGTH)
    Data that is larger than this will not be sent to the server.
    """
    local.__init__(self)
    self.debug = debug
    self.dead_retry = dead_retry
    self.socket_timeout = socket_timeout
    self.set_servers(servers)
    self.stats = {}
    self.cache_cas = cache_cas
    self.reset_cas()

    # Allow users to modify pickling/unpickling behavior
    self.pickleProtocol = pickleProtocol
    self.pickler = pickler
    self.unpickler = unpickler
    self.persistent_load = pload
    self.persistent_id = pid
    self.server_max_key_length = server_max_key_length
    self.server_max_value_length = server_max_value_length

    # figure out the pickler style
    file = StringIO()
    try:
        pickler = self.pickler(file, protocol=self.pickleProtocol)
        self.picklerIsKeyword = True
    except __HOLE__:
        self.picklerIsKeyword = False
TypeError
dataset/ETHPy150Open mozilla/source/vendor-local/lib/python/memcache.py/Client.__init__
7,112
def handle(self, **options):
    self.verbosity = options['verbosity']
    self.interactive = options['interactive']
    app_label = options['app_label']
    start_migration_name = options['start_migration_name']
    migration_name = options['migration_name']
    no_optimize = options['no_optimize']

    # Load the current graph state, check the app and migration they asked for exists
    loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
    if app_label not in loader.migrated_apps:
        raise CommandError(
            "App '%s' does not have migrations (so squashmigrations on "
            "it makes no sense)" % app_label
        )

    migration = self.find_migration(loader, app_label, migration_name)

    # Work out the list of predecessor migrations
    migrations_to_squash = [
        loader.get_migration(al, mn)
        for al, mn in loader.graph.forwards_plan((migration.app_label, migration.name))
        if al == migration.app_label
    ]

    if start_migration_name:
        start_migration = self.find_migration(loader, app_label, start_migration_name)
        start = loader.get_migration(start_migration.app_label, start_migration.name)
        try:
            start_index = migrations_to_squash.index(start)
            migrations_to_squash = migrations_to_squash[start_index:]
        except __HOLE__:
            raise CommandError(
                "The migration '%s' cannot be found. Maybe it comes after "
                "the migration '%s'?\n"
                "Have a look at:\n"
                "  python manage.py showmigrations %s\n"
                "to debug this issue." % (start_migration, migration, app_label)
            )

    # Tell them what we're doing and optionally ask if we should proceed
    if self.verbosity > 0 or self.interactive:
        self.stdout.write(self.style.MIGRATE_HEADING("Will squash the following migrations:"))
        for migration in migrations_to_squash:
            self.stdout.write(" - %s" % migration.name)

        if self.interactive:
            answer = None
            while not answer or answer not in "yn":
                answer = six.moves.input("Do you wish to proceed? [yN] ")
                if not answer:
                    answer = "n"
                    break
                else:
                    answer = answer[0].lower()
            if answer != "y":
                return

    # Load the operations from all those migrations and concat together,
    # along with collecting external dependencies and detecting
    # double-squashing
    operations = []
    dependencies = set()
    # We need to take all dependencies from the first migration in the list
    # as it may be 0002 depending on 0001
    first_migration = True
    for smigration in migrations_to_squash:
        if smigration.replaces:
            raise CommandError(
                "You cannot squash squashed migrations! Please transition "
                "it to a normal migration first: "
                "https://docs.djangoproject.com/en/%s/topics/migrations/#squashing-migrations" % get_docs_version()
            )
        operations.extend(smigration.operations)
        for dependency in smigration.dependencies:
            if isinstance(dependency, SwappableTuple):
                if settings.AUTH_USER_MODEL == dependency.setting:
                    dependencies.add(("__setting__", "AUTH_USER_MODEL"))
                else:
                    dependencies.add(dependency)
            elif dependency[0] != smigration.app_label or first_migration:
                dependencies.add(dependency)
        first_migration = False

    if no_optimize:
        if self.verbosity > 0:
            self.stdout.write(self.style.MIGRATE_HEADING("(Skipping optimization.)"))
        new_operations = operations
    else:
        if self.verbosity > 0:
            self.stdout.write(self.style.MIGRATE_HEADING("Optimizing..."))

        optimizer = MigrationOptimizer()
        new_operations = optimizer.optimize(operations, migration.app_label)

        if self.verbosity > 0:
            if len(new_operations) == len(operations):
                self.stdout.write("  No optimizations possible.")
            else:
                self.stdout.write(
                    "  Optimized from %s operations to %s operations."
                    % (len(operations), len(new_operations))
                )

    # Work out the value of replaces (any squashed ones we're re-squashing)
    # need to feed their replaces into ours
    replaces = []
    for migration in migrations_to_squash:
        if migration.replaces:
            replaces.extend(migration.replaces)
        else:
            replaces.append((migration.app_label, migration.name))

    # Make a new migration with those operations
    subclass = type("Migration", (migrations.Migration, ), {
        "dependencies": dependencies,
        "operations": new_operations,
        "replaces": replaces,
    })
    if start_migration_name:
        new_migration = subclass("%s_squashed_%s" % (start_migration.name, migration.name), app_label)
    else:
        new_migration = subclass("0001_squashed_%s" % migration.name, app_label)
    new_migration.initial = True

    # Write out the new migration file
    writer = MigrationWriter(new_migration)
    with open(writer.path, "wb") as fh:
        fh.write(writer.as_string())

    if self.verbosity > 0:
        self.stdout.write(self.style.MIGRATE_HEADING("Created new squashed migration %s" % writer.path))
        self.stdout.write("  You should commit this migration but leave the old ones in place;")
        self.stdout.write("  the new migration will be used for new installs. Once you are sure")
        self.stdout.write("  all instances of the codebase have applied the migrations you squashed,")
        self.stdout.write("  you can delete them.")
    if writer.needs_manual_porting:
        self.stdout.write(self.style.MIGRATE_HEADING("Manual porting required"))
        self.stdout.write("  Your migrations contained functions that must be manually copied over,")
        self.stdout.write("  as we could not safely copy their implementation.")
        self.stdout.write("  See the comment at the top of the squashed migration for details.")
ValueError
dataset/ETHPy150Open django/django/django/core/management/commands/squashmigrations.py/Command.handle
7,113
def find_migration(self, loader, app_label, name):
    try:
        return loader.get_migration_by_prefix(app_label, name)
    except AmbiguityError:
        raise CommandError(
            "More than one migration matches '%s' in app '%s'. Please be "
            "more specific." % (name, app_label)
        )
    except __HOLE__:
        raise CommandError(
            "Cannot find a migration matching '%s' from app '%s'." %
            (name, app_label)
        )
KeyError
dataset/ETHPy150Open django/django/django/core/management/commands/squashmigrations.py/Command.find_migration
7,114
def clean(self, value):
    """
    Value can be either a string in the format XXX.XXX.XXX-XX or an
    11-digit number.
    """
    value = super(BRCPFField, self).clean(value)
    if value in EMPTY_VALUES:
        return u''
    orig_value = value[:]
    if not value.isdigit():
        value = re.sub("[-\.]", "", value)
    try:
        int(value)
    except __HOLE__:
        raise ValidationError(self.error_messages['digits_only'])
    if len(value) != 11:
        raise ValidationError(self.error_messages['max_digits'])
    orig_dv = value[-2:]

    new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(10, 1, -1))])
    new_1dv = DV_maker(new_1dv % 11)
    value = value[:-2] + str(new_1dv) + value[-1]
    new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(11, 1, -1))])
    new_2dv = DV_maker(new_2dv % 11)
    value = value[:-1] + str(new_2dv)
    if value[-2:] != orig_dv:
        raise ValidationError(self.error_messages['invalid'])

    return orig_value
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/localflavor/br/forms.py/BRCPFField.clean
7,115
def clean(self, value):
    """
    Value can be either a string in the format XX.XXX.XXX/XXXX-XX or a
    group of 14 characters.
    """
    value = super(BRCNPJField, self).clean(value)
    if value in EMPTY_VALUES:
        return u''
    orig_value = value[:]
    if not value.isdigit():
        value = re.sub("[-/\.]", "", value)
    try:
        int(value)
    except __HOLE__:
        raise ValidationError(self.error_messages['digits_only'])
    if len(value) != 14:
        raise ValidationError(self.error_messages['max_digits'])
    orig_dv = value[-2:]

    new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(5, 1, -1) + range(9, 1, -1))])
    new_1dv = DV_maker(new_1dv % 11)
    value = value[:-2] + str(new_1dv) + value[-1]
    new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(6, 1, -1) + range(9, 1, -1))])
    new_2dv = DV_maker(new_2dv % 11)
    value = value[:-1] + str(new_2dv)
    if value[-2:] != orig_dv:
        raise ValidationError(self.error_messages['invalid'])

    return orig_value
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/contrib/localflavor/br/forms.py/BRCNPJField.clean
7,116
def close(self):
    """Closes the WebSocket connection."""
    self._started = False
    if not self.server_terminated:
        if not self.stream.closed():
            self._write_frame(True, 0x8, b'')
        self.server_terminated = True

    if self.graceful_shutdown:
        if self.client_terminated:
            if self._waiting is not None:
                try:
                    self.stream.io_loop.remove_timeout(self._waiting)
                except __HOLE__:
                    pass
                self._waiting = None
            self._terminate()
        elif self._waiting is None:
            # Give the client a few seconds to complete a clean shutdown,
            # otherwise just close the connection.
            self._waiting = self.stream.io_loop.add_timeout(
                time.time() + self.graceful_shutdown, self._abort)
    else:
        if self.client_terminated:
            return
        self._terminate()
KeyError
dataset/ETHPy150Open benoitc/gaffer/gaffer/httpclient/websocket.py/WebSocket.close
7,117
def _handle_message(self, opcode, data):
    if self.client_terminated:
        return

    if opcode == 0x1:
        # UTF-8 data
        try:
            decoded = data.decode("utf-8")
        except __HOLE__:
            self._abort()
            return
        self._async_callback(self.on_message)(decoded)
    elif opcode == 0x2:
        # Binary data
        self._async_callback(self.on_message)(data)
    elif opcode == 0x8:
        # Close
        self.client_terminated = True
        self.close()
    elif opcode == 0x9:
        # Ping
        self._write_frame(True, 0xA, data)
        self._async_callback(self.on_ping)()
    elif opcode == 0xA:
        # Pong
        self._async_callback(self.on_pong)()
    else:
        self._abort()
UnicodeDecodeError
dataset/ETHPy150Open benoitc/gaffer/gaffer/httpclient/websocket.py/WebSocket._handle_message
7,118
def __init__(self, loop, url, api_key=None, **kwargs):
    loop = loop
    try:
        self.heartbeat_timeout = kwargs.pop('heartbeat')
    except __HOLE__:
        self.heartbeat_timeout = 15.0

    self.api_key = api_key

    # define status
    self.active = False
    self.closed = False

    # dict to maintain opened channels
    self.channels = dict()

    # dict to maintain commands
    self.commands = dict()

    # emitter for global events
    self._emitter = EventEmitter(loop)
    self._heartbeat = pyuv.Timer(loop)

    super(GafferSocket, self).__init__(loop, url, **kwargs)

    # make sure we authenticate first
    if self.api_key is not None:
        self.write_message("AUTH:%s" % self.api_key)
KeyError
dataset/ETHPy150Open benoitc/gaffer/gaffer/httpclient/websocket.py/GafferSocket.__init__
7,119
def __getitem__(self, topic):
    try:
        channel = self.channels[topic]
    except __HOLE__:
        raise KeyError("%s channel isn't subscribed" % topic)
    return channel
KeyError
dataset/ETHPy150Open benoitc/gaffer/gaffer/httpclient/websocket.py/GafferSocket.__getitem__
7,120
def __init__(self, loop, url, mode=3, api_key=None, **kwargs):
    loop = loop
    self.api_key = api_key

    # initialize the capabilities
    self.mode = mode
    self.readable = False
    self.writable = False
    if mode & pyuv.UV_READABLE:
        self.readable = True

    if mode & pyuv.UV_WRITABLE:
        self.writable = True

    # set heartbeat
    try:
        self.heartbeat_timeout = kwargs.pop('heartbeat')
    except __HOLE__:
        self.heartbeat_timeout = 15.0
    self._heartbeat = pyuv.Timer(loop)

    # pending messages queue
    self._queue = deque()
    self.pending = {}

    # define status
    self.active = False
    self.closed = False

    # read callback
    self._read_callback = None

    super(IOChannel, self).__init__(loop, url, **kwargs)

    # make sure we authenticate first
    if self.api_key is not None:
        msg = Message("AUTH:%s" % self.api_key)
        self.write_message(msg.encode())
KeyError
dataset/ETHPy150Open benoitc/gaffer/gaffer/httpclient/websocket.py/IOChannel.__init__
7,121
def on_message(self, raw):
    msg = decode_frame(raw)
    if msg.type in (FRAME_ERROR_TYPE, FRAME_RESPONSE_TYPE):
        # did we received an error?
        error = None
        result = None
        if msg.type == FRAME_ERROR_TYPE:
            error = json.loads(msg.body.decode('utf-8'))
            self.close()
        else:
            result = msg.body

        if msg.id == "gaffer_error":
            if self._on_error_cb is not None:
                return self._async_callback(self._on_error_cb)(self, error)

        # handle message callback if any
        try:
            callback = self.pending.pop(msg.id)
        except __HOLE__:
            return
        self._async_callback(callback)(self, result, error)
    elif msg.type == FRAME_MESSAGE_TYPE:
        if self._read_callback is not None:
            self._async_callback(self._read_callback)(self, msg.body)
KeyError
dataset/ETHPy150Open benoitc/gaffer/gaffer/httpclient/websocket.py/IOChannel.on_message
7,122
@sockets.route('/api/cluster_comms')
def cluster_comms(ws):
    """Websocket to communicate with the test clusters

    Commands are logical actions one end of the socket wishes the other end
    to take. Responses are follow ups to a command, which there can be
    multiple, back and forth between client and server until the receiving
    end marks a response as Done.

    Command structure:
      {type:'command',
       command_id:'some unique string for this command',
       message:'some message to the other end',
       action:'some action for the receiver to take',
       // Extra parameters:
       foo: ...,
       bar: ...,
      }

    Response structure:
      {type:'response',
       command_id:'the command id this is a response to',
       message:'some message to the other end',
       done: true/false (the responder considers the command complete)
       // Extra parameters:
       foo: ...,
       bar: ...,
      }

    Possible commands:
     * authenticate - server asks client to authenticate
     * get_work - client asks for a test
     * test_done - client is done with a test, and sending artifacts
     * cancel_test - server asks client to cancel test
     * shutdown - server asks client to shutdown service

    Protocol:

    Authentication:
     * client initiates connection to this server
     * server sends client a random challenge token
       {type:'command', command_id='zzzz', action:'authenticate', token:'xxxxxxx'}
     * client signs challenge token with it's private key ands sends the signature
       {type:'response', command_id='zzz', cluster:'bdplab', signature:'xxxxxxxx'}
     * server verifies the signature is against the token it sent and the
       public key it has on file for the cluster.
     * server sends a 'you are authenticated' response.
       {type:'response', command_id='zzz', authenticated: true, done:true}

    Task loop:
     * client sends a 'give me work' request.
       {type:'command', command_id='yyy', action:'get_work'}
     * server sends a 'ok, wait for work' response.
       {type:'response', command_id='yyy', action:'wait'}
     * server sends a single test to the cluster
       {type:'response', command_id='yyy', test:{...}}
     * client responds 'ok, received test' response
       {type:'response', command_id:'yyy', test_id:'xxxxxxx'}
     * server updates status of test to in_progress in database
       {type:'response', command_id:'yyy', message:'test_updated', done:true}
     * client sends artifacts via streaming protocol (See below)
     * client sends 'ok, test done, artifacts sent.' request.
       {type:'command', command_id:'llll', action:'test_done', test_id:'xxxxxxx'}
     * server updates status of test to completed
     * server sends a 'ok, test updated' response
       {type:'response', command_id:'llll', test_id:'xxxxxx', message='test_update', done:true}

    Streaming:
    protocol for streaming raw data: console output, binary artifacts etc.
     * Sending peer sends a "I'm going to send binary data to you" request:
       {type:'command', command_id='xxx', action:'stream', test_id='xxxxx',
        kind:"[console|failure|chart|system_logs|stress_logs]", name='name',
        eof='$$$EOF$$$', keepalive='$$$KEEPALIVE$$$'}
     * Receiving peer sends response indicating it's ready to receive the stream:
       {type:'response', command_id='xxx', action='ready'}
     * Peer starts sending arbitrary binary data messages.
     * The receiving peer reads binary data. If it encounters $$$KEEPALIVE$$$
       as it's own message, it will omit that data, as it's only meant to keep
       the socket open.
     * Once $$$EOF$$$ is seen by the receiving peer, in it's own message, the
       receiving peer can respond:
       {type:'response', command_id='xxx', message:'stream_received', done:true}
    """
    context = {'apikey': APIKey.load(SERVER_KEY_PATH),
               'cluster': None}

    def authenticate():
        token_to_sign = random_token()
        cmd = Command.new(ws, action='authenticate', token=token_to_sign)
        response = cmd.send()
        context['cluster'] = cluster = response['cluster']
        client_pubkey = db.get_pub_key(cluster)
        client_apikey = APIKey(client_pubkey['pubkey'])

        # Verify the client correctly signed the token:
        try:
            client_apikey.verify_message(token_to_sign, response.get('signature'))
        except:
            response.respond(message='Bad Signature of token for authentication', done=True)
            log.error('client provided bad signature for auth token')
            raise

        response.respond(authenticated=True, done=True)

        # Client will ask us to authenticate too:
        command = receive_data(ws)
        assert command.get('action') == 'authenticate'
        data = {'signature': context['apikey'].sign_message(command['token'])}
        response = command.respond(**data)
        if response.get('authenticated') != True:
            raise UnauthenticatedError("Our peer could not validate our signed auth token")

    def get_work(command):
        # Mark any existing in_process jobs for this cluster as
        # failed. If the cluster is asking for new work, then these
        # got dropped on the floor:
        for test in db.get_in_progress_tests(context['cluster']):
            db.update_test_status(test['test_id'], 'failed')

        # Find the next test scheduled for the client's cluster:
        tests = db.get_scheduled_tests(context['cluster'], limit=1)
        if len(tests) > 0:
            test_id = tests[0]['test_id']
        else:
            # No tests are currently scheduled.
            # Register a zmq listener of notifications of incoming tests, with a timeout.
            # When we see any test scheduled notification for our cluster, redo the query.
            # If timeout reached, redo the query anyway in case we missed the notification.
            def setup_zmq():
                zmq_context = zmq.Context()
                zmq_socket = zmq_context.socket(zmq.SUB)
                zmq_socket.connect('tcp://127.0.0.1:5557')
                zmq_socket.setsockopt_string(
                    zmq.SUBSCRIBE,
                    unicode('scheduled {cluster} '.format(cluster=context['cluster'])))
                zmq_socket.setsockopt(zmq.RCVTIMEO, 15000)
                return zmq_socket
            zmq_socket = setup_zmq()
            while True:
                try:
                    cluster, test_id = zmq_socket.recv_string().split()
                except zmq.error.Again:
                    pass
                except zmq.error.ZMQError, e:
                    if e.errno == zmq.POLLERR:
                        log.error(e)
                        # Interrupted zmq socket code, reinitialize:
                        # I get this when I resize my terminal.. WTF?
                        zmq_socket = setup_zmq()
                finally:
                    tests = db.get_scheduled_tests(context['cluster'], limit=1)
                    if len(tests) > 0:
                        test_id = tests[0]['test_id']
                        break
                    else:
                        # Send no-work-yet message:
                        console_publish(context['cluster'], {'ctl': 'WAIT'})
                        command.respond(action='wait', follow_up=False)
        test = db.get_test(test_id)
        # Give the test to the client:
        response = command.respond(test=test)
        # Expect an prepared status message back:
        assert response['test_id'] == test['test_id'] and \
            response['status'] == 'prepared'
        # Update the test status:
        db.update_test_status(test['test_id'], 'in_progress')
        # Let the client know they can start it:
        response.respond(test_id=test['test_id'], status="in_progress", done=True)

    def test_done(command):
        """Receive completed test artifacts from client"""
        db.update_test_status(command['test_id'], command['status'])
        # Record test failure message, if any:
        if command['status'] == 'failed':
            msg = (command.get('message', '') + "\n" +
                   command.get('stacktrace', '')).strip()
            db.update_test_artifact(command['test_id'], 'failure', msg)
        # Send response:
        command.respond(test_id=command['test_id'], message='test_update', done=True)

    def receive_artifact_chunk_object(command):
        command.respond(message="ready", follow_up=False, done=False)
        tmp = cStringIO.StringIO()
        chunk_sha = hashlib.sha256()

        def frame_callback(frame, binary):
            if not binary:
                frame = frame.encode("utf-8")
            chunk_sha.update(frame)
            tmp.write(frame)

        socket_comms.receive_stream(ws, command, frame_callback)
        # save chunk to db
        db.insert_artifact_chunk(command['object_id'], command['chunk_id'],
                                 command['chunk_size'], chunk_sha.hexdigest(),
                                 tmp, command['num_of_chunks'],
                                 command['file_size'], command['object_sha'])
        # respond with current sha
        command.respond(message='chunk_received', done=True,
                        chunk_id=command['chunk_id'],
                        chunk_sha=chunk_sha.hexdigest())

    def receive_artifact_chunk_complete(command):
        db.update_test_artifact(command['test_id'], command['kind'], None,
                                command['name'],
                                available=command['successful'],
                                object_id=command['object_id'])
        command.respond(message='ok',
                        stored_chunk_shas=_get_stored_chunks(command['object_id']),
                        done=True)

    def receive_artifact_chunk_query(command):
        command.respond(message='ok',
                        stored_chunk_shas=_get_stored_chunks(command['object_id']),
                        done=True)

    def _get_stored_chunks(object_id):
        """
        This is super lame, but.... currently returning a list as a value on
        commands breaks the assertion functionality on the client
        """
        chunk_info = db.get_chunk_info(object_id)
        return ','.join(["{}:{}".format(hsh['chunk_id'], hsh['chunk_sha'])
                         for hsh in chunk_info])

    def receive_stream(command):
        """Receive a stream of data"""
        command.respond(message="ready", follow_up=False)
        log.debug("Receving data stream ....")
        if command['kind'] == 'console':
            console_dir = os.path.join(os.path.expanduser("~"),
                                       ".cstar_perf", "console_out")
            try:
                os.makedirs(console_dir)
            except __HOLE__:
                pass
            console = open(os.path.join(console_dir, command['test_id']), "w")
        tmp = cStringIO.StringIO()
        sha = hashlib.sha256()
        try:
            def frame_callback(frame, binary):
                if not binary:
                    frame = frame.encode("utf-8")
                if command['kind'] == 'console':
                    console.write(frame)
                    console_publish(context['cluster'],
                                    {'job_id': command['test_id'], 'msg': frame})
                    console.flush()
                else:
                    console_publish(context['cluster'],
                                    {'job_id': command['test_id'],
                                     'ctl': 'IN_PROGRESS'})
                sha.update(frame)
                tmp.write(frame)
            socket_comms.receive_stream(ws, command, frame_callback)
            if command['kind'] == 'console':
                console.close()
            # TODO: confirm with the client that the sha is correct
            # before storing
        finally:
            # In the event of a socket error, we always want to commit
            # what we have of the artifact to the database. Better to
            # have something than nothing. It's the client's
            # responsibility to resend artifacts that failed.
            db.update_test_artifact(command['test_id'], command['kind'],
                                    tmp, command['name'])
        command.respond(message='stream_received', done=True, sha256=sha.hexdigest())

    # Client and Server both authenticate to eachother:
    authenticate()

    try:
        # Dispatch on client commands:
        while True:
            command = receive_data(ws)
            assert command['type'] == 'command'
            if command['action'] == 'get_work':
                console_publish(context['cluster'], {'ctl': 'WAIT'})
                get_work(command)
            elif command['action'] == 'test_done':
                console_publish(context['cluster'], {'ctl': 'DONE'})
                test_done(command)
            elif command['action'] == 'stream':
                receive_stream(command)
            elif command['action'] == 'chunk-stream-query':
                receive_artifact_chunk_query(command)
            elif command['action'] == 'chunk-stream':
                receive_artifact_chunk_object(command)
            elif command['action'] == 'chunk-stream-complete':
                receive_artifact_chunk_complete(command)
            elif command['action'] == 'good_bye':
                log.info("client said good_bye. Closing socket.")
                break
    finally:
        console_publish(context['cluster'], {'ctl': 'GOODBYE'})
OSError
dataset/ETHPy150Open datastax/cstar_perf/frontend/cstar_perf/frontend/server/cluster_api.py/cluster_comms
7,123
def get_magic_type(self):
    """Checks the volume for its magic bytes and returns the magic."""

    with io.open(self.disk.get_fs_path(), "rb") as file:
        file.seek(self.offset)
        fheader = file.read(min(self.size, 4096) if self.size else 4096)

    # TODO fallback to img-cat image -s blocknum | file -

    # if we were able to load the module magic
    try:
        # noinspection PyUnresolvedReferences
        import magic

        if hasattr(magic, 'from_buffer'):
            # using https://github.com/ahupp/python-magic
            logger.debug("Using python-magic Python package for file type magic")
            result = magic.from_buffer(fheader).decode()
            return result

        elif hasattr(magic, 'open'):
            # using Magic file extensions by Rueben Thomas (Ubuntu python-magic module)
            logger.debug("Using python-magic system package for file type magic")
            ms = magic.open(magic.NONE)
            ms.load()
            result = ms.buffer(fheader)
            ms.close()
            return result

        else:
            logger.warning("The python-magic module is not available, but another module named magic was found.")

    except __HOLE__:
        logger.warning("The python-magic module is not available.")
    except AttributeError:
        logger.warning("The python-magic module is not available, but another module named magic was found.")

    return None
ImportError
dataset/ETHPy150Open ralphje/imagemounter/imagemounter/volume.py/Volume.get_magic_type
7,124
def mount(self):
    """Based on the file system type as determined by :func:`determine_fs_type`,
    the proper mount command is executed for this volume. The volume is mounted
    in a temporary path (or a pretty path if :attr:`pretty` is enabled) in the
    mountpoint as specified by :attr:`mountpoint`.

    If the file system type is a LUKS container or LVM, additional methods may
    be called, adding subvolumes to :attr:`volumes`

    :return: boolean indicating whether the mount succeeded
    """

    raw_path = self.get_raw_path()
    self.determine_fs_type()

    # we need a mountpoint if it is not a lvm or luks volume
    if self.fstype not in ('luks', 'lvm', 'bde') and self.fstype not in VOLUME_SYSTEM_TYPES and \
            self.fstype in FILE_SYSTEM_TYPES and not self._make_mountpoint():
        return False

    # Prepare mount command
    try:
        def call_mount(type, opts):
            cmd = ['mount', raw_path, self.mountpoint, '-t', type, '-o', opts]
            if not self.disk.read_write:
                cmd[-1] += ',ro'
            _util.check_call_(cmd, stdout=subprocess.PIPE)

        if self.fstype == 'ext':
            call_mount('ext4', 'noexec,noload,loop,offset=' + str(self.offset))

        elif self.fstype == 'ufs':
            call_mount('ufs', 'ufstype=ufs2,loop,offset=' + str(self.offset))

        elif self.fstype == 'ntfs':
            call_mount('ntfs', 'show_sys_files,noexec,force,loop,offset=' + str(self.offset))

        elif self.fstype == 'xfs':
            call_mount('xfs', 'norecovery,loop,offset=' + str(self.offset))

        elif self.fstype == 'hfs+':
            call_mount('hfsplus', 'force,loop,offset=' + str(self.offset) + ',sizelimit=' + str(self.size))

        elif self.fstype in ('iso', 'udf', 'squashfs', 'cramfs', 'minix', 'fat', 'hfs'):
            mnt_type = {'iso': 'iso9660', 'fat': 'vfat'}.get(self.fstype, self.fstype)
            call_mount(mnt_type, 'loop,offset=' + str(self.offset))

        elif self.fstype == 'vmfs':
            if not self._find_loopback():
                return False
            _util.check_call_(['vmfs-fuse', self.loopback, self.mountpoint], stdout=subprocess.PIPE)

        elif self.fstype == 'unknown':  # mounts without specifying the filesystem type
            cmd = ['mount', raw_path, self.mountpoint, '-o', 'loop,offset=' + str(self.offset)]
            if not self.disk.read_write:
                cmd[-1] += ',ro'
            _util.check_call_(cmd, stdout=subprocess.PIPE)

        elif self.fstype == 'jffs2':
            self._open_jffs2()

        elif self.fstype == 'luks':
            self._open_luks_container()

        elif self.fstype == 'bde':
            self._open_bde_container()

        elif self.fstype == 'lvm':
            self._open_lvm()
            for _ in self.volumes.detect_volumes('lvm'):
                pass

        elif self.fstype == 'dir':
            os.rmdir(self.mountpoint)
            os.symlink(raw_path, self.mountpoint)

        elif self.fstype in VOLUME_SYSTEM_TYPES:
            for _ in self.volumes.detect_volumes(self.fstype):
                pass

        else:
            try:
                size = self.size / self.disk.block_size
            except __HOLE__:
                size = self.size

            logger.warning("Unsupported filesystem {0} (type: {1}, block offset: {2}, length: {3})"
                           .format(self, self.fstype, self.offset / self.disk.block_size, size))
            return False

        self.was_mounted = True
        return True
    except Exception as e:
        logger.exception("Execution failed due to {}".format(e), exc_info=True)
        self.exception = e

        try:
            if self.mountpoint:
                os.rmdir(self.mountpoint)
                self.mountpoint = ""
            if self.loopback:
                self.loopback = ""
        except Exception as e2:
            logger.exception("Clean-up failed", exc_info=True)

        return False
TypeError
dataset/ETHPy150Open ralphje/imagemounter/imagemounter/volume.py/Volume.mount
7,125
def _open_bde_container(self):
    """Mounts a BDE container. Uses key material provided by the :attr:`keys`
    attribute. The key material should be provided in the same format as to
    :cmd:`bdemount`, used as follows:

    k:full volume encryption and tweak key
    p:passphrase
    r:recovery password
    s:file to startup key (.bek)

    :return: the Volume contained in the BDE container, or None on failure.
    """

    self.bde_path = tempfile.mkdtemp(prefix='image_mounter_bde_')

    try:
        if str(self.index) in self.keys:
            t, v = self.keys[str(self.index)].split(':', 1)
            key = ['-' + t, v]
        else:
            logger.warning("No key material provided for %s", self)
            key = []
    except __HOLE__:
        logger.exception("Invalid key material provided (%s) for %s. Expecting [arg]:[value]",
                         self.keys.get(str(self.index)), self)
        return None

    # noinspection PyBroadException
    try:
        cmd = ["bdemount", self.get_raw_path(), self.bde_path, '-o', str(self.offset)]
        cmd.extend(key)
        _util.check_call_(cmd)
    except Exception:
        self.bde_path = ""
        logger.exception("Failed mounting BDE volume %s.", self)
        return None

    container = self.volumes._make_subvolume()
    container.index = "{0}.0".format(self.index)
    container.fsdescription = 'BDE Volume'
    container.flag = 'alloc'
    container.offset = 0
    container.size = self.size

    return container
ValueError
dataset/ETHPy150Open ralphje/imagemounter/imagemounter/volume.py/Volume._open_bde_container
7,126
def unmount(self):
    """Unounts the volume from the filesystem."""

    for volume in self.volumes:
        volume.unmount()

    if self.loopback and self.volume_group:
        try:
            _util.check_call_(["lvm", 'vgchange', '-a', 'n', self.volume_group], stdout=subprocess.PIPE)
        except Exception:
            return False
        self.volume_group = ""

    if self.loopback and self.luks_path:
        try:
            _util.check_call_(['cryptsetup', 'luksClose', self.luks_path], stdout=subprocess.PIPE)
        except Exception:
            return False
        self.luks_path = ""

    if self.bde_path:
        if not _util.clean_unmount(['fusermount', '-u'], self.bde_path):
            return False
        self.bde_path = ""

    if self.loopback:
        try:
            _util.check_call_(['losetup', '-d', self.loopback])
        except Exception:
            return False
        self.loopback = ""

    if self.bindmountpoint:
        if not _util.clean_unmount(['umount'], self.bindmountpoint, rmdir=False):
            return False
        self.bindmountpoint = ""

    if self.mountpoint:
        if not _util.clean_unmount(['umount'], self.mountpoint):
            return False
        self.mountpoint = ""

    if self.carvepoint:
        try:
            shutil.rmtree(self.carvepoint)
        except __HOLE__:
            return False
        else:
            self.carvepoint = ""

    return True
OSError
dataset/ETHPy150Open ralphje/imagemounter/imagemounter/volume.py/Volume.unmount
7,127
@property
def name(self):
    try:
        return "-" + self._name()
    except __HOLE__:
        return ''
NotImplementedError
dataset/ETHPy150Open niwinz/django-greenqueue/greenqueue/worker/base.py/BaseWorker.name
7,128
def _process_task(self, name, uuid, args, kwargs):
    try:
        _task = self.lib.task_by_name(name)
    except __HOLE__:
        log.error("greenqueue-worker: received unknown or unregistret method call: %s", name)
        return

    task_callable = self.get_callable_for_task(_task)
    self.process_callable(uuid, task_callable, args, kwargs)
ValueError
dataset/ETHPy150Open niwinz/django-greenqueue/greenqueue/worker/base.py/BaseWorker._process_task
7,129
def run(self):
    load_modules()
    self.lib = library

    while not self.stop_event.is_set():
        try:
            name, uuid, args, kwargs = self.queue_in.get(True)
            log.debug("greenqueue-worker{0}: received message from queue - {1}:{2}".format(self.name, name, uuid))
            self._process_task(name, uuid, args, kwargs)
        except __HOLE__:
            self.stop_event.set()
KeyboardInterrupt
dataset/ETHPy150Open niwinz/django-greenqueue/greenqueue/worker/base.py/BaseWorker.run
7,130
def _handleClassSetUp(self, test, result):
    previousClass = getattr(result, '_previousTestClass', None)
    currentClass = test.__class__
    if currentClass == previousClass:
        return
    if result._moduleSetUpFailed:
        return
    if getattr(currentClass, "__unittest_skip__", False):
        return

    try:
        currentClass._classSetupFailed = False
    except __HOLE__:
        # test may actually be a function
        # so its class will be a builtin-type
        pass

    setUpClass = getattr(currentClass, 'setUpClass', None)
    if setUpClass is not None:
        _call_if_exists(result, '_setupStdout')
        try:
            setUpClass()
        except Exception as e:
            if isinstance(result, _DebugResult):
                raise
            currentClass._classSetupFailed = True
            className = util.strclass(currentClass)
            errorName = 'setUpClass (%s)' % className
            self._addClassOrModuleLevelException(result, e, errorName)
        finally:
            _call_if_exists(result, '_restoreStdout')
TypeError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/unittest/suite.py/TestSuite._handleClassSetUp
7,131
def _handleModuleFixture(self, test, result):
    previousModule = self._get_previous_module(result)
    currentModule = test.__class__.__module__
    if currentModule == previousModule:
        return

    self._handleModuleTearDown(result)

    result._moduleSetUpFailed = False
    try:
        module = sys.modules[currentModule]
    except __HOLE__:
        return
    setUpModule = getattr(module, 'setUpModule', None)
    if setUpModule is not None:
        _call_if_exists(result, '_setupStdout')
        try:
            setUpModule()
        except Exception as e:
            if isinstance(result, _DebugResult):
                raise
            result._moduleSetUpFailed = True
            errorName = 'setUpModule (%s)' % currentModule
            self._addClassOrModuleLevelException(result, e, errorName)
        finally:
            _call_if_exists(result, '_restoreStdout')
KeyError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/unittest/suite.py/TestSuite._handleModuleFixture
7,132
def _handleModuleTearDown(self, result):
    previousModule = self._get_previous_module(result)
    if previousModule is None:
        return
    if result._moduleSetUpFailed:
        return

    try:
        module = sys.modules[previousModule]
    except __HOLE__:
        return

    tearDownModule = getattr(module, 'tearDownModule', None)
    if tearDownModule is not None:
        _call_if_exists(result, '_setupStdout')
        try:
            tearDownModule()
        except Exception as e:
            if isinstance(result, _DebugResult):
                raise
            errorName = 'tearDownModule (%s)' % previousModule
            self._addClassOrModuleLevelException(result, e, errorName)
        finally:
            _call_if_exists(result, '_restoreStdout')
KeyError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/unittest/suite.py/TestSuite._handleModuleTearDown
7,133
def _isnotsuite(test):
    "A crude way to tell apart testcases and suites with duck-typing"
    try:
        iter(test)
    except __HOLE__:
        return True
    return False
TypeError
dataset/ETHPy150Open amrdraz/kodr/app/brython/www/src/Lib/unittest/suite.py/_isnotsuite
7,134
def has(self, node):
    """
    Check if node has heartbeat monitor

    @param node: Node
    @type node: L{Node}
    @return: True or False
    """
    try:
        m = self._lookup(node)
        return True
    except __HOLE__, e:
        return False
KeyError
dataset/ETHPy150Open selfsk/nodeset.core/src/nodeset/core/heartbeat.py/NodeHeartBeat.has
7,135
def _skip_if_no_lxml():
    try:
        import lxml
    except __HOLE__:
        raise nose.SkipTest("no lxml")
ImportError
dataset/ETHPy150Open pydata/pandas/pandas/io/tests/test_data.py/_skip_if_no_lxml
7,136
def _skip_if_no_bs():
    try:
        import bs4
        import html5lib
    except __HOLE__:
        raise nose.SkipTest("no html5lib/bs4")
ImportError
dataset/ETHPy150Open pydata/pandas/pandas/io/tests/test_data.py/_skip_if_no_bs
7,137
def is_fd_closed(fd):
    try:
        os.fstat(fd)
        return False
    except __HOLE__:
        return True
OSError
dataset/ETHPy150Open d11wtq/dockerpty/tests/unit/test_io.py/is_fd_closed
7,138
def upgrade(self, version, name='database_version', pkg=None):
    """Invokes `do_upgrade(env, version, cursor)` in module
    `"%s/db%i.py" % (pkg, version)`, for each required version upgrade.

    :param version: the expected integer database version.
    :param name: the name of the entry in the SYSTEM table that contains
                 the database version. Defaults to `database_version`,
                 which contains the database version for Trac.
    :param pkg: the package containing the upgrade modules.
    :raises TracError: if the package or module doesn't exist.
    """
    dbver = self.get_database_version(name)
    for i in range(dbver + 1, version + 1):
        module = 'db%i' % i
        try:
            upgrades = __import__(pkg, globals(), locals(), [module])
        except ImportError:
            raise TracError(_("No upgrade package %(pkg)s", pkg=pkg))
        try:
            script = getattr(upgrades, module)
        except __HOLE__:
            raise TracError(_("No upgrade module %(module)s.py",
                              module=module))
        with self.env.db_transaction as db:
            cursor = db.cursor()
            script.do_upgrade(self.env, i, cursor)
        self.set_database_version(i, name)
AttributeError
dataset/ETHPy150Open edgewall/trac/trac/db/api.py/DatabaseManager.upgrade
7,139
def parse_connection_uri(db_str):
    """Parse the database connection string.

    The database connection string for an environment is specified through
    the `database` option in the `[trac]` section of trac.ini.

    :return: a tuple containing the scheme and a dictionary of attributes:
             `user`, `password`, `host`, `port`, `path`, `params`.
    :since: 1.1.3
    """
    if not db_str:
        section = tag.a("[trac]",
                        title=_("TracIni documentation"),
                        class_='trac-target-new',
                        href='http://trac.edgewall.org/wiki/TracIni'
                             '#trac-section')
        raise ConfigurationError(
            tag_("Database connection string is empty. Set the %(option)s "
                 "configuration option in the %(section)s section of "
                 "trac.ini. Please refer to the %(doc)s for help.",
                 option=tag.code("database"), section=section,
                 doc=_doc_db_str()))

    try:
        scheme, rest = db_str.split(':', 1)
    except ValueError:
        raise _invalid_db_str(db_str)

    if not rest.startswith('/'):
        if scheme == 'sqlite' and rest:
            # Support for relative and in-memory SQLite connection strings
            host = None
            path = rest
        else:
            raise _invalid_db_str(db_str)
    else:
        if not rest.startswith('//'):
            host = None
            rest = rest[1:]
        elif rest.startswith('///'):
            host = None
            rest = rest[3:]
        else:
            rest = rest[2:]
            if '/' in rest:
                host, rest = rest.split('/', 1)
            else:
                host = rest
                rest = ''
        path = None

    if host and '@' in host:
        user, host = host.split('@', 1)
        if ':' in user:
            user, password = user.split(':', 1)
        else:
            password = None
        if user:
            user = urllib.unquote(user)
        if password:
            password = unicode_passwd(urllib.unquote(password))
    else:
        user = password = None

    if host and ':' in host:
        host, port = host.split(':', 1)
        try:
            port = int(port)
        except ValueError:
            raise _invalid_db_str(db_str)
    else:
        port = None

    if not path:
        path = '/' + rest
    if os.name == 'nt':
        # Support local paths containing drive letters on Win32
        if len(rest) > 1 and rest[1] == '|':
            path = "%s:%s" % (rest[0], rest[2:])

    params = {}
    if '?' in path:
        path, qs = path.split('?', 1)
        qs = qs.split('&')
        for param in qs:
            try:
                name, value = param.split('=', 1)
            except __HOLE__:
                raise _invalid_db_str(db_str)
            value = urllib.unquote(value)
            params[name] = value

    args = zip(('user', 'password', 'host', 'port', 'path', 'params'),
               (user, password, host, port, path, params))
    return scheme, dict([(key, value) for key, value in args if value])


# Compatibility for Trac < 1.1.3. Will be removed in 1.3.1.
ValueError
dataset/ETHPy150Open edgewall/trac/trac/db/api.py/parse_connection_uri
7,140
def __new__(cls, x=0, base=10):
    """
    From the Py3 int docstring:

    |  int(x=0) -> integer
    |  int(x, base=10) -> integer
    |
    |  Convert a number or string to an integer, or return 0 if no
    |  arguments are given.  If x is a number, return x.__int__().  For
    |  floating point numbers, this truncates towards zero.
    |
    |  If x is not a number or if base is given, then x must be a string,
    |  bytes, or bytearray instance representing an integer literal in the
    |  given base.  The literal can be preceded by '+' or '-' and be
    |  surrounded by whitespace.  The base defaults to 10.  Valid bases are
    |  0 and 2-36.  Base 0 means to interpret the base from the string as an
    |  integer literal.
    |  >>> int('0b100', base=0)
    |  4
    """
    try:
        val = x.__int__()
    except AttributeError:
        val = x
    else:
        if not isint(val):
            raise TypeError('__int__ returned non-int ({0})'.format(
                type(val)))

    if base != 10:
        # Explicit base
        if not (istext(val) or isbytes(val) or isinstance(val, bytearray)):
            raise TypeError(
                "int() can't convert non-string with explicit base")
        try:
            return super(newint, cls).__new__(cls, val, base)
        except TypeError:
            return super(newint, cls).__new__(cls, newbytes(val), base)

    # After here, base is 10
    try:
        return super(newint, cls).__new__(cls, val)
    except __HOLE__:
        # Py2 long doesn't handle bytearray input with an explicit base, so
        # handle this here.
        # Py3: int(bytearray(b'10'), 2) == 2
        # Py2: int(bytearray(b'10'), 2) == 2 raises TypeError
        # Py2: long(bytearray(b'10'), 2) == 2 raises TypeError
        try:
            return super(newint, cls).__new__(cls, newbytes(val))
        except:
            raise TypeError("newint argument must be a string or a number,"
                            "not '{0}'".format(type(val)))
TypeError
dataset/ETHPy150Open OpenCobolIDE/OpenCobolIDE/open_cobol_ide/extlibs/future/types/newint.py/newint.__new__
7,141
def emit_rmic_classes(target, source, env):
    """Create and return lists of Java RMI stub and skeleton
    class files to be created from a set of class files.
    """
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')
    classdir = env.get('JAVACLASSDIR')

    if not classdir:
        try:
            s = source[0]
        except IndexError:
            classdir = '.'
        else:
            try:
                classdir = s.attributes.java_classdir
            except AttributeError:
                classdir = '.'
    classdir = env.Dir(classdir).rdir()
    if str(classdir) == '.':
        c_ = None
    else:
        c_ = str(classdir) + os.sep

    slist = []
    for src in source:
        try:
            classname = src.attributes.java_classname
        except __HOLE__:
            classname = str(src)
            if c_ and classname[:len(c_)] == c_:
                classname = classname[len(c_):]
            if class_suffix and classname[:-len(class_suffix)] == class_suffix:
                classname = classname[-len(class_suffix):]
        s = src.rfile()
        s.attributes.java_classdir = classdir
        s.attributes.java_classname = classname
        slist.append(s)

    stub_suffixes = ['_Stub']
    if env.get('JAVAVERSION') == '1.4':
        stub_suffixes.append('_Skel')

    tlist = []
    for s in source:
        for suff in stub_suffixes:
            fname = s.attributes.java_classname.replace('.', os.sep) + \
                    suff + class_suffix
            t = target[0].File(fname)
            t.attributes.java_lookupdir = target[0]
            tlist.append(t)

    return tlist, source
AttributeError
dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/rmic.py/emit_rmic_classes
7,142
def _start_batch(self):
    fn = self.options.batch
    if not fn:
        return None
    if fn == '-':
        f = sys.stdin
    else:
        f = open(fn, 'rb')

    def wrapper():
        for l in f.readlines():
            l = l.strip()
            if not l:
                continue
            try:
                # Try to parse line as json
                yield json.loads(l)
            except __HOLE__:
                # If it is not json, we expect one word with '@' sign
                assert len(l.split()) == 1
                # ensure there is something email-like
                login, domain = l.split('@')
                yield {'to': l}

    return wrapper()
ValueError
dataset/ETHPy150Open lavr/python-emails/scripts/make_rfc822.py/MakeRFC822._start_batch
7,143
def _setup_environment(environ):
    import platform
    # Cygwin requires some special voodoo to set the environment variables
    # properly so that Oracle will see them.
    if platform.system().upper().startswith('CYGWIN'):
        try:
            import ctypes
        except __HOLE__ as e:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured("Error loading ctypes: %s; "
                                       "the Oracle backend requires ctypes to "
                                       "operate correctly under Cygwin." % e)
        kernel32 = ctypes.CDLL('kernel32')
        for name, value in environ:
            kernel32.SetEnvironmentVariableA(name, value)
    else:
        import os
        os.environ.update(environ)
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/oracle/base.py/_setup_environment
7,144
def _cursor(self):
    cursor = None
    if not self._valid_connection():
        conn_string = convert_unicode(self._connect_string())
        conn_params = self.settings_dict['OPTIONS'].copy()
        if 'use_returning_into' in conn_params:
            del conn_params['use_returning_into']
        self.connection = Database.connect(conn_string, **conn_params)
        cursor = FormatStylePlaceholderCursor(self.connection)
        # Set the territory first. The territory overrides NLS_DATE_FORMAT
        # and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in single statement it isn't clear what is supposed
        # to happen.
        cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
        # Set oracle date to ansi date format.  This only needs to execute
        # once when we create a new connection. We also set the Territory
        # to 'AMERICA' which forces Sunday to evaluate to a '1' in
        # TO_CHAR().
        cursor.execute(
            "ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
            " NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
            + (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))

        if 'operators' not in self.__dict__:
            # Ticket #14149: Check whether our LIKE implementation will
            # work for this connection or we need to fall back on LIKEC.
            # This check is performed only once per DatabaseWrapper
            # instance per thread, since subsequent connections will use
            # the same settings.
            try:
                cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
                               % self._standard_operators['contains'],
                               ['X'])
            except utils.DatabaseError:
                self.operators = self._likec_operators
            else:
                self.operators = self._standard_operators

        try:
            self.oracle_version = int(self.connection.version.split('.')[0])
            # There's no way for the DatabaseOperations class to know the
            # currently active Oracle version, so we do some setups here.
            # TODO: Multi-db support will need a better solution (a way to
            # communicate the current version).
            if self.oracle_version <= 9:
                self.ops.regex_lookup = self.ops.regex_lookup_9
            else:
                self.ops.regex_lookup = self.ops.regex_lookup_10
        except __HOLE__:
            pass
        try:
            self.connection.stmtcachesize = 20
        except:
            # Django docs specify cx_Oracle version 4.3.1 or higher, but
            # stmtcachesize is available only in 4.3.2 and up.
            pass
        connection_created.send(sender=self.__class__, connection=self)
    if not cursor:
        cursor = FormatStylePlaceholderCursor(self.connection)
    return cursor

# Oracle doesn't support savepoint commits. Ignore them.
ValueError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/oracle/base.py/DatabaseWrapper._cursor
7,145
def executemany(self, query, params=None): # cx_Oracle doesn't support iterators, convert them to lists if params is not None and not isinstance(params, (list, tuple)): params = list(params) try: args = [(':arg%d' % i) for i in range(len(params[0]))] except (__HOLE__, TypeError): # No params given, nothing to do return None # cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it # it does want a trailing ';' but not a trailing '/'. However, these # characters must be included in the original query in case the query # is being passed to SQL*Plus. if query.endswith(';') or query.endswith('/'): query = query[:-1] query = convert_unicode(query % tuple(args), self.charset) formatted = [self._format_params(i) for i in params] self._guess_input_sizes(formatted) try: return self.cursor.executemany(query, [self._param_generator(p) for p in formatted]) except Database.IntegrityError as e: six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2]) except Database.DatabaseError as e: # cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400. if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError): six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2]) six.reraise(utils.DatabaseError, utils.DatabaseError(*tuple(e.args)), sys.exc_info()[2])
IndexError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/oracle/base.py/FormatStylePlaceholderCursor.executemany
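The masked clause catches whatever `params[0]` raises when there is nothing to execute: `IndexError` for an empty sequence, `TypeError` for `None`. The probe in isolation, outside any database driver:

def placeholders_for(params):
    """Build ':arg0, :arg1, ...' placeholders from the first param row."""
    try:
        return [':arg%d' % i for i in range(len(params[0]))]
    except (IndexError, TypeError):
        return None  # no params given, nothing to do

print(placeholders_for([('a', 'b')]))  # [':arg0', ':arg1']
print(placeholders_for([]))            # None (IndexError)
print(placeholders_for(None))          # None (TypeError)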
7,146
def main(): parser = argparse.ArgumentParser() parser.add_argument('dataName', type=str, help='name of python script that produces data to analyze.') parser.add_argument('allocModelName', type=str, help='name of allocation model. {MixModel, DPMixModel}') parser.add_argument('obsModelName', type=str, help='name of observation model. {Gauss, ZMGauss}') parser.add_argument('algName', type=str, help='name of learning algorithms to consider, {EM, VB, moVB, soVB}.') parser.add_argument('--jobname', type=str, default='defaultjob', help='name of experiment whose results should be plotted') parser.add_argument('--topW', type=int, default=10, help='the number of top words printed for a given topic') parser.add_argument('--taskids', type=str, default=None, help="int ids for tasks (individual runs) of the given job to plot." + \ 'Ex: "1" or "3" or "1,2,3" or "1-6"') parser.add_argument('--savefilename', type=str, default=None, help="absolute path to directory to save figure") parser.add_argument('--iterid', type=int, default=None) args = parser.parse_args() rootpath = os.path.join(os.environ['BNPYOUTDIR'], args.dataName, \ args.allocModelName, args.obsModelName) jobpath = os.path.join(rootpath, args.algName, args.jobname) if not os.path.exists(jobpath): raise ValueError("No such path: %s" % (jobpath)) taskids = PlotELBO.parse_task_ids(jobpath, args.taskids) Data = loadData(jobpath) if args.savefilename is not None and len(taskids) > 0: try: args.savefilename % ('1') except __HOLE__: raise ValueError("Missing or bad format string in savefilename %s" % (args.savefilename) ) for taskid in taskids: taskpath = os.path.join(jobpath, taskid) if args.iterid is None: prefix = "Best" else: prefix = "Iter%05d" % (args.iterid) hmodel = bnpy.ioutil.ModelReader.load_model(taskpath, prefix) # Print top words across all topics learnedK = hmodel.allocModel.K savefid = taskpath + "/top_words.txt" fid = open(savefid,'w+') for k in xrange(learnedK): lamvec = hmodel.obsModel.comp[k].lamvec elamvec = lamvec / lamvec.sum() topW_ind = np.argsort(elamvec)[-args.topW:] for w in xrange(args.topW): word = str(Data.vocab_dict[topW_ind[w]]) fid.write( word + ", " ) fid.write("...\n") fid.close()
TypeError
dataset/ETHPy150Open daeilkim/refinery/refinery/bnpy/bnpy-dev/bnpy/viz/PrintTopics.py/main
7,147
def get_language(language_code): for tag in normalize_language_tag(language_code): if tag in _languages: return _languages[tag] try: module = __import__(tag, globals(), locals()) except __HOLE__: continue _languages[tag] = module return module return None
ImportError
dataset/ETHPy150Open adieu/allbuttonspressed/docutils/parsers/rst/languages/__init__.py/get_language
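The masked `ImportError` lets the loop fall through to the next candidate language tag. The same try-import-continue idiom, sketched with an arbitrary candidate list:

def first_importable(candidates):
    """Return the first module in `candidates` that imports cleanly."""
    for name in candidates:
        try:
            return __import__(name)
        except ImportError:
            continue  # try the next candidate
    return None

# 'no_such_module' fails to import, 'json' succeeds
mod = first_importable(['no_such_module', 'json'])
print(mod.__name__)  # json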
7,148
def obj_make_compatible(self, primitive, target_version): super(ComputeNode, self).obj_make_compatible(primitive, target_version) target_version = versionutils.convert_version_to_tuple(target_version) if target_version < (1, 16): if 'disk_allocation_ratio' in primitive: del primitive['disk_allocation_ratio'] if target_version < (1, 15): if 'uuid' in primitive: del primitive['uuid'] if target_version < (1, 14): if 'ram_allocation_ratio' in primitive: del primitive['ram_allocation_ratio'] if 'cpu_allocation_ratio' in primitive: del primitive['cpu_allocation_ratio'] if target_version < (1, 13) and primitive.get('service_id') is None: # service_id is non-nullable in versions before 1.13 try: service = objects.Service.get_by_compute_host( self._context, primitive['host']) primitive['service_id'] = service.id except (exception.ComputeHostNotFound, __HOLE__): # NOTE(hanlind): In case anything goes wrong like service not # found or host not being set, catch and set a fake value just # to allow for older versions that demand a value to work. # Setting to -1 will, if value is later used result in a # ServiceNotFound, so should be safe. primitive['service_id'] = -1 if target_version < (1, 7) and 'host' in primitive: del primitive['host'] if target_version < (1, 5) and 'numa_topology' in primitive: del primitive['numa_topology'] if target_version < (1, 4) and 'host_ip' in primitive: del primitive['host_ip'] if target_version < (1, 3) and 'stats' in primitive: # pre 1.3 version does not have a stats field del primitive['stats']
KeyError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/objects/compute_node.py/ComputeNode.obj_make_compatible
7,149
@staticmethod def _host_from_db_object(compute, db_compute): if (('host' not in db_compute or db_compute['host'] is None) and 'service_id' in db_compute and db_compute['service_id'] is not None): # FIXME(sbauza) : Unconverted compute record, provide compatibility # This has to stay until we can be sure that any/all compute nodes # in the database have been converted to use the host field # Service field of ComputeNode could be deprecated in a next patch, # so let's use directly the Service object try: service = objects.Service.get_by_id( compute._context, db_compute['service_id']) except exception.ServiceNotFound: compute.host = None return try: compute.host = service.host except (__HOLE__, exception.OrphanedObjectError): # Host can be nullable in Service compute.host = None elif 'host' in db_compute and db_compute['host'] is not None: # New-style DB having host as a field compute.host = db_compute['host'] else: # We assume it should not happen but in case, let's set it to None compute.host = None
AttributeError
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/objects/compute_node.py/ComputeNode._host_from_db_object
7,150
def default (self, obj): try: return obj.__json__() except __HOLE__, e: return json.JSONEncoder.default(self, obj)
AttributeError
dataset/ETHPy150Open fp7-ofelia/ocf/ofam/src/src/foam/core/json.py/APIEncoder.default
7,151
def import_from_string(path): i = path.rfind('.') module, attr = path[:i], path[i + 1:] try: mod = import_module(module) except __HOLE__, e: raise ImproperlyConfigured( 'Error importing module %s: "%s"' % (module, e) ) try: instance = getattr(mod, attr) except AttributeError: raise ImproperlyConfigured( 'Module "%s" does not define a "%s" attribute' % (module, attr) ) return instance
ImportError
dataset/ETHPy150Open lincolnloop/django-debug-logging/debug_logging/utils.py/import_from_string
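A self-contained sketch of the same dotted-path import (using `RuntimeError` in place of Django's `ImproperlyConfigured`); the masked exception is the `ImportError` from a bad module segment:

from importlib import import_module

def import_from_string(path):
    """Import 'package.module.attr' and return the attr."""
    module_path, _, attr = path.rpartition('.')
    try:
        mod = import_module(module_path)
    except ImportError as e:
        raise RuntimeError('Error importing module %s: %s' % (module_path, e))
    try:
        return getattr(mod, attr)
    except AttributeError:
        raise RuntimeError('Module %r has no attribute %r' % (module_path, attr))

print(import_from_string('os.path.join'))  # <function join ...>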
7,152
def parse_and_bind(self, string): '''Parse and execute single line of a readline init file.''' try: log('parse_and_bind("%s")' % string) if string.startswith('#'): return if string.startswith('set'): m = re.compile(r'set\s+([-a-zA-Z0-9]+)\s+(.+)\s*$').match(string) if m: var_name = m.group(1) val = m.group(2) try: setattr(self, var_name.replace('-','_'), val) except AttributeError: log('unknown var="%s" val="%s"' % (var_name, val)) else: log('bad set "%s"' % string) return m = re.compile(r'\s*(.+)\s*:\s*([-a-zA-Z]+)\s*$').match(string) if m: key = m.group(1) func_name = m.group(2) py_name = func_name.replace('-', '_') try: func = getattr(self.mode, py_name) except __HOLE__: log('unknown func key="%s" func="%s"' % (key, func_name)) if self.debug: print 'pyreadline parse_and_bind error, unknown function to bind: "%s"' % func_name return self.mode._bind_key(key, func) except: log('error') raise
AttributeError
dataset/ETHPy150Open deanhiller/databus/webapp/play1.3.x/python/Lib/site-packages/pyreadline/rlmain.py/Readline.parse_and_bind
7,153
@classmethod def _compile_patterns(cls, patterns, field_name="Unknown", spec="Unknown"): compiled_patterns = [] for p in patterns: try: compiled_patterns.append(re.compile(fnmatch_translate_extended(p))) except (__HOLE__, re.error) as e: raise cls.InvalidPatternError( 'In {spec}, "{field_value}" in {field_name} can\'t be compiled: {msg}' .format(field_name=field_name, field_value=p, spec=spec, msg=e)) return compiled_patterns
TypeError
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/backend/jvm/tasks/unpack_jars.py/UnpackJars._compile_patterns
7,154
def len(obj): try: return _len(obj) except __HOLE__: try: # note: this is an internal undocumented API, # don't rely on it in your own programs return obj.__length_hint__() except AttributeError: raise TypeError
TypeError
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_iterlen.py/len
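The masked exception is the `TypeError` from calling `len()` on an object without `__len__`; the helper then falls back to the undocumented `__length_hint__` hook. Runnable sketch:

_len = len

def len_or_hint(obj):
    """len(obj), falling back to obj.__length_hint__() for iterators."""
    try:
        return _len(obj)
    except TypeError:
        try:
            return obj.__length_hint__()
        except AttributeError:
            raise TypeError('no len() and no __length_hint__')

print(len_or_hint([1, 2, 3]))        # 3
print(len_or_hint(iter([1, 2, 3])))  # 3, via list_iterator.__length_hint__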
7,155
def tag_model(cls, admin_cls=None, field_name='tags', sort_tags=False, select_field=False, auto_add_admin_field=True, admin_list_display=True): """ tag_model accepts a number of named parameters: admin_cls If set to a subclass of ModelAdmin, will insert the tag field into the list_display and list_filter fields. field_name Defaults to "tags", can be used to name your tag field differently. sort_tags Boolean, defaults to False. If set to True, a pre_save handler will be inserted to sort the tag field alphabetically. This is useful in case you want a canonical representation for a tag collection, as when yo're presenting a list of tag combinations (e.g. in an admin filter list). select_field If True, show a multi select instead of the standard CharField for tag entry. auto_add_admin_field If True, attempts to add the tag field to the admin class. """ try: from tagging.registry import register as tagging_register except __HOLE__: from tagging import register as tagging_register cls.add_to_class(field_name, ( TagSelectField if select_field else TagField )(field_name.capitalize(), blank=True)) # use another name for the tag descriptor # See http://code.google.com/p/django-tagging/issues/detail?id=95 for the # reason why try: tagging_register(cls, tag_descriptor_attr='tagging_' + field_name) except AlreadyRegistered: return if admin_cls: if admin_list_display: admin_cls.list_display.append(field_name) admin_cls.list_filter.append(field_name) if auto_add_admin_field and hasattr( admin_cls, 'add_extension_options'): admin_cls.add_extension_options(_('Tagging'), { 'fields': (field_name,) }) if sort_tags: pre_save.connect(pre_save_handler, sender=cls) # ------------------------------------------------------------------------
ImportError
dataset/ETHPy150Open feincms/feincms/feincms/contrib/tagging.py/tag_model
7,156
def _open(self): # load descriptive fields while 1: offset = self.fp.tell() tag, size = self.field() if not tag or tag == (8,10): break if size: tagdata = self.fp.read(size) else: tagdata = None if tag in self.info.keys(): if isinstance(self.info[tag], list): self.info[tag].append(tagdata) else: self.info[tag] = [self.info[tag], tagdata] else: self.info[tag] = tagdata # print tag, self.info[tag] # mode layers = ord(self.info[(3,60)][0]) component = ord(self.info[(3,60)][1]) if self.info.has_key((3,65)): id = ord(self.info[(3,65)][0])-1 else: id = 0 if layers == 1 and not component: self.mode = "L" elif layers == 3 and component: self.mode = "RGB"[id] elif layers == 4 and component: self.mode = "CMYK"[id] # size self.size = self.getint((3,20)), self.getint((3,30)) # compression try: compression = COMPRESSION[self.getint((3,120))] except __HOLE__: raise IOError, "Unknown IPTC image compression" # tile if tag == (8,10): if compression == "raw" and self._is_raw(offset, self.size): self.tile = [(compression, (offset, size + 5, -1), (0, 0, self.size[0], self.size[1]))] else: self.tile = [("iptc", (compression, offset), (0, 0, self.size[0], self.size[1]))]
KeyError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/site-packages/PIL/IptcImagePlugin.py/IptcImageFile._open
7,157
def getiptcinfo(im): import TiffImagePlugin, JpegImagePlugin import StringIO data = None if isinstance(im, IptcImageFile): # return info dictionary right away return im.info elif isinstance(im, JpegImagePlugin.JpegImageFile): # extract the IPTC/NAA resource try: app = im.app["APP13"] if app[:14] == "Photoshop 3.0\x00": app = app[14:] # parse the image resource block offset = 0 while app[offset:offset+4] == "8BIM": offset = offset + 4 # resource code code = JpegImagePlugin.i16(app, offset) offset = offset + 2 # resource name (usually empty) name_len = ord(app[offset]) name = app[offset+1:offset+1+name_len] offset = 1 + offset + name_len if offset & 1: offset = offset + 1 # resource data block size = JpegImagePlugin.i32(app, offset) offset = offset + 4 if code == 0x0404: # 0x0404 contains IPTC/NAA data data = app[offset:offset+size] break offset = offset + size if offset & 1: offset = offset + 1 except (__HOLE__, KeyError): pass elif isinstance(im, TiffImagePlugin.TiffImageFile): # get raw data from the IPTC/NAA tag (PhotoShop tags the data # as 4-byte integers, so we cannot use the get method...) try: type, data = im.tag.tagdata[TiffImagePlugin.IPTC_NAA_CHUNK] except (AttributeError, KeyError): pass if data is None: return None # no properties # create an IptcImagePlugin object without initializing it class FakeImage: pass im = FakeImage() im.__class__ = IptcImageFile # parse the IPTC information chunk im.info = {} im.fp = StringIO.StringIO(data) try: im._open() except (IndexError, KeyError): pass # expected failure return im.info
AttributeError
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/site-packages/PIL/IptcImagePlugin.py/getiptcinfo
7,158
def validate_input(self, validation_definition): """In this example we are using external validation to verify that the Github repository exists. If validate_input does not raise an Exception, the input is assumed to be valid. Otherwise it prints the exception as an error message when telling splunkd that the configuration is invalid. When using external validation, after splunkd calls the modular input with --scheme to get a scheme, it calls it again with --validate-arguments for each instance of the modular input in its configuration files, feeding XML on stdin to the modular input to do validation. It is called the same way whenever a modular input's configuration is edited. :param validation_definition: a ValidationDefinition object """ # Get the values of the parameters, and construct a URL for the Github API owner = validation_definition.parameters["owner"] repo_name = validation_definition.parameters["repo_name"] repo_url = "https://api.github.com/repos/%s/%s" % (owner, repo_name) # Read the response from the Github API, then parse the JSON data into an object response = urllib2.urlopen(repo_url).read() jsondata = json.loads(response) # If there is only 1 field in the jsondata object,some kind or error occurred # with the Github API. # Typically, this will happen with an invalid repository. if len(jsondata) == 1: raise ValueError("The Github repository was not found.") # If the API response seems normal, validate the fork count # If there's something wrong with getting fork_count, raise a ValueError try: fork_count = int(jsondata["forks_count"]) except __HOLE__ as ve: raise ValueError("Invalid fork count: %s", ve.message)
ValueError
dataset/ETHPy150Open splunk/splunk-sdk-python/examples/github_forks/github_forks.py/MyScript.validate_input
7,159
def generator_queue(generator, max_q_size=10, wait_time=0.05, nb_worker=1): '''Builds a threading queue out of a data generator. Used in `fit_generator`, `evaluate_generator`, `predict_generator`. ''' q = queue.Queue() _stop = threading.Event() def data_generator_task(): while not _stop.is_set(): try: if q.qsize() < max_q_size: try: generator_output = next(generator) except __HOLE__: continue q.put(generator_output) else: time.sleep(wait_time) except Exception: _stop.set() raise generator_threads = [threading.Thread(target=data_generator_task) for _ in range(nb_worker)] for thread in generator_threads: thread.daemon = True thread.start() return q, _stop
ValueError
dataset/ETHPy150Open fchollet/keras/keras/engine/training.py/generator_queue
7,160
def _fit_loop(self, f, ins, out_labels=[], batch_size=32, nb_epoch=100, verbose=1, callbacks=[], val_f=None, val_ins=None, shuffle=True, callback_metrics=[]): '''Abstract fit function for f(ins). Assume that f returns a list, labeled by out_labels. # Arguments f: Keras function returning a list of tensors ins: list of tensors to be fed to `f` out_labels: list of strings, display names of the outputs of `f` batch_size: integer batch size nb_epoch: number of times to iterate over the data verbose: verbosity mode, 0, 1 or 2 callbacks: list of callbacks to be called during training val_f: Keras function to call for validation val_ins: list of tensors to be fed to `val_f` shuffle: whether to shuffle the data at the beginning of each epoch callback_metrics: list of strings, the display names of the metrics passed to the callbacks. They should be the concatenation of list the display names of the outputs of `f` and the list of display names of the outputs of `f_val`. # Returns `History` object. ''' do_validation = False if val_f and val_ins: do_validation = True if verbose: print('Train on %d samples, validate on %d samples' % (len(ins[0]), len(val_ins[0]))) nb_train_sample = len(ins[0]) index_array = np.arange(nb_train_sample) self.history = cbks.History() callbacks = [cbks.BaseLogger()] + callbacks + [self.history] if verbose: callbacks += [cbks.ProgbarLogger()] callbacks = cbks.CallbackList(callbacks) # it's possible to callback a different model than self # (used by Sequential models) if hasattr(self, 'callback_model') and self.callback_model: callback_model = self.callback_model else: callback_model = self callbacks._set_model(callback_model) callbacks._set_params({ 'batch_size': batch_size, 'nb_epoch': nb_epoch, 'nb_sample': nb_train_sample, 'verbose': verbose, 'do_validation': do_validation, 'metrics': callback_metrics, }) callbacks.on_train_begin() callback_model.stop_training = False self.validation_data = val_ins for epoch in range(nb_epoch): callbacks.on_epoch_begin(epoch) if shuffle == 'batch': index_array = batch_shuffle(index_array, batch_size) elif shuffle: np.random.shuffle(index_array) batches = make_batches(nb_train_sample, batch_size) for batch_index, (batch_start, batch_end) in enumerate(batches): batch_ids = index_array[batch_start:batch_end] try: if type(ins[-1]) is float: # do not slice the training phase flag ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]] else: ins_batch = slice_X(ins, batch_ids) except __HOLE__: raise Exception('TypeError while preparing batch. ' 'If using HDF5 input data, ' 'pass shuffle="batch".') batch_logs = {} batch_logs['batch'] = batch_index batch_logs['size'] = len(batch_ids) callbacks.on_batch_begin(batch_index, batch_logs) outs = f(ins_batch) if type(outs) != list: outs = [outs] for l, o in zip(out_labels, outs): batch_logs[l] = o callbacks.on_batch_end(batch_index, batch_logs) epoch_logs = {} if batch_index == len(batches) - 1: # last batch # validation if do_validation: # replace with self._evaluate val_outs = self._test_loop(val_f, val_ins, batch_size=batch_size, verbose=0) if type(val_outs) != list: val_outs = [val_outs] # same labels assumed for l, o in zip(out_labels, val_outs): epoch_logs['val_' + l] = o callbacks.on_epoch_end(epoch, epoch_logs) if callback_model.stop_training: break callbacks.on_train_end() return self.history
TypeError
dataset/ETHPy150Open fchollet/keras/keras/engine/training.py/Model._fit_loop
7,161
def fetchone(self): """ Fetch the next row of a query result set, returning a single sequence, or None when no more data is available. Alias for ``next()``. """ try: return self.next() except __HOLE__: return None
StopIteration
dataset/ETHPy150Open crate/crate-python/src/crate/client/cursor.py/Cursor.fetchone
7,162
def fetchmany(self, count=None): """ Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a list of tuples). An empty sequence is returned when no more rows are available. """ if count is None: count = self.arraysize if count == 0: return self.fetchall() result = [] for i in range(count): try: result.append(self.next()) except __HOLE__: pass return result
StopIteration
dataset/ETHPy150Open crate/crate-python/src/crate/client/cursor.py/Cursor.fetchmany
7,163
def fetchall(self): """ Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples). Note that the cursor's arraysize attribute can affect the performance of this operation. """ result = [] iterate = True while iterate: try: result.append(self.next()) except __HOLE__: iterate = False return result
StopIteration
dataset/ETHPy150Open crate/crate-python/src/crate/client/cursor.py/Cursor.fetchall
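All three fetch methods above lean on the masked `StopIteration` from `self.next()` to detect result-set exhaustion. The pattern in miniature, with a plain iterator standing in for a cursor:

class MiniCursor(object):
    def __init__(self, rows):
        self._it = iter(rows)

    def fetchone(self):
        try:
            return next(self._it)
        except StopIteration:
            return None  # DB-API: None when no more rows

    def fetchmany(self, count):
        result = []
        for _ in range(count):
            try:
                result.append(next(self._it))
            except StopIteration:
                break
        return result

c = MiniCursor([(1,), (2,), (3,)])
print(c.fetchmany(2))  # [(1,), (2,)]
print(c.fetchone())    # (3,)
print(c.fetchone())    # None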
7,164
def __init__(self, *args, **kwds): """ A dictionary which maintains the insertion order of keys. """ if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) try: self.__end except __HOLE__: self.clear() self.update(*args, **kwds)
AttributeError
dataset/ETHPy150Open pycassa/pycassa/pycassa/util.py/OrderedDict.__init__
7,165
def run(self, dataset_slug, upload_id, external_id_field_index=None, *args, **kwargs): """ Execute import. """ from panda.models import Dataset, DataUpload log = logging.getLogger(self.name) log.info('Beginning import, dataset_slug: %s' % dataset_slug) try: dataset = Dataset.objects.get(slug=dataset_slug) except Dataset.DoesNotExist: log.warning('Import failed due to Dataset being deleted, dataset_slug: %s' % dataset_slug) return upload = DataUpload.objects.get(id=upload_id) task_status = dataset.current_task task_status.begin(ugettext('Preparing to import')) line_count = self._count_lines(upload.get_path()) if self.is_aborted(): task_status.abort('Aborted during preperation') log.warning('Import aborted, dataset_slug: %s' % dataset_slug) return f = open(upload.get_path(), 'r') reader = CSVKitReader(f, encoding=upload.encoding, **upload.dialect_as_parameters()) reader.next() add_buffer = [] data_typer = DataTyper(dataset.column_schema) throttle = config_value('PERF', 'TASK_THROTTLE') i = 0 while True: # The row number which is about to be read, for error handling and indexing i += 1 try: row = reader.next() except StopIteration: i -= 1 break except __HOLE__: raise DataImportError(ugettext('This CSV file contains characters that are not %(encoding)s encoded in or after row %(row)i. You need to re-upload this file and input the correct encoding in order to import data from this file.') % { 'encoding': upload.encoding, 'row': i }) external_id = None if external_id_field_index is not None: external_id = row[external_id_field_index] data = utils.solr.make_data_row(dataset, row, data_upload=upload, external_id=external_id) data = data_typer(data, row) add_buffer.append(data) if i % SOLR_ADD_BUFFER_SIZE == 0: solr.add(settings.SOLR_DATA_CORE, add_buffer) add_buffer = [] task_status.update(ugettext('%.0f%% complete (estimated)') % floor(float(i) / float(line_count) * 100)) if self.is_aborted(): task_status.abort(ugettext('Aborted after importing %.0f%% (estimated)') % floor(float(i) / float(line_count) * 100)) log.warning('Import aborted, dataset_slug: %s' % dataset_slug) return time.sleep(throttle) if add_buffer: solr.add(settings.SOLR_DATA_CORE, add_buffer) add_buffer = [] solr.commit(settings.SOLR_DATA_CORE) f.close() task_status.update('100% complete') # Refresh dataset from database so there is no chance of crushing changes made since the task started try: dataset = Dataset.objects.get(slug=dataset_slug) except Dataset.DoesNotExist: log.warning('Import could not be completed due to Dataset being deleted, dataset_slug: %s' % dataset_slug) return if not dataset.row_count: dataset.row_count = i else: dataset.row_count += i dataset.column_schema = data_typer.schema dataset.save() # Refres upload = DataUpload.objects.get(id=upload_id) upload.imported = True upload.save() log.info('Finished import, dataset_slug: %s' % dataset_slug) return data_typer
UnicodeDecodeError
dataset/ETHPy150Open pandaproject/panda/panda/tasks/import_csv.py/ImportCSVTask.run
7,166
def __init__(self, user, *args, **kwargs): "Sets choices and initial value" super(SettingsForm, self).__init__(*args, **kwargs) self.fields['default_perspective'].label = _("Default Perspective") self.fields['language'].label = _("Language") self.fields['default_timezone'].label = _("Time Zone") self.fields['email_notifications'].label = _("E-mail Notifications") self.user = user self.fields['default_perspective'].queryset = Object.filter_permitted( user, Perspective.objects) try: conf = ModuleSetting.get_for_module( 'treeio.core', 'default_perspective', user=self.user)[0] default_perspective = Perspective.objects.get(pk=long(conf.value)) self.fields['default_perspective'].initial = default_perspective.id except: pass self.fields['default_timezone'].choices = getattr( settings, 'HARDTREE_SERVER_TIMEZONE') timezone = settings.HARDTREE_SERVER_DEFAULT_TIMEZONE try: conf = ModuleSetting.get('default_timezone', user=user)[0] timezone = conf.value except: pass self.fields['default_timezone'].initial = timezone self.fields['language'].choices = getattr( settings, 'HARDTREE_LANGUAGES', [('en', 'English')]) language = getattr(settings, 'HARDTREE_LANGUAGES_DEFAULT', '') try: conf = ModuleSetting.get('language', user=user)[0] language = conf.value except __HOLE__: pass self.fields['language'].initial = language try: conf = ModuleSetting.get('email_notifications', user=user)[0] self.fields['email_notifications'].initial = conf.value except: self.fields[ 'email_notifications'].initial = settings.HARDTREE_ALLOW_EMAIL_NOTIFICATIONS perspective = user.get_perspective() modules = perspective.modules.filter(display=True).order_by('title') if not modules: modules = Module.objects.filter(display=True).order_by('title') self.fields['notifications_for_modules'].choices = [ (module.pk, module.title) for module in modules] try: modules = NotificationSetting.objects.get( owner=self.user).modules.all() self.fields['notifications_for_modules'].initial = [ m.pk for m in modules] except (NotificationSetting.DoesNotExist, NotificationSetting.MultipleObjectsReturned): pass
IndexError
dataset/ETHPy150Open treeio/treeio/treeio/account/forms.py/SettingsForm.__init__
7,167
def parse_categories(self, headers): kind = action = None mixins = collections.Counter() schemes = collections.defaultdict(list) try: categories = headers["Category"] except KeyError: raise exception.OCCIInvalidSchema("No categories") for ctg in _quoted_split(categories): ll = _quoted_split(ctg, "; ") d = {"term": ll[0]} # assumes 1st element => term's value try: d.update(dict([_split_unquote(i) for i in ll[1:]])) except __HOLE__: raise exception.OCCIInvalidSchema("Unable to parse category") ctg_class = d.get("class", None) ctg_type = '%(scheme)s%(term)s' % d if ctg_class == "kind": if kind is not None: raise exception.OCCIInvalidSchema("Duplicated Kind") kind = ctg_type elif ctg_class == "action": if action is not None: raise exception.OCCIInvalidSchema("Duplicated action") action = ctg_type elif ctg_class == "mixin": mixins[ctg_type] += 1 schemes[d["scheme"]].append(d["term"]) if action and kind: raise exception.OCCIInvalidSchema("Action and kind together?") return { "category": kind or action, "mixins": mixins, "schemes": schemes, }
ValueError
dataset/ETHPy150Open openstack/ooi/ooi/wsgi/parsers.py/TextParser.parse_categories
7,168
def parse_attributes(self, headers): attrs = {} try: header_attrs = headers["X-OCCI-Attribute"] for attr in _quoted_split(header_attrs): l = _split_unquote(attr) attrs[l[0].strip()] = l[1] except __HOLE__: pass return attrs
KeyError
dataset/ETHPy150Open openstack/ooi/ooi/wsgi/parsers.py/TextParser.parse_attributes
7,169
def parse_links(self, headers): links = {} try: header_links = headers["Link"] except KeyError: return links for link in _quoted_split(header_links): ll = _quoted_split(link, "; ") # remove the "<" and ">" if ll[0][1] != "<" and ll[0][-1] != ">": raise exception.OCCIInvalidSchema("Unable to parse link") link_dest = ll[0][1:-1] try: d = dict([_split_unquote(i) for i in ll[1:]]) except __HOLE__: raise exception.OCCIInvalidSchema("Unable to parse link") links[link_dest] = d return links
ValueError
dataset/ETHPy150Open openstack/ooi/ooi/wsgi/parsers.py/TextParser.parse_links
7,170
def __call__(self, request, *args, **kwargs): stage = { '1': 'preview', '2': 'post', }.get(request.POST.get(self.unused_name('stage')), 'preview') self.parse_params(request, *args, **kwargs) try: method = getattr(self, stage + '_' + request.method.lower()) except __HOLE__: raise Http404 return method(request)
AttributeError
dataset/ETHPy150Open django/django-formtools/formtools/preview.py/FormPreview.__call__
7,171
def unused_name(self, name): """ Given a first-choice name, adds an underscore to the name until it reaches a name that isn't claimed by any field in the form. This is calculated rather than being hard-coded so that no field names are off-limits for use in the form. """ while 1: try: self.form.base_fields[name] except __HOLE__: break # This field name isn't being used by the form. name += '_' return name
KeyError
dataset/ETHPy150Open django/django-formtools/formtools/preview.py/FormPreview.unused_name
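`unused_name` treats the masked `KeyError` as the success case: the first name missing from the form's fields wins. The same probe works on any mapping:

def unused_name(mapping, name):
    """Append underscores to `name` until it is not a key of `mapping`."""
    while True:
        try:
            mapping[name]
        except KeyError:
            return name  # this name is free
        name += '_'

fields = {'stage': 1, 'stage_': 2}
print(unused_name(fields, 'stage'))  # stage__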
7,172
def datetime(self): """ Returns a datetime object at the end of the selected (or current) date. This is needed to properly filter transactions that occur during the day we are filtering for """ # note: empty string is parsed as today's date date = self.config.get('date') or '' try: date = parser.parse(date).date() except __HOLE__: date = datetime.utcnow().date() return datetime(date.year, date.month, date.day, 23, 59, 59)
ValueError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/reports/commtrack/data_sources.py/SimplifiedInventoryDataSource.datetime
7,173
def get(self, key, failobj=None): """Get dictionary item, defaulting to another value if it does not exist. Args: key: Key of item to get. Key is case insensitive, so "d['Key']" is the same as "d['key']". failobj: Value to return if key not in dictionary. """ try: cased_key = self.caseless_keys[key.lower()] except __HOLE__: return failobj return self.data[cased_key]
KeyError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/urlfetch.py/_CaselessDict.get
7,174
def update(self, dict=None, **kwargs): """Update dictionary using values from another dictionary and keywords. Args: dict: Dictionary to update from. kwargs: Keyword arguments to update from. """ if dict: try: keys = dict.keys() except __HOLE__: for k, v in dict: self[k] = v else: for k in keys: self[k] = dict[k] if kwargs: self.update(kwargs)
AttributeError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/api/urlfetch.py/_CaselessDict.update
7,175
def _indicator_diff(self, calc_name, emitter_names, other_doc): indicators = [] for emitter_name in emitter_names: class NormalizedEmittedValue(object): """Normalize the values to the dictionary form to allow comparison""" def __init__(self, value): if isinstance(value, dict): self.value = value elif isinstance(value, list): self.value = dict(date=value[0], value=value[1], group_by=None) if self.value['date'] and not isinstance(self.value['date'], datetime.date): self.value['date'] = datetime.datetime.strptime(self.value['date'], '%Y-%m-%d').date() def __key(self): gb = self.value['group_by'] return self.value['date'], self.value['value'], tuple(gb) if gb else None def __eq__(x, y): return x.__key() == y.__key() def __hash__(self): return hash(self.__key()) def __repr__(self): return str(self.value) if other_doc: self_values = set([NormalizedEmittedValue(v) for v in self[calc_name][emitter_name]]) try: _vals = other_doc[calc_name][emitter_name] except __HOLE__: _vals = () other_values = set([NormalizedEmittedValue(v) for v in _vals]) values_diff = [v for v in list(self_values - other_values)] else: values_diff = [NormalizedEmittedValue(v) for v in self[calc_name][emitter_name]] values = [v.value for v in values_diff] indicators.append(self._indicator_meta(calc_name, emitter_name, values=values)) return indicators
KeyError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/ex-submodules/fluff/indicators.py/IndicatorDocument._indicator_diff
7,176
def save_to_sql(self, diff, engine): if not diff: # empty indicator document return default_key = (self.id,) + tuple(diff['group_values']) rows = {} def set_row_val(rowkey, col_name, col_value): row = rows.setdefault(rowkey, {}) row[col_name] = col_value flat_keys = None try: flat_keys = self._flat_fields.keys() except __HOLE__: pass for change in diff['indicator_changes']: name = '{0}_{1}'.format(change['calculator'], change['emitter']) for value_dict in change['values']: value = value_dict['value'] group_by = value_dict['group_by'] date = value_dict['date'] if group_by: key = (self.id,) + tuple(group_by) + (date,) else: key = default_key + (date,) set_row_val(key, name, value) for flat_key in flat_keys: set_row_val(key, flat_key, self[flat_key]) types = self.get_group_types() types['date'] = 'date' names = ['doc_id'] + self.get_group_names() + ['date'] connection = engine.connect() try: # delete all existing rows for this doc to ensure we aren't left with stale data delete = self._table.delete(self._table.c.doc_id == self.id) connection.execute(delete) for key, columns in rows.items(): key_columns = dict(zip(names, key)) for name, value in key_columns.items(): if value is None: key_columns[name] = default_null_value_placeholder(types[name]) all_columns = dict(key_columns.items() + columns.items()) try: insert = self._table.insert().values(**all_columns) connection.execute(insert) except sqlalchemy.exc.IntegrityError: if columns: update = self._table.update().values(**columns) for k, v in key_columns.items(): update = update.where(self._table.c[k] == v) connection.execute(update) finally: connection.close()
AttributeError
dataset/ETHPy150Open dimagi/commcare-hq/corehq/ex-submodules/fluff/indicators.py/IndicatorDocument.save_to_sql
7,177
def __contains__(self, key): try: o = self.data[key]() except __HOLE__: return False return o is not None
KeyError
dataset/ETHPy150Open Southpaw-TACTIC/TACTIC/src/context/client/tactic-api-python-4.0.api04/Lib/weakref.py/WeakValueDictionary.__contains__
7,178
def has_key(self, key): try: o = self.data[key]() except __HOLE__: return False return o is not None
KeyError
dataset/ETHPy150Open Southpaw-TACTIC/TACTIC/src/context/client/tactic-api-python-4.0.api04/Lib/weakref.py/WeakValueDictionary.has_key
7,179
def get(self, key, default=None): try: wr = self.data[key] except __HOLE__: return default else: o = wr() if o is None: # This should only happen return default else: return o
KeyError
dataset/ETHPy150Open Southpaw-TACTIC/TACTIC/src/context/client/tactic-api-python-4.0.api04/Lib/weakref.py/WeakValueDictionary.get
7,180
def pop(self, key, *args): try: o = self.data.pop(key)() except __HOLE__: if args: return args[0] raise if o is None: raise KeyError, key else: return o
KeyError
dataset/ETHPy150Open Southpaw-TACTIC/TACTIC/src/context/client/tactic-api-python-4.0.api04/Lib/weakref.py/WeakValueDictionary.pop
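In `pop`, the masked `KeyError` distinguishes a missing key (return the default or re-raise) from a collected referent (explicit `KeyError`). A sketch of the dereference, with a hypothetical `pop_ref` helper:

import weakref

class Thing(object):
    pass

data = {}
obj = Thing()                 # keep a strong reference so the weakref stays live
data['k'] = weakref.ref(obj)

def pop_ref(d, key, *default):
    try:
        o = d.pop(key)()      # KeyError here means the key is absent
    except KeyError:
        if default:
            return default[0]
        raise
    if o is None:             # referent was garbage-collected
        raise KeyError(key)
    return o

print(pop_ref(data, 'k') is obj)   # True
print(pop_ref(data, 'k', 'gone'))  # 'gone' (key already popped)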
7,181
def setdefault(self, key, default=None): try: wr = self.data[key] except __HOLE__: self.data[key] = KeyedRef(default, self._remove, key) return default else: return wr()
KeyError
dataset/ETHPy150Open Southpaw-TACTIC/TACTIC/src/context/client/tactic-api-python-4.0.api04/Lib/weakref.py/WeakValueDictionary.setdefault
7,182
def has_key(self, key): try: wr = ref(key) except __HOLE__: return 0 return wr in self.data
TypeError
dataset/ETHPy150Open Southpaw-TACTIC/TACTIC/src/context/client/tactic-api-python-4.0.api04/Lib/weakref.py/WeakKeyDictionary.has_key
7,183
def __contains__(self, key): try: wr = ref(key) except __HOLE__: return 0 return wr in self.data
TypeError
dataset/ETHPy150Open Southpaw-TACTIC/TACTIC/src/context/client/tactic-api-python-4.0.api04/Lib/weakref.py/WeakKeyDictionary.__contains__
7,184
def _get_memory(pid, timestamps=False, include_children=False): # .. only for current process and only on unix.. if pid == -1: pid = os.getpid() # .. cross-platform but but requires psutil .. if has_psutil: process = psutil.Process(pid) try: # avoid useing get_memory_info since it does not exists # in psutil > 2.0 and accessing it will cause exception. meminfo_attr = 'memory_info' if hasattr(process, 'memory_info') else 'get_memory_info' mem = getattr(process, meminfo_attr)()[0] / _TWO_20 if include_children: try: for p in process.get_children(recursive=True): mem += getattr(p, meminfo_attr)()[0] / _TWO_20 except __HOLE__: # fix for newer psutil for p in process.children(recursive=True): mem += getattr(p, meminfo_attr)()[0] / _TWO_20 if timestamps: return (mem, time.time()) else: return mem except psutil.AccessDenied: pass # continue and try to get this from ps # .. scary stuff .. if os.name == 'posix': if include_children: raise NotImplementedError('The psutil module is required when to' ' monitor memory usage of children' ' processes') warnings.warn("psutil module not found. memory_profiler will be slow") # .. # .. memory usage in MiB .. # .. this should work on both Mac and Linux .. # .. subprocess.check_output appeared in 2.7, using Popen .. # .. for backwards compatibility .. out = subprocess.Popen(['ps', 'v', '-p', str(pid)], stdout=subprocess.PIPE ).communicate()[0].split(b'\n') try: vsz_index = out[0].split().index(b'RSS') mem = float(out[1].split()[vsz_index]) / 1024 if timestamps: return(mem, time.time()) else: return mem except: if timestamps: return (-1, time.time()) else: return -1 else: raise NotImplementedError('The psutil module is required for non-unix ' 'platforms')
AttributeError
dataset/ETHPy150Open fabianp/memory_profiler/memory_profiler.py/_get_memory
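The masked `AttributeError` bridges a psutil rename: `Process.get_children` became `Process.children`. The same feature probe, as a sketch that assumes psutil is installed:

import psutil

proc = psutil.Process()  # current process

# Older psutil spelled these get_memory_info / get_children; probe for the
# available name and fall back on AttributeError.
meminfo_attr = 'memory_info' if hasattr(proc, 'memory_info') else 'get_memory_info'
mem = getattr(proc, meminfo_attr)()[0] / float(2 ** 20)  # RSS in MiB

try:
    children = proc.get_children(recursive=True)
except AttributeError:  # newer psutil removed get_children
    children = proc.children(recursive=True)

print('%.1f MiB, %d children' % (mem, len(children)))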
7,185
def add_function(self, func): """ Record line profiling information for the given Python function. """ try: # func_code does not exist in Python3 code = func.__code__ except __HOLE__: warnings.warn("Could not extract a code object for the object %r" % func) else: self.code_map.add(code)
AttributeError
dataset/ETHPy150Open fabianp/memory_profiler/memory_profiler.py/LineProfiler.add_function
7,186
@line_cell_magic
def mprun(self, parameter_s='', cell=None):
    """ Execute a statement under the line-by-line memory profiler from the
    memory_profiler module.

    Usage, in line mode:
      %mprun -f func1 -f func2 <statement>

    Usage, in cell mode:
      %%mprun -f func1 -f func2 [statement]
      code...
      code...

    In cell mode, the additional code lines are appended to the (possibly
    empty) statement in the first line. Cell mode allows you to easily
    profile multiline blocks without having to put them in a separate
    function.

    The given statement (which doesn't require quote marks) is run via the
    LineProfiler. Profiling is enabled for the functions specified by the -f
    options. The statistics will be shown side-by-side with the code through
    the pager once the statement has completed.

    Options:

    -f <function>: LineProfiler only profiles functions and methods it is
    told to profile. This option tells the profiler about these functions.
    Multiple -f options may be used. The argument may be any expression that
    gives a Python function or method object. However, one must be careful
    to avoid spaces that may confuse the option parser. Additionally,
    functions defined in the interpreter at the In[] prompt or via %run
    currently cannot be displayed. Write these functions out to a separate
    file and import them.

    One or more -f options are required to get any useful results.

    -T <filename>: dump the text-formatted statistics with the code
    side-by-side out to a text file.

    -r: return the LineProfiler object after it has completed profiling.

    -c: If present, add the memory usage of any children process to the report.
    """
    from io import StringIO
    from memory_profiler import show_results, LineProfiler

    # Local imports to avoid hard dependency.
    from distutils.version import LooseVersion
    import IPython
    ipython_version = LooseVersion(IPython.__version__)
    if ipython_version < '0.11':
        from IPython.genutils import page
        from IPython.ipstruct import Struct
        from IPython.ipapi import UsageError
    else:
        from IPython.core.page import page
        from IPython.utils.ipstruct import Struct
        from IPython.core.error import UsageError

    # Escape quote markers.
    opts_def = Struct(T=[''], f=[])
    parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
    opts, arg_str = self.parse_options(parameter_s, 'rf:T:c', list_all=True)
    opts.merge(opts_def)
    global_ns = self.shell.user_global_ns
    local_ns = self.shell.user_ns

    if cell is not None:
        arg_str += '\n' + cell

    # Get the requested functions.
    funcs = []
    for name in opts.f:
        try:
            funcs.append(eval(name, global_ns, local_ns))
        except Exception as e:
            raise UsageError('Could not find function %r.\n%s: %s' % (name, e.__class__.__name__, e))

    include_children = 'c' in opts
    profile = LineProfiler(include_children=include_children)
    for func in funcs:
        profile(func)

    # Add the profiler to the builtins for @profile.
    if 'profile' in builtins.__dict__:
        had_profile = True
        old_profile = builtins.__dict__['profile']
    else:
        had_profile = False
        old_profile = None
    builtins.__dict__['profile'] = profile

    try:
        profile.runctx(arg_str, global_ns, local_ns)
        message = ''
    except __HOLE__:
        message = "*** SystemExit exception caught in code being profiled."
    except KeyboardInterrupt:
        message = ("*** KeyboardInterrupt exception caught in code being "
                   "profiled.")
    finally:
        if had_profile:
            builtins.__dict__['profile'] = old_profile

    # Trap text output.
    stdout_trap = StringIO()
    show_results(profile, stdout_trap)
    output = stdout_trap.getvalue()
    output = output.rstrip()

    if ipython_version < '0.11':
        page(output, screen_lines=self.shell.rc.screen_length)
    else:
        page(output)
    print(message,)

    text_file = opts.T[0]
    if text_file:
        with open(text_file, 'w') as pfile:
            pfile.write(output)
        print('\n*** Profile printout saved to text file %s. %s' % (text_file, message))

    return_value = None
    if 'r' in opts:
        return_value = profile
    return return_value

# a timeit-style %memit magic for IPython
SystemExit
dataset/ETHPy150Open fabianp/memory_profiler/memory_profiler.py/MemoryProfilerMagics.mprun
7,187
@classmethod def register_magics(cls, ip): from distutils.version import LooseVersion import IPython ipython_version = LooseVersion(IPython.__version__) if ipython_version < '0.13': try: _register_magic = ip.define_magic except __HOLE__: # ipython 0.10 _register_magic = ip.expose_magic _register_magic('mprun', cls.mprun.__func__) _register_magic('memit', cls.memit.__func__) else: ip.register_magics(cls) # commenting out due to failures with some versions of IPython # see https://github.com/fabianp/memory_profiler/issues/106 # # Ensuring old interface of magics expose for IPython 0.10 # magic_mprun = MemoryProfilerMagics().mprun.__func__ # magic_memit = MemoryProfilerMagics().memit.__func__
AttributeError
dataset/ETHPy150Open fabianp/memory_profiler/memory_profiler.py/MemoryProfilerMagics.register_magics
7,188
def get_version(self, filename=None, version=-1, **kwargs): """Get a file from GridFS by ``"filename"`` or metadata fields. Returns a version of the file in GridFS whose filename matches `filename` and whose metadata fields match the supplied keyword arguments, as an instance of :class:`~gridfs.grid_file.GridOut`. Version numbering is a convenience atop the GridFS API provided by MongoDB. If more than one file matches the query (either by `filename` alone, by metadata fields, or by a combination of both), then version ``-1`` will be the most recently uploaded matching file, ``-2`` the second most recently uploaded, etc. Version ``0`` will be the first version uploaded, ``1`` the second version, etc. So if three versions have been uploaded, then version ``0`` is the same as version ``-3``, version ``1`` is the same as version ``-2``, and version ``2`` is the same as version ``-1``. Raises :class:`~gridfs.errors.NoFile` if no such version of that file exists. :Parameters: - `filename`: ``"filename"`` of the file to get, or `None` - `version` (optional): version of the file to get (defaults to -1, the most recent version uploaded) - `**kwargs` (optional): find files by custom metadata. .. versionchanged:: 3.1 ``get_version`` no longer ensures indexes. """ query = kwargs if filename is not None: query["filename"] = filename cursor = self.__files.find(query) if version < 0: skip = abs(version) - 1 cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) else: cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING) try: grid_file = next(cursor) return GridOut(self.__collection, file_document=grid_file) except __HOLE__: raise NoFile("no version %d for filename %r" % (version, filename))
StopIteration
dataset/ETHPy150Open mongodb/mongo-python-driver/gridfs/__init__.py/GridFS.get_version
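`get_version` converts the masked `StopIteration` from `next(cursor)` into the domain-level `NoFile`. Reduced to its core, with a stand-in exception class:

class NoFile(Exception):
    """Stand-in for gridfs.errors.NoFile."""

def first_or_nofile(cursor, version, filename):
    try:
        return next(iter(cursor))
    except StopIteration:
        raise NoFile("no version %d for filename %r" % (version, filename))

print(first_or_nofile([{'_id': 1}], -1, 'test_file'))  # {'_id': 1}
try:
    first_or_nofile([], -1, 'missing')
except NoFile as e:
    print(e)  # no version -1 for filename 'missing'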
7,189
def open_download_stream_by_name(self, filename, revision=-1): """Opens a Stream from which the application can read the contents of `filename` and optional `revision`. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) grid_out = fs.open_download_stream_by_name("test_file") contents = grid_out.read() Returns an instance of :class:`~gridfs.grid_file.GridOut`. Raises :exc:`~gridfs.errors.NoFile` if no such version of that file exists. Raises :exc:`~ValueError` filename is not a string. :Parameters: - `filename`: The name of the file to read from. - `revision` (optional): Which revision (documents with the same filename and different uploadDate) of the file to retrieve. Defaults to -1 (the most recent revision). :Note: Revision numbers are defined as follows: 0 = the original stored file 1 = the first revision 2 = the second revision etc... -2 = the second most recent revision -1 = the most recent revision """ validate_string("filename", filename) query = {"filename": filename} cursor = self._files.find(query) if revision < 0: skip = abs(revision) - 1 cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING) else: cursor.limit(-1).skip(revision).sort("uploadDate", ASCENDING) try: grid_file = next(cursor) return GridOut(self._collection, file_document=grid_file) except __HOLE__: raise NoFile( "no version %d for filename %r" % (revision, filename))
StopIteration
dataset/ETHPy150Open mongodb/mongo-python-driver/gridfs/__init__.py/GridFSBucket.open_download_stream_by_name
7,190
def init(**kwargs): # VCR needs access ot the parent shell for playback global shell try: shell = kwargs['shell'] except __HOLE__: raise RuntimeError('VCR: Unable to initialize', 'RadSSH shell not accessible')
KeyError
dataset/ETHPy150Open radssh/radssh/radssh/core_plugins/vcr.py/init
7,191
def frontendediting_request_processor(page, request): """ Sets the frontend editing state in the cookie depending on the ``frontend_editing`` GET parameter and the user's permissions. """ if 'frontend_editing' not in request.GET: return response = HttpResponseRedirect(request.path) if request.user.has_module_perms('page'): if 'frontend_editing' in request.GET: try: enable_fe = int(request.GET['frontend_editing']) > 0 except __HOLE__: enable_fe = False if enable_fe: response.set_cookie(str('frontend_editing'), enable_fe) clear_cache() else: response.delete_cookie(str('frontend_editing')) clear_cache() else: response.delete_cookie(str('frontend_editing')) # Redirect to cleanup URLs return response
ValueError
dataset/ETHPy150Open django-leonardo/django-leonardo/leonardo/module/web/processors/edit.py/frontendediting_request_processor
7,192
def setUp(self): self.check_for_psutils() try: from pikos.cymonitors.line_memory_monitor import ( LineMemoryMonitor) except __HOLE__: self.skipTest('Cython LineMemoryMonitor is not available') from pikos.cymonitors.line_memory_monitor import LineMemoryMonitor self.maxDiff = None self.helper = MonitoringHelper() self.filename = self.helper.filename self.recorder = ListRecorder( filter_=OnValue('filename', self.filename)) self.monitor = LineMemoryMonitor(self.recorder) self.helper.monitor = self.monitor
ImportError
dataset/ETHPy150Open enthought/pikos/pikos/tests/test_cline_memory_monitor.py/TestLineMemoryMonitor.setUp
7,193
def test_issue2(self): """ Test for issue #2. """ monitor = self.monitor FOO = """ def foo(): a = [] for i in range(20): a.append(i+sum(a)) foo() """ @monitor.attach def boo(): code = compile(FOO, 'foo', 'exec') exec code in globals(), {} try: boo() except __HOLE__: msg = ("Issue #2 -- line monitor fails when exec is used" " on code compiled from a string -- exists.") self.fail(msg)
TypeError
dataset/ETHPy150Open enthought/pikos/pikos/tests/test_cline_memory_monitor.py/TestLineMemoryMonitor.test_issue2
7,194
def check_for_psutils(self): try: import psutil # noqa except __HOLE__: self.skipTest('Could not import psutils, skipping test.')
ImportError
dataset/ETHPy150Open enthought/pikos/pikos/tests/test_cline_memory_monitor.py/TestLineMemoryMonitor.check_for_psutils
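Both pikos test helpers use the masked `ImportError` to skip, not fail, when an optional dependency is absent. The standard unittest idiom:

import unittest

class OptionalDependencyTest(unittest.TestCase):
    def setUp(self):
        try:
            import psutil  # noqa -- availability probe only
        except ImportError:
            self.skipTest('psutil not installed, skipping test')

    def test_something(self):
        self.assertTrue(True)

if __name__ == '__main__':
    unittest.main()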
7,195
def try_save(self, *args, **kw): """Try to save the specified Model instance `obj`. Return `True` on success, `False` if this instance wasn't saved and should be deferred. """ obj = self.object try: """ """ m = getattr(obj, 'before_dumpy_save', None) if m is not None: m() if not self.deserializer.quick: try: obj.full_clean() except __HOLE__ as e: raise # Exception("{0} : {1}".format(obj2str(obj), e)) obj.save(*args, **kw) logger.debug("%s has been saved" % obj2str(obj)) self.deserializer.register_success() return True #~ except ValidationError,e: #~ except ObjectDoesNotExist,e: #~ except (ValidationError,ObjectDoesNotExist), e: #~ except (ValidationError,ObjectDoesNotExist,IntegrityError), e: except Exception as e: if True: if not settings.SITE.loading_from_dump: # hand-written fixtures are expected to yield in savable # order logger.warning("Failed to save %s:" % obj2str(obj)) raise deps = [f.rel.model for f in obj._meta.fields if f.rel is not None] if not deps: logger.exception(e) raise Exception( "Failed to save independent %s." % obj2str(obj)) self.deserializer.register_failure(self, e) return False #~ except Exception,e: #~ logger.exception(e) #~ raise Exception("Failed to save %s. Abandoned." % obj2str(obj))
ValidationError
dataset/ETHPy150Open lsaffre/lino/lino/utils/dpy.py/FakeDeserializedObject.try_save
7,196
@classmethod def _config_parser(cls, repo, parent_commit, read_only): """:return: Config Parser constrained to our submodule in read or write mode :raise IOError: If the .gitmodules file cannot be found, either locally or in the repository at the given parent commit. Otherwise the exception would be delayed until the first access of the config parser""" parent_matches_head = True if parent_commit is not None: try: parent_matches_head = repo.head.commit == parent_commit except __HOLE__: # We are most likely in an empty repository, so the HEAD doesn't point to a valid ref pass # end hanlde parent_commit if not repo.bare and parent_matches_head: fp_module = os.path.join(repo.working_tree_dir, cls.k_modules_file) else: assert parent_commit is not None, "need valid parent_commit in bare repositories" try: fp_module = cls._sio_modules(parent_commit) except KeyError: raise IOError("Could not find %s file in the tree of parent commit %s" % (cls.k_modules_file, parent_commit)) # END handle exceptions # END handle non-bare working tree if not read_only and (repo.bare or not parent_matches_head): raise ValueError("Cannot write blobs of 'historical' submodule configurations") # END handle writes of historical submodules return SubmoduleConfigParser(fp_module, read_only=read_only)
ValueError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule._config_parser
7,197
def _clear_cache(self): # clear the possibly changed values for name in self._cache_attrs: try: delattr(self, name) except __HOLE__: pass # END try attr deletion # END for each name to delete
AttributeError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule._clear_cache
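`_clear_cache` treats the masked `AttributeError` from `delattr` as "already clear". The same tolerant cache invalidation, standalone:

class Cached(object):
    _cache_attrs = ('size', 'path')

    def _clear_cache(self):
        for name in self._cache_attrs:
            try:
                delattr(self, name)   # drop the memoized value
            except AttributeError:
                pass                  # attribute was never computed

c = Cached()
c.size = 42
c._clear_cache()           # removes 'size', silently skips 'path'
print(hasattr(c, 'size'))  # False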
7,198
def _config_parser_constrained(self, read_only): """:return: Config Parser constrained to our submodule in read or write mode""" try: pc = self.parent_commit except __HOLE__: pc = None # end hande empty parent repository parser = self._config_parser(self.repo, pc, read_only) parser.set_submodule(self) return SectionConstraint(parser, sm_section(self.name))
ValueError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule._config_parser_constrained
7,199
@classmethod
def add(cls, repo, name, path, url=None, branch=None, no_checkout=False):
    """Add a new submodule to the given repository. This will alter the index
    as well as the .gitmodules file, but will not create a new commit.
    If the submodule already exists, no matter if the configuration differs
    from the one provided, the existing submodule will be returned.

    :param repo: Repository instance which should receive the submodule
    :param name: The name/identifier for the submodule
    :param path: repository-relative or absolute path at which the submodule
        should be located
        It will be created as required during the repository initialization.
    :param url: git-clone compatible URL, see git-clone reference for more information
        If None, the repository is assumed to exist, and the url of the first
        remote is taken instead. This is useful if you want to make an existing
        repository a submodule of anotherone.
    :param branch: name of branch at which the submodule should (later) be checked out.
        The given branch must exist in the remote repository, and will be checked
        out locally as a tracking branch.
        It will only be written into the configuration if it not None, which is
        when the checked out branch will be the one the remote HEAD pointed to.
        The result you get in these situation is somewhat fuzzy, and it is recommended
        to specify at least 'master' here.
        Examples are 'master' or 'feature/new'
    :param no_checkout: if True, and if the repository has to be cloned manually,
        no checkout will be performed
    :return: The newly created submodule instance
    :note: works atomically, such that no change will be done if the repository
        update fails for instance"""
    if repo.bare:
        raise InvalidGitRepositoryError("Cannot add submodules to bare repositories")
    # END handle bare repos

    path = cls._to_relative_path(repo, path)

    # assure we never put backslashes into the url, as some operating systems
    # like it ...
    if url is not None:
        url = to_native_path_linux(url)
    # END assure url correctness

    # INSTANTIATE INTERMEDIATE SM
    sm = cls(repo, cls.NULL_BIN_SHA, cls.k_default_mode, path, name, url='invalid-temporary')
    if sm.exists():
        # reretrieve submodule from tree
        try:
            return repo.head.commit.tree[path]
        except __HOLE__:
            # could only be in index
            index = repo.index
            entry = index.entries[index.entry_key(path, 0)]
            sm.binsha = entry.binsha
            return sm
        # END handle exceptions
    # END handle existing

    # fake-repo - we only need the functionality on the branch instance
    br = git.Head(repo, git.Head.to_full_path(str(branch) or cls.k_head_default))
    has_module = sm.module_exists()
    branch_is_default = branch is None
    if has_module and url is not None:
        if url not in [r.url for r in sm.module().remotes]:
            raise ValueError(
                "Specified URL '%s' does not match any remote url of the repository at '%s'"
                % (url, sm.abspath))
        # END check url
    # END verify urls match

    mrepo = None
    if url is None:
        if not has_module:
            raise ValueError("A URL was not given and existing repository did not exsit at %s" % path)
        # END check url
        mrepo = sm.module()
        urls = [r.url for r in mrepo.remotes]
        if not urls:
            raise ValueError("Didn't find any remote url in repository at %s" % sm.abspath)
        # END verify we have url
        url = urls[0]
    else:
        # clone new repo
        kwargs = {'n': no_checkout}
        if not branch_is_default:
            kwargs['b'] = br.name
        # END setup checkout-branch

        # _clone_repo(cls, repo, url, path, name, **kwargs):
        mrepo = cls._clone_repo(repo, url, path, name, **kwargs)
    # END verify url

    # It's important to add the URL to the parent config, to let `git submodule` know.
    # otherwise there is a '-' character in front of the submodule listing
    #  a38efa84daef914e4de58d1905a500d8d14aaf45 mymodule (v0.9.0-1-ga38efa8)
    # -a38efa84daef914e4de58d1905a500d8d14aaf45 submodules/intermediate/one
    writer = sm.repo.config_writer()
    writer.set_value(sm_section(name), 'url', url)
    writer.release()

    # update configuration and index
    index = sm.repo.index
    writer = sm.config_writer(index=index, write=False)
    writer.set_value('url', url)
    writer.set_value('path', path)

    sm._url = url
    if not branch_is_default:
        # store full path
        writer.set_value(cls.k_head_option, br.path)
        sm._branch_path = br.path
    # END handle path
    writer.release()
    del(writer)

    # we deliberatly assume that our head matches our index !
    sm.binsha = mrepo.head.commit.binsha
    index.add([sm], write=True)

    return sm
KeyError
dataset/ETHPy150Open gitpython-developers/GitPython/git/objects/submodule/base.py/Submodule.add