Column       Type     Range / values
Unnamed: 0   int64    0 to 10k
function     string   lengths 79 to 138k
label        string   20 classes
info         string   lengths 42 to 261
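Each record below follows this schema: a running index, then the three fields in order — function (a Python function body in which one exception class has been masked with the placeholder __HOLE__), label (the name of the masked exception class), and info (the path of the source file in the ETHPy150 corpus). As a minimal sketch of how such records might be consumed (assuming the rows are available as a pandas DataFrame with these column names; the file name exceptions.csv is hypothetical and not part of this dataset):

import pandas as pd

# Hypothetical file name; the actual storage format of this dataset may differ.
df = pd.read_csv("exceptions.csv")

for _, row in df.iterrows():
    # Substituting the label back into the placeholder restores the
    # original except clause of the masked function.
    restored = row["function"].replace("__HOLE__", row["label"])
    print(row["info"], "->", row["label"])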
5,900
def handle_response(self, method, response):
    try:
        data = response.json(object_hook=self.get_json_object_hook)
    except __HOLE__:
        data = None
    if response.status_code == 200:
        return ApiResponse(response, method, data)
    if data is None:
        raise TwitterApiError(
            'Unable to decode JSON response.',
            response=response,
            request_method=method,
        )
    error_code, error_msg = self.get_twitter_error_details(data)
    kwargs = {
        'response': response,
        'request_method': method,
        'error_code': error_code,
    }
    if response.status_code == 401 or 'Bad Authentication data' in error_msg:
        raise TwitterAuthError(error_msg, **kwargs)
    if response.status_code == 404:
        raise TwitterApiError('Invalid API resource.', **kwargs)
    if response.status_code == 429:
        raise TwitterRateLimitError(error_msg, **kwargs)
    raise TwitterApiError(error_msg, **kwargs)
ValueError
dataset/ETHPy150Open inueni/birdy/birdy/twitter.py/BaseTwitterClient.handle_response
5,901
def get_request_token(self, base_auth_url=None, callback_url=None, auto_set_token=True, **kwargs):
    if callback_url:
        self.session._client.client.callback_uri = to_unicode(callback_url, 'utf-8')
    try:
        token = self.session.fetch_request_token(self.request_token_url)
    except requests.RequestException as e:
        raise TwitterClientError(str(e))
    except __HOLE__ as e:
        raise TwitterClientError('Response does not contain a token.')
    if base_auth_url:
        token['auth_url'] = self.session.authorization_url(base_auth_url, **kwargs)
    if auto_set_token:
        self.auto_set_token(token)
    return JSONObject(token)
ValueError
dataset/ETHPy150Open inueni/birdy/birdy/twitter.py/UserClient.get_request_token
5,902
def get_access_token(self, oauth_verifier, auto_set_token=True):
    required = (self.access_token, self.access_token_secret)
    if not all(required):
        raise TwitterClientError('''%s must be initialized with access_token and access_token_secret to fetch authorized access token.''' % self.__class__.__name__)
    self.session._client.client.verifier = to_unicode(oauth_verifier, 'utf-8')
    try:
        token = self.session.fetch_access_token(self.access_token_url)
    except requests.RequestException as e:
        raise TwitterClientError(str(e))
    except __HOLE__:
        raise TwitterClientError('Response does not contain a token.')
    if auto_set_token:
        self.auto_set_token(token)
    return JSONObject(token)
ValueError
dataset/ETHPy150Open inueni/birdy/birdy/twitter.py/UserClient.get_access_token
5,903
def get_access_token(self, auto_set_token=True):
    data = {'grant_type': 'client_credentials'}
    try:
        response = self.session.post(self.request_token_url, auth=self.auth, data=data)
        data = json.loads(response.content.decode('utf-8'))
        access_token = data['access_token']
    except requests.RequestException as e:
        raise TwitterClientError(str(e))
    except (__HOLE__, KeyError):
        raise TwitterClientError('Response does not contain an access token.')
    if auto_set_token:
        self.access_token = access_token
        self.session = self.get_oauth_session()
    return access_token
ValueError
dataset/ETHPy150Open inueni/birdy/birdy/twitter.py/AppClient.get_access_token
5,904
def reset_config():
    while True:
        try:
            CONFIG._pop_object()
        except __HOLE__:
            break
IndexError
dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Paste-2.0.1/tests/test_config.py/reset_config
5,905
def __init__(self, generator):
    self.g = generator
    self.io_loop = IOLoop.instance()
    try:
        self.call(self.g.next())
    except __HOLE__:
        pass
StopIteration
dataset/ETHPy150Open truemped/tornadotools/tornadotools/adisp.py/CallbackDispatcher.__init__
5,906
def _send_result(self, results, single):
    try:
        result = results[0] if single else results
        if isinstance(result, Exception):
            self.call(self.g.throw(result))
        else:
            self.call(self.g.send(result))
    except __HOLE__:
        pass
StopIteration
dataset/ETHPy150Open truemped/tornadotools/tornadotools/adisp.py/CallbackDispatcher._send_result
5,907
def get_module_by_name(self, _module):
    """Returns the module with the given name.

    Args:
      _module: A str containing the name of the module.

    Returns:
      The module.Module with the provided name.

    Raises:
      request_info.ModuleDoesNotExistError: The module does not exist.
    """
    try:
        return self._module_name_to_module[_module]
    except __HOLE__:
        raise request_info.ModuleDoesNotExistError(_module)
KeyError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/dispatcher.py/Dispatcher.get_module_by_name
5,908
def _resolve_target(self, hostname, path):
    """Returns the module and instance that should handle this request.

    Args:
      hostname: A string containing the value of the host header in the
        request or None if one was not present.
      path: A string containing the path of the request.

    Returns:
      A tuple (_module, inst) where:
        _module: The module.Module that should handle this request.
        inst: The instance.Instance that should handle this request or None
          if the module's load balancing should decide on the instance.

    Raises:
      request_info.ModuleDoesNotExistError: if hostname is not known.
    """
    if self._port == 80:
        default_address = self.host
    else:
        default_address = '%s:%s' % (self.host, self._port)
    if not hostname or hostname == default_address:
        return self._module_for_request(path), None

    default_address_offset = hostname.find(default_address)
    if default_address_offset > 0:
        prefix = hostname[:default_address_offset - 1]
        # The prefix should be 'module', but might be 'instance.version.module',
        # 'version.module', or 'instance.module'. These alternatives work in
        # production, but devappserver2 doesn't support running multiple versions
        # of the same module. All we can really do is route to the default
        # version of the specified module.
        if '.' in prefix:
            logging.warning('Ignoring instance/version in %s; multiple versions '
                            'are not supported in devappserver.', prefix)
        module_name = prefix.split('.')[-1]
        return self._get_module_with_soft_routing(module_name, None), None
    else:
        if ':' in hostname:
            port = int(hostname.split(':', 1)[1])
        else:
            port = 80
        try:
            _module, inst = self._port_registry.get(port)
        except __HOLE__:
            raise request_info.ModuleDoesNotExistError(hostname)
    if not _module:
        _module = self._module_for_request(path)
    return _module, inst
KeyError
dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/devappserver2/dispatcher.py/Dispatcher._resolve_target
5,909
def get_os_instance_and_project_id(context, fixed_ip):
    try:
        nova = clients.nova(context)
        os_address = nova.fixed_ips.get(fixed_ip)
        os_instances = nova.servers.list(
            search_opts={'hostname': os_address.hostname,
                         'all_tenants': True})
        return next((os_instance.id, os_instance.tenant_id)
                    for os_instance in os_instances
                    if any((addr['addr'] == fixed_ip and
                            addr['OS-EXT-IPS:type'] == 'fixed')
                           for addr in itertools.chain(
                               *six.itervalues(os_instance.addresses))))
    except (nova_exception.NotFound, __HOLE__):
        raise exception.EC2MetadataNotFound()
StopIteration
dataset/ETHPy150Open openstack/ec2-api/ec2api/metadata/api.py/get_os_instance_and_project_id
5,910
def get_os_instance_and_project_id_by_provider_id(context, provider_id,
                                                  fixed_ip):
    neutron = clients.neutron(context)
    os_subnets = neutron.list_subnets(advanced_service_providers=[provider_id],
                                      fields=['network_id'])
    if not os_subnets:
        raise exception.EC2MetadataNotFound()
    os_networks = [subnet['network_id'] for subnet in os_subnets['subnets']]
    try:
        os_port = neutron.list_ports(
            fixed_ips='ip_address=' + fixed_ip,
            network_id=os_networks,
            fields=['device_id', 'tenant_id'])['ports'][0]
    except __HOLE__:
        raise exception.EC2MetadataNotFound()
    os_instance_id = os_port['device_id']
    project_id = os_port['tenant_id']
    return os_instance_id, project_id
IndexError
dataset/ETHPy150Open openstack/ec2-api/ec2api/metadata/api.py/get_os_instance_and_project_id_by_provider_id
5,911
def get_namespace_from_filepath(filename):
    namespace = os.path.dirname(filename).strip(os.sep).replace(os.sep, config.get('namespace_delimiter'))
    if '{namespace}' in config.get('filename_format'):
        try:
            splitted_filename = os.path.basename(filename).split('.')
            if namespace:
                namespace += config.get('namespace_delimiter')
            namespace += splitted_filename[config.get('filename_format').index('{namespace}')]
        except __HOLE__:
            raise I18nFileLoadError("incorrect file format.")
    return namespace
ValueError
dataset/ETHPy150Open tuvistavie/python-i18n/i18n/resource_loader.py/get_namespace_from_filepath
5,912
def __getattr__(self, name):
    """
    All attributes of the spatial backend return False by default.
    """
    try:
        return self.__dict__[name]
    except __HOLE__:
        return False
KeyError
dataset/ETHPy150Open CollabQ/CollabQ/vendor/django/contrib/gis/db/backend/base.py/BaseSpatialBackend.__getattr__
5,913
def sha256(fileName):
    """Compute sha256 hash of the specified file"""
    m = hashlib.sha256()
    try:
        fd = open(fileName, "rb")
    except __HOLE__:
        print "Unable to open the file in readmode:", fileName
        return
    content = fd.readlines()
    fd.close()
    for eachLine in content:
        m.update(eachLine)
    return m.hexdigest()
IOError
dataset/ETHPy150Open smart-classic/smart_server/smart/models/record_object.py/sha256
5,914
@inherit_docstring_from(QueryStrategy)
def make_query(self):
    dataset = self.dataset
    try:
        unlabeled_entry_ids, X_pool = zip(*dataset.get_unlabeled_entries())
    except __HOLE__:
        # might be no more unlabeled data left
        return
    while self.budget_used < self.T:
        self.calc_query()
        ask_idx = self.random_state_.choice(
            np.arange(len(self.unlabeled_invert_id_idx)),
            size=1,
            p=self.query_dist
        )[0]
        ask_id = self.unlabeled_entry_ids[ask_idx]
        if ask_id in unlabeled_entry_ids:
            self.budget_used += 1
            return ask_id
        else:
            self.update(ask_id, dataset.data[ask_id][1])
    raise ValueError("Out of query budget")
ValueError
dataset/ETHPy150Open ntucllab/libact/libact/query_strategies/active_learning_by_learning.py/ActiveLearningByLearning.make_query
5,915
@staticmethod
def tail(fname, window):
    """Read last N lines from file fname."""
    try:
        f = open(fname, 'r')
    except __HOLE__, err:
        if err.errno == errno.ENOENT:
            return []
        else:
            raise
    else:
        BUFSIZ = 1024
        f.seek(0, os.SEEK_END)
        fsize = f.tell()
        block = -1
        data = ""
        exit = False
        while not exit:
            step = (block * BUFSIZ)
            if abs(step) >= fsize:
                f.seek(0)
                exit = True
            else:
                f.seek(step, os.SEEK_END)
            data = f.read().strip()
            if data.count('\n') >= window:
                break
            else:
                block -= 1
        return data.splitlines()[-window:]
IOError
dataset/ETHPy150Open gmcquillan/firetower/firetower/util/log_watcher.py/LogWatcher.tail
5,916
def __init__(self, *args, **kwargs):
    super(AdminParametersForm, self).__init__(*args, **kwargs)
    self.field_widths = {
        "default_domain_quota": 2
    }
    hide_fields = False
    dpath = None
    code, output = exec_cmd("which dovecot")
    if not code:
        dpath = output.strip()
    else:
        known_paths = getattr(
            settings, "DOVECOT_LOOKUP_PATH",
            ("/usr/sbin/dovecot", "/usr/local/sbin/dovecot")
        )
        for fpath in known_paths:
            if os.path.isfile(fpath) and os.access(fpath, os.X_OK):
                dpath = fpath
    if dpath:
        try:
            code, version = exec_cmd("%s --version" % dpath)
        except __HOLE__:
            hide_fields = True
        else:
            if code or not version.strip().startswith("2"):
                hide_fields = True
    else:
        hide_fields = True
    if hide_fields:
        del self.fields["handle_mailboxes"]
        del self.fields["mailboxes_owner"]
OSError
dataset/ETHPy150Open tonioo/modoboa/modoboa/admin/app_settings.py/AdminParametersForm.__init__
5,917
def update(dest, upd, recursive_update=True, merge_lists=False):
    '''
    Recursive version of the default dict.update

    Merges upd recursively into dest

    If recursive_update=False, will use the classic dict.update, or fall back
    on a manual merge (helpful for non-dict types like FunctionWrapper)

    If merge_lists=True, will aggregate list object types instead of replace.
    This behavior is only activated when recursive_update=True. By default
    merge_lists=False.
    '''
    if (not isinstance(dest, collections.Mapping)) \
            or (not isinstance(upd, collections.Mapping)):
        raise TypeError('Cannot update using non-dict types in dictupdate.update()')
    updkeys = list(upd.keys())
    if not set(list(dest.keys())) & set(updkeys):
        recursive_update = False
    if recursive_update:
        for key in updkeys:
            val = upd[key]
            try:
                dest_subkey = dest.get(key, None)
            except __HOLE__:
                dest_subkey = None
            if isinstance(dest_subkey, collections.Mapping) \
                    and isinstance(val, collections.Mapping):
                ret = update(dest_subkey, val, merge_lists=merge_lists)
                dest[key] = ret
            elif isinstance(dest_subkey, list) \
                    and isinstance(val, list):
                if merge_lists:
                    dest[key] = dest.get(key, []) + val
                else:
                    dest[key] = upd[key]
            else:
                dest[key] = upd[key]
        return dest
    else:
        try:
            dest.update(upd)
        except AttributeError:
            # this mapping is not a dict
            for k in upd:
                dest[k] = upd[k]
        return dest
AttributeError
dataset/ETHPy150Open saltstack/salt/salt/utils/dictupdate.py/update
5,918
def test_small_verbosedict():
    expected_string = ("You tried to access the key 'b' "
                       "which does not exist. "
                       "The extant keys are: ['a']")
    dd = core.verbosedict()
    dd['a'] = 1
    assert_equal(dd['a'], 1)
    try:
        dd['b']
    except __HOLE__ as e:
        assert_equal(eval(six.text_type(e)), expected_string)
    else:
        # did not raise a KeyError
        assert(False)
KeyError
dataset/ETHPy150Open scikit-beam/scikit-beam/skbeam/core/tests/test_utils.py/test_small_verbosedict
5,919
def test_large_verbosedict():
    expected_string = ("You tried to access the key 'a' "
                       "which does not exist. There are 100 "
                       "extant keys, which is too many to show you")
    dd = core.verbosedict()
    for j in range(100):
        dd[j] = j
    # test success
    for j in range(100):
        assert_equal(dd[j], j)
    # test failure
    try:
        dd['a']
    except __HOLE__ as e:
        assert_equal(eval(six.text_type(e)), expected_string)
    else:
        # did not raise a KeyError
        assert(False)
KeyError
dataset/ETHPy150Open scikit-beam/scikit-beam/skbeam/core/tests/test_utils.py/test_large_verbosedict
5,920
def test_subtract_reference_images():
    num_images = 10
    img_dims = 200
    ones = np.ones((img_dims, img_dims))
    img_lst = [ones * _ for _ in range(num_images)]
    img_arr = np.asarray(img_lst)
    is_dark_lst = [True]
    is_dark = False
    was_dark = True
    while len(is_dark_lst) < num_images:
        if was_dark:
            is_dark = False
        else:
            is_dark = np.random.rand() > 0.5
        was_dark = is_dark
        is_dark_lst.append(is_dark)
    is_dark_arr = np.asarray(is_dark_lst)
    # make sure that a list of 2d images can be passed in
    core.subtract_reference_images(imgs=img_lst, is_reference=is_dark_arr)
    # make sure that the reference arr can actually be a list
    core.subtract_reference_images(imgs=img_arr, is_reference=is_dark_lst)
    # make sure that both input arrays can actually be lists
    core.subtract_reference_images(imgs=img_arr, is_reference=is_dark_lst)

    # test that the number of returned images is equal to the expected number
    # of returned images
    num_expected_images = is_dark_lst.count(False)
    # subtract an additional value if the last image is a reference image
    # num_expected_images -= is_dark_lst[len(is_dark_lst)-1]
    subtracted = core.subtract_reference_images(img_lst, is_dark_lst)
    try:
        assert_equal(num_expected_images, len(subtracted))
    except AssertionError as ae:
        print('is_dark_lst: {0}'.format(is_dark_lst))
        print('num_expected_images: {0}'.format(num_expected_images))
        print('len(subtracted): {0}'.format(len(subtracted)))
        six.reraise(AssertionError, ae, sys.exc_info()[2])

    # test that the image subtraction values are behaving as expected
    img_sum_lst = [img_dims * img_dims * val for val in range(num_images)]
    expected_return_val = 0
    dark_val = 0
    for idx, (is_dark, img_val) in enumerate(zip(is_dark_lst, img_sum_lst)):
        if is_dark:
            dark_val = img_val
        else:
            expected_return_val = expected_return_val - dark_val + img_val
    # test that the image subtraction was actually processed correctly
    return_sum = sum(subtracted)
    try:
        while True:
            return_sum = sum(return_sum)
    except __HOLE__:
        # thrown when return_sum is a single number
        pass
    try:
        assert_equal(expected_return_val, return_sum)
    except AssertionError as ae:
        print('is_dark_lst: {0}'.format(is_dark_lst))
        print('expected_return_val: {0}'.format(expected_return_val))
        print('return_sum: {0}'.format(return_sum))
        six.reraise(AssertionError, ae, sys.exc_info()[2])
TypeError
dataset/ETHPy150Open scikit-beam/scikit-beam/skbeam/core/tests/test_utils.py/test_subtract_reference_images
5,921
def test_img_to_relative_xyi(random_seed=None):
    from skbeam.core.utils import img_to_relative_xyi
    # make the RNG deterministic
    if random_seed is not None:
        np.random.seed(42)
    # set the maximum image dims
    maxx = 2000
    maxy = 2000
    # create a randomly sized image
    nx = int(np.random.rand() * maxx)
    ny = int(np.random.rand() * maxy)
    # create a randomly located center
    cx = np.random.rand() * nx
    cy = np.random.rand() * ny
    # generate the image
    img = np.ones((nx, ny))
    # generate options for the x center to test edge conditions
    cx_lst = [0, cx, nx]
    # generate options for the y center to test edge conditions
    cy_lst = [0, cy, ny]
    for cx, cy in zip(cx_lst, cy_lst):
        # call the function
        x, y, i = img_to_relative_xyi(img=img, cx=cx, cy=cy)
        logger.debug('y {0}'.format(y))
        logger.debug('sum(y) {0}'.format(sum(y)))
        expected_total_y = sum(np.arange(ny, dtype=np.int64) - cy) * nx
        logger.debug('expected_total_y {0}'.format(expected_total_y))
        logger.debug('x {0}'.format(x))
        logger.debug('sum(x) {0}'.format(sum(x)))
        expected_total_x = sum(np.arange(nx, dtype=np.int64) - cx) * ny
        logger.debug('expected_total_x {0}'.format(expected_total_x))
        expected_total_intensity = nx * ny
        try:
            assert_almost_equal(sum(x), expected_total_x, decimal=0)
            assert_almost_equal(sum(y), expected_total_y, decimal=0)
            assert_equal(sum(i), expected_total_intensity)
        except __HOLE__ as ae:
            logger.error('img dims: ({0}, {1})'.format(nx, ny))
            logger.error('img center: ({0}, {1})'.format(cx, cy))
            logger.error('sum(returned_x): {0}'.format(sum(x)))
            logger.error('expected_x: {0}'.format(expected_total_x))
            logger.error('sum(returned_y): {0}'.format(sum(y)))
            logger.error('expected_y: {0}'.format(expected_total_y))
            logger.error('sum(returned_i): {0}'.format(sum(i)))
            logger.error('expected_x: {0}'.format(expected_total_intensity))
            six.reraise(AssertionError, ae, sys.exc_info()[2])
AssertionError
dataset/ETHPy150Open scikit-beam/scikit-beam/skbeam/core/tests/test_utils.py/test_img_to_relative_xyi
5,922
def write(self, payload):
    try:
        NailgunProtocol.write_chunk(self._socket, self._chunk_type, payload)
    except __HOLE__ as e:
        # If the remote client disconnects and we try to perform a write
        # (e.g. socket.send/sendall), an 'error: [Errno 32] Broken pipe'
        # exception can be thrown. Setting mask_broken_pipe=True safeguards
        # against this case (which is unexpected for most writers of
        # sys.stdout etc) so that we don't awkwardly interrupt the runtime by
        # throwing this exception on writes to stdout/stderr.
        if e.errno == errno.EPIPE and not self._mask_broken_pipe:
            raise
IOError
dataset/ETHPy150Open pantsbuild/pants/src/python/pants/java/nailgun_io.py/NailgunStreamWriter.write
5,923
def fmatch_best(needle, haystack, min_ratio=0.6):
    try:
        return sorted(
            fmatch_iter(needle, haystack, min_ratio), reverse=True,
        )[0][1]
    except __HOLE__:
        pass
IndexError
dataset/ETHPy150Open celery/kombu/kombu/utils/text.py/fmatch_best
5,924
def get_category(self, slug):
    """
    Get the category object
    """
    try:
        return get_category_for_slug(slug)
    except __HOLE__ as e:
        raise Http404(str(e))
ObjectDoesNotExist
dataset/ETHPy150Open edoburu/django-fluent-blogs/fluent_blogs/views/entries.py/EntryCategoryArchive.get_category
5,925
def __new__(cls, *uris, **settings):
    address = register_server(*uris, **settings)
    http_uri = address.http_uri("/")
    try:
        inst = cls.__instances[address]
    except __HOLE__:
        inst = super(DBMS, cls).__new__(cls)
        inst.address = address
        inst.__remote__ = Resource(http_uri)
        inst.__graph = None
        cls.__instances[address] = inst
    return inst
KeyError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/DBMS.__new__
5,926
def __eq__(self, other):
    try:
        return remote(self) == remote(other)
    except __HOLE__:
        return False
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/DBMS.__eq__
5,927
def _bean_dict(self, name):
    info = Resource(remote(self).uri.string + "db/manage/server/jmx/domain/org.neo4j").get().content
    raw_config = [b for b in info["beans"] if b["name"].endswith("name=%s" % name)][0]
    d = {}
    for attribute in raw_config["attributes"]:
        name = attribute["name"]
        value = attribute.get("value")
        if value == "true":
            d[name] = True
        elif value == "false":
            d[name] = False
        else:
            try:
                d[name] = int(value)
            except (TypeError, __HOLE__):
                d[name] = value
    return d
ValueError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/DBMS._bean_dict
5,928
def __new__(cls, *uris, **settings):
    database = settings.pop("database", "data")
    address = register_server(*uris, **settings)
    key = (cls, address, database)
    try:
        inst = cls.__instances[key]
    except __HOLE__:
        inst = super(Graph, cls).__new__(cls)
        inst.address = address
        inst.__remote__ = Resource(address.http_uri("/db/%s/" % database))
        inst.transaction_uri = Resource(address.http_uri("/db/%s/transaction" % database)).uri.string
        inst.transaction_class = HTTPTransaction
        use_bolt = address.bolt
        if use_bolt is None:
            use_bolt = version_tuple(inst.__remote__.get().content["neo4j_version"]) >= (3,)
        if use_bolt:
            auth = get_auth(address)
            inst.driver = GraphDatabase.driver(address.bolt_uri("/"),
                                               auth=None if auth is None else auth.bolt_auth_token,
                                               encrypted=address.secure,
                                               user_agent="/".join(PRODUCT))
            inst.transaction_class = BoltTransaction
        cls.__instances[key] = inst
    return inst
KeyError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Graph.__new__
5,929
def node(self, id_):
    """ Fetch a node by ID. This method creates an object representing the
    remote node with the ID specified but fetches no data from the server.
    For this reason, there is no guarantee that the entity returned
    actually exists.

    :param id_:
    """
    resource = remote(self).resolve("node/%s" % id_)
    uri_string = resource.uri.string
    try:
        return Node.cache[uri_string]
    except __HOLE__:
        node = self.evaluate("MATCH (a) WHERE id(a)={x} RETURN a", x=id_)
        if node is None:
            raise IndexError("Node %d not found" % id_)
        else:
            return node
KeyError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Graph.node
5,930
def pull(self, subgraph):
    """ Pull data to one or more entities from their remote counterparts.

    :param subgraph: the collection of nodes and relationships to pull
    """
    try:
        subgraph.__db_pull__(self)
    except __HOLE__:
        raise TypeError("No method defined to pull object %r" % subgraph)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Graph.pull
5,931
def push(self, subgraph):
    """ Push data from one or more entities to their remote counterparts.

    :param subgraph: the collection of nodes and relationships to push
    """
    try:
        subgraph.__db_push__(self)
    except __HOLE__:
        raise TypeError("No method defined to push object %r" % subgraph)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Graph.push
5,932
def relationship(self, id_):
    """ Fetch a relationship by ID.

    :param id_:
    """
    resource = remote(self).resolve("relationship/" + str(id_))
    uri_string = resource.uri.string
    try:
        return Relationship.cache[uri_string]
    except __HOLE__:
        relationship = self.evaluate("MATCH ()-[r]->() WHERE id(r)={x} RETURN r", x=id_)
        if relationship is None:
            raise IndexError("Relationship %d not found" % id_)
        else:
            return relationship
KeyError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Graph.relationship
5,933
def fetch(self):
    try:
        return self.buffer.popleft()
    except __HOLE__:
        if self.loaded:
            return None
        else:
            self.transaction.process()
            return self.fetch()
IndexError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/HTTPDataSource.fetch
5,934
def load(self, data):
    assert not self.loaded
    try:
        entities = self.transaction.entities.popleft()
    except (__HOLE__, IndexError):
        entities = {}
    self._keys = keys = tuple(data["columns"])
    hydrate = self.graph._hydrate
    for record in data["data"]:
        values = []
        for i, value in enumerate(record["rest"]):
            key = keys[i]
            cached = entities.get(key)
            values.append(hydrate(value, inst=cached))
        self.buffer.append(Record(keys, values))
    self.loaded = True
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/HTTPDataSource.load
5,935
def fetch(self):
    try:
        return self.buffer.popleft()
    except __HOLE__:
        if self.loaded:
            return None
        else:
            self.connection.send()
            while not self.buffer and not self.loaded:
                self.connection.fetch()
            return self.fetch()
IndexError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/BoltDataSource.fetch
5,936
def create(self, subgraph):
    """ Create remote nodes and relationships that correspond to those in a
    local subgraph. Any entities in *subgraph* that are already bound to
    remote entities will remain unchanged, those which are not will become
    bound to their newly-created counterparts.

    For example::

        >>> from py2neo import Graph, Node, Relationship
        >>> g = Graph()
        >>> tx = g.begin()
        >>> a = Node("Person", name="Alice")
        >>> tx.create(a)
        >>> b = Node("Person", name="Bob")
        >>> ab = Relationship(a, "KNOWS", b)
        >>> tx.create(ab)
        >>> tx.commit()
        >>> g.exists(ab)
        True

    :param subgraph: a :class:`.Node`, :class:`.Relationship` or other
                     creatable object
    """
    try:
        subgraph.__db_create__(self)
    except __HOLE__:
        raise TypeError("No method defined to create object %r" % subgraph)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Transaction.create
5,937
def degree(self, subgraph):
    """ Return the total number of relationships attached to all nodes in
    a subgraph.

    :param subgraph: a :class:`.Node`, :class:`.Relationship` or other
                     :class:`.Subgraph`
    :returns: the total number of distinct relationships
    """
    try:
        return subgraph.__db_degree__(self)
    except __HOLE__:
        raise TypeError("No method defined to determine the degree of object %r" % subgraph)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Transaction.degree
5,938
def delete(self, subgraph):
    """ Delete the remote nodes and relationships that correspond to those
    in a local subgraph.

    :param subgraph: a :class:`.Node`, :class:`.Relationship` or other
                     :class:`.Subgraph`
    """
    try:
        subgraph.__db_delete__(self)
    except __HOLE__:
        raise TypeError("No method defined to delete object %r" % subgraph)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Transaction.delete
5,939
def exists(self, subgraph):
    """ Determine whether one or more graph entities all exist within the
    database. Note that if any nodes or relationships in *subgraph* are not
    bound to remote counterparts, this method will return ``False``.

    :param subgraph: a :class:`.Node`, :class:`.Relationship` or other
                     :class:`.Subgraph`
    :returns: ``True`` if all entities exist remotely, ``False`` otherwise
    """
    try:
        return subgraph.__db_exists__(self)
    except __HOLE__:
        raise TypeError("No method defined to determine the existence of object %r" % subgraph)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Transaction.exists
5,940
def merge(self, subgraph, primary_label=None, primary_key=None):
    """ Merge nodes and relationships from a local subgraph into the
    database. Each node and relationship is merged independently, with
    nodes merged first and relationships merged second.

    For each node, the merge is carried out by comparing that node with a
    potential remote equivalent on the basis of a label and property value.
    If no remote match is found, a new node is created. The label and
    property to use for comparison are determined by `primary_label` and
    `primary_key` but may be overridden for individual nodes by the
    presence of `__primarylabel__` and `__primarykey__` attributes on the
    node itself. Note that multiple property keys may be specified by using
    a tuple.

    For each relationship, the merge is carried out by comparing that
    relationship with a potential remote equivalent on the basis of matching
    start and end nodes plus relationship type. If no remote match is found,
    a new relationship is created.

    :param subgraph: a :class:`.Node`, :class:`.Relationship` or other
                     :class:`.Subgraph` object
    :param primary_label: label on which to match any existing nodes
    :param primary_key: property key(s) on which to match any existing
                        nodes
    """
    try:
        subgraph.__db_merge__(self, primary_label, primary_key)
    except __HOLE__:
        raise TypeError("No method defined to merge object %r" % subgraph)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Transaction.merge
5,941
def separate(self, subgraph):
    """ Delete the remote relationships that correspond to those in a local
    subgraph. This leaves any nodes untouched.

    :param subgraph: a :class:`.Node`, :class:`.Relationship` or other
                     :class:`.Subgraph`
    """
    try:
        subgraph.__db_separate__(self)
    except __HOLE__:
        raise TypeError("No method defined to separate object %r" % subgraph)
AttributeError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Transaction.separate
5,942
def run(self, statement, parameters=None, **kwparameters):
    self._assert_unfinished()
    connection = self.session.connection
    try:
        entities = self.entities.popleft()
    except __HOLE__:
        entities = {}
    source = BoltDataSource(connection, entities, remote(self.graph).uri.string)
    run_response = Response(connection)
    run_response.on_success = source.on_header
    run_response.on_failure = source.on_failure
    pull_all_response = Response(connection)
    pull_all_response.on_record = source.on_record
    pull_all_response.on_success = source.on_footer
    pull_all_response.on_failure = source.on_failure
    s, p = normalise_request(statement, parameters, **kwparameters)
    connection.append(RUN, (s, p), run_response)
    connection.append(PULL_ALL, (), pull_all_response)
    self.sources.append(source)
    if self.autocommit:
        self.finish()
    return Cursor(source)
IndexError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/BoltTransaction.run
5,943
def evaluate(self, field=0):
    """ Return the value of the first field from the next record (or the
    value of another field if explicitly specified).

    This method attempts to move the cursor one step forward and, if
    successful, selects and returns an individual value from the new
    current record. By default, this value will be taken from the first
    value in that record but this can be overridden with the `field`
    argument, which can represent either a positional index or a textual
    key.

    If the cursor cannot be moved forward or if the record contains no
    values, :py:const:`None` will be returned instead.

    This method is particularly useful when it is known that a Cypher query
    returns only a single value.

    :param field: field to select value from (optional)
    :returns: value of the field or :py:const:`None`

    Example:
        >>> from py2neo import Graph
        >>> g = Graph()
        >>> g.run("MATCH (a) WHERE a.email={x} RETURN a.name", x="[email protected]").evaluate()
        'Bob Robertson'
    """
    if self.forward():
        try:
            return self._current[field]
        except __HOLE__:
            return None
    else:
        return None
IndexError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Cursor.evaluate
5,944
def __getitem__(self, item):
    if isinstance(item, string):
        try:
            return tuple.__getitem__(self, self.__keys.index(item))
        except __HOLE__:
            raise KeyError(item)
    elif isinstance(item, slice):
        return self.__class__(self.__keys[item.start:item.stop],
                              tuple.__getitem__(self, item))
    else:
        return tuple.__getitem__(self, item)
ValueError
dataset/ETHPy150Open nigelsmall/py2neo/py2neo/database/__init__.py/Record.__getitem__
5,945
def touchopen(filename, *args, **kwargs):
    try:
        os.remove(filename)
    except __HOLE__:
        pass
    open(filename, "a").close()  # "touch" file
    return open(filename, *args, **kwargs)

# The constrained memory should have no more than 1024 cells
OSError
dataset/ETHPy150Open crista/exercises-in-programming-style/01-good-old-times/tf-01.py/touchopen
5,946
def get_backend_register(self, k, default=None):
    try:
        return json.loads(self.backend_register).get(k, default)
    except __HOLE__:
        return default
AttributeError
dataset/ETHPy150Open open-cloud/xos/xos/core/models/plcorebase.py/PlModelMixIn.get_backend_register
5,947
def set_backend_register(self, k, v):
    br = {}
    try:
        br = json.loads(self.backend_register)
    except __HOLE__:
        br = {}
    br[k] = v
    self.backend_register = json.dumps(br)
AttributeError
dataset/ETHPy150Open open-cloud/xos/xos/core/models/plcorebase.py/PlModelMixIn.set_backend_register
5,948
def get_backend_details(self):
    try:
        scratchpad = json.loads(self.backend_register)
    except AttributeError:
        return (None, None, None, None)

    try:
        exponent = scratchpad['exponent']
    except __HOLE__:
        exponent = None

    try:
        last_success_time = scratchpad['last_success']
        dt = datetime.datetime.fromtimestamp(last_success_time)
        last_success = dt.strftime("%Y-%m-%d %H:%M")
    except KeyError:
        last_success = None

    try:
        failures = scratchpad['failures']
    except KeyError:
        failures = None

    try:
        last_failure_time = scratchpad['last_failure']
        dt = datetime.datetime.fromtimestamp(last_failure_time)
        last_failure = dt.strftime("%Y-%m-%d %H:%M")
    except KeyError:
        last_failure = None

    return (exponent, last_success, last_failure, failures)
KeyError
dataset/ETHPy150Open open-cloud/xos/xos/core/models/plcorebase.py/PlModelMixIn.get_backend_details
5,949
def delete(self, *args, **kwds):
    # so we have something to give the observer
    purge = kwds.get('purge', False)
    if purge:
        del kwds['purge']
    silent = kwds.get('silent', False)
    if silent:
        del kwds['silent']
    try:
        purge = purge or observer_disabled
    except __HOLE__:
        pass
    if (purge):
        super(PlCoreBase, self).delete(*args, **kwds)
    else:
        if (not self.write_protect):
            self.deleted = True
            self.enacted = None
            self.policed = None
            self.save(update_fields=['enacted', 'deleted', 'policed'], silent=silent)
NameError
dataset/ETHPy150Open open-cloud/xos/xos/core/models/plcorebase.py/PlCoreBase.delete
5,950
def test_string():
    """ Dumping and loading a string """
    filename, mode = 'test.h5', 'w'
    string_obj = "The quick brown fox jumps over the lazy dog"
    dump(string_obj, filename, mode)
    string_hkl = load(filename)
    #print "Initial list: %s"%list_obj
    #print "Unhickled data: %s"%list_hkl
    try:
        assert type(string_obj) == type(string_hkl) == str
        assert string_obj == string_hkl
        os.remove(filename)
    except __HOLE__:
        os.remove(filename)
        raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_string
5,951
def test_unicode():
    """ Dumping and loading a unicode string """
    filename, mode = 'test.h5', 'w'
    u = unichr(233) + unichr(0x0bf2) + unichr(3972) + unichr(6000)
    dump(u, filename, mode)
    u_hkl = load(filename)
    try:
        assert type(u) == type(u_hkl) == unicode
        assert u == u_hkl
        # For those interested, uncomment below to see what those codes are:
        # for i, c in enumerate(u_hkl):
        #     print i, '%04x' % ord(c), unicodedata.category(c),
        #     print unicodedata.name(c)
    except __HOLE__:
        os.remove(filename)
        raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_unicode
5,952
def test_list():
    """ Dumping and loading a list """
    filename, mode = 'test.h5', 'w'
    list_obj = [1, 2, 3, 4, 5]
    dump(list_obj, filename, mode)
    list_hkl = load(filename)
    #print "Initial list: %s"%list_obj
    #print "Unhickled data: %s"%list_hkl
    try:
        assert type(list_obj) == type(list_hkl) == list
        assert list_obj == list_hkl
        import h5py
        a = h5py.File(filename)
        os.remove(filename)
    except __HOLE__:
        print "ERR:", list_obj, list_hkl
        import h5py
        os.remove(filename)
        raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_list
5,953
def test_set():
    """ Dumping and loading a list """
    filename, mode = 'test.h5', 'w'
    list_obj = set([1, 0, 3, 4.5, 11.2])
    dump(list_obj, filename, mode)
    list_hkl = load(filename)
    #print "Initial list: %s"%list_obj
    #print "Unhickled data: %s"%list_hkl
    try:
        assert type(list_obj) == type(list_hkl) == set
        assert list_obj == list_hkl
        os.remove(filename)
    except __HOLE__:
        os.remove(filename)
        raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_set
5,954
def test_numpy():
    """ Dumping and loading numpy array """
    filename, mode = 'test.h5', 'w'
    dtypes = ['float32', 'float64', 'complex64', 'complex128']

    for dt in dtypes:
        array_obj = np.ones(8, dtype=dt)
        dump(array_obj, filename, mode)
        array_hkl = load(filename)
        try:
            assert array_hkl.dtype == array_obj.dtype
            assert np.all((array_hkl, array_obj))
            os.remove(filename)
        except __HOLE__:
            os.remove(filename)
            print array_hkl
            print array_obj
            raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_numpy
5,955
def test_masked():
    """ Test masked numpy array """
    filename, mode = 'test.h5', 'w'
    a = np.ma.array([1, 2, 3, 4], dtype='float32', mask=[0, 1, 0, 0])

    dump(a, filename, mode)
    a_hkl = load(filename)

    try:
        assert a_hkl.dtype == a.dtype
        assert np.all((a_hkl, a))
        os.remove(filename)
    except __HOLE__:
        os.remove(filename)
        print a_hkl
        print a
        raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_masked
5,956
def test_dict():
    """ Test dictionary dumping and loading """
    filename, mode = 'test.h5', 'w'

    dd = {
        'name': 'Danny',
        'age': 28,
        'height': 6.1,
        'dork': True,
        'nums': [1, 2, 3],
        'narr': np.array([1, 2, 3]),
        #'unic' : u'dan[at]thetelegraphic.com'
    }

    dump(dd, filename, mode)
    dd_hkl = load(filename)

    for k in dd.keys():
        try:
            assert k in dd_hkl.keys()
            if type(dd[k]) is type(np.array([1])):
                assert np.all((dd[k], dd_hkl[k]))
            else:
                #assert dd_hkl[k] == dd[k]
                pass
            assert type(dd_hkl[k]) == type(dd[k])
        except __HOLE__:
            print k
            print dd_hkl[k]
            print dd[k]
            print type(dd_hkl[k]), type(dd[k])
            os.remove(filename)
            raise
    os.remove(filename)
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_dict
5,957
def test_compression():
    """ Test compression on datasets"""
    filename, mode = 'test.h5', 'w'
    dtypes = ['int32', 'float32', 'float64', 'complex64', 'complex128']
    comps = [None, 'gzip', 'lzf']

    for dt in dtypes:
        for cc in comps:
            array_obj = np.ones(32768, dtype=dt)
            dump(array_obj, filename, mode, compression=cc)
            print cc, os.path.getsize(filename)
            array_hkl = load(filename)
            try:
                assert array_hkl.dtype == array_obj.dtype
                assert np.all((array_hkl, array_obj))
                os.remove(filename)
            except __HOLE__:
                os.remove(filename)
                print array_hkl
                print array_obj
                raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_compression
5,958
def test_masked_dict():
    """ Test dictionaries with masked arrays """
    filename, mode = 'test.h5', 'w'

    dd = {
        "data": np.ma.array([1, 2, 3], mask=[True, False, False]),
        "data2": np.array([1, 2, 3, 4, 5])
    }
    dump(dd, filename, mode)
    dd_hkl = load(filename)

    for k in dd.keys():
        try:
            assert k in dd_hkl.keys()
            if type(dd[k]) is type(np.array([1])):
                assert np.all((dd[k], dd_hkl[k]))
            elif type(dd[k]) is type(np.ma.array([1])):
                print dd[k].data
                print dd_hkl[k].data
                assert np.allclose(dd[k].data, dd_hkl[k].data)
                assert np.allclose(dd[k].mask, dd_hkl[k].mask)
            assert type(dd_hkl[k]) == type(dd[k])
        except __HOLE__:
            print k
            print dd_hkl[k]
            print dd[k]
            print type(dd_hkl[k]), type(dd[k])
            os.remove(filename)
            raise
    os.remove(filename)
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_masked_dict
5,959
def test_track_times():
    """ Verify that track_times = False produces identical files """
    hashes = []
    for obj, filename, mode, kwargs in DUMP_CACHE:
        if isinstance(filename, hickle.H5FileWrapper):
            filename = str(filename.file_name)
        kwargs['track_times'] = False
        caching_dump(obj, filename, mode, **kwargs)
        hashes.append(md5sum(filename))
        os.remove(filename)

    time.sleep(1)

    for hash1, (obj, filename, mode, kwargs) in zip(hashes, DUMP_CACHE):
        if isinstance(filename, hickle.H5FileWrapper):
            filename = str(filename.file_name)
        caching_dump(obj, filename, mode, **kwargs)
        hash2 = md5sum(filename)
        print hash1, hash2
        try:
            assert hash1 == hash2
            os.remove(filename)
        except __HOLE__:
            os.remove(filename)
            raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_track_times
5,960
def test_comp_kwargs():
    """ Test compression with some kwargs for shuffle and chunking """
    filename, mode = 'test.h5', 'w'
    dtypes = ['int32', 'float32', 'float64', 'complex64', 'complex128']
    comps = [None, 'gzip', 'lzf']
    chunks = [(100, 100), (250, 250)]
    shuffles = [True, False]
    scaleoffsets = [0, 1, 2]

    for dt in dtypes:
        for cc in comps:
            for ch in chunks:
                for sh in shuffles:
                    for so in scaleoffsets:
                        kwargs = {
                            'compression': cc,
                            'dtype': dt,
                            'chunks': ch,
                            'shuffle': sh,
                            'scaleoffset': so
                        }
                        #array_obj = np.random.random_integers(low=-8192, high=8192, size=(1000, 1000)).astype(dt)
                        array_obj = NESTED_DICT
                        dump(array_obj, filename, mode, compression=cc)
                        print kwargs, os.path.getsize(filename)
                        array_hkl = load(filename)
                        try:
                            os.remove(filename)
                        except __HOLE__:
                            os.remove(filename)
                            print array_hkl
                            print array_obj
                            raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_comp_kwargs
5,961
def run_file_cleanup():
    """ Clean up temp files """
    for filename in ('test.hdf', 'test.hkl', 'test.h5'):
        try:
            os.remove(filename)
        except __HOLE__:
            pass
OSError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/run_file_cleanup
5,962
def test_list_long_type():
    """ Check long comes back out as a long """
    filename, mode = 'test.h5', 'w'
    list_obj = [1L, 2L, 3L, 4L, 5L]
    dump(list_obj, filename, mode)
    list_hkl = load(filename)
    #print "Initial list: %s"%list_obj
    #print "Unhickled data: %s"%list_hkl
    try:
        assert type(list_obj) == type(list_hkl) == list
        assert list_obj == list_hkl
        assert type(list_obj[0]) == type(list_hkl[0])
        os.remove(filename)
    except __HOLE__:
        print "ERR:", list_obj, list_hkl
        import h5py
        a = h5py.File(filename)
        print a.keys()
        print a['data'].keys()
        os.remove(filename)
        raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_list_long_type
5,963
def test_list_order():
    """ https://github.com/telegraphic/hickle/issues/26 """
    d = [np.arange(n + 1) for n in range(20)]
    hickle.dump(d, 'test.h5')
    d_hkl = hickle.load('test.h5')

    try:
        for ii, xx in enumerate(d):
            assert d[ii].shape == d_hkl[ii].shape
        for ii, xx in enumerate(d):
            assert np.allclose(d[ii], d_hkl[ii])
    except __HOLE__:
        print d[ii], d_hkl[ii]
        raise
AssertionError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_list_order
5,964
def test_legacy_hickles():
    try:
        a = load("hickle_1_1_0.hkl")
        b = load("hickle_1_3_0.hkl")
        import h5py
        d = h5py.File("hickle_1_1_0.hkl")["data"]["a"][:]
        d2 = h5py.File("hickle_1_3_0.hkl")["data"]["a"][:]
        assert np.allclose(d, a["a"])
        assert np.allclose(d2, b["a"])
    except __HOLE__:
        # For travis-CI
        a = load("tests/hickle_1_1_0.hkl")
        b = load("tests/hickle_1_3_0.hkl")
    print a
    print b
IOError
dataset/ETHPy150Open telegraphic/hickle/tests/test_hickle.py/test_legacy_hickles
5,965
def __validate_simple_subfield(self, parameter, field, segment_list,
                               _segment_index=0):
    """Verifies that a proposed subfield actually exists and is a simple field.

    Here, simple means it is not a MessageField (nested).

    Args:
      parameter: String; the '.' delimited name of the current field being
          considered. This is relative to some root.
      field: An instance of a subclass of messages.Field. Corresponds to the
          previous segment in the path (previous relative to _segment_index),
          since this field should be a message field with the current segment
          as a field in the message class.
      segment_list: The full list of segments from the '.' delimited subfield
          being validated.
      _segment_index: Integer; used to hold the position of current segment
          so that segment_list can be passed as a reference instead of having
          to copy using segment_list[1:] at each step.

    Raises:
      TypeError: If the final subfield (indicated by _segment_index relative
        to the length of segment_list) is a MessageField.
      TypeError: If at any stage the lookup at a segment fails, e.g if a.b
        exists but a.b.c does not exist. This can happen either if a.b is not
        a message field or if a.b.c is not a property on the message class
        from a.b.
    """
    if _segment_index >= len(segment_list):
        if isinstance(field, messages.MessageField):
            field_class = field.__class__.__name__
            raise TypeError('Can\'t use messages in path. Subfield %r was '
                            'included but is a %s.' % (parameter, field_class))
        return

    segment = segment_list[_segment_index]
    parameter += '.' + segment
    try:
        field = field.type.field_by_name(segment)
    except (__HOLE__, KeyError):
        raise TypeError('Subfield %r from path does not exist.' % (parameter,))

    self.__validate_simple_subfield(parameter, field, segment_list,
                                    _segment_index=_segment_index + 1)
AttributeError
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/ext/endpoints/api_config.py/ApiConfigGenerator.__validate_simple_subfield
5,966
def get_setting(key):
    """
    Get setting values from CONF_FILE
    :param key:
    :return:
    """
    try:
        with open(CONF_FILE) as cf:
            settings = json.load(cf)
    except __HOLE__:
        return None
    if key in settings.keys():
        return settings[key]
    return None  # pragma: no cover
IOError
dataset/ETHPy150Open dwighthubbard/hostlists/hostlists/hostlists.py/get_setting
5,967
def multikeysort(items, columns):
    comparers = [
        ((operator.itemgetter(col[1:].strip()), -1) if col.startswith('-') else
         (operator.itemgetter(col.strip()), 1)) for col in columns
    ]

    def comparer(left, right):
        for fn, mult in comparers:
            try:
                result = cmp_compat(fn(left), fn(right))
            except __HOLE__:
                return 0
            if result:
                return mult * result
        else:
            return 0

    try:
        # noinspection PyArgumentList
        return sorted(items, cmp=comparer)
    except TypeError:
        # Python 3 removed the cmp parameter
        import functools
        return sorted(items, key=functools.cmp_to_key(comparer))
KeyError
dataset/ETHPy150Open dwighthubbard/hostlists/hostlists/hostlists.py/multikeysort
5,968
def compress(hostnames):
    """ Compress a list of host into a more compact range representation """
    domain_dict = {}
    result = []
    for host in hostnames:
        if '.' in host:
            domain = '.'.join(host.split('.')[1:])
        else:
            domain = ''
        try:
            domain_dict[domain].append(host)
        except __HOLE__:
            domain_dict[domain] = [host]
    domains = list(domain_dict.keys())
    domains.sort()
    for domain in domains:
        hosts = compress_domain(domain_dict[domain])
        result += hosts
    return result
KeyError
dataset/ETHPy150Open dwighthubbard/hostlists/hostlists/hostlists.py/compress
5,969
def compress_domain(hostnames):
    """ Compress a list of hosts in a domain into a more compact representation """
    hostnames.sort()
    prev_dict = {'prefix': "", 'suffix': '', 'number': 0}
    items = []
    items_block = []
    new_hosts = []
    for host in hostnames:
        try:
            parsed_dict = re.match(
                r"(?P<prefix>[^0-9]+)(?P<number>\d+)(?P<suffix>.*).?",
                host
            ).groupdict()
            # To generate the range we need the entries sorted numerically
            # but to ensure we don't lose any leading 0s we don't want to
            # replace the number parameter that is a string with the leading
            # 0s.
            parsed_dict['number_int'] = int(parsed_dict['number'])
            new_hosts.append(parsed_dict)
        except __HOLE__:
            if '.' not in host:
                host += '.'
                parsed_dict = {'host': compress([host])[0].strip('.')}
            else:
                parsed_dict = {'host': host}
            new_hosts.append(parsed_dict)
    new_hosts = multikeysort(new_hosts, ['prefix', 'number_int'])
    for parsed_dict in new_hosts:
        if 'host' in parsed_dict.keys() or \
                parsed_dict['prefix'] != prev_dict['prefix'] or \
                parsed_dict['suffix'] != prev_dict['suffix'] or \
                int(parsed_dict['number']) != int(prev_dict['number']) + 1:
            if len(items_block):
                items.append(items_block)
            items_block = [parsed_dict]
        else:
            items_block.append(parsed_dict)
        prev_dict = parsed_dict
    items.append(items_block)
    result = []
    for item in items:
        if len(item):
            if len(item) == 1 and 'host' in item[0].keys():
                result.append(item[0]['host'])
            elif len(item) == 1:
                result.append(
                    '%s%s%s' % (
                        item[0]['prefix'], item[0]['number'], item[0]['suffix']
                    )
                )
            else:
                result.append(
                    '%s[%s-%s]%s' % (
                        item[0]['prefix'], item[0]['number'],
                        item[-1]['number'], item[0]['suffix']
                    )
                )
    return result
AttributeError
dataset/ETHPy150Open dwighthubbard/hostlists/hostlists/hostlists.py/compress_domain
5,970
def clean_fields(self, exclude=None):
    """
    This is an override of the default model clean_fields method. Essentially,
    in addition to validating the fields, this method validates the
    :class:`Template` instance that is used to render this :class:`Page`.
    This is useful for catching template errors before they show up as 500
    errors on a live site.
    """
    if exclude is None:
        exclude = []

    try:
        super(Page, self).clean_fields(exclude)
    except ValidationError, e:
        errors = e.message_dict
    else:
        errors = {}

    if 'template' not in errors and 'template' not in exclude:
        try:
            self.template.clean_fields()
            self.template.clean()
        except __HOLE__, e:
            errors['template'] = e.messages

    if errors:
        raise ValidationError(errors)
ValidationError
dataset/ETHPy150Open ithinksw/philo/philo/models/pages.py/Page.clean_fields
5,971
def __init__(self, coord=None, mode='continuous'):
    super(Coordinate, self).__init__()
    if coord is None:
        self.value = 0
        return

    if not isinstance(coord, list) and not isinstance(coord, str):
        raise TypeError("Coordinates parameter must be a list with coordinates [x, y, z] or [x, y, z, value] or a string with coordinates delimited by commas.")

    if isinstance(coord, str):
        # coordinate as a string. Values delimited by a comma.
        coord = coord.split(',')

    if len(coord) not in [3, 4]:
        raise TypeError("Parameter must be a list with coordinates [x, y, z] or [x, y, z, value].")

    self.x = coord[0]
    self.y = coord[1]
    self.z = coord[2]
    if len(coord) == 4:
        self.value = coord[3]
    else:
        self.value = 0
    # coordinates and value must be digits:
    try:
        if mode == 'index':
            int(self.x), int(self.y), int(self.z), float(self.value)
        else:
            float(self.x), float(self.y), float(self.z), float(self.value)
    except __HOLE__:
        raise TypeError("All coordinates must be int and the value can be a float or a int. x=" + str(self.x) + ", y=" + str(self.y) + ", z=" + str(self.z) + ", value=" + str(self.value))
ValueError
dataset/ETHPy150Open neuropoly/spinalcordtoolbox/scripts/msct_types.py/Coordinate.__init__
5,972
@staticmethod
def datetime_from_string(value):
    try:
        return datetime.datetime.strptime(value, PINBOARD_DATETIME_FORMAT)
    except __HOLE__:
        return datetime.datetime.strptime(value, PINBOARD_ALTERNATE_DATETIME_FORMAT)
ValueError
dataset/ETHPy150Open lionheart/pinboard.py/pinboard/pinboard.py/Pinboard.datetime_from_string
5,973
def __init__(self, dbname, overwrite=False):
    self.dbname = dbname

    if overwrite:
        try:
            os.remove(dbname)
        except __HOLE__:
            pass

    self.conn = sqlite3.connect(dbname)

    if overwrite:
        self.setup()
OSError
dataset/ETHPy150Open bmander/graphserver/pygs/graphserver/ext/osm/profiledb.py/ProfileDB.__init__
5,974
def get(self, id):
    c = self.get_cursor()
    c.execute("SELECT profile FROM profiles WHERE id = ?", (id,))
    try:
        (profile,) = c.next()
    except __HOLE__:
        return None
    finally:
        c.close()
    return unpack_coords(profile)
StopIteration
dataset/ETHPy150Open bmander/graphserver/pygs/graphserver/ext/osm/profiledb.py/ProfileDB.get
5,975
def _filter_drag_end(self, event):
    source = self.filterTreeWidget.currentItem()
    source_name = source.text(0)
    target = self.filterTreeWidget.itemAt(event.pos())

    source_top, source_group = self._get_filter_item_coords(source)
    parent = target.parent()
    if parent:
        if parent.parent():
            target_top = parent.parent().text(0)
            target_group = parent.text(0)
        else:
            target_top = parent.text(0)
            if target.data(1, Qt.UserRole) == \
                    self.FilterTreeRoleGroup:
                target_group = target.text(0)
            else:
                target_group = None
    else:
        target_top = target.text(0)
        target_group = None

    # Find drop position
    target_index = self.filterTreeWidget.indexAt(event.pos())
    target_height = self.filterTreeWidget.rowHeight(target_index)
    pos = event.pos()
    pos.setY((pos.y() - target_height / 2))
    item_above_target = self.filterTreeWidget.itemAt(pos)
    above_target = item_above_target.text(0) if item_above_target \
        else None

    item = self.filter_managers[source_top].get_item(source_name,
                                                     source_group)
    try:
        self.filter_managers[source_top].remove_item(source_name,
                                                     source_group)
        if target_group:
            self.filter_managers[target_top].add_item(item, source_name,
                                                      target_group)
        else:
            self.filter_managers[target_top].add_item(item, source_name)
    except __HOLE__ as e:
        QMessageBox.critical(self, 'Error moving item', str(e))
        return

    self.filter_managers[target_top].move_item(source_name, above_target,
                                               target_group)

    self.populate_filter_tree()
    self.filters_changed.emit(source_top)

# Method injection into the filter tree (to enable custom drag and drop
# behavior)
ValueError
dataset/ETHPy150Open rproepp/spykeviewer/spykeviewer/ui/filter_dock.py/FilterDock._filter_drag_end
5,976
def __init__(self, *args, **kwargs):
    dispatcher.send(signal=signals.pre_init, sender=self.__class__, args=args, kwargs=kwargs)

    # There is a rather weird disparity here; if kwargs, it's set, then args
    # overrides it. It should be one or the other; don't duplicate the work
    # The reason for the kwargs check is that standard iterator passes in by
    # args, and instantiation for iteration is 33% faster.
    args_len = len(args)
    if args_len > len(self._meta.fields):
        # Daft, but matches old exception sans the err msg.
        raise IndexError("Number of args exceeds number of fields")

    fields_iter = iter(self._meta.fields)
    if not kwargs:
        # The ordering of the izip calls matter - izip throws StopIteration
        # when an iter throws it. So if the first iter throws it, the second
        # is *not* consumed. We rely on this, so don't change the order
        # without changing the logic.
        for val, field in izip(args, fields_iter):
            setattr(self, field.attname, val)
    else:
        # Slower, kwargs-ready version.
        for val, field in izip(args, fields_iter):
            setattr(self, field.attname, val)
            kwargs.pop(field.name, None)
            # Maintain compatibility with existing calls.
            if isinstance(field.rel, ManyToOneRel):
                kwargs.pop(field.attname, None)

    # Now we're left with the unprocessed fields that *must* come from
    # keywords, or default.
    for field in fields_iter:
        if kwargs:
            if isinstance(field.rel, ManyToOneRel):
                try:
                    # Assume object instance was passed in.
                    rel_obj = kwargs.pop(field.name)
                except __HOLE__:
                    try:
                        # Object instance wasn't passed in -- must be an ID.
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        val = field.get_default()
                else:
                    # Object instance was passed in. Special case: You can
                    # pass in "None" for related objects if it's allowed.
                    if rel_obj is None and field.null:
                        val = None
                    else:
                        try:
                            val = getattr(rel_obj, field.rel.get_related_field().attname)
                        except AttributeError:
                            raise TypeError("Invalid value: %r should be a %s instance, not a %s" %
                                            (field.name, field.rel.to, type(rel_obj)))
            else:
                val = kwargs.pop(field.attname, field.get_default())
        else:
            val = field.get_default()
        setattr(self, field.attname, val)

    if kwargs:
        for prop in kwargs.keys():
            try:
                if isinstance(getattr(self.__class__, prop), property):
                    setattr(self, prop, kwargs.pop(prop))
            except AttributeError:
                pass
        if kwargs:
            raise TypeError, "'%s' is an invalid keyword argument for this function" % kwargs.keys()[0]
    dispatcher.send(signal=signals.post_init, sender=self.__class__, instance=self)
KeyError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/base.py/Model.__init__
5,977
def _collect_sub_objects(self, seen_objs):
    """
    Recursively populates seen_objs with all objects related to this object.
    When done, seen_objs will be in the format:
        {model_class: {pk_val: obj, pk_val: obj, ...},
         model_class: {pk_val: obj, pk_val: obj, ...}, ...}
    """
    pk_val = self._get_pk_val()
    if pk_val in seen_objs.setdefault(self.__class__, {}):
        return
    seen_objs.setdefault(self.__class__, {})[pk_val] = self

    for related in self._meta.get_all_related_objects():
        rel_opts_name = related.get_accessor_name()
        if isinstance(related.field.rel, OneToOneRel):
            try:
                sub_obj = getattr(self, rel_opts_name)
            except __HOLE__:
                pass
            else:
                sub_obj._collect_sub_objects(seen_objs)
        else:
            for sub_obj in getattr(self, rel_opts_name).all():
                sub_obj._collect_sub_objects(seen_objs)
ObjectDoesNotExist
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/base.py/Model._collect_sub_objects
5,978
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
    op = is_next and '>' or '<'
    where = '(%s %s %%s OR (%s = %%s AND %s.%s %s %%s))' % \
        (backend.quote_name(field.column), op, backend.quote_name(field.column),
         backend.quote_name(self._meta.db_table), backend.quote_name(self._meta.pk.column), op)
    param = str(getattr(self, field.attname))
    q = self.__class__._default_manager.filter(**kwargs).order_by(
        (not is_next and '-' or '') + field.name,
        (not is_next and '-' or '') + self._meta.pk.name)
    q._where.append(where)
    q._params.extend([param, param, getattr(self, self._meta.pk.attname)])
    try:
        return q[0]
    except __HOLE__:
        raise self.DoesNotExist, "%s matching query does not exist." % self.__class__._meta.object_name
IndexError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/base.py/Model._get_next_or_previous_by_FIELD
5,979
def _save_FIELD_file(self, field, filename, raw_contents, save=True):
    directory = field.get_directory_name()
    try:
        # Create the date-based directory if it doesn't exist.
        os.makedirs(os.path.join(settings.MEDIA_ROOT, directory))
    except __HOLE__:
        # Directory probably already exists.
        pass
    filename = field.get_filename(filename)

    # If the filename already exists, keep adding an underscore to the name
    # of the file until the filename doesn't exist.
    while os.path.exists(os.path.join(settings.MEDIA_ROOT, filename)):
        try:
            dot_index = filename.rindex('.')
        except ValueError:
            # filename has no dot
            filename += '_'
        else:
            filename = filename[:dot_index] + '_' + filename[dot_index:]

    # Write the file to disk.
    setattr(self, field.attname, filename)
    full_filename = self._get_FIELD_filename(field)
    fp = open(full_filename, 'wb')
    fp.write(raw_contents)
    fp.close()

    # Save the width and/or height, if applicable.
    if isinstance(field, ImageField) and (field.width_field or field.height_field):
        from django.utils.images import get_image_dimensions
        width, height = get_image_dimensions(full_filename)
        if field.width_field:
            setattr(self, field.width_field, width)
        if field.height_field:
            setattr(self, field.height_field, height)

    # Save the object because it has changed unless save is False
    if save:
        self.save()
OSError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/db/models/base.py/Model._save_FIELD_file
5,980
def GetFromStore(self, file_path): """Retrieve file from zip files. Get the file from the source, it must not have been in the memcache. If possible, we'll use the zip file index to quickly locate where the file should be found. (See MapToFileArchive documentation for assumptions about file ordering.) If we don't have an index or don't find the file where the index says we should, look through all the zip files to find it. Args: file_path: the file that we're looking for Returns: The contents of the requested file """ resp_data = None file_itr = iter(self.zipfilenames) # check the index, if we have one, to see what archive the file is in archive_name = self.MapFileToArchive(file_path) if not archive_name: archive_name = file_itr.next()[0] while resp_data is None and archive_name: zip_archive = self.LoadZipFile(archive_name) if zip_archive: # we expect some lookups will fail, and that's okay, 404s will deal # with that try: resp_data = CacheFile() info = os.stat(archive_name) #lastmod = datetime.datetime.fromtimestamp(info[8]) lastmod = datetime.datetime(*zip_archive.getinfo(file_path).date_time) resp_data.file = zip_archive.read(file_path) resp_data.lastmod = lastmod resp_data.etag = '"%s"' % md5_constructor(resp_data.file).hexdigest() except (__HOLE__, RuntimeError), err: # no op x = False resp_data = None if resp_data is not None: logging.info('%s read from %s', file_path, archive_name) try: archive_name = file_itr.next()[0] except (StopIteration), err: archive_name = False return resp_data
KeyError
dataset/ETHPy150Open aehlke/manabi/apps/dojango/appengine/memcache_zipserve.py/MemcachedZipHandler.GetFromStore
5,981
def LoadZipFile(self, zipfilename): """Convenience method to load zip file. Just a convenience method to load the zip file from the data store. This is useful if we ever want to change data stores and also as a means of dependency injection for testing. This method will look at our file cache first, and then load and cache the file if there's a cache miss Args: zipfilename: the name of the zip file to load Returns: The zip file requested, or None if there is an I/O error """ zip_archive = None zip_archive = self.zipfile_cache.get(zipfilename) if zip_archive is None: try: zip_archive = zipfile.ZipFile(zipfilename) self.zipfile_cache[zipfilename] = zip_archive except (IOError, __HOLE__), err: logging.error('Can\'t open zipfile %s, cause: %s' % (zipfilename, err)) return zip_archive
RuntimeError
dataset/ETHPy150Open aehlke/manabi/apps/dojango/appengine/memcache_zipserve.py/MemcachedZipHandler.LoadZipFile
5,982
def handle_port(self, context, data): """Notify all agent extensions to handle port.""" for extension in self: try: extension.obj.handle_port(context, data) # TODO(QoS) add agent extensions exception and catch them here except __HOLE__: LOG.exception( _LE("Agent Extension '%(name)s' failed " "while handling port update"), {'name': extension.name} )
AttributeError
dataset/ETHPy150Open openstack/neutron/neutron/agent/l2/extensions/manager.py/AgentExtensionsManager.handle_port
5,983
def delete_port(self, context, data): """Notify all agent extensions to delete port.""" for extension in self: try: extension.obj.delete_port(context, data) # TODO(QoS) add agent extensions exception and catch them here # instead of AttributeError except __HOLE__: LOG.exception( _LE("Agent Extension '%(name)s' failed " "while handling port deletion"), {'name': extension.name} )
AttributeError
dataset/ETHPy150Open openstack/neutron/neutron/agent/l2/extensions/manager.py/AgentExtensionsManager.delete_port
5,984
@never_cache def password_reset_confirm(request, uidb64=None, token=None, template_name='registration/password_reset_confirm.html', token_generator=default_token_generator, set_password_form=SetPasswordForm, post_reset_redirect=None, current_app=None, extra_context=None): """ View that checks the hash in a password reset link and presents a form for entering a new password. """ assert uidb64 is not None and token is not None # checked by URLconf if post_reset_redirect is None: post_reset_redirect = reverse('django.contrib.auth.views.password_reset_complete') try: uid = urlsafe_base64_decode(str(uidb64)) user = User.objects.get(id=uid) except (__HOLE__, ValueError, User.DoesNotExist): user = None if user is not None and token_generator.check_token(user, token): validlink = True if request.method == 'POST': form = set_password_form(user, request.POST) if form.is_valid(): form.save() return HttpResponseRedirect(post_reset_redirect) else: form = set_password_form(None) else: validlink = False form = None context = { 'form': form, 'validlink': validlink, } context.update(extra_context or {}) return render_to_response(template_name, context, context_instance=RequestContext(request, current_app=current_app))
TypeError
dataset/ETHPy150Open adieu/django-nonrel/django/contrib/auth/views.py/password_reset_confirm
5,985
def dict_for_mongo(d): for key, value in d.items(): if type(value) == list: value = [dict_for_mongo(e) if type(e) == dict else e for e in value] elif type(value) == dict: value = dict_for_mongo(value) elif key == '_id': try: d[key] = int(value) except __HOLE__: # if it is not an int don't convert it pass if _is_invalid_for_mongo(key): del d[key] d[_encode_for_mongo(key)] = value return d
ValueError
dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/viewer/models/parsed_instance.py/dict_for_mongo
5,986
def build_suite(app_module): "Create a complete Django test suite for the provided application module" suite = unittest.TestSuite() # Load unit and doctests in the models.py file suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(app_module)) try: suite.addTest(doctest.DocTestSuite(app_module, checker=doctestOutputChecker, runner=DocTestRunner)) except ValueError: # No doc tests in models.py pass # Check to see if a separate 'tests' module exists parallel to the # models module try: app_path = app_module.__name__.split('.')[:-1] test_module = __import__('.'.join(app_path + [TEST_MODULE]), {}, {}, TEST_MODULE) suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(test_module)) try: suite.addTest(doctest.DocTestSuite(test_module, checker=doctestOutputChecker, runner=DocTestRunner)) except ValueError: # No doc tests in tests.py pass except ImportError, e: # Couldn't import tests.py. Was it due to a missing file, or # due to an import error in a tests.py that actually exists? import os.path from imp import find_module try: mod = find_module(TEST_MODULE, [os.path.dirname(app_module.__file__)]) except __HOLE__: # 'tests' module doesn't exist. Move on. pass else: # The module exists, so there must be an import error in the # test module itself. We don't need the module; close the file # handle returned by find_module. mod[0].close() raise return suite
ImportError
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-0.96/django/test/simple.py/build_suite
5,987
def main(argv): parser = ArgumentParser(usage=__doc__.lstrip()) parser.add_argument("--verbose", "-v", action="count", default=1, help="more verbosity") parser.add_argument("--no-build", "-n", action="store_true", default=False, help="do not build the project (use system installed version)") parser.add_argument("--build-only", "-b", action="store_true", default=False, help="just build, do not run any tests") parser.add_argument("--doctests", action="store_true", default=False, help="Run doctests in module") parser.add_argument("--refguide-check", action="store_true", default=False, help="Run refguide check (do not run regular tests.)") parser.add_argument("--coverage", action="store_true", default=False, help=("report coverage of project code. HTML output goes " "under build/coverage")) parser.add_argument("--gcov", action="store_true", default=False, help=("enable C code coverage via gcov (requires GCC). " "gcov output goes to build/**/*.gc*")) parser.add_argument("--lcov-html", action="store_true", default=False, help=("produce HTML for C code coverage information " "from a previous run with --gcov. " "HTML output goes to build/lcov/")) parser.add_argument("--mode", "-m", default="fast", help="'fast', 'full', or something that could be " "passed to nosetests -A [default: fast]") parser.add_argument("--submodule", "-s", default=None, help="Submodule whose tests to run (cluster, constants, ...)") parser.add_argument("--pythonpath", "-p", default=None, help="Paths to prepend to PYTHONPATH") parser.add_argument("--tests", "-t", action='append', help="Specify tests to run") parser.add_argument("--python", action="store_true", help="Start a Python shell with PYTHONPATH set") parser.add_argument("--ipython", "-i", action="store_true", help="Start IPython shell with PYTHONPATH set") parser.add_argument("--shell", action="store_true", help="Start Unix shell with PYTHONPATH set") parser.add_argument("--debug", "-g", action="store_true", help="Debug build") parser.add_argument("--parallel", "-j", type=int, default=1, help="Number of parallel jobs during build (requires " "Numpy 1.10 or greater).") parser.add_argument("--show-build-log", action="store_true", help="Show build output rather than using a log file") parser.add_argument("--bench", action="store_true", help="Run benchmark suite instead of test suite") parser.add_argument("--bench-compare", action="append", metavar="BEFORE", help=("Compare benchmark results of current HEAD to BEFORE. " "Use an additional --bench-compare=COMMIT to override HEAD with COMMIT. " "Note that you need to commit your changes first!" )) parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER, help="Arguments to pass to Nose, Python or shell") args = parser.parse_args(argv) if args.bench_compare: args.bench = True args.no_build = True # ASV does the building if args.lcov_html: # generate C code coverage output lcov_generate() sys.exit(0) if args.pythonpath: for p in reversed(args.pythonpath.split(os.pathsep)): sys.path.insert(0, p) if args.gcov: gcov_reset_counters() if args.debug and args.bench: print("*** Benchmarks should not be run against debug version; remove -g flag ***") if not args.no_build: site_dir = build_project(args) sys.path.insert(0, site_dir) os.environ['PYTHONPATH'] = site_dir extra_argv = args.args[:] if extra_argv and extra_argv[0] == '--': extra_argv = extra_argv[1:] if args.python: if extra_argv: # Don't use subprocess, since we don't want to include the # current path in PYTHONPATH. 
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            sys.modules['__main__'] = imp.new_module('__main__')
            ns = dict(__name__='__main__', __file__=extra_argv[0])
            exec_(script, ns)
            sys.exit(0)
        else:
            import code
            code.interact()
            sys.exit(0)

    if args.ipython:
        import IPython
        IPython.embed(user_ns={})
        sys.exit(0)

    if args.shell:
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        os.execv(shell, [shell] + extra_argv)
        sys.exit(1)

    if args.coverage:
        dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cover-html', '--cover-html-dir='+dst_dir]

    if args.refguide_check:
        cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'), '--doctests']
        if args.submodule:
            cmd += [args.submodule]
        os.execv(sys.executable, [sys.executable] + cmd)
        sys.exit(0)

    if args.bench:
        # Run ASV
        items = extra_argv
        if args.tests:
            items += args.tests
        if args.submodule:
            items += [args.submodule]
        bench_args = []
        for a in items:
            bench_args.extend(['--bench', a])
        if not args.bench_compare:
            cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'), 'run', '-n', '-e', '--python=same'] + bench_args
            os.execv(sys.executable, [sys.executable] + cmd)
            sys.exit(1)
        else:
            if len(args.bench_compare) == 1:
                commit_a = args.bench_compare[0]
                commit_b = 'HEAD'
            elif len(args.bench_compare) == 2:
                commit_a, commit_b = args.bench_compare
            else:
                p.error("Too many commits to compare benchmarks for")

            # Check for uncommitted files
            if commit_b == 'HEAD':
                r1 = subprocess.call(['git', 'diff-index', '--quiet', '--cached', 'HEAD'])
                r2 = subprocess.call(['git', 'diff-files', '--quiet'])
                if r1 != 0 or r2 != 0:
                    print("*"*80)
                    print("WARNING: you have uncommitted changes --- these will NOT be benchmarked!")
                    print("*"*80)

            # Fix commit ids (HEAD is local to current repo)
            p = subprocess.Popen(['git', 'rev-parse', commit_b], stdout=subprocess.PIPE)
            out, err = p.communicate()
            commit_b = out.strip()

            p = subprocess.Popen(['git', 'rev-parse', commit_a], stdout=subprocess.PIPE)
            out, err = p.communicate()
            commit_a = out.strip()

            cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'), '--current-repo', 'continuous', '-e', '-f', '1.05', commit_a, commit_b] + bench_args
            os.execv(sys.executable, [sys.executable] + cmd)
            sys.exit(1)

    test_dir = os.path.join(ROOT_DIR, 'build', 'test')

    if args.build_only:
        sys.exit(0)
    elif args.submodule:
        modname = PROJECT_MODULE + '.' + args.submodule
        try:
            __import__(modname)
            test = sys.modules[modname].test
        except (ImportError, KeyError, __HOLE__) as e:
            print("Cannot run tests for %s (%s)" % (modname, e))
            sys.exit(2)
    elif args.tests:
        def fix_test_path(x):
            # fix up test path
            p = x.split(':')
            p[0] = os.path.relpath(os.path.abspath(p[0]), test_dir)
            return ':'.join(p)

        tests = [fix_test_path(x) for x in args.tests]

        def test(*a, **kw):
            extra_argv = kw.pop('extra_argv', ())
            extra_argv = extra_argv + tests[1:]
            kw['extra_argv'] = extra_argv
            from numpy.testing import Tester
            return Tester(tests[0]).test(*a, **kw)
    else:
        __import__(PROJECT_MODULE)
        test = sys.modules[PROJECT_MODULE].test

    # Run the tests under build/test
    try:
        shutil.rmtree(test_dir)
    except OSError:
        pass
    try:
        os.makedirs(test_dir)
    except OSError:
        pass

    shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'), os.path.join(test_dir, '.coveragerc'))

    cwd = os.getcwd()
    try:
        os.chdir(test_dir)
        result = test(args.mode, verbose=args.verbose, extra_argv=extra_argv, doctests=args.doctests, coverage=args.coverage)
    finally:
        os.chdir(cwd)

    if isinstance(result, bool):
        sys.exit(0 if result else 1)
    elif result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
AttributeError
dataset/ETHPy150Open scipy/scipy/runtests.py/main
5,988
def lcov_generate(): try: os.unlink(LCOV_OUTPUT_FILE) except __HOLE__: pass try: shutil.rmtree(LCOV_HTML_DIR) except OSError: pass print("Capturing lcov info...") subprocess.call(['lcov', '-q', '-c', '-d', os.path.join(ROOT_DIR, 'build'), '-b', ROOT_DIR, '--output-file', LCOV_OUTPUT_FILE]) print("Generating lcov HTML output...") ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE, '--output-directory', LCOV_HTML_DIR, '--legend', '--highlight']) if ret != 0: print("genhtml failed!") else: print("HTML output generated under build/lcov/") # # Python 3 support #
OSError
dataset/ETHPy150Open scipy/scipy/runtests.py/lcov_generate
5,989
def _wrap_writer_for_text(fp, encoding): try: fp.write('') except __HOLE__: fp = io.TextIOWrapper(fp, encoding) return fp
TypeError
dataset/ETHPy150Open pallets/flask/flask/json.py/_wrap_writer_for_text
5,990
def run():
    """This client pushes PE Files -> ELS Indexer."""

    # Grab server args
    args = client_helper.grab_server_args()

    # Start up workbench connection
    workbench = zerorpc.Client(timeout=300, heartbeat=60)
    workbench.connect('tcp://'+args['server']+':'+args['port'])

    # Test out PEFile -> strings -> indexer -> search
    data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),'../data/pe/bad')
    file_list = [os.path.join(data_path, child) for child in os.listdir(data_path)][:20]
    for filename in file_list:

        # Skip OS generated files
        if '.DS_Store' in filename: continue

        with open(filename, 'rb') as f:
            base_name = os.path.basename(filename)
            md5 = workbench.store_sample(f.read(), base_name, 'exe')

            # Index the strings and features output (notice we can ask for any worker output)
            # Also (super important) it all happens on the server side.
            workbench.index_worker_output('strings', md5, 'strings', None)
            print '\n<<< Strings for PE: %s Indexed>>>' % (base_name)
            workbench.index_worker_output('pe_features', md5, 'pe_features', None)
            print '<<< Features for PE: %s Indexed>>>' % (base_name)

    # Well we should execute some queries against ElasticSearch at this point but as of
    # version 1.2+ the dynamic scripting disabled by default, see
    # 'http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-scripting.html#_enabling_dynamic_scripting

    # Now actually do something interesting with our ELS index
    # ES Facets are kewl (http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-facets.html)
    facet_query = '{"facets" : {"tag" : {"terms" : {"field" : "string_list"}}}}'
    results = workbench.search_index('strings', facet_query)
    try:
        print '\nQuery: %s' % facet_query
        print 'Number of hits: %d' % results['hits']['total']
        print 'Max Score: %f' % results['hits']['max_score']
        pprint.pprint(results['facets'])
    except TypeError:
        print 'Probably using a Stub Indexer, if you want an ELS Indexer see the readme'

    # Fuzzy is kewl (http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/query-dsl-fuzzy-query.html)
    fuzzy_query = '{"fields":["md5","sparse_features.imported_symbols"],' \
                  '"query": {"fuzzy" : {"sparse_features.imported_symbols" : "loadlibrary"}}}'
    results = workbench.search_index('pe_features', fuzzy_query)
    try:
        print '\nQuery: %s' % fuzzy_query
        print 'Number of hits: %d' % results['hits']['total']
        print 'Max Score: %f' % results['hits']['max_score']
        pprint.pprint([(hit['fields']['md5'], hit['fields']['sparse_features.imported_symbols'])
                       for hit in results['hits']['hits']])
    except __HOLE__:
        print 'Probably using a Stub Indexer, if you want an ELS Indexer see the readme'
TypeError
dataset/ETHPy150Open SuperCowPowers/workbench/workbench/clients/pe_indexer.py/run
5,991
def install_grabix(env): """a wee tool for random access into BGZF files https://github.com/arq5x/grabix """ version = "0.1.6" revision = "ba792bc872d38d3cb5a69b2de00e39a6ac367d69" try: uptodate = versioncheck.up_to_date(env, "grabix", version, stdout_flag="version:") # Old versions will not have any version information except __HOLE__: uptodate = False if uptodate: return repository = "git clone https://github.com/arq5x/grabix.git" _get_install(repository, env, _make_copy("ls -1 grabix"), revision=revision)
IOError
dataset/ETHPy150Open chapmanb/cloudbiolinux/cloudbio/custom/bio_nextgen.py/install_grabix
5,992
def get_default_mrcluster(): """ Get the default JT (not necessarily HA). """ global MR_CACHE global MR_NAME_CACHE try: all_mrclusters() return MR_CACHE.get(MR_NAME_CACHE) except __HOLE__: # Return an arbitrary cluster candidates = all_mrclusters() if candidates: return candidates.values()[0] return None
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/libs/hadoop/src/hadoop/cluster.py/get_default_mrcluster
5,993
def get_default_yarncluster(): """ Get the default RM (not necessarily HA). """ global MR_NAME_CACHE try: return conf.YARN_CLUSTERS[MR_NAME_CACHE] except __HOLE__: return get_yarn()
KeyError
dataset/ETHPy150Open cloudera/hue/desktop/libs/hadoop/src/hadoop/cluster.py/get_default_yarncluster
5,994
def tearDown(self): try: os.remove('SLSQP.out') except OSError: pass try: os.remove('SNOPT_print.out') os.remove('SNOPT_summary.out') except __HOLE__: pass
OSError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/mpitest/test_mpi_opt.py/TestMPIOpt.tearDown
5,995
def tearDown(self): try: os.remove('SLSQP.out') except OSError: pass try: os.remove('SNOPT_print.out') os.remove('SNOPT_summary.out') except __HOLE__: pass
OSError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/mpitest/test_mpi_opt.py/ParallelMPIOptAsym.tearDown
5,996
def tearDown(self): try: os.remove('SLSQP.out') except OSError: pass try: os.remove('SNOPT_print.out') os.remove('SNOPT_summary.out') except __HOLE__: pass
OSError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/mpitest/test_mpi_opt.py/ParallelMPIOptPromoted.tearDown
5,997
def tearDown(self): try: os.remove('SLSQP.out') except OSError: pass try: os.remove('SNOPT_print.out') os.remove('SNOPT_summary.out') except __HOLE__: pass
OSError
dataset/ETHPy150Open OpenMDAO/OpenMDAO/mpitest/test_mpi_opt.py/ParallelMPIOpt.tearDown
5,998
def create(self, req, body): parser = req.get_parser()(req.headers, req.body) scheme = {"category": storage.StorageResource.kind} obj = parser.parse() validator = occi_validator.Validator(obj) validator.validate(scheme) attrs = obj.get("attributes", {}) name = attrs.get("occi.core.title", "OCCI Volume") # TODO(enolfc): this should be handled by the validator try: size = attrs["occi.storage.size"] except __HOLE__: raise exception.Invalid() volume = self.os_helper.volume_create(req, name, size) st = storage.StorageResource(title=volume["displayName"], id=volume["id"], size=volume["size"], state=helpers.vol_state(volume["status"])) return collection.Collection(resources=[st])
KeyError
dataset/ETHPy150Open openstack/ooi/ooi/api/storage.py/Controller.create
5,999
def service(self):
    '''Handle a request
    '''
    # The key is the request path
    key = self.request.url.path.strip('/')
    # Set content-type
    self.response.headers = ['Content-Type: application/json']
    # Standard reply
    rsp = '{"status": "OK"}'
    if not key:
        # Empty key means list all available keys
        if self.request.method == 'GET':
            rsp = json_encode({'keys': self.entries.keys()})
        else:
            rsp = self.method_not_allowed()
    else:
        # Non-empty key means manipulate the store
        # HTTP method defines the action
        if self.request.method == 'GET':
            # Read an entry
            try:
                rsp = json_encode(self.entries[key])
            except KeyError:
                rsp = self.not_found(key)
        elif self.request.method in ('PUT', 'POST'):
            # Set an entry
            self.entries[key] = json_decode(self.request.input.read(1024*1024))
        elif self.request.method == 'DELETE':
            # Delete an entry
            try:
                del self.entries[key]
            except __HOLE__:
                rsp = self.not_found(key)
        else:
            rsp = self.method_not_allowed()
    # Respond
    self.response.headers.append('Content-Length: %d' % (len(rsp)+1) )
    self.response(rsp, '\n')
KeyError
dataset/ETHPy150Open rsms/smisk/examples/core/key-value-store/app.py/KeyValueStore.service