Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261) |
---|---|---|---|
5,200 |
def run(self):
'''Run loop'''
logger.info("processor starting...")
while not self._quit:
try:
task, response = self.inqueue.get(timeout=1)
self.on_task(task, response)
self._exceptions = 0
except Queue.Empty as e:
continue
except __HOLE__:
break
except Exception as e:
logger.exception(e)
self._exceptions += 1
if self._exceptions > self.EXCEPTION_LIMIT:
break
continue
logger.info("processor exiting...")
|
KeyboardInterrupt
|
dataset/ETHPy150Open binux/pyspider/pyspider/processor/processor.py/Processor.run
|
5,201 |
def handle(self, *args, **options):
logger = logging.getLogger(__name__)
logger.info('start django-sockjs-server')
self.config = SockJSServerSettings()
io_loop = tornado.ioloop.IOLoop.instance()
router = SockJSRouterPika(
SockJSConnection,
self.config.listen_location,
user_settings=self.config.router_settings)
app = tornado.web.Application([(r"/stats/(.*)", StatsHandler, dict(sockjs_server=router._connection.sockjs_server))] +
router.urls)
app.listen(
self.config.listen_port,
address=self.config.listen_addr
)
try:
io_loop.start()
except __HOLE__:
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open alfss/django-sockjs-server/django_sockjs_server/management/commands/sockjs_server.py/Command.handle
|
5,202 |
def fake_virtual_interface_delete_by_instance(context, instance_id):
vif = copy.copy(virtual_interfacees)
addresses = [m for m in vif
if m['instance_id'] == instance_id]
try:
for address in addresses:
vif.remove(address)
except __HOLE__:
pass
|
ValueError
|
dataset/ETHPy150Open BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova/nova/tests/unit/db/fakes.py/fake_virtual_interface_delete_by_instance
|
5,203 |
def base_n_decoder(alphabet=ALPHABET):
"""Decode a Base X encoded string into the number
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
"""
base = len(alphabet)
char_value = dict(((c, v) for v, c in enumerate(alphabet)))
def f(string):
num = 0
try:
for char in string:
num = num * base + char_value[char]
except __HOLE__:
raise ValueError('Unexpected character %r' % char)
return num
return f
|
KeyError
|
dataset/ETHPy150Open bbangert/velruse/velruse/app/baseconvert.py/base_n_decoder
|
5,204 |
def traverse_dot_path(self, traverser):
if traverser.remaining_paths:
new_value = None
name = traverser.next_part
try:
new_value = self[int(name)]
except __HOLE__:
raise DotPathNotFound("Invalid index given, must be an integer")
except IndexError:
pass
traverser.next(value=new_value)
else:
traverser.end(value=self)
|
ValueError
|
dataset/ETHPy150Open zbyte64/django-dockit/dockit/schema/common.py/DotPathList.traverse_dot_path
|
5,205 |
def traverse_dot_path(self, traverser):
if traverser.remaining_paths:
new_value = None
name = traverser.next_part
try:
new_value = self[name]
except __HOLE__:
pass
traverser.next(value=new_value)
else:
traverser.end(value=self)
|
KeyError
|
dataset/ETHPy150Open zbyte64/django-dockit/dockit/schema/common.py/DotPathDict.traverse_dot_path
|
5,206 |
@property
def program_id(self):
try:
return self.kwargs['prog_id']
except __HOLE__:
raise Http404()
|
KeyError
|
dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/programs/views.py/EditProgramView.program_id
|
5,207 |
def _undo_manager_default(self):
""" Trait initializer. """
# We make sure the undo package is entirely optional.
try:
from apptools.undo.api import UndoManager
except __HOLE__:
return None
return UndoManager()
|
ImportError
|
dataset/ETHPy150Open enthought/pyface/pyface/workbench/workbench.py/Workbench._undo_manager_default
|
5,208 |
@cached_property
def url_patterns(self):
# urlconf_module might be a valid set of patterns, so we default to it
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except __HOLE__:
msg = (
"The included URLconf '{name}' does not appear to have any "
"patterns in it. If you see valid patterns in the file then "
"the issue is probably caused by a circular import."
)
raise ImproperlyConfigured(msg.format(name=self.urlconf_name))
return patterns
|
TypeError
|
dataset/ETHPy150Open django/django/django/urls/resolvers.py/RegexURLResolver.url_patterns
|
5,209 |
def __init__(self):
self.services = {"openvpn": self.openvpn, "rdp": self.rdp, "sshkey": self.sshkey, "vnckey": self.vnckey}
self.crowbar_readme = "https://github.com/galkan/crowbar/blob/master/README.md"
self.openvpn_path = "/usr/sbin/openvpn"
self.vpn_failure = re.compile("SIGTERM\[soft,auth-failure\] received, process exiting")
self.vpn_success = re.compile("Initialization Sequence Completed")
self.vpn_remote_regex = re.compile("^\s+remote\s[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\s[0-9]{1,3}")
self.vpn_warning = "Warning! Both \"remote\" options were used at the same time. But command line \"remote\" options will be used!"
self.xfreerdp_path = "/usr/bin/xfreerdp"
self.rdp_success = "Authentication only, exit status 0"
self.rdp_display_error = "Please check that the \$DISPLAY environment variable is properly set."
self.vncviewer_path = "/usr/bin/vncviewer"
self.vnc_success = "Authentication successful"
description = "Crowbar is a brute force tool which supports OpenVPN, Remote Desktop Protocol, SSH Private Keys and VNC Keys."
usage = "Usage: use --help for further information"
parser = argparse.ArgumentParser(description=description, usage=usage)
parser.add_argument('-b', '--brute', dest='brute', help='Target service', choices=self.services.keys(),
required=True)
parser.add_argument('-s', '--server', dest='server', action='store', help='Static target')
parser.add_argument('-S', '--serverfile', dest='server_file', action='store', help='Multiple targets stored in a file')
parser.add_argument('-u', '--username', dest='username', action='store', nargs='+', help='Static name to login with')
parser.add_argument('-U', '--usernamefile', dest='username_file', action='store', help='Multiple names to login with, stored in a file')
parser.add_argument('-n', '--number', dest='thread', action='store', help='Number of threads to be active at once', default=5, type=int)
parser.add_argument('-l', '--log', dest='log_file', action='store', help='Log file (only write attempts)', metavar='FILE',
default="crowbar.log")
parser.add_argument('-o', '--output', dest='output', action='store', help='Output file (write everything else)', metavar='FILE',
default="crowbar.out")
parser.add_argument('-c', '--passwd', dest='passwd', action='store', help='Static password to login with')
parser.add_argument('-C', '--passwdfile', dest='passwd_file', action='store', help='Multiple passwords to login with, stored in a file',
metavar='FILE')
parser.add_argument('-t', '--timeout', dest='timeout', action='store', help='[SSH] How long to wait for each thread (seconds)', default=10, type=int)
parser.add_argument('-p', '--port', dest='port', action='store', help='Alter the port if the service is not using the default value', type=int)
parser.add_argument('-k', '--keyfile', dest='key_file', action='store', help='[SSH/VNC] (Private) Key file or folder containing multiple files')
parser.add_argument('-m', '--config', dest='config', action='store', help='[OpenVPN] Configuration file ')
parser.add_argument('-d', '--discover', dest='discover', action='store_true', help='Port scan before attacking open ports', default=False)
parser.add_argument('-v', '--verbose', dest='verbose', action="count", help='Enable verbose output (-vv for more)', default=False)
parser.add_argument('-D', '--debug', dest='debug', action='store_true', help='Enable debug mode', default=False)
parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', help='Only display successful logins', default=False)
parser.add_argument('options', nargs='*', action=AddressAction)
try:
self.args = parser.parse_args()
except Exception, err:
raise CrowbarExceptions(str(err))
self.ip_list = []
if self.args.discover:
self.nmap = Nmap()
else:
iprange = IpRange()
try:
if self.args.server is not None:
for _ in self.args.server.split(","):
for ip in iprange.iprange(_):
self.ip_list.append(ip)
else:
for _ in open(self.args.server_file, "r"):
for ip in iprange.iprange(_):
if not ip in self.ip_list:
self.ip_list.append(ip)
except __HOLE__:
mess = "File: %s cannot be opened!" % os.path.abspath(self.args.server_file)
raise CrowbarExceptions(mess)
except:
mess = "Invalid IP Address! Please use IP/CIDR notation <192.168.37.37/32, 192.168.1.0/24>"
raise CrowbarExceptions(mess)
if self.args.verbose:
self.logger = Logger(self.args.log_file, self.args.output, True)
else:
self.logger = Logger(self.args.log_file, self.args.output)
self.logger.output_file("START")
if not self.args.quiet:
self.logger.output_file(__banner__)
if self.args.verbose:
self.logger.output_file("Brute Force Type: %s" % self.args.brute)
self.logger.output_file(" Output File: %s" % os.path.abspath(self.args.output))
self.logger.output_file(" Log File: %s" % os.path.abspath(self.args.log_file))
self.logger.output_file(" Discover Mode: %s" % self.args.discover)
self.logger.output_file(" Verbose Mode: %s" % self.args.verbose)
self.logger.output_file(" Debug Mode: %s" % self.args.debug)
|
IOError
|
dataset/ETHPy150Open galkan/crowbar/lib/main.py/Main.__init__
|
5,210 |
def __init__(self, lock):
"""Constructor for _BaseCondition.
@type lock: threading.Lock
@param lock: condition base lock
"""
object.__init__(self)
try:
self._release_save = lock._release_save
except __HOLE__:
self._release_save = self._base_release_save
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
self._acquire_restore = self._base_acquire_restore
try:
self._is_owned = lock.is_owned
except AttributeError:
self._is_owned = self._base_is_owned
self._lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
|
AttributeError
|
dataset/ETHPy150Open ganeti/ganeti/lib/locking.py/_BaseCondition.__init__
|
5,211 |
def _json_convert(obj):
"""Recursive helper method that converts BSON types so they can be
converted into json.
"""
from numpy import ndarray
if hasattr(obj, 'iteritems') or hasattr(obj, 'items'): # PY3 support
return SON(((k, _json_convert(v)) for k, v in obj.iteritems()))
elif hasattr(obj, '__iter__') and not isinstance(obj, string_types) and not isinstance(obj, ndarray):
return list((_json_convert(v) for v in obj))
try:
return default(obj)
except __HOLE__:
return obj
|
TypeError
|
dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.lib/src/openmdao/lib/casehandlers/pymongo_bson/json_util.py/_json_convert
|
5,212 |
def __getattr__(self, name):
try:
return self.__getattribute__(name)
except __HOLE__:
pass
def dummy(*args, **kwargs):
pass
return dummy
|
AttributeError
|
dataset/ETHPy150Open Dav1dde/glad/glad/lang/common/loader.py/NullLoader.__getattr__
|
5,213 |
def extract_keyword_queries(store, label_store, folders, fid, sid, include_original=False):
'''Transforms a folder structure into positive and negative examples
to feed to ``linker.model.extract``. This transforms SortingDesk's
foldering structure into *supervision* data for the extractor.
This works best if folder name (``fid``) is the ``name`` of an entity
in question or, more generally, a query that a user might have
issued to a search engine. In particular, this approach makes
sense for the annotated version of this task, which is what
SortingDesk enables.
This returns five queries with the original_query name:
0. if `include_original`, then original name; the model will
eliminate if bad but it seems like it's a mistake to omit
1. plus the most predictive keyword
2. minus the least predictive keyword
3. minus the most predictive keyword for the negative class
4. plus the least predictive keyword for the negative class
Additionally, if any of these words are the name, we skip to the
next keyword in the list.
Returns a three tuple of ([unicode], [unicode], bool) where the
first list is query strings to send to a search engine, and the
second list is feature strings to put in a StringCounter.
'''
keyword_feature_keys = []
query_names = fid.split('_')
## quotes added so that google treats the name as one token
name1 = ' '.join(query_names)
#keyword_feature_keys.append(name1)
original_query = '\"' + name1 + '\"'
logger.info('the original query was %s', original_query)
queries = []
## 0. original name
if include_original:
logger.info('query 0: including the original: %r', original_query)
queries.append(original_query)
if sid:
name2 = ' '.join(sid.split('_'))
keyword_feature_keys.append(name2)
queries.append( '\"' + name2 + '\"' )
## generate positive and negative examples by traversing folders
try:
ids = map(itemgetter(0), folders.items(fid, sid))
except __HOLE__:
logger.info('Folder traversal failed to find ids, so no '
'training data; giving up on model-based queries')
# third return value of `False` means no observations
return queries, keyword_feature_keys, False
positive_fcs = map(itemgetter(1), store.get_many(ids))
negative_ids = imap(itemgetter(0),
negative_subfolder_ids(label_store, folders, fid, sid))
negative_fcs = map(itemgetter(1), store.get_many(negative_ids))
## These features were selected by manual inspection of current
## FOSS NER output.
pos_words, neg_words = extract(positive_fcs, negative_fcs,
features=['GPE', 'PERSON', 'ORGANIZATION'])
## 1. plus the most predictive keyword
query_plus_pred = original_query + ' ' + \
name_filter(pos_words, query_names)
logger.info('query 1: + most predictive: %r', query_plus_pred)
queries.append(query_plus_pred)
## 2. minus the least predictive keyword
query_min_least = original_query + ' -' + \
name_filter(reversed(pos_words), query_names)
logger.info('query 2: - least predictive: %r', query_min_least)
queries.append(query_min_least)
## 3. minus the most predictive keyword for the negative class
query_min_most_neg = original_query + ' -' + \
name_filter(neg_words, query_names)
logger.info('query 3: - most predictive for neg: %r', query_min_most_neg)
queries.append(query_min_most_neg)
## 4. plus the least predictive keyword for the negative class
query_plus_least_neg = original_query + ' ' + \
name_filter(reversed(neg_words), query_names)
logger.info('query 4: + least predictive for neg: %r', query_plus_least_neg)
queries.append(query_plus_least_neg)
## for debugging
# logger.info('length %d', len(positive_fcs))
# for fc in positive_fcs:
# logger.info('pos fc %r', fc['title'])
# logger.info('pos fc %r', positive_fcs[3]['GPE'])
# logger.info('pos fc %r', positive_fcs[3].keys())
# logger.info('pos fc %r', positive_fcs[3]['PERSON'])
# logger.info('positive keywords: %r', pos_words)
# logger.info('negative keywords: %r', neg_words)
# logger.info('most positive keyword: %r', pos_words[0])
return queries, keyword_feature_keys, True
|
KeyError
|
dataset/ETHPy150Open dossier/dossier.models/dossier/models/linker/worker.py/extract_keyword_queries
|
5,214 |
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except __HOLE__:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
|
ValueError
|
dataset/ETHPy150Open dcramer/django-compositepks/django/contrib/admin/util.py/unquote
|
5,215 |
def get_deleted_objects(deleted_objects, perms_needed, user, obj, opts, current_depth, admin_site):
"Helper function that recursively populates deleted_objects."
nh = _nest_help # Bind to local variable for performance
if current_depth > 16:
return # Avoid recursing too deep.
opts_seen = []
for related in opts.get_all_related_objects():
has_admin = related.model in admin_site._registry
if related.opts in opts_seen:
continue
opts_seen.append(related.opts)
rel_opts_name = related.get_accessor_name()
if isinstance(related.field.rel, models.OneToOneRel):
try:
sub_obj = getattr(obj, rel_opts_name)
except __HOLE__:
pass
else:
if has_admin:
p = '%s.%s' % (related.opts.app_label, related.opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(related.opts.verbose_name)
# We don't care about populating deleted_objects now.
continue
if not has_admin:
# Don't display link to edit, because it either has no
# admin or is edited inline.
nh(deleted_objects, current_depth, [u'%s: %s' % (force_unicode(capfirst(related.opts.verbose_name)), sub_obj), []])
else:
# Display a link to the admin page.
nh(deleted_objects, current_depth, [mark_safe(u'%s: <a href="../../../../%s/%s/%s/">%s</a>' %
(escape(force_unicode(capfirst(related.opts.verbose_name))),
related.opts.app_label,
related.opts.object_name.lower(),
sub_obj._get_pk_val(), sub_obj)), []])
get_deleted_objects(deleted_objects, perms_needed, user, sub_obj, related.opts, current_depth+2, admin_site)
else:
has_related_objs = False
for sub_obj in getattr(obj, rel_opts_name).all():
has_related_objs = True
if not has_admin:
# Don't display link to edit, because it either has no
# admin or is edited inline.
nh(deleted_objects, current_depth, [u'%s: %s' % (force_unicode(capfirst(related.opts.verbose_name)), sub_obj), []])
else:
# Display a link to the admin page.
nh(deleted_objects, current_depth, [mark_safe(u'%s: <a href="../../../../%s/%s/%s/">%s</a>' % \
(escape(force_unicode(capfirst(related.opts.verbose_name))), related.opts.app_label, related.opts.object_name.lower(), sub_obj._get_pk_val(), escape(sub_obj))), []])
get_deleted_objects(deleted_objects, perms_needed, user, sub_obj, related.opts, current_depth+2, admin_site)
# If there were related objects, and the user doesn't have
# permission to delete them, add the missing perm to perms_needed.
if has_admin and has_related_objs:
p = '%s.%s' % (related.opts.app_label, related.opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(related.opts.verbose_name)
for related in opts.get_all_related_many_to_many_objects():
has_admin = related.model in admin_site._registry
if related.opts in opts_seen:
continue
opts_seen.append(related.opts)
rel_opts_name = related.get_accessor_name()
has_related_objs = False
# related.get_accessor_name() could return None for symmetrical relationships
if rel_opts_name:
rel_objs = getattr(obj, rel_opts_name, None)
if rel_objs:
has_related_objs = True
if has_related_objs:
for sub_obj in rel_objs.all():
if not has_admin:
# Don't display link to edit, because it either has no
# admin or is edited inline.
nh(deleted_objects, current_depth, [_('One or more %(fieldname)s in %(name)s: %(obj)s') % \
{'fieldname': force_unicode(related.field.verbose_name), 'name': force_unicode(related.opts.verbose_name), 'obj': escape(sub_obj)}, []])
else:
# Display a link to the admin page.
nh(deleted_objects, current_depth, [
mark_safe((_('One or more %(fieldname)s in %(name)s:') % {'fieldname': escape(force_unicode(related.field.verbose_name)), 'name': escape(force_unicode(related.opts.verbose_name))}) + \
(u' <a href="../../../../%s/%s/%s/">%s</a>' % \
(related.opts.app_label, related.opts.module_name, sub_obj._get_pk_val(), escape(sub_obj)))), []])
# If there were related objects, and the user doesn't have
# permission to change them, add the missing perm to perms_needed.
if has_admin and has_related_objs:
p = u'%s.%s' % (related.opts.app_label, related.opts.get_change_permission())
if not user.has_perm(p):
perms_needed.add(related.opts.verbose_name)
|
ObjectDoesNotExist
|
dataset/ETHPy150Open dcramer/django-compositepks/django/contrib/admin/util.py/get_deleted_objects
|
5,216 |
def _SelectDownloadStrategy(dst_url):
"""Get download strategy based on the destination object.
Args:
dst_url: Destination StorageUrl.
Returns:
gsutil Cloud API DownloadStrategy.
"""
dst_is_special = False
if dst_url.IsFileUrl():
# Check explicitly first because os.stat doesn't work on 'nul' in Windows.
if dst_url.object_name == os.devnull:
dst_is_special = True
try:
mode = os.stat(dst_url.object_name).st_mode
if stat.S_ISCHR(mode):
dst_is_special = True
except __HOLE__:
pass
if dst_is_special:
return CloudApi.DownloadStrategy.ONE_SHOT
else:
return CloudApi.DownloadStrategy.RESUMABLE
|
OSError
|
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/copy_helper.py/_SelectDownloadStrategy
|
5,217 |
def _UploadFileToObjectResumable(src_url, src_obj_filestream,
src_obj_size, dst_url, dst_obj_metadata,
preconditions, gsutil_api, logger):
"""Uploads the file using a resumable strategy.
Args:
src_url: Source FileUrl to upload. Must not be a stream.
src_obj_filestream: File pointer to uploadable bytes.
src_obj_size: Size of the source object.
dst_url: Destination StorageUrl for the upload.
dst_obj_metadata: Metadata for the target object.
preconditions: Preconditions for the upload, if any.
gsutil_api: gsutil Cloud API instance to use for the upload.
logger: for outputting log messages.
Returns:
Elapsed upload time, uploaded Object with generation, md5, and size fields
populated.
"""
tracker_file_name = GetTrackerFilePath(
dst_url, TrackerFileType.UPLOAD,
gsutil_api.GetApiSelector(provider=dst_url.scheme))
encryption_tuple, encryption_key_sha256 = GetEncryptionTupleAndSha256Hash()
def _UploadTrackerCallback(serialization_data):
"""Creates a new tracker file for starting an upload from scratch.
This function is called by the gsutil Cloud API implementation and the
the serialization data is implementation-specific.
Args:
serialization_data: Serialization data used in resuming the upload.
"""
tracker_file = None
try:
tracker_file = open(tracker_file_name, 'w')
tracker_data = {
ENCRYPTION_UPLOAD_TRACKER_ENTRY: encryption_key_sha256,
SERIALIZATION_UPLOAD_TRACKER_ENTRY: str(serialization_data)
}
tracker_file.write(json.dumps(tracker_data))
except __HOLE__ as e:
RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)
finally:
if tracker_file:
tracker_file.close()
# This contains the upload URL, which will uniquely identify the
# destination object.
tracker_data = GetUploadTrackerData(
tracker_file_name, logger, encryption_key_sha256=encryption_key_sha256)
if tracker_data:
logger.info(
'Resuming upload for %s', src_url.url_string)
retryable = True
progress_callback = FileProgressCallbackHandler(
ConstructAnnounceText('Uploading', dst_url.url_string),
gsutil_api.status_queue).call
if global_copy_helper_opts.test_callback_file:
with open(global_copy_helper_opts.test_callback_file, 'rb') as test_fp:
progress_callback = pickle.loads(test_fp.read()).call
start_time = time.time()
num_startover_attempts = 0
# This loop causes us to retry when the resumable upload failed in a way that
# requires starting over with a new upload ID. Retries within a single upload
# ID within the current process are handled in
# gsutil_api.UploadObjectResumable, and retries within a single upload ID
# spanning processes happens if an exception occurs not caught below (which
# will leave the tracker file in place, and cause the upload ID to be reused
# the next time the user runs gsutil and attempts the same upload).
while retryable:
try:
uploaded_object = gsutil_api.UploadObjectResumable(
src_obj_filestream, object_metadata=dst_obj_metadata,
canned_acl=global_copy_helper_opts.canned_acl,
preconditions=preconditions, provider=dst_url.scheme,
size=src_obj_size, serialization_data=tracker_data,
encryption_tuple=encryption_tuple, fields=UPLOAD_RETURN_FIELDS,
tracker_callback=_UploadTrackerCallback,
progress_callback=progress_callback)
retryable = False
except ResumableUploadStartOverException, e:
# This can happen, for example, if the server sends a 410 response code.
# In that case the current resumable upload ID can't be reused, so delete
# the tracker file and try again up to max retries.
num_startover_attempts += 1
retryable = (num_startover_attempts < GetNumRetries())
if not retryable:
raise
# If the server sends a 404 response code, then the upload should only
# be restarted if it was the object (and not the bucket) that was missing.
try:
gsutil_api.GetBucket(dst_obj_metadata.bucket, provider=dst_url.scheme)
except NotFoundException:
raise
logger.info('Restarting upload from scratch after exception %s', e)
DeleteTrackerFile(tracker_file_name)
tracker_data = None
src_obj_filestream.seek(0)
# Reset the progress callback handler.
progress_callback = FileProgressCallbackHandler(
ConstructAnnounceText('Uploading', dst_url.url_string),
gsutil_api.status_queue).call
logger.info('\n'.join(textwrap.wrap(
'Resumable upload of %s failed with a response code indicating we '
'need to start over with a new resumable upload ID. Backing off '
'and retrying.' % src_url.url_string)))
time.sleep(min(random.random() * (2 ** num_startover_attempts),
GetMaxRetryDelay()))
except ResumableUploadAbortException:
retryable = False
raise
finally:
if not retryable:
DeleteTrackerFile(tracker_file_name)
end_time = time.time()
elapsed_time = end_time - start_time
return (elapsed_time, uploaded_object)
|
IOError
|
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/copy_helper.py/_UploadFileToObjectResumable
|
5,218 |
def _GetDownloadFile(dst_url, src_obj_metadata, logger):
"""Creates a new download file, and deletes the file that will be replaced.
Names and creates a temporary file for this download. Also, if there is an
existing file at the path where this file will be placed after the download
is completed, that file will be deleted.
Args:
dst_url: Destination FileUrl.
src_obj_metadata: Metadata from the source object.
logger: for outputting log messages.
Returns:
(download_file_name, need_to_unzip)
download_file_name: The name of the temporary file to which the object will
be downloaded.
need_to_unzip: If true, a temporary zip file was used and must be
uncompressed as part of validation.
"""
dir_name = os.path.dirname(dst_url.object_name)
if dir_name and not os.path.exists(dir_name):
# Do dir creation in try block so can ignore case where dir already
# exists. This is needed to avoid a race condition when running gsutil
# -m cp.
try:
os.makedirs(dir_name)
except __HOLE__, e:
if e.errno != errno.EEXIST:
raise
need_to_unzip = False
# For gzipped objects download to a temp file and unzip. For the XML API,
# this represents the result of a HEAD request. For the JSON API, this is
# the stored encoding which the service may not respect. However, if the
# server sends decompressed bytes for a file that is stored compressed
# (double compressed case), there is no way we can validate the hash and
# we will fail our hash check for the object.
if ObjectIsGzipEncoded(src_obj_metadata):
need_to_unzip = True
download_file_name = _GetDownloadTempZipFileName(dst_url)
logger.info(
'Downloading to temp gzip filename %s', download_file_name)
else:
download_file_name = _GetDownloadTempFileName(dst_url)
# If a file exists at the permanent destination (where the file will be moved
# after the download is completed), delete it here to reduce disk space
# requirements.
if os.path.exists(dst_url.object_name):
os.unlink(dst_url.object_name)
# Downloads open the temporary download file in r+b mode, which requires it
# to already exist, so we create it here if it doesn't exist already.
fp = open(download_file_name, 'ab')
fp.close()
return download_file_name, need_to_unzip
|
OSError
|
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/copy_helper.py/_GetDownloadFile
|
5,219 |
def _MaintainSlicedDownloadTrackerFiles(src_obj_metadata, dst_url,
download_file_name, logger,
api_selector, num_components):
"""Maintains sliced download tracker files in order to permit resumability.
Reads or creates a sliced download tracker file representing this object
download. Upon an attempt at cross-process resumption, the contents of the
sliced download tracker file are verified to make sure a resumption is
possible and appropriate. In the case that a resumption should not be
attempted, existing component tracker files are deleted (to prevent child
processes from attempting resumption), and a new sliced download tracker
file is created.
Args:
src_obj_metadata: Metadata from the source object. Must include etag and
generation.
dst_url: Destination FileUrl.
download_file_name: Temporary file name to be used for the download.
logger: for outputting log messages.
api_selector: The Cloud API implementation used.
num_components: The number of components to perform this download with.
"""
assert src_obj_metadata.etag
tracker_file = None
# Only can happen if the resumable threshold is set higher than the
# parallel transfer threshold.
if src_obj_metadata.size < ResumableThreshold():
return
tracker_file_name = GetTrackerFilePath(dst_url,
TrackerFileType.SLICED_DOWNLOAD,
api_selector)
# Check to see if we should attempt resuming the download.
try:
fp = open(download_file_name, 'rb')
existing_file_size = GetFileSize(fp)
# A parallel resumption should be attempted only if the destination file
# size is exactly the same as the source size and the tracker file matches.
if existing_file_size == src_obj_metadata.size:
tracker_file = open(tracker_file_name, 'r')
tracker_file_data = json.load(tracker_file)
if (tracker_file_data['etag'] == src_obj_metadata.etag and
tracker_file_data['generation'] == src_obj_metadata.generation and
tracker_file_data['num_components'] == num_components):
return
else:
tracker_file.close()
logger.warn('Sliced download tracker file doesn\'t match for '
'download of %s. Restarting download from scratch.' %
dst_url.object_name)
except (IOError, __HOLE__) as e:
# Ignore non-existent file (happens first time a download
# is attempted on an object), but warn user for other errors.
if isinstance(e, ValueError) or e.errno != errno.ENOENT:
logger.warn('Couldn\'t read sliced download tracker file (%s): %s. '
'Restarting download from scratch.' %
(tracker_file_name, str(e)))
finally:
if fp:
fp.close()
if tracker_file:
tracker_file.close()
# Delete component tracker files to guarantee download starts from scratch.
DeleteDownloadTrackerFiles(dst_url, api_selector)
# Create a new sliced download tracker file to represent this download.
try:
with open(tracker_file_name, 'w') as tracker_file:
tracker_file_data = {'etag': src_obj_metadata.etag,
'generation': src_obj_metadata.generation,
'num_components': num_components}
tracker_file.write(json.dumps(tracker_file_data))
except IOError as e:
RaiseUnwritableTrackerFileException(tracker_file_name, e.strerror)
|
ValueError
|
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/copy_helper.py/_MaintainSlicedDownloadTrackerFiles
|
5,220 |
def _ValidateAndCompleteDownload(logger, src_url, src_obj_metadata, dst_url,
need_to_unzip, server_gzip, digesters,
hash_algs, download_file_name,
api_selector, bytes_transferred, gsutil_api):
"""Validates and performs necessary operations on a downloaded file.
Validates the integrity of the downloaded file using hash_algs. If the file
was compressed (temporarily), the file will be decompressed. Then, if the
integrity of the file was successfully validated, the file will be moved
from its temporary download location to its permanent location on disk.
Args:
logger: For outputting log messages.
src_url: StorageUrl for the source object.
src_obj_metadata: Metadata for the source object, potentially containing
hash values.
dst_url: StorageUrl describing the destination file.
need_to_unzip: If true, a temporary zip file was used and must be
uncompressed as part of validation.
server_gzip: If true, the server gzipped the bytes (regardless of whether
the object metadata claimed it was gzipped).
digesters: dict of {string, hash digester} that contains up-to-date digests
computed during the download. If a digester for a particular
algorithm is None, an up-to-date digest is not available and the
hash must be recomputed from the local file.
hash_algs: dict of {string, hash algorithm} that can be used if digesters
don't have up-to-date digests.
download_file_name: Temporary file name that was used for download.
api_selector: The Cloud API implementation used (used tracker file naming).
bytes_transferred: Number of bytes downloaded (used for logging).
gsutil_api: Cloud API to use for service and status.
Returns:
An MD5 of the local file, if one was calculated as part of the integrity
check.
"""
final_file_name = dst_url.object_name
file_name = download_file_name
digesters_succeeded = True
for alg in digesters:
# If we get a digester with a None algorithm, the underlying
# implementation failed to calculate a digest, so we will need to
# calculate one from scratch.
if not digesters[alg]:
digesters_succeeded = False
break
if digesters_succeeded:
local_hashes = _CreateDigestsFromDigesters(digesters)
else:
local_hashes = _CreateDigestsFromLocalFile(
gsutil_api.status_queue, hash_algs, file_name, final_file_name,
src_obj_metadata)
digest_verified = True
hash_invalid_exception = None
try:
_CheckHashes(logger, src_url, src_obj_metadata, final_file_name,
local_hashes)
DeleteDownloadTrackerFiles(dst_url, api_selector)
except HashMismatchException, e:
# If an non-gzipped object gets sent with gzip content encoding, the hash
# we calculate will match the gzipped bytes, not the original object. Thus,
# we'll need to calculate and check it after unzipping.
if server_gzip:
logger.debug(
'Hash did not match but server gzipped the content, will '
'recalculate.')
digest_verified = False
elif api_selector == ApiSelector.XML:
logger.debug(
'Hash did not match but server may have gzipped the content, will '
'recalculate.')
# Save off the exception in case this isn't a gzipped file.
hash_invalid_exception = e
digest_verified = False
else:
DeleteDownloadTrackerFiles(dst_url, api_selector)
if _RENAME_ON_HASH_MISMATCH:
os.rename(file_name,
final_file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
else:
os.unlink(file_name)
raise
if need_to_unzip or server_gzip:
# Log that we're uncompressing if the file is big enough that
# decompressing would make it look like the transfer "stalled" at the end.
if bytes_transferred > TEN_MIB:
logger.info(
'Uncompressing temporarily gzipped file to %s...', final_file_name)
gzip_fp = None
try:
# Downloaded temporarily gzipped file, unzip to file without '_.gztmp'
# suffix.
gzip_fp = gzip.open(file_name, 'rb')
with open(final_file_name, 'wb') as f_out:
data = gzip_fp.read(GZIP_CHUNK_SIZE)
while data:
f_out.write(data)
data = gzip_fp.read(GZIP_CHUNK_SIZE)
except __HOLE__, e:
# In the XML case where we don't know if the file was gzipped, raise
# the original hash exception if we find that it wasn't.
if 'Not a gzipped file' in str(e) and hash_invalid_exception:
# Linter improperly thinks we're raising None despite the above check.
# pylint: disable=raising-bad-type
raise hash_invalid_exception
finally:
if gzip_fp:
gzip_fp.close()
os.unlink(file_name)
file_name = final_file_name
if not digest_verified:
try:
# Recalculate hashes on the unzipped local file.
local_hashes = _CreateDigestsFromLocalFile(
gsutil_api.status_queue, hash_algs, file_name, final_file_name,
src_obj_metadata)
_CheckHashes(logger, src_url, src_obj_metadata, final_file_name,
local_hashes)
DeleteDownloadTrackerFiles(dst_url, api_selector)
except HashMismatchException:
DeleteDownloadTrackerFiles(dst_url, api_selector)
if _RENAME_ON_HASH_MISMATCH:
os.rename(file_name,
file_name + _RENAME_ON_HASH_MISMATCH_SUFFIX)
else:
os.unlink(file_name)
raise
if file_name != final_file_name:
# Data is still in a temporary file, so move it to a permanent location.
if os.path.exists(final_file_name):
os.unlink(final_file_name)
os.rename(file_name,
final_file_name)
if 'md5' in local_hashes:
return local_hashes['md5']
|
IOError
|
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/copy_helper.py/_ValidateAndCompleteDownload
|
5,221 |
def _ParseManifest(self):
"""Load and parse a manifest file.
This information will be used to skip any files that have a skip or OK
status.
"""
try:
if os.path.exists(self.manifest_path):
with open(self.manifest_path, 'rb') as f:
first_row = True
reader = csv.reader(f)
for row in reader:
if first_row:
try:
source_index = row.index('Source')
result_index = row.index('Result')
except __HOLE__:
# No header and thus not a valid manifest file.
raise CommandException(
'Missing headers in manifest file: %s' % self.manifest_path)
first_row = False
source = row[source_index]
result = row[result_index]
if result in ['OK', 'skip']:
# We're always guaranteed to take the last result of a specific
# source url.
self.manifest_filter[source] = result
except IOError:
raise CommandException('Could not parse %s' % self.manifest_path)
|
ValueError
|
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/copy_helper.py/Manifest._ParseManifest
|
5,222 |
def _CreateManifestFile(self):
"""Opens the manifest file and assigns it to the file pointer."""
try:
if ((not os.path.exists(self.manifest_path))
or (os.stat(self.manifest_path).st_size == 0)):
# Add headers to the new file.
with open(self.manifest_path, 'wb', 1) as f:
writer = csv.writer(f)
writer.writerow(['Source',
'Destination',
'Start',
'End',
'Md5',
'UploadId',
'Source Size',
'Bytes Transferred',
'Result',
'Description'])
except __HOLE__:
raise CommandException('Could not create manifest file.')
|
IOError
|
dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/copy_helper.py/Manifest._CreateManifestFile
|
5,223 |
def reset(self):
"""
resets the storage if it supports being reset
"""
try:
self._storage.reset()
self.logger.info("Storage has be reset and all limits cleared")
except __HOLE__:
self.logger.warning("This storage type does not support being reset")
|
NotImplementedError
|
dataset/ETHPy150Open alisaifee/flask-limiter/flask_limiter/extension.py/Limiter.reset
|
5,224 |
def __check_request_limit(self):
endpoint = request.endpoint or ""
view_func = current_app.view_functions.get(endpoint, None)
name = ("%s.%s" % (
view_func.__module__, view_func.__name__
) if view_func else ""
)
if (not request.endpoint
or not self.enabled
or view_func == current_app.send_static_file
or name in self._exempt_routes
or request.blueprint in self._blueprint_exempt
or any(fn() for fn in self._request_filters)
):
return
limits = (
name in self._route_limits and self._route_limits[name]
or []
)
dynamic_limits = []
if name in self._dynamic_route_limits:
for lim in self._dynamic_route_limits[name]:
try:
dynamic_limits.extend(
ExtLimit(
limit, lim.key_func, lim.scope, lim.per_method,
lim.methods, lim.error_message, lim.exempt_when
) for limit in parse_many(lim.limit)
)
except __HOLE__ as e:
self.logger.error(
"failed to load ratelimit for view function %s (%s)"
, name, e
)
if request.blueprint:
if (request.blueprint in self._blueprint_dynamic_limits
and not dynamic_limits
):
for lim in self._blueprint_dynamic_limits[request.blueprint]:
try:
dynamic_limits.extend(
ExtLimit(
limit, lim.key_func, lim.scope, lim.per_method,
lim.methods, lim.error_message, lim.exempt_when
) for limit in parse_many(lim.limit)
)
except ValueError as e:
self.logger.error(
"failed to load ratelimit for blueprint %s (%s)"
, request.blueprint, e
)
if (request.blueprint in self._blueprint_limits
and not limits
):
limits.extend(self._blueprint_limits[request.blueprint])
failed_limit = None
limit_for_header = None
try:
all_limits = []
if self._storage_dead and self._fallback_limiter:
if self.__should_check_backend() and self._storage.check():
self.logger.info(
"Rate limit storage recovered"
)
self._storage_dead = False
self.__check_backend_count = 0
else:
all_limits = self._in_memory_fallback
if not all_limits:
all_limits = (limits + dynamic_limits or self._global_limits)
for lim in all_limits:
limit_scope = lim.scope or endpoint
if lim.is_exempt:
return
if lim.methods is not None and request.method.lower() not in lim.methods:
return
if lim.per_method:
limit_scope += ":%s" % request.method
if not limit_for_header or lim.limit < limit_for_header[0]:
limit_for_header = (lim.limit, lim.key_func(), limit_scope)
if not self.limiter.hit(lim.limit, lim.key_func(), limit_scope):
self.logger.warning(
"ratelimit %s (%s) exceeded at endpoint: %s"
, lim.limit, lim.key_func(), limit_scope
)
failed_limit = lim
limit_for_header = (lim.limit, lim.key_func(), limit_scope)
break
g.view_rate_limit = limit_for_header
if failed_limit:
if failed_limit.error_message:
exc_description = failed_limit.error_message if not callable(
failed_limit.error_message
) else failed_limit.error_message()
else:
exc_description = six.text_type(failed_limit.limit)
raise RateLimitExceeded(exc_description)
except Exception as e: # no qa
if isinstance(e, RateLimitExceeded):
six.reraise(*sys.exc_info())
if self._in_memory_fallback and not self._storage_dead:
self.logger.warn(
"Rate limit storage unreachable - falling back to"
" in-memory storage"
)
self._storage_dead = True
self.__check_request_limit()
else:
if self._swallow_errors:
self.logger.exception(
"Failed to rate limit. Swallowing error"
)
else:
six.reraise(*sys.exc_info())
|
ValueError
|
dataset/ETHPy150Open alisaifee/flask-limiter/flask_limiter/extension.py/Limiter.__check_request_limit
|
5,225 |
def __limit_decorator(self, limit_value,
key_func=None, shared=False,
scope=None,
per_method=False,
methods=None,
error_message=None,
exempt_when=None):
_scope = scope if shared else None
def _inner(obj):
func = key_func or self._key_func
is_route = not isinstance(obj, Blueprint)
name = "%s.%s" % (obj.__module__, obj.__name__) if is_route else obj.name
dynamic_limit, static_limits = None, []
if callable(limit_value):
dynamic_limit = ExtLimit(limit_value, func, _scope, per_method,
methods, error_message, exempt_when)
else:
try:
static_limits = [ExtLimit(
limit, func, _scope, per_method,
methods, error_message, exempt_when
) for limit in parse_many(limit_value)]
except __HOLE__ as e:
self.logger.error(
"failed to configure %s %s (%s)",
"view function" if is_route else "blueprint", name, e
)
if isinstance(obj, Blueprint):
if dynamic_limit:
self._blueprint_dynamic_limits.setdefault(name, []).append(
dynamic_limit
)
else:
self._blueprint_limits.setdefault(name, []).extend(
static_limits
)
else:
@wraps(obj)
def __inner(*a, **k):
return obj(*a, **k)
if dynamic_limit:
self._dynamic_route_limits.setdefault(name, []).append(
dynamic_limit
)
else:
self._route_limits.setdefault(name, []).extend(
static_limits
)
return __inner
return _inner
|
ValueError
|
dataset/ETHPy150Open alisaifee/flask-limiter/flask_limiter/extension.py/Limiter.__limit_decorator
|
5,226 |
def ascend(self, obj, depth=1):
"""Return a nested list containing referrers of the given object."""
depth += 1
parents = []
# Gather all referrers in one step to minimize
# cascading references due to repr() logic.
refs = gc.get_referrers(obj)
self.ignore.append(refs)
if len(refs) > self.maxparents:
return [("[%s referrers]" % len(refs), [])]
try:
ascendcode = self.ascend.__code__
except __HOLE__:
ascendcode = self.ascend.im_func.func_code
for parent in refs:
if inspect.isframe(parent) and parent.f_code is ascendcode:
continue
if parent in self.ignore:
continue
if depth <= self.maxdepth:
parents.append((parent, self.ascend(parent, depth)))
else:
parents.append((parent, []))
return parents
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/cherrypy/cherrypy/lib/gctools.py/ReferrerTree.ascend
|
5,227 |
def send_command_expect(self, command_string, expect_string=None,
delay_factor=.2, max_loops=500, auto_find_prompt=True,
strip_prompt=True, strip_command=True):
'''
Send command to network device retrieve output until router_prompt or expect_string
By default this method will keep waiting to receive data until the network device prompt is
detected. The current network device prompt will be determined automatically.
command_string = command to execute
expect_string = pattern to search for uses re.search (use raw strings)
delay_factor = decrease the initial delay before we start looking for data
max_loops = number of iterations before we give up and raise an exception
strip_prompt = strip the trailing prompt from the output
strip_command = strip the leading command from the output
self.global_delay_factor is not used (to make this method faster)
'''
debug = False
# Find the current router prompt
if expect_string is None:
if auto_find_prompt:
try:
prompt = self.find_prompt(delay_factor=delay_factor)
except __HOLE__:
prompt = self.base_prompt
else:
prompt = self.base_prompt
search_pattern = re.escape(prompt.strip())
else:
search_pattern = expect_string
command_string = self.normalize_cmd(command_string)
if debug:
print("Command is: {0}".format(command_string))
print("Search to stop receiving data is: '{0}'".format(search_pattern))
time.sleep(delay_factor * 1)
self.clear_buffer()
self.remote_conn.sendall(command_string)
# Initial delay after sending command
i = 1
# Keep reading data until search_pattern is found (or max_loops)
output = ''
while i <= max_loops:
if self.remote_conn.recv_ready():
output += self.remote_conn.recv(MAX_BUFFER).decode('utf-8', 'ignore')
try:
lines = output.split("\n")
first_line = lines[0]
# First line is the echo line containing the command. In certain situations
# it gets repainted and needs filtered
if BACKSPACE_CHAR in first_line:
pattern = search_pattern + r'.*$'
first_line = re.sub(pattern, repl='', string=first_line)
lines[0] = first_line
output = "\n".join(lines)
except IndexError:
pass
if re.search(search_pattern, output):
break
else:
time.sleep(delay_factor * 1)
i += 1
else: # nobreak
raise IOError("Search pattern never detected in send_command_expect: {0}".format(
search_pattern))
# Some platforms have ansi_escape codes
if self.ansi_escape_codes:
output = self.strip_ansi_escape_codes(output)
output = self.normalize_linefeeds(output)
if strip_command:
output = self.strip_command(command_string, output)
if strip_prompt:
output = self.strip_prompt(output)
return output
|
ValueError
|
dataset/ETHPy150Open ktbyers/netmiko/netmiko/base_connection.py/BaseSSHConnection.send_command_expect
|
5,228 |
def __init__(self, values, categories=None, ordered=False, name=None,
fastpath=False, levels=None):
if fastpath:
# fast path
self._codes = _coerce_indexer_dtype(values, categories)
self._categories = self._validate_categories(
categories, fastpath=isinstance(categories, ABCIndexClass))
self._ordered = ordered
return
if name is not None:
msg = ("the 'name' keyword is removed, use 'name' with consumers "
"of the categorical instead (e.g. 'Series(cat, "
"name=\"something\")'")
warn(msg, UserWarning, stacklevel=2)
# TODO: Remove after deprecation period in 2017/ after 0.18
if levels is not None:
warn("Creating a 'Categorical' with 'levels' is deprecated, use "
"'categories' instead", FutureWarning, stacklevel=2)
if categories is None:
categories = levels
else:
raise ValueError("Cannot pass in both 'categories' and "
"(deprecated) 'levels', use only "
"'categories'", stacklevel=2)
# sanitize input
if is_categorical_dtype(values):
# we are either a Series or a CategoricalIndex
if isinstance(values, (ABCSeries, ABCCategoricalIndex)):
values = values._values
if ordered is None:
ordered = values.ordered
if categories is None:
categories = values.categories
values = values.__array__()
elif isinstance(values, (ABCIndexClass, ABCSeries)):
pass
else:
# on numpy < 1.6 datetimelike get inferred to all i8 by
# _sanitize_array which is fine, but since factorize does this
# correctly no need here this is an issue because _sanitize_array
# also coerces np.nan to a string under certain versions of numpy
# as well
values = _possibly_infer_to_datetimelike(values,
convert_dates=True)
if not isinstance(values, np.ndarray):
values = _convert_to_list_like(values)
from pandas.core.series import _sanitize_array
# On list with NaNs, int values will be converted to float. Use
# "object" dtype to prevent this. In the end objects will be
# casted to int/... in the category assignment step.
dtype = 'object' if isnull(values).any() else None
values = _sanitize_array(values, None, dtype=dtype)
if categories is None:
try:
codes, categories = factorize(values, sort=True)
except __HOLE__:
codes, categories = factorize(values, sort=False)
if ordered:
# raise, as we don't have a sortable data structure and so
# the user should give us one by specifying categories
raise TypeError("'values' is not ordered, please "
"explicitly specify the categories order "
"by passing in a categories argument.")
except ValueError:
# FIXME
raise NotImplementedError("> 1 ndim Categorical are not "
"supported at this time")
categories = self._validate_categories(categories)
else:
# there were two ways if categories are present
# - the old one, where each value is a int pointer to the levels
# array -> not anymore possible, but code outside of pandas could
# call us like that, so make some checks
# - the new one, where each value is also in the categories array
# (or np.nan)
# make sure that we always have the same type here, no matter what
# we get passed in
categories = self._validate_categories(categories)
codes = _get_codes_for_values(values, categories)
# TODO: check for old style usage. These warnings should be removes
# after 0.18/ in 2016
if is_integer_dtype(values) and not is_integer_dtype(categories):
warn("Values and categories have different dtypes. Did you "
"mean to use\n'Categorical.from_codes(codes, "
"categories)'?", RuntimeWarning, stacklevel=2)
if (len(values) and is_integer_dtype(values) and
(codes == -1).all()):
warn("None of the categories were found in values. Did you "
"mean to use\n'Categorical.from_codes(codes, "
"categories)'?", RuntimeWarning, stacklevel=2)
self.set_ordered(ordered or False, inplace=True)
self._categories = categories
self._codes = _coerce_indexer_dtype(codes, categories)
|
TypeError
|
dataset/ETHPy150Open pydata/pandas/pandas/core/categorical.py/Categorical.__init__
|
5,229 |
def map(self, mapper):
"""
Apply mapper function to its categories (not codes).
Parameters
----------
mapper : callable
Function to be applied. When all categories are mapped
to different categories, the result will be Categorical which has
the same order property as the original. Otherwise, the result will
be np.ndarray.
Returns
-------
applied : Categorical or np.ndarray.
"""
new_categories = self.categories.map(mapper)
try:
return Categorical.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except __HOLE__:
return np.take(new_categories, self._codes)
|
ValueError
|
dataset/ETHPy150Open pydata/pandas/pandas/core/categorical.py/Categorical.map
|
5,230 |
def is_dtype_equal(self, other):
"""
Returns True if categoricals are the same dtype
same categories, and same ordered
Parameters
----------
other : Categorical
Returns
-------
are_equal : boolean
"""
try:
return (self.categories.equals(other.categories) and
self.ordered == other.ordered)
except (AttributeError, __HOLE__):
return False
|
TypeError
|
dataset/ETHPy150Open pydata/pandas/pandas/core/categorical.py/Categorical.is_dtype_equal
|
5,231 |
@follows( normaliseBAMs )
@files( [ (("%s/bam/%s.norm.bam" % (x, x.asFile()), "%s/bam/%s.norm.bam" % (getControl(x), getControl(x).asFile())),
"%s/sicer/%s.sicer" % (x, x.asFile()) ) for x in TRACKS ] )
def runSICER( infiles, outfile ):
'''Run SICER for peak detection.'''
infile, controlfile = infiles
to_cluster = False
track = P.snip( os.path.basename(infile), ".norm.bam" )
control = P.snip( os.path.basename(controlfile), ".norm.bam" )
inputdir = os.path.dirname(outfile)
try: os.mkdir( track )
except __HOLE__: pass
try: os.mkdir( '''%(track)s/sicer''' % locals() )
except OSError: pass
# convert bam to bed
statement = '''bamToBed -i %(infile)s > %(track)s/sicer/%(track)s.bed;
bamToBed -i %(controlfile)s > %(track)s/sicer/%(control)s.bed; '''
# Run SICER
statement += '''cd %(inputdir)s; SICER.sh . %(track)s.bed %(control)s.bed . %(genome)s %(sicer_params)s >& %(track)s.sicer'''
P.run()
############################################################
|
OSError
|
dataset/ETHPy150Open CGATOxford/cgat/obsolete/zinba.py/runSICER
|
5,232 |
@follows( dedup )
@files( [ (("%s/bam/%s.norm.bam" % (x, x.asFile()), "%s/bam/%s.norm.bam" % (getControl(x), getControl(x).asFile())),
"%s/zinba/%s.peaks" % (x, x.asFile()) ) for x in TRACKS ] )
def runZinba( infiles, outfile ):
'''Run Zinba for peak detection.'''
infile, controlfile = infiles
to_cluster = False
track = P.snip( os.path.basename(infile), ".norm.bam" )
control = P.snip( os.path.basename(controlfile), ".norm.bam" )
inputdir = os.path.dirname(outfile)
frag_len = PARAMS['zinba_fragment_size']
mappability = PARAMS['zinba_mappability']
genome = PARAMS['zinba_genome']
try: os.mkdir( track )
except OSError: pass
try: os.mkdir( '''%(track)s/zinba''' % locals() )
except OSError: pass
try: os.mkdir( '''%(track)s/zinba/map_ext%(frag_len)s''' % locals() )
except __HOLE__: pass
# convert bam to bed
statement = '''bamToBed -i %(infile)s > %(track)s/zinba/%(track)s.bed;
bamToBed -i %(controlfile)s > %(track)s/zinba/%(control)s.bed; '''
P.run()
# Run Zinba
R.library( 'zinba' )
R( '''generateAlignability( mapdir='%(mappability)s', outdir='%(track)s/zinba/map_ext%(frag_len)s', athresh=1, extension=%(frag_len)s, twoBitFile='%(genome)s' )''' % locals() )
R( '''basealigncount( inputfile='%(track)s/zinba/%(track)s.bed', outputfile='%(track)s/zinba/%(track)s.basecount', extension=%(frag_len)s, filetype='bed', twoBitFile='%(genome)s' )''' % locals() )
R( '''zinba( refinepeaks=1, seq='%(track)s/zinba/%(track)s.bed', input='%(track)s/zinba/%(control)s.bed', filetype='bed', align='%(track)s/zinba/map_ext%(frag_len)s', twoBit='%(genome)s', outfile='%(track)s/zinba/%(track)s', extension=%(frag_len)s, basecountfile='%(track)s/zinba/%(track)s.basecount', numProc=4, threshold=0.01, broad=FALSE, printFullOut=1, interaction=FALSE, mode='peaks', FDR=TRUE) ''' % locals() )
############################################################
|
OSError
|
dataset/ETHPy150Open CGATOxford/cgat/obsolete/zinba.py/runZinba
|
5,233 |
def get_customer(request):
"""Returns the customer for the given request (which means for the current
logged in user/or the session user).
"""
try:
return request.customer
except __HOLE__:
customer = request.customer = _get_customer(request)
return customer
|
AttributeError
|
dataset/ETHPy150Open diefenbach/django-lfs/lfs/customer/utils.py/get_customer
|
5,234 |
def _get_customer(request):
user = request.user
if user.is_authenticated():
try:
return Customer.objects.get(user=user)
except __HOLE__:
return None
else:
session_key = request.session.session_key
try:
return Customer.objects.get(session=session_key)
except ObjectDoesNotExist:
return None
except MultipleObjectsReturned:
customers = Customer.objects.filter(session=session_key, user__isnull=True)
customer = customers[0]
customers.exclude(pk=customer.pk).delete()
return customer
|
ObjectDoesNotExist
|
dataset/ETHPy150Open diefenbach/django-lfs/lfs/customer/utils.py/_get_customer
|
5,235 |
def update_customer_after_login(request):
"""Updates the customer after login.
1. If there is no session customer, nothing has to be done.
2. If there is a session customer and no user customer we assign the session
customer to the current user.
3. If there is a session customer and a user customer we copy the session
customer information to the user customer and delete the session customer
"""
try:
session_customer = Customer.objects.get(session=request.session.session_key)
try:
user_customer = Customer.objects.get(user=request.user)
except ObjectDoesNotExist:
session_customer.user = request.user
session_customer.save()
else:
user_customer.selected_shipping_method = session_customer.selected_shipping_method
user_customer.save()
session_customer.delete()
except __HOLE__:
pass
|
ObjectDoesNotExist
|
dataset/ETHPy150Open diefenbach/django-lfs/lfs/customer/utils.py/update_customer_after_login
|
5,236 |
def run(self):
"""This is the main serving loop."""
if self._stats.error:
self._transmit_error()
self._close()
return
self._parse_options()
if self._options:
self._transmit_oack()
else:
self._next_block()
self._transmit_data()
while not self._should_stop:
try:
self.run_once()
except (__HOLE__, SystemExit):
logging.info(
'Caught KeyboardInterrupt/SystemExit exception. '
'Will exit.'
)
break
self._close()
|
KeyboardInterrupt
|
dataset/ETHPy150Open facebook/fbtftp/fbtftp/base_handler.py/BaseHandler.run
|
5,237 |
def clean(self, value):
""" (Try to) parse JSON string back to python. """
if isinstance(value, six.string_types):
if value == "":
value = None
try:
value = json.loads(value)
except __HOLE__:
raise ValidationError("Could not parse value as JSON")
return value
#Basic obfuscation, just so that the db_table doesn't
#appear in forms. (Not the end of the world if it does, but it's nice to
# hide these things). We don't encrypt for performance reasons.
#http://en.wikipedia.org/wiki/Vigen%C3%A8re_cipher
|
ValueError
|
dataset/ETHPy150Open potatolondon/djangae/djangae/forms/fields.py/JSONFormField.clean
|
5,238 |
@classmethod
def to_python(cls, value, model_ref=None, pk=None):
if model_ref is None:
if value is None:
return None
if isinstance(value, models.Model):
return value
model_ref, pk = decode_pk(value)
try:
pk = int(pk)
except (__HOLE__, TypeError):
raise forms.ValidationError('Invalid instance key.')
model = cls.load_model(model_ref)
try:
return model.objects.get(pk=pk)
except model.DoesNotExist:
raise forms.ValidationError('Invalid instance key.')
|
ValueError
|
dataset/ETHPy150Open potatolondon/djangae/djangae/forms/fields.py/GenericRelationFormfield.to_python
|
5,239 |
def search_function(encoding):
# Cache lookup
entry = _cache.get(encoding, _unknown)
if entry is not _unknown:
return entry
# Import the module:
#
# First try to find an alias for the normalized encoding
# name and lookup the module using the aliased name, then try to
# lookup the module using the standard import scheme, i.e. first
# try in the encodings package, then at top-level.
#
norm_encoding = normalize_encoding(encoding)
aliased_encoding = _aliases.get(norm_encoding) or \
_aliases.get(norm_encoding.replace('.', '_'))
if aliased_encoding is not None:
modnames = [aliased_encoding,
norm_encoding]
else:
modnames = [norm_encoding]
for modname in modnames:
if not modname or '.' in modname:
continue
try:
# Import is absolute to prevent the possibly malicious import of a
# module with side-effects that is not in the 'encodings' package.
mod = __import__('encodings.' + modname, fromlist=_import_tail,
level=0)
except ImportError:
pass
else:
break
else:
mod = None
try:
getregentry = mod.getregentry
except AttributeError:
# Not a codec module
mod = None
if mod is None:
# Cache misses
_cache[encoding] = None
return None
# Now ask the module for the registry entry
entry = getregentry()
if not isinstance(entry, codecs.CodecInfo):
if not 4 <= len(entry) <= 7:
raise CodecRegistryError,\
'module "%s" (%s) failed to register' % \
(mod.__name__, mod.__file__)
if not hasattr(entry[0], '__call__') or \
not hasattr(entry[1], '__call__') or \
(entry[2] is not None and not hasattr(entry[2], '__call__')) or \
(entry[3] is not None and not hasattr(entry[3], '__call__')) or \
(len(entry) > 4 and entry[4] is not None and not hasattr(entry[4], '__call__')) or \
(len(entry) > 5 and entry[5] is not None and not hasattr(entry[5], '__call__')):
raise CodecRegistryError,\
'incompatible codecs in module "%s" (%s)' % \
(mod.__name__, mod.__file__)
if len(entry)<7 or entry[6] is None:
entry += (None,)*(6-len(entry)) + (mod.__name__.split(".", 1)[1],)
entry = codecs.CodecInfo(*entry)
# Cache the codec registry entry
_cache[encoding] = entry
# Register its aliases (without overwriting previously registered
# aliases)
try:
codecaliases = mod.getaliases()
except __HOLE__:
pass
else:
for alias in codecaliases:
if alias not in _aliases:
_aliases[alias] = modname
# Return the registry entry
return entry
# Register the search_function in the Python codec registry
|
AttributeError
|
dataset/ETHPy150Open kleientertainment/ds_mod_tools/pkg/win32/Python27/Lib/encodings/__init__.py/search_function
|
5,240 |
def parse_backend_conf(backend, **kwargs):
"""
Helper function to parse the backend configuration
that doesn't use the URI notation.
"""
# Try to get the CACHES entry for the given backend name first
conf = settings.CACHES.get(backend, None)
if conf is not None:
args = conf.copy()
args.update(kwargs)
backend = args.pop('BACKEND')
location = args.pop('LOCATION', '')
return backend, location, args
else:
try:
# Trying to import the given backend, in case it's a dotted path
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, __HOLE__, ValueError):
raise InvalidCacheBackendError("Could not find backend '%s'" % backend)
location = kwargs.pop('LOCATION', '')
return backend, location, kwargs
raise InvalidCacheBackendError(
"Couldn't find a cache backend named '%s'" % backend)
|
ImportError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/core/cache/__init__.py/parse_backend_conf
|
5,241 |
def get_cache(backend, **kwargs):
"""
Function to load a cache backend dynamically. This is flexible by design
to allow different use cases:
To load a backend with the old URI-based notation::
cache = get_cache('locmem://')
To load a backend that is pre-defined in the settings::
cache = get_cache('default')
To load a backend with its dotted import path,
including arbitrary options::
cache = get_cache('django.core.cache.backends.memcached.MemcachedCache', **{
'LOCATION': '127.0.0.1:11211', 'TIMEOUT': 30,
})
"""
try:
if '://' in backend:
# for backwards compatibility
backend, location, params = parse_backend_uri(backend)
if backend in BACKENDS:
backend = 'django.core.cache.backends.%s' % BACKENDS[backend]
params.update(kwargs)
mod = importlib.import_module(backend)
backend_cls = mod.CacheClass
else:
backend, location, params = parse_backend_conf(backend, **kwargs)
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (__HOLE__, ImportError), e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e))
cache = backend_cls(location, params)
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If the cache provides a close() method, wire it up
# here.
if hasattr(cache, 'close'):
signals.request_finished.connect(cache.close)
return cache
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.4/django/core/cache/__init__.py/get_cache
|
5,242 |
def __getattr__(self, key):
try:
return self.data[key]
except __HOLE__:
raise AttributeError("'%s' object has no attribute '%s'" % (
self.__class__.__name__, key))
|
KeyError
|
dataset/ETHPy150Open ask/carrot/tests/test_django.py/DictWrapper.__getattr__
|
5,243 |
def test_DjangoBrokerConnection(self):
try:
from django.conf import settings
except __HOLE__:
sys.stderr.write(
"Django is not installed. \
Not testing django specific features.\n")
return
configured_or_configure(settings,
CARROT_BACKEND=CARROT_BACKEND,
BROKER_HOST=BROKER_HOST,
BROKER_PORT=BROKER_PORT,
BROKER_VHOST=BROKER_VHOST,
BROKER_USER=BROKER_USER,
BROKER_PASSWORD=BROKER_PASSWORD)
expected_values = {
"backend_cls": CARROT_BACKEND,
"hostname": BROKER_HOST,
"port": BROKER_PORT,
"virtual_host": BROKER_VHOST,
"userid": BROKER_USER,
"password": BROKER_PASSWORD}
conn = DjangoBrokerConnection()
self.assertTrue(isinstance(conn, BrokerConnection))
for val_name, val_value in expected_values.items():
self.assertEquals(getattr(conn, val_name, None), val_value)
|
ImportError
|
dataset/ETHPy150Open ask/carrot/tests/test_django.py/TestDjangoSpecific.test_DjangoBrokerConnection
|
5,244 |
def get_attribution(lang_scr_ext):
if not ATTRIBUTION_DATA:
attribution_path = path.join(TOOLS_DIR, 'sample_texts', 'attributions.txt')
with open(attribution_path, 'r') as f:
data = f.readlines()
for line in data:
line = line.strip()
if not line or line[0] == '#':
continue
tag, attrib = line.split(':')
ATTRIBUTION_DATA[tag.strip()] = attrib.strip()
print 'read %d lines of attribution data' % len(ATTRIBUTION_DATA)
try:
return ATTRIBUTION_DATA[lang_scr_ext + '.txt']
except __HOLE__:
print 'no attribution for %s' % lang_scr_ext
return 'none'
|
KeyError
|
dataset/ETHPy150Open googlei18n/nototools/nototools/generate_website_2_data.py/get_attribution
|
5,245 |
def save_map(map_name, base_dir=MAP_DIR, only_cached=True):
_map_dir = os.path.join(base_dir, map_name)
#if base_dir == DATA_DIR:
# _map_dir = os.path.join(_map_dir, map_name)
try:
os.makedirs(_map_dir)
except:
pass
for light in WORLD_INFO['lights']:
if 'los' in light:
del light['los']
if 'old_pos' in light:
del light['old_pos']
with open(os.path.join(_map_dir, 'world.meta'), 'w') as _map_file:
try:
_slices = WORLD_INFO['slices']
_references = WORLD_INFO['references']
_chunk_map = WORLD_INFO['chunk_map']
_map = WORLD_INFO['map']
_weather_light_map = None
del WORLD_INFO['slices']
del WORLD_INFO['chunk_map']
del WORLD_INFO['references']
del WORLD_INFO['map']
WORLD_INFO['map_size'] = maputils.get_map_size(_map)
if 'light_map' in WORLD_INFO['weather']:
_weather_light_map = WORLD_INFO['weather']['light_map']
del WORLD_INFO['weather']['light_map']
logging.debug('Writing map metadata to disk...')
_map_file.write('world_info:%s\n' % json.dumps(WORLD_INFO))
for _slice in _slices.keys():
if '_map' in _slices[_slice]:
del _slices[_slice]['_map']
_map_file.write('slice:%s:%s\n' % (_slice, json.dumps(_slices[_slice])))
for _chunk_key in _chunk_map:
_map_file.write('chunk:%s:%s\n' % (_chunk_key, json.dumps(_chunk_map[_chunk_key])))
#_map_file.write('slice_map:%s' % json.dumps(_slice_map))
WORLD_INFO['slices'] = _slices
WORLD_INFO['chunk_map'] = _chunk_map
WORLD_INFO['references'] = _references
WORLD_INFO['map'] = _map
#WORLD_INFO['slice_map'] = _slice_map
if _weather_light_map:
WORLD_INFO['weather']['light_map'] = _weather_light_map
#logging.debug('Reloading slices...')
#reload_slices()
#logging.debug('Done!')
except __HOLE__ as e:
logging.critical('FATAL: Map not JSON serializable.')
gfx.log('TypeError: Failed to save map (Map not JSON serializable).')
raise e
_chunk_cluster_size = WORLD_INFO['chunk_size']*10
_map = WORLD_INFO['map']
del WORLD_INFO['map']
if only_cached:
_cluster_keys = LOADED_CHUNKS
else:
_cluster_keys = []
for y1 in range(0, MAP_SIZE[1], _chunk_cluster_size):
for x1 in range(0, MAP_SIZE[0], _chunk_cluster_size):
_cluster_keys.append('%s,%s' % (x1, y1))
for cluster_key in _cluster_keys:
_x1 = int(cluster_key.split(',')[0])
_y1 = int(cluster_key.split(',')[1])
with open(os.path.join(_map_dir, 'world_%s.cluster' % cluster_key.replace(',', '_')), 'w') as _cluster_file:
for y2 in range(_y1, _y1+_chunk_cluster_size):
for x2 in range(_x1, _x1+_chunk_cluster_size):
_cluster_file.write(json.dumps(_map[x2][y2])+'\n')
WORLD_INFO['map'] = _map
SETTINGS['base_dir'] = _map_dir
|
TypeError
|
dataset/ETHPy150Open flags/Reactor-3/maps.py/save_map
|
5,246 |
def has_gravatar(email):
"""
Returns True if the user has a gravatar, False otherwise.
"""
# Request a 404 response if the gravatar does not exist
url = get_gravatar_url(email, default=GRAVATAR_DEFAULT_IMAGE_404)
# Verify an OK response was received
try:
request = Request(url)
request.get_method = lambda: 'HEAD'
return 200 == urlopen(request).code
except (__HOLE__, URLError):
return False
|
HTTPError
|
dataset/ETHPy150Open twaddington/django-gravatar/django_gravatar/helpers.py/has_gravatar
|
5,247 |
def install_twisted():
"""
If twisted is available, make `emit' return a DeferredList
This has been successfully tested with Twisted 14.0 and later.
"""
global emit, _call_partial
try:
from twisted.internet import defer
emit = _emit_twisted
_call_partial = defer.maybeDeferred
return True
except __HOLE__:
_call_partial = lambda fn, *a, **kw: fn(*a, **kw)
return False
|
ImportError
|
dataset/ETHPy150Open shaunduncan/smokesignal/smokesignal.py/install_twisted
|
5,248 |
def render(self, name, value, attrs=None):
# no point trying to come up with sensible semantics for when 'id' is missing from attrs,
# so let's make sure it fails early in the process
try:
id_ = attrs['id']
except (KeyError, __HOLE__):
raise TypeError("WidgetWithScript cannot be rendered without an 'id' attribute")
widget_html = self.render_html(name, value, attrs)
js = self.render_js_init(id_, name, value)
out = '{0}<script>{1}</script>'.format(widget_html, js)
return mark_safe(out)
|
TypeError
|
dataset/ETHPy150Open torchbox/wagtail/wagtail/utils/widgets.py/WidgetWithScript.render
|
5,249 |
def whoami(hostname=MYHOSTNAME):
"""Given a hostname return the Romeo object"""
try:
for host in foundation.RomeoKeyValue.search('HOSTNAME', value=hostname):
for ancestor in host.ANCESTORS:
if ancestor.KEY != 'SERVER': continue
return ancestor
except __HOLE__: pass
raise IdentityCrisis('you appear to be having an identity crisis')
###############################################################################
# Avoid importation circularites
###############################################################################
#internal library packages
|
IndexError
|
dataset/ETHPy150Open OrbitzWorldwide/droned/romeo/lib/romeo/__init__.py/whoami
|
5,250 |
def __getattr__(self, param): #compatibility hack
try:
return self._data[param]
except __HOLE__:
raise AttributeError("%s has no attribute \"%s\"" % (self, param))
|
KeyError
|
dataset/ETHPy150Open OrbitzWorldwide/droned/romeo/lib/romeo/__init__.py/_Romeo.__getattr__
|
5,251 |
def dump(self, obj, tag = None, typed = 1, ns_map = {}):
if Config.debug: print "In dump.", "obj=", obj
ns_map = ns_map.copy()
self.depth += 1
if type(tag) not in (NoneType, StringType, UnicodeType):
raise KeyError, "tag must be a string or None"
if isinstance(obj, anyType):
methname = "dump_instance"
else:
methname = "dump_" + type(obj).__name__
try:
meth = getattr(self, methname)
except __HOLE__:
if type(obj) == LongType:
obj_type = "integer"
elif pythonHasBooleanType and type(obj) == BooleanType:
obj_type = "boolean"
else:
obj_type = type(obj).__name__
self.out.append(self.dumper(None, obj_type, obj, tag, typed,
ns_map, self.genroot(ns_map)))
else:
meth(obj, tag, typed, ns_map)
self.depth -= 1
# generic dumper
|
AttributeError
|
dataset/ETHPy150Open AppScale/appscale/AppServer/google/appengine/api/SOAPpy/SOAPBuilder.py/SOAPBuilder.dump
|
5,252 |
def get_products_from_tiids(tiids, ignore_order=False):
# @ignore_order makes it slightly faster by not sorting
if not tiids:
return []
unsorted_products = Product.query.filter(Product.tiid.in_(tiids)).all()
ret = []
if ignore_order:
ret = unsorted_products
else:
for my_tiid in tiids:
try:
my_product = [p for p in unsorted_products if p.tiid == my_tiid][0]
except __HOLE__:
continue
ret.append(my_product)
return ret
|
IndexError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/get_products_from_tiids
|
5,253 |
@cached_property
def clean_biblio_dedup_dict(self):
dedup_key_dict = {}
try:
dedup_key_dict["title"] = remove_unneeded_characters(self.biblio.display_title).lower()
except (__HOLE__, TypeError):
dedup_key_dict["title"] = self.biblio.display_title
dedup_key_dict["genre"] = self.genre
dedup_key_dict["is_preprint"] = self.is_preprint
return dedup_key_dict
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.clean_biblio_dedup_dict
|
5,254 |
@cached_property
def is_true_product(self):
try:
if self.biblio.is_account:
return False
except __HOLE__:
pass
return True
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.is_true_product
|
5,255 |
@cached_property
def genre_icon(self):
try:
return configs.genre_icons[self.genre]
except __HOLE__:
return configs.genre_icons["unknown"]
#@cached_property
#def genre_url_representation(self):
# return self.display_genre_plural
|
KeyError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.genre_icon
|
5,256 |
@cached_property
def mendeley_discipline(self):
top_discipline = None
discipline_snaps = [snap for snap in self.snaps if snap.provider=="mendeley" and snap.interaction=="discipline"]
if discipline_snaps:
most_recent_discipline_snap = sorted(
discipline_snaps,
key=lambda x: x.last_collected_date,
reverse=True
)[0]
all_disciplines = most_recent_discipline_snap.raw_value
try:
by_value = sorted(all_disciplines, key=all_disciplines.get, reverse=True)
top_discipline = by_value[0]
except __HOLE__:
pass
return top_discipline
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.mendeley_discipline
|
5,257 |
@cached_property
def is_account_product(self):
try:
if self.biblio.is_account:
return True
except __HOLE__:
pass
return False
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.is_account_product
|
5,258 |
@cached_property
def latest_diff_timestamp(self):
ts_list = [m.latest_nonzero_refresh_timestamp for m in self.metrics]
if not ts_list:
return None
try:
return sorted(ts_list, reverse=True)[0]
except __HOLE__:
return None
|
IndexError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.latest_diff_timestamp
|
5,259 |
@cached_property
def countries(self):
my_countries = countries.CountryList()
try:
country_data = self.get_metric_raw_value("altmetric_com", "demographics")["geo"]["twitter"]
for country in country_data:
my_countries.add_from_metric(
country,
"altmetric_com:tweets",
country_data[country]
)
except (KeyError, TypeError):
pass
country_data = self.get_metric_raw_value("mendeley", "countries")
try:
for country in country_data:
my_countries.add_from_metric(
country,
"mendeley:readers",
country_data[country]
)
except (KeyError, TypeError):
pass
country_data = self.get_metric_raw_value("impactstory", "countries")
try:
for country in country_data:
my_countries.add_from_metric(
country,
"impactstory:views",
country_data[country]
)
except (KeyError, __HOLE__):
pass
return my_countries
|
TypeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.countries
|
5,260 |
def has_metric_this_good(self, provider, interaction, count):
requested_metric = self.get_metric_by_name(provider, interaction)
try:
return requested_metric.display_count >= count
except __HOLE__:
return False
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.has_metric_this_good
|
5,261 |
def get_pdf(self):
if self.has_file:
return self.get_file()
try:
pdf_url = self.get_pdf_url()
if pdf_url:
r = requests.get(pdf_url, timeout=10)
return r.content
except (__HOLE__, requests.exceptions.Timeout):
pass
return None
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.get_pdf
|
5,262 |
def get_embed_markup(self):
# logger.debug(u"in get_embed_markup for {tiid}".format(
# tiid=self.tiid))
if self.is_account_product:
return None
html = None
if self.aliases and self.aliases.best_url and "github" in self.aliases.best_url:
html = embed_markup.get_github_embed_html(self.aliases.best_url)
elif self.aliases and self.aliases.best_url and "dryad" in self.aliases.best_url:
html = embed_markup.get_dryad_embed_html(self.aliases.best_url)
elif self.aliases and self.aliases.best_url and "figshare" in self.aliases.best_url:
html = embed_markup.get_figshare_embed_html(self.aliases.best_url)
else:
if self.has_file or self.get_pdf_url():
try:
this_host = flask.request.url_root.strip("/")
# workaround for google docs viewer not supporting localhost urls
this_host = this_host.replace("localhost:5000", "staging-impactstory.org")
except __HOLE__: # when running as a script
this_host = "https://impactstory.org"
url = u"{this_host}/product/{tiid}/pdf".format(
this_host=this_host, tiid=self.tiid)
if url and ("localhost" in url or "127.0.0.1" in url):
html = u"<p>Can't view uploaded file on localhost. View it at <a href='{url}'>{url}</a>.</p>".format(
url=url)
else:
if url:
try:
html = embed_markup.wrap_in_pdf_reader("embed-pdf", url)
except UnicodeEncodeError:
pass
if not html and self.genre not in ["article", "unknown"]:
if self.aliases and self.aliases.best_url:
# this is how we embed slides, videos, etc
html = embed_markup.wrap_with_embedly(self.aliases.best_url)
return html
|
RuntimeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.get_embed_markup
|
5,263 |
def to_markup_dict(self, markup, hide_keys=None, show_keys="all"):
keys_to_show = [
"tiid",
"aliases",
"biblio",
"awards",
"genre",
"genre_icon",
"display_genre_plural",
"countries_str",
# for sorting
"year",
"awardedness_score",
"metrics_raw_sum",
"title",
"authors",
# for debugging
"mendeley_discipline",
# to show the "view on impactstory" badges
"embed_markup",
"fulltext_cta"
]
my_dict = self.to_dict(keys_to_show)
my_dict["markup"] = markup.make(my_dict)
if hide_keys is not None:
for key_to_hide in hide_keys:
try:
del my_dict[key_to_hide]
except __HOLE__:
pass
elif show_keys != "all":
my_small_dict = {}
for k, v in my_dict.iteritems():
if k in show_keys:
my_small_dict[k] = v
my_dict = my_small_dict
return my_dict
|
KeyError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/Product.to_markup_dict
|
5,264 |
def refresh_status(tiid, myredis):
task_ids = myredis.get_provider_task_ids(tiid)
if not task_ids:
status = "SUCCESS: no recent refresh"
return {"short": status, "long": status}
statuses = {}
if "STARTED" in task_ids:
if (len(task_ids) > 1):
task_ids.remove("STARTED")
else:
status = "started_queueing"
return {"short": status, "long": status}
for task_id in task_ids:
task_result = AsyncResult(task_id)
try:
state = task_result.state
except __HOLE__:
state = "unknown_state"
statuses[task_id] = state
# logger.debug(u"refresh_status statuses: tiid={tiid}, statuses={statuses}".format(
# tiid=tiid, statuses=statuses))
done_updating = all([(status.startswith("SUCCESS") or status.startswith("FAILURE")) for status in statuses.values()])
has_failures = any([status.startswith("FAILURE") for status in statuses.values()])
has_pending = any([status.startswith("PENDING") for status in statuses.values()])
has_started = any([status.startswith("STARTED") for status in statuses.values()])
status_short = "unknown"
if done_updating and not has_failures:
status_short = u"SUCCESS: refresh finished"
elif done_updating and has_failures:
status_short = u"SUCCESS with FAILURES"
elif has_failures:
status_short = u"SUCCESS with FAILURES (and not all providers ran)"
elif has_pending:
status_short = u"PENDING"
elif has_started:
status_short = u"STARTED"
status_long = u"{status_short}; task_ids: {statuses}".format(
status_short=status_short, statuses=statuses)
# if not refresh_status.startswith("SUCCESS"):
# # logger.debug(u"refresh_status: task_id={task_id}, refresh_status={refresh_status}, tiid={tiid}".format(
# # task_id=task_id, refresh_status=refresh_status, tiid=tiid))
# pass
return {"short": status_short, "long": status_long}
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/refresh_status
|
5,265 |
def refresh_products_from_tiids(profile_id, tiids, analytics_credentials={}, source="webapp"):
# assume the profile is the same one as the first product
if not profile_id:
temp_profile = Product.query.get(tiids[0])
profile_id = temp_profile.profile_id
from totalimpactwebapp.profile import Profile
profile = Profile.query.get(profile_id)
save_profile_refresh_status(profile, RefreshStatus.states["PROGRESS_BAR"])
if not tiids:
return None
priority = "high"
if source=="scheduled":
priority = "low"
products = Product.query.filter(Product.tiid.in_(tiids)).all()
tiids_to_update = []
for product in products:
try:
tiid = product.tiid
product.set_last_refresh_start()
db.session.merge(product)
tiids_to_update += [tiid]
except __HOLE__:
logger.debug(u"couldn't find tiid {tiid} so not refreshing its metrics".format(
tiid=tiid))
db.session.commit()
start_product_update(profile_id, tiids_to_update, priority)
return tiids
|
AttributeError
|
dataset/ETHPy150Open Impactstory/total-impact-webapp/totalimpactwebapp/product.py/refresh_products_from_tiids
|
5,266 |
def _try_services(self, method_name, *args, **kwargs):
"""
Try each service until one returns a response. This function only
catches the bare minimum of exceptions from the service class. We want
exceptions to be raised so the service classes can be debugged and
fixed quickly.
"""
if self.random_wait_seconds > 0:
# for privacy... To avoid correlating addresses to same origin
# only gets called before the first service call. Does not pause
# before each and every call.
pause_time = random.random() * self.random_wait_seconds
if self.verbose:
print("Pausing for: %.2f seconds" % pause_time)
time.sleep(pause_time)
for service in self.services:
crypto = ((args and args[0]) or kwargs['crypto']).lower()
address = kwargs.get('address', '').lower()
fiat = kwargs.get('fiat', '').lower()
if service.supported_cryptos and (crypto not in service.supported_cryptos):
if self.verbose:
print("SKIP:", "%s not supported for %s" % (crypto, service.__class__.__name__))
continue
try:
if self.verbose: print("* Trying:", service, crypto, "%s%s" % (address, fiat))
ret = getattr(service, method_name)(*args, **kwargs)
self._successful_service = service
return ret
except (KeyError, IndexError, __HOLE__, ValueError,
requests.exceptions.Timeout, requests.exceptions.SSLError) as exc:
# API has probably changed, therefore service class broken
if self.verbose: print("FAIL:", service, exc.__class__.__name__, exc)
self._failed_services.append({
'service': service,
'error': "%s %s" % (exc.__class__.__name__, exc)
})
except SkipThisService as exc:
# service classes can raise this exception if for whatever reason
# that service can't return a response, but maybe another one can.
if self.verbose: print("SKIP:", exc.__class__.__name__, exc)
self._failed_services.append({'service': service, 'error': "Skipped"})
except NotImplementedError as exc:
if self.verbose: print("SKIP:", exc.__class__.__name__, exc)
self._failed_services.append({'service': service, 'error': "Not Implemented"})
if not self._failed_services:
raise NotImplementedError(
"No Services defined for %s and %s" % (crypto, method_name)
)
failed_msg = ', '.join(
["{service.name} -> {error}".format(**x) for x in self._failed_services]
)
raise NoService(self.no_service_msg(*args, **kwargs) + "! Tried: " + failed_msg)
|
TypeError
|
dataset/ETHPy150Open priestc/moneywagon/moneywagon/core.py/AutoFallbackFetcher._try_services
|
5,267 |
def get_optimal_services(crypto, type_of_service):
from .crypto_data import crypto_data
try:
# get best services from curated list
return crypto_data[crypto.lower()]['services'][type_of_service]
except __HOLE__:
raise ValueError("Invalid cryptocurrency symbol: %s" % crypto)
|
KeyError
|
dataset/ETHPy150Open priestc/moneywagon/moneywagon/core.py/get_optimal_services
|
5,268 |
def get_magic_bytes(crypto):
from .crypto_data import crypto_data
try:
pub_byte = crypto_data[crypto]['address_version_byte']
priv_byte = crypto_data[crypto]['private_key_prefix']
if priv_byte >= 128:
priv_byte -= 128 #pybitcointools bug
return pub_byte, priv_byte
except __HOLE__:
raise ValueError("Cryptocurrency symbol not found: %s" % crypto)
|
KeyError
|
dataset/ETHPy150Open priestc/moneywagon/moneywagon/core.py/get_magic_bytes
|
5,269 |
def getRhymingWord(self, curLine, person):
"""What word should go on the end of this line so it rhymes with the
previous?"""
try:
rhymeLine = self.getRhymeLine(curLine)
except __HOLE__:
return None
rhymeWord = self.poem[rhymeLine].split()[-1]
rhymes = self.getRhymes(rhymeWord)
if rhymes == None:
return None
# now we look for rhyming words in our chain
for rhyme in rhymes:
word = self.brains[person]["rev"].chain.get(str(rhyme.lower()))
if (word is not None and
rhyme.lower() != self.cleanWord(rhymeWord.lower())):
return rhyme.lower()
# the loop exited without breaking, so we found no rhymes that exist in
# our Markov chains
return None
|
ValueError
|
dataset/ETHPy150Open capnrefsmmat/seuss/rhyme.py/RhymingMarkovGenerator.getRhymingWord
|
5,270 |
def validate_number(self, number):
try:
number = int(number)
except (TypeError, __HOLE__):
raise PageNotAnInteger('That page number is not an integer')
if number < 1:
raise EmptyPage('That page number is less than 1')
return number
|
ValueError
|
dataset/ETHPy150Open CenterForOpenScience/scrapi/api/webview/pagination.py/PaginatorWithoutCount.validate_number
|
5,271 |
def parseargs():
global DEBUGSTREAM
try:
opts, args = getopt.getopt(
sys.argv[1:], 'nVhc:d',
['class=', 'nosetuid', 'version', 'help', 'debug'])
except getopt.error, e:
usage(1, e)
options = Options()
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-V', '--version'):
print >> sys.stderr, __version__
sys.exit(0)
elif opt in ('-n', '--nosetuid'):
options.setuid = 0
elif opt in ('-c', '--class'):
options.classname = arg
elif opt in ('-d', '--debug'):
DEBUGSTREAM = sys.stderr
# parse the rest of the arguments
if len(args) < 1:
localspec = 'localhost:8025'
remotespec = 'localhost:25'
elif len(args) < 2:
localspec = args[0]
remotespec = 'localhost:25'
elif len(args) < 3:
localspec = args[0]
remotespec = args[1]
else:
usage(1, 'Invalid arguments: %s' % COMMASPACE.join(args))
# split into host/port pairs
i = localspec.find(':')
if i < 0:
usage(1, 'Bad local spec: %s' % localspec)
options.localhost = localspec[:i]
try:
options.localport = int(localspec[i+1:])
except ValueError:
usage(1, 'Bad local port: %s' % localspec)
i = remotespec.find(':')
if i < 0:
usage(1, 'Bad remote spec: %s' % remotespec)
options.remotehost = remotespec[:i]
try:
options.remoteport = int(remotespec[i+1:])
except __HOLE__:
usage(1, 'Bad remote port: %s' % remotespec)
return options
|
ValueError
|
dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/smtpd.py/parseargs
|
5,272 |
def run_tests():
import django
from django.conf import global_settings
from django.conf import settings
settings.configure(
INSTALLED_APPS=[
'corsheaders',
],
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'TEST_NAME': ':memory:',
},
},
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES + (
'corsheaders.middleware.CorsMiddleware',),
)
if hasattr(django, 'setup'):
django.setup()
try:
from django.test.runner import DiscoverRunner as Runner
except __HOLE__:
from django.test.simple import DjangoTestSuiteRunner as Runner
test_runner = Runner(verbosity=1)
return test_runner.run_tests(['corsheaders'])
|
ImportError
|
dataset/ETHPy150Open ottoyiu/django-cors-headers/tests.py/run_tests
|
5,273 |
def run(self):
logger.debug('Proxying connection %r at address %r' % (self.client.conn, self.client.addr))
try:
self._process()
except __HOLE__:
pass
except Exception as e:
logger.exception('Exception while handling connection %r with reason %r' % (self.client.conn, e))
finally:
logger.debug("closing client connection with pending client buffer size %d bytes" % self.client.buffer_size())
self.client.close()
if self.server:
logger.debug("closed client connection with pending server buffer size %d bytes" % self.server.buffer_size())
self._access_log()
logger.debug('Closing proxy for connection %r at address %r' % (self.client.conn, self.client.addr))
|
KeyboardInterrupt
|
dataset/ETHPy150Open abhinavsingh/proxy.py/proxy.py/Proxy.run
|
5,274 |
def main():
parser = argparse.ArgumentParser(
description='proxy.py v%s' % __version__,
epilog='Having difficulty using proxy.py? Report at: %s/issues/new' % __homepage__
)
parser.add_argument('--hostname', default='127.0.0.1', help='Default: 127.0.0.1')
parser.add_argument('--port', default='8899', help='Default: 8899')
parser.add_argument('--log-level', default='INFO', help='DEBUG, INFO, WARNING, ERROR, CRITICAL')
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log_level), format='%(asctime)s - %(levelname)s - pid:%(process)d - %(message)s')
hostname = args.hostname
port = int(args.port)
try:
proxy = HTTP(hostname, port)
proxy.run()
except __HOLE__:
pass
|
KeyboardInterrupt
|
dataset/ETHPy150Open abhinavsingh/proxy.py/proxy.py/main
|
5,275 |
def _validate_constant(constants, option_value, option_name):
try:
constant_value = constants.lookupByValue(option_value)
except __HOLE__:
raise UsageError(
"The option '--{}' got unsupported value: '{}'. "
"Must be one of: {}.".format(
option_name, option_value,
', '.join(c.value for c in constants.iterconstants())
)
)
return constant_value
|
ValueError
|
dataset/ETHPy150Open ClusterHQ/flocker/admin/installer/_images.py/_validate_constant
|
5,276 |
def get_jsons(grid_file):
with open(grid_file) as f:
f.readline() # Pop off the top
f.readline()
for line in f:
try:
yield json.loads(line[:-2])
except __HOLE__:
yield json.loads(line)
break
|
ValueError
|
dataset/ETHPy150Open CenterForOpenScience/scrapi/institutions/grid.py/get_jsons
|
5,277 |
def run(self):
"""
Run the Node Server. Exit when triggered. Generally, this method should
not be overwritten.
"""
self.running = True
self.poly.connect()
counter = 0
try:
while self.running:
time.sleep(self.shortpoll)
self.poll()
counter += self.shortpoll
if counter >= self.longpoll:
self.long_poll()
counter = 0
except __HOLE__:
self.on_exit()
self.poly.exit()
|
KeyboardInterrupt
|
dataset/ETHPy150Open UniversalDevicesInc/Polyglot/polyglot/nodeserver_api.py/NodeServer.run
|
5,278 |
def _parse_cmd(self, cmd):
"""
Parses a received command.
:param cmd: String command received from Polyglot
"""
if len(cmd) >= 2:
# parse command
try:
cmd = json.loads(cmd)
except __HOLE__:
self.send_error('Received badly formatted command ' +
'(not json): {}'.format(cmd))
return False
# split command
try:
cmd_code = list(cmd.keys())[0]
args = cmd[cmd_code]
except (KeyError, IndexError):
self.send_error('Received badly formatted command: {} '
.format(cmd))
return False
# validate command
if cmd_code not in self.commands:
self.send_error('Received invalid command: {}'.format(cmd))
return False
# execute command
return self._recv(cmd_code, args)
|
ValueError
|
dataset/ETHPy150Open UniversalDevicesInc/Polyglot/polyglot/nodeserver_api.py/PolyglotConnector._parse_cmd
|
5,279 |
def __init__(self, definition) :
if definition.get('has missing', False) :
self.has_missing = True
try :
self.predicates += [predicates.ExistsPredicate(definition['field'])]
except __HOLE__ :
pass
else :
self.has_missing = False
|
AttributeError
|
dataset/ETHPy150Open datamade/dedupe/dedupe/variables/base.py/Variable.__init__
|
5,280 |
def __init__(self, definition) :
super(CustomType, self).__init__(definition)
try :
self.comparator = definition["comparator"]
except __HOLE__ :
raise KeyError("For 'Custom' field types you must define "
"a 'comparator' function in the field "
"definition. ")
if 'variable name' in definition :
self.name = definition['variable name']
else :
self.name = "(%s: %s, %s)" % (self.field,
self.type,
self.comparator.__name__)
|
KeyError
|
dataset/ETHPy150Open datamade/dedupe/dedupe/variables/base.py/CustomType.__init__
|
5,281 |
def get_callable(lookup_view, can_fail=False):
"""
Convert a string version of a function name to the callable object.
If the lookup_view is not an import path, it is assumed to be a URL pattern
label and the original string is returned.
If can_fail is True, lookup_view might be a URL pattern label, so errors
during the import fail and the string is returned.
"""
if not callable(lookup_view):
try:
# Bail early for non-ASCII strings (they can't be functions).
lookup_view = lookup_view.encode('ascii')
mod_name, func_name = get_mod_func(lookup_view)
if func_name != '':
lookup_view = getattr(__import__(mod_name, {}, {}, ['']), func_name)
if not callable(lookup_view):
raise AttributeError("'%s.%s' is not a callable." % (mod_name, func_name))
except (ImportError, __HOLE__):
if not can_fail:
raise
except UnicodeEncodeError:
pass
return lookup_view
|
AttributeError
|
dataset/ETHPy150Open dcramer/django-compositepks/django/core/urlresolvers.py/get_callable
|
5,282 |
def get_mod_func(callback):
# Converts 'django.views.news.stories.story_detail' to
# ['django.views.news.stories', 'story_detail']
try:
dot = callback.rindex('.')
except __HOLE__:
return callback, ''
return callback[:dot], callback[dot+1:]
|
ValueError
|
dataset/ETHPy150Open dcramer/django-compositepks/django/core/urlresolvers.py/get_mod_func
|
5,283 |
def _get_callback(self):
if self._callback is not None:
return self._callback
try:
self._callback = get_callable(self._callback_str)
except ImportError, e:
mod_name, _ = get_mod_func(self._callback_str)
raise ViewDoesNotExist, "Could not import %s. Error was: %s" % (mod_name, str(e))
except __HOLE__, e:
mod_name, func_name = get_mod_func(self._callback_str)
raise ViewDoesNotExist, "Tried %s in module %s. Error was: %s" % (func_name, mod_name, str(e))
return self._callback
|
AttributeError
|
dataset/ETHPy150Open dcramer/django-compositepks/django/core/urlresolvers.py/RegexURLPattern._get_callback
|
5,284 |
def _get_urlconf_module(self):
try:
return self._urlconf_module
except __HOLE__:
self._urlconf_module = __import__(self.urlconf_name, {}, {}, [''])
return self._urlconf_module
|
AttributeError
|
dataset/ETHPy150Open dcramer/django-compositepks/django/core/urlresolvers.py/RegexURLResolver._get_urlconf_module
|
5,285 |
def _resolve_special(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type)
mod_name, func_name = get_mod_func(callback)
try:
return getattr(__import__(mod_name, {}, {}, ['']), func_name), {}
except (ImportError, __HOLE__), e:
raise ViewDoesNotExist, "Tried %s. Error was: %s" % (callback, str(e))
|
AttributeError
|
dataset/ETHPy150Open dcramer/django-compositepks/django/core/urlresolvers.py/RegexURLResolver._resolve_special
|
5,286 |
def reverse(self, lookup_view, *args, **kwargs):
if args and kwargs:
raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
try:
lookup_view = get_callable(lookup_view, True)
except (ImportError, __HOLE__), e:
raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
possibilities = self.reverse_dict.getlist(lookup_view)
for possibility, pattern in possibilities:
for result, params in possibility:
if args:
if len(args) != len(params):
continue
unicode_args = [force_unicode(val) for val in args]
candidate = result % dict(zip(params, unicode_args))
else:
if set(kwargs.keys()) != set(params):
continue
unicode_kwargs = dict([(k, force_unicode(v)) for (k, v) in kwargs.items()])
candidate = result % unicode_kwargs
if re.search(u'^%s' % pattern, candidate, re.UNICODE):
return candidate
raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
"arguments '%s' not found." % (lookup_view, args, kwargs))
|
AttributeError
|
dataset/ETHPy150Open dcramer/django-compositepks/django/core/urlresolvers.py/RegexURLResolver.reverse
|
5,287 |
def __call__(self, env, start_response):
"""Accepts a standard WSGI application call, authenticating the request
and installing callback hooks for authorization and ACL header
validation. For an authenticated request, REMOTE_USER will be set to a
comma separated list of the user's groups.
With a non-empty reseller prefix, acts as the definitive auth service
for just tokens and accounts that begin with that prefix, but will deny
requests outside this prefix if no other auth middleware overrides it.
With an empty reseller prefix, acts as the definitive auth service only
for tokens that validate to a non-empty set of groups. For all other
requests, acts as the fallback auth service when no other auth
middleware overrides it.
Alternatively, if the request matches the self.auth_prefix, the request
will be routed through the internal auth request handler (self.handle).
This is to handle creating users, accounts, granting tokens, etc.
"""
if 'keystone.identity' in env:
return self.app(env, start_response)
# We're going to consider OPTIONS requests harmless and the CORS
# support in the Swift proxy needs to get them.
if env.get('REQUEST_METHOD') == 'OPTIONS':
return self.app(env, start_response)
if self.allow_overrides and env.get('swift.authorize_override', False):
return self.app(env, start_response)
if not self.swauth_remote:
if env.get('PATH_INFO', '') == self.auth_prefix[:-1]:
return HTTPMovedPermanently(add_slash=True)(env,
start_response)
elif env.get('PATH_INFO', '').startswith(self.auth_prefix):
return self.handle(env, start_response)
s3 = env.get('HTTP_AUTHORIZATION')
token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN'))
if token and len(token) > swauth.authtypes.MAX_TOKEN_LENGTH:
return HTTPBadRequest(body='Token exceeds maximum length.')(env,
start_response)
if s3 or (token and token.startswith(self.reseller_prefix)):
# Note: Empty reseller_prefix will match all tokens.
groups = self.get_groups(env, token)
if groups:
env['REMOTE_USER'] = groups
user = groups and groups.split(',', 1)[0] or ''
# We know the proxy logs the token, so we augment it just a bit
# to also log the authenticated user.
env['HTTP_X_AUTH_TOKEN'] = \
'%s,%s' % (user, 's3' if s3 else token)
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
if '.reseller_admin' in groups:
env['reseller_request'] = True
else:
# Unauthorized token
if self.reseller_prefix and token and \
token.startswith(self.reseller_prefix):
# Because I know I'm the definitive auth for this token, I
# can deny it outright.
return HTTPUnauthorized()(env, start_response)
# Because I'm not certain if I'm the definitive auth, I won't
# overwrite swift.authorize and I'll just set a delayed denial
# if nothing else overrides me.
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.denied_response
else:
if self.reseller_prefix:
# With a non-empty reseller_prefix, I would like to be called
# back for anonymous access to accounts I know I'm the
# definitive auth for.
try:
version, rest = split_path(env.get('PATH_INFO', ''),
1, 2, True)
except __HOLE__:
rest = None
if rest and rest.startswith(self.reseller_prefix):
# Handle anonymous access to accounts I'm the definitive
# auth for.
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
# Not my token, not my account, I can't authorize this request,
# deny all is a good idea if not already set...
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.denied_response
# Because I'm not certain if I'm the definitive auth for empty
# reseller_prefixed accounts, I won't overwrite swift.authorize.
elif 'swift.authorize' not in env:
env['swift.authorize'] = self.authorize
env['swift.clean_acl'] = clean_acl
return self.app(env, start_response)
|
ValueError
|
dataset/ETHPy150Open openstack/swauth/swauth/middleware.py/Swauth.__call__
|
5,288 |
def authorize(self, req):
"""Returns None if the request is authorized to continue or a standard
WSGI response callable if not.
"""
try:
version, account, container, obj = split_path(req.path, 1, 4, True)
except __HOLE__:
return HTTPNotFound(request=req)
if not account or not account.startswith(self.reseller_prefix):
return self.denied_response(req)
user_groups = (req.remote_user or '').split(',')
if '.reseller_admin' in user_groups and \
account != self.reseller_prefix and \
account[len(self.reseller_prefix)] != '.':
req.environ['swift_owner'] = True
return None
if account in user_groups and \
(req.method not in ('DELETE', 'PUT') or container):
# If the user is admin for the account and is not trying to do an
# account DELETE or PUT...
req.environ['swift_owner'] = True
return None
if (req.environ.get('swift_sync_key') and
req.environ['swift_sync_key'] ==
req.headers.get('x-container-sync-key', None) and
'x-timestamp' in req.headers and
(req.remote_addr in self.allowed_sync_hosts or
get_remote_client(req) in self.allowed_sync_hosts)):
return None
referrers, groups = parse_acl(getattr(req, 'acl', None))
if referrer_allowed(req.referer, referrers):
if obj or '.rlistings' in groups:
return None
return self.denied_response(req)
if not req.remote_user:
return self.denied_response(req)
for user_group in user_groups:
if user_group in groups:
return None
return self.denied_response(req)
|
ValueError
|
dataset/ETHPy150Open openstack/swauth/swauth/middleware.py/Swauth.authorize
|
5,289 |
def handle_request(self, req):
"""Entry point for auth requests (ones that match the self.auth_prefix).
Should return a WSGI-style callable (such as swob.Response).
:param req: swob.Request object
"""
req.start_time = time()
handler = None
try:
version, account, user, _junk = split_path(req.path_info,
minsegs=0, maxsegs=4, rest_with_last=True)
except __HOLE__:
return HTTPNotFound(request=req)
if version in ('v1', 'v1.0', 'auth'):
if req.method == 'GET':
handler = self.handle_get_token
elif version == 'v2':
if not self.super_admin_key:
return HTTPNotFound(request=req)
req.path_info_pop()
if req.method == 'GET':
if not account and not user:
handler = self.handle_get_reseller
elif account:
if not user:
handler = self.handle_get_account
elif account == '.token':
req.path_info_pop()
handler = self.handle_validate_token
else:
handler = self.handle_get_user
elif req.method == 'PUT':
if not user:
handler = self.handle_put_account
else:
handler = self.handle_put_user
elif req.method == 'DELETE':
if not user:
handler = self.handle_delete_account
else:
handler = self.handle_delete_user
elif req.method == 'POST':
if account == '.prep':
handler = self.handle_prep
elif user == '.services':
handler = self.handle_set_services
else:
handler = self.handle_webadmin
if not handler:
req.response = HTTPBadRequest(request=req)
else:
req.response = handler(req)
return req.response
|
ValueError
|
dataset/ETHPy150Open openstack/swauth/swauth/middleware.py/Swauth.handle_request
|
5,290 |
def handle_set_services(self, req):
"""Handles the POST v2/<account>/.services call for setting services
information. Can only be called by a reseller .admin.
In the :func:`handle_get_account` (GET v2/<account>) call, a section of
the returned JSON dict is `services`. This section looks something like
this::
"services": {"storage": {"default": "local",
"local": "http://127.0.0.1:8080/v1/AUTH_018c3946"}}
Making use of this section is described in :func:`handle_get_token`.
This function allows setting values within this section for the
<account>, allowing the addition of new service end points or updating
existing ones.
The body of the POST request should contain a JSON dict with the
following format::
{"service_name": {"end_point_name": "end_point_value"}}
There can be multiple services and multiple end points in the same
call.
Any new services or end points will be added to the existing set of
services and end points. Any existing services with the same service
name will be merged with the new end points. Any existing end points
with the same end point name will have their values updated.
The updated services dictionary will be returned on success.
:param req: The swob.Request to process.
:returns: swob.Response, 2xx on success with the updated services JSON
dict as described above
"""
if not self.is_reseller_admin(req):
return self.denied_response(req)
account = req.path_info_pop()
if req.path_info != '/.services' or not account or account[0] == '.':
return HTTPBadRequest(request=req)
try:
new_services = json.loads(req.body)
except __HOLE__ as err:
return HTTPBadRequest(body=str(err))
# Get the current services information
path = quote('/v1/%s/%s/.services' % (self.auth_account, account))
resp = self.make_pre_authed_request(
req.environ, 'GET', path).get_response(self.app)
if resp.status_int == 404:
return HTTPNotFound(request=req)
if resp.status_int // 100 != 2:
raise Exception('Could not obtain services info: %s %s' %
(path, resp.status))
services = json.loads(resp.body)
for new_service, value in new_services.iteritems():
if new_service in services:
services[new_service].update(value)
else:
services[new_service] = value
# Save the new services information
services = json.dumps(services)
resp = self.make_pre_authed_request(
req.environ, 'PUT', path, services).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not save .services object: %s %s' %
(path, resp.status))
return Response(request=req, body=services,
content_type=CONTENT_TYPE_JSON)
|
ValueError
|
dataset/ETHPy150Open openstack/swauth/swauth/middleware.py/Swauth.handle_set_services
|
5,291 |
def handle_get_token(self, req):
"""Handles the various `request for token and service end point(s)` calls.
There are various formats to support the various auth servers in the
past. Examples::
GET <auth-prefix>/v1/<act>/auth
X-Auth-User: <act>:<usr> or X-Storage-User: <usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
GET <auth-prefix>/auth
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
GET <auth-prefix>/v1.0
X-Auth-User: <act>:<usr> or X-Storage-User: <act>:<usr>
X-Auth-Key: <key> or X-Storage-Pass: <key>
Values should be url encoded, "act%3Ausr" instead of "act:usr" for
example; however, for backwards compatibility the colon may be included
unencoded.
On successful authentication, the response will have X-Auth-Token and
X-Storage-Token set to the token to use with Swift and X-Storage-URL
set to the URL to the default Swift cluster to use.
The response body will be set to the account's services JSON object as
described here::
{"storage": { # Represents the Swift storage service end points
"default": "cluster1", # Indicates which cluster is the default
"cluster1": "<URL to use with Swift>",
# A Swift cluster that can be used with this account,
# "cluster1" is the name of the cluster which is usually a
# location indicator (like "dfw" for a datacenter region).
"cluster2": "<URL to use with Swift>"
# Another Swift cluster that can be used with this account,
# there will always be at least one Swift cluster to use or
# this whole "storage" dict won't be included at all.
},
"servers": { # Represents the Nova server service end points
# Expected to be similar to the "storage" dict, but not
# implemented yet.
},
# Possibly other service dicts, not implemented yet.
}
One can also include an "X-Auth-New-Token: true" header to
force issuing a new token and revoking any old token, even if
it hasn't expired yet.
:param req: The swob.Request to process.
:returns: swob.Response, 2xx on success with data set as explained
above.
"""
# Validate the request info
try:
pathsegs = split_path(req.path_info, minsegs=1, maxsegs=3,
rest_with_last=True)
except __HOLE__:
return HTTPNotFound(request=req)
if pathsegs[0] == 'v1' and pathsegs[2] == 'auth':
account = pathsegs[1]
user = req.headers.get('x-storage-user')
if not user:
user = unquote(req.headers.get('x-auth-user', ''))
if not user or ':' not in user:
return HTTPUnauthorized(request=req)
account2, user = user.split(':', 1)
if account != account2:
return HTTPUnauthorized(request=req)
key = req.headers.get('x-storage-pass')
if not key:
key = unquote(req.headers.get('x-auth-key', ''))
elif pathsegs[0] in ('auth', 'v1.0'):
user = unquote(req.headers.get('x-auth-user', ''))
if not user:
user = req.headers.get('x-storage-user')
if not user or ':' not in user:
return HTTPUnauthorized(request=req)
account, user = user.split(':', 1)
key = unquote(req.headers.get('x-auth-key', ''))
if not key:
key = req.headers.get('x-storage-pass')
else:
return HTTPBadRequest(request=req)
if not all((account, user, key)):
return HTTPUnauthorized(request=req)
if user == '.super_admin' and self.super_admin_key and \
key == self.super_admin_key:
token = self.get_itoken(req.environ)
url = '%s/%s.auth' % (self.dsc_url, self.reseller_prefix)
return Response(
request=req,
content_type=CONTENT_TYPE_JSON,
body=json.dumps({'storage': {'default': 'local',
'local': url}}),
headers={'x-auth-token': token,
'x-storage-token': token,
'x-storage-url': url})
# Authenticate user
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
req.environ, 'GET', path).get_response(self.app)
if resp.status_int == 404:
return HTTPUnauthorized(request=req)
if resp.status_int // 100 != 2:
raise Exception('Could not obtain user details: %s %s' %
(path, resp.status))
user_detail = json.loads(resp.body)
if not self.credentials_match(user_detail, key):
return HTTPUnauthorized(request=req)
# See if a token already exists and hasn't expired
token = None
expires = None
candidate_token = resp.headers.get('x-object-meta-auth-token')
if candidate_token:
path = quote('/v1/%s/.token_%s/%s' %
(self.auth_account, candidate_token[-1], candidate_token))
delete_token = False
try:
if req.headers.get('x-auth-new-token', 'false').lower() in \
TRUE_VALUES:
delete_token = True
else:
resp = self.make_pre_authed_request(
req.environ, 'GET', path).get_response(self.app)
if resp.status_int // 100 == 2:
token_detail = json.loads(resp.body)
if token_detail['expires'] > time():
token = candidate_token
expires = token_detail['expires']
else:
delete_token = True
elif resp.status_int != 404:
raise Exception(
'Could not detect whether a token already exists: '
'%s %s' % (path, resp.status))
finally:
if delete_token:
self.make_pre_authed_request(
req.environ, 'DELETE', path).get_response(self.app)
memcache_client = cache_from_env(req.environ)
if memcache_client:
memcache_key = '%s/auth/%s' % (self.reseller_prefix,
candidate_token)
memcache_client.delete(memcache_key)
# Create a new token if one didn't exist
if not token:
# Retrieve account id, we'll save this in the token
path = quote('/v1/%s/%s' % (self.auth_account, account))
resp = self.make_pre_authed_request(
req.environ, 'HEAD', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not retrieve account id value: '
'%s %s' % (path, resp.status))
account_id = \
resp.headers['x-container-meta-account-id']
# Generate new token
token = '%stk%s' % (self.reseller_prefix, uuid4().hex)
# Save token info
path = quote('/v1/%s/.token_%s/%s' %
(self.auth_account, token[-1], token))
try:
token_life = min(
int(req.headers.get('x-auth-token-lifetime',
self.token_life)),
self.max_token_life)
except ValueError:
token_life = self.token_life
expires = int(time() + token_life)
resp = self.make_pre_authed_request(
req.environ, 'PUT', path,
json.dumps({'account': account, 'user': user,
'account_id': account_id,
'groups': user_detail['groups'],
'expires': expires})).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not create new token: %s %s' %
(path, resp.status))
# Record the token with the user info for future use.
path = quote('/v1/%s/%s/%s' % (self.auth_account, account, user))
resp = self.make_pre_authed_request(
req.environ, 'POST', path,
headers={'X-Object-Meta-Auth-Token': token}
).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not save new token: %s %s' %
(path, resp.status))
# Get the services information
path = quote('/v1/%s/%s/.services' % (self.auth_account, account))
resp = self.make_pre_authed_request(
req.environ, 'GET', path).get_response(self.app)
if resp.status_int // 100 != 2:
raise Exception('Could not obtain services info: %s %s' %
(path, resp.status))
detail = json.loads(resp.body)
url = detail['storage'][detail['storage']['default']]
return Response(
request=req,
body=resp.body,
content_type=CONTENT_TYPE_JSON,
headers={'x-auth-token': token,
'x-storage-token': token,
'x-auth-token-expires': str(int(expires - time())),
'x-storage-url': url})
|
ValueError
|
dataset/ETHPy150Open openstack/swauth/swauth/middleware.py/Swauth.handle_get_token
|
5,292 |
def establish_variables(self, x=None, y=None, hue=None, data=None,
orient=None, order=None, hue_order=None,
units=None):
"""Convert input specification into a common representation."""
# Option 1:
# We are plotting a wide-form dataset
# -----------------------------------
if x is None and y is None:
# Do a sanity check on the inputs
if hue is not None:
error = "Cannot use `hue` without `x` or `y`"
raise ValueError(error)
# No hue grouping with wide inputs
plot_hues = None
hue_title = None
hue_names = None
# No statistical units with wide inputs
plot_units = None
# We also won't get a axes labels here
value_label = None
group_label = None
# Option 1a:
# The input data is a Pandas DataFrame
# ------------------------------------
if isinstance(data, pd.DataFrame):
# Order the data correctly
if order is None:
order = []
# Reduce to just numeric columns
for col in data:
try:
data[col].astype(np.float)
order.append(col)
except __HOLE__:
pass
plot_data = data[order]
group_names = order
group_label = data.columns.name
# Convert to a list of arrays, the common representation
iter_data = plot_data.iteritems()
plot_data = [np.asarray(s, np.float) for k, s in iter_data]
# Option 1b:
# The input data is an array or list
# ----------------------------------
else:
# We can't reorder the data
if order is not None:
error = "Input data must be a pandas object to reorder"
raise ValueError(error)
# The input data is an array
if hasattr(data, "shape"):
if len(data.shape) == 1:
if np.isscalar(data[0]):
plot_data = [data]
else:
plot_data = list(data)
elif len(data.shape) == 2:
nr, nc = data.shape
if nr == 1 or nc == 1:
plot_data = [data.ravel()]
else:
plot_data = [data[:, i] for i in range(nc)]
else:
error = ("Input `data` can have no "
"more than 2 dimensions")
raise ValueError(error)
# Check if `data` is None to let us bail out here (for testing)
elif data is None:
plot_data = [[]]
# The input data is a flat list
elif np.isscalar(data[0]):
plot_data = [data]
# The input data is a nested list
# This will catch some things that might fail later
# but exhaustive checks are hard
else:
plot_data = data
# Convert to a list of arrays, the common representation
plot_data = [np.asarray(d, np.float) for d in plot_data]
# The group names will just be numeric indices
group_names = list(range((len(plot_data))))
# Figure out the plotting orientation
orient = "h" if str(orient).startswith("h") else "v"
# Option 2:
# We are plotting a long-form dataset
# -----------------------------------
else:
# See if we need to get variables from `data`
if data is not None:
x = data.get(x, x)
y = data.get(y, y)
hue = data.get(hue, hue)
units = data.get(units, units)
# Validate the inputs
for input in [x, y, hue, units]:
if isinstance(input, string_types):
err = "Could not interpret input '{}'".format(input)
raise ValueError(err)
# Figure out the plotting orientation
orient = self.infer_orient(x, y, orient)
# Option 2a:
# We are plotting a single set of data
# ------------------------------------
if x is None or y is None:
# Determine where the data are
vals = y if x is None else x
# Put them into the common representation
plot_data = [np.asarray(vals)]
# Get a label for the value axis
if hasattr(vals, "name"):
value_label = vals.name
else:
value_label = None
# This plot will not have group labels or hue nesting
groups = None
group_label = None
group_names = []
plot_hues = None
hue_names = None
hue_title = None
plot_units = None
# Option 2b:
# We are grouping the data values by another variable
# ---------------------------------------------------
else:
# Determine which role each variable will play
if orient == "v":
vals, groups = y, x
else:
vals, groups = x, y
# Get the categorical axis label
group_label = None
if hasattr(groups, "name"):
group_label = groups.name
# Get the order on the categorical axis
group_names = categorical_order(groups, order)
# Group the numeric data
plot_data, value_label = self._group_longform(vals, groups,
group_names)
# Now handle the hue levels for nested ordering
if hue is None:
plot_hues = None
hue_title = None
hue_names = None
else:
# Get the order of the hue levels
hue_names = categorical_order(hue, hue_order)
# Group the hue data
plot_hues, hue_title = self._group_longform(hue, groups,
group_names)
# Now handle the units for nested observations
if units is None:
plot_units = None
else:
plot_units, _ = self._group_longform(units, groups,
group_names)
# Assign object attributes
# ------------------------
self.orient = orient
self.plot_data = plot_data
self.group_label = group_label
self.value_label = value_label
self.group_names = group_names
self.plot_hues = plot_hues
self.hue_title = hue_title
self.hue_names = hue_names
self.plot_units = plot_units
|
ValueError
|
dataset/ETHPy150Open mwaskom/seaborn/seaborn/categorical.py/_CategoricalPlotter.establish_variables
|
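The wide-form branch above keeps only the columns that can be cast to float; the __HOLE__ marks the ValueError raised for non-numeric columns, which is silently swallowed. A small illustrative sketch of that filtering step, with a made-up DataFrame:

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", "z"], "c": [4.0, 5.0, 6.0]})

numeric_cols = []
for col in df:
    try:
        df[col].astype(float)          # raises ValueError for column "b"
        numeric_cols.append(col)
    except ValueError:
        pass

plot_data = [np.asarray(df[col], float) for col in numeric_cols]
print(numeric_cols)                     # ['a', 'c']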
5,293 |
def _group_longform(self, vals, grouper, order):
"""Group a long-form variable by another with correct order."""
# Ensure that the groupby will work
if not isinstance(vals, pd.Series):
vals = pd.Series(vals)
# Group the val data
grouped_vals = vals.groupby(grouper)
out_data = []
for g in order:
try:
g_vals = np.asarray(grouped_vals.get_group(g))
except __HOLE__:
g_vals = np.array([])
out_data.append(g_vals)
# Get the vals axis label
label = vals.name
return out_data, label
|
KeyError
|
dataset/ETHPy150Open mwaskom/seaborn/seaborn/categorical.py/_CategoricalPlotter._group_longform
|
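The KeyError in _group_longform covers group levels that appear in `order` but not in the data; those come back as empty arrays so every requested group still gets a slot. A compact sketch with invented data:

import numpy as np
import pandas as pd

vals = pd.Series([1.0, 2.0, 3.0], name="score")
grouper = pd.Series(["a", "a", "b"])
grouped = vals.groupby(grouper)

out = []
for g in ["a", "b", "c"]:               # "c" has no rows in the data
    try:
        out.append(np.asarray(grouped.get_group(g)))
    except KeyError:
        out.append(np.array([]))

print([len(a) for a in out])            # [2, 1, 0]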
5,294 |
def infer_orient(self, x, y, orient=None):
"""Determine how the plot should be oriented based on the data."""
orient = str(orient)
def is_categorical(s):
try:
# Correct way, but doesn't exist in older Pandas
return pd.core.common.is_categorical_dtype(s)
except __HOLE__:
# Also works, but feels hackier
return str(s.dtype) == "categorical"
def is_not_numeric(s):
try:
np.asarray(s, dtype=np.float)
except ValueError:
return True
return False
no_numeric = "Neither the `x` nor `y` variable appears to be numeric."
if orient.startswith("v"):
return "v"
elif orient.startswith("h"):
return "h"
elif x is None:
return "v"
elif y is None:
return "h"
elif is_categorical(y):
if is_categorical(x):
raise ValueError(no_numeric)
else:
return "h"
elif is_not_numeric(y):
if is_not_numeric(x):
raise ValueError(no_numeric)
else:
return "h"
else:
return "v"
|
AttributeError
|
dataset/ETHPy150Open mwaskom/seaborn/seaborn/categorical.py/_CategoricalPlotter.infer_orient
|
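The AttributeError fallback here papers over pandas versions where pd.core.common.is_categorical_dtype does not exist. A sketch of the same guard; note that the fallback below also accepts the modern dtype string "category", which is an addition for illustration and not part of the snippet:

import pandas as pd

def is_categorical(s):
    try:
        # Accessor used above; missing in newer pandas, which triggers
        # the AttributeError branch.
        return pd.core.common.is_categorical_dtype(s)
    except AttributeError:
        # Fall back on the dtype string ("category" in current pandas,
        # "categorical" in some very old releases).
        return str(s.dtype) in ("category", "categorical")

s = pd.Series(["low", "high", "low"], dtype="category")
print(is_categorical(s))                # True on either code path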
5,295 |
def annotate_axes(self, ax):
"""Add descriptive labels to an Axes object."""
if self.orient == "v":
xlabel, ylabel = self.group_label, self.value_label
else:
xlabel, ylabel = self.value_label, self.group_label
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if self.orient == "v":
ax.set_xticks(np.arange(len(self.plot_data)))
ax.set_xticklabels(self.group_names)
else:
ax.set_yticks(np.arange(len(self.plot_data)))
ax.set_yticklabels(self.group_names)
if self.orient == "v":
ax.xaxis.grid(False)
ax.set_xlim(-.5, len(self.plot_data) - .5)
else:
ax.yaxis.grid(False)
ax.set_ylim(-.5, len(self.plot_data) - .5)
if self.hue_names is not None:
leg = ax.legend(loc="best")
if self.hue_title is not None:
leg.set_title(self.hue_title)
# Set the title size in a roundabout way to maintain
# compatibility with matplotlib 1.1
try:
title_size = mpl.rcParams["axes.labelsize"] * .85
except __HOLE__: # labelsize is something like "large"
title_size = mpl.rcParams["axes.labelsize"]
prop = mpl.font_manager.FontProperties(size=title_size)
leg._legend_title_box._text.set_font_properties(prop)
|
TypeError
|
dataset/ETHPy150Open mwaskom/seaborn/seaborn/categorical.py/_CategoricalPlotter.annotate_axes
|
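The TypeError branch in annotate_axes handles rcParams values such as "large", which cannot be multiplied by a float. A small sketch of just that fallback:

import matplotlib as mpl

def legend_title_size():
    try:
        return mpl.rcParams["axes.labelsize"] * .85
    except TypeError:   # the rcParam is a named size such as "large"
        return mpl.rcParams["axes.labelsize"]

mpl.rcParams["axes.labelsize"] = "large"
print(legend_title_size())              # 'large'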
5,296 |
def fit_kde(self, x, bw):
"""Estimate a KDE for a vector of data with flexible bandwidth."""
# Allow for the use of old scipy where `bw` is fixed
try:
kde = stats.gaussian_kde(x, bw)
except __HOLE__:
kde = stats.gaussian_kde(x)
if bw != "scott": # scipy default
msg = ("Ignoring bandwidth choice, "
"please upgrade scipy to use a different bandwidth.")
warnings.warn(msg, UserWarning)
# Extract the numeric bandwidth from the KDE object
bw_used = kde.factor
# At this point, bw will be a numeric scale factor.
# To get the actual bandwidth of the kernel, we multiply by the
# unbiased standard deviation of the data, which we will use
# elsewhere to compute the range of the support.
bw_used = bw_used * x.std(ddof=1)
return kde, bw_used
|
TypeError
|
dataset/ETHPy150Open mwaskom/seaborn/seaborn/categorical.py/_ViolinPlotter.fit_kde
|
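fit_kde relies on a TypeError to detect scipy builds whose gaussian_kde does not accept a bandwidth argument. A trimmed sketch of the same fallback, using made-up data:

import warnings
import numpy as np
from scipy import stats

def fit_kde(x, bw="scott"):
    # Recent scipy takes the bandwidth as a second argument; very old
    # releases only take the data and raise TypeError instead.
    try:
        kde = stats.gaussian_kde(x, bw)
    except TypeError:
        kde = stats.gaussian_kde(x)
        if bw != "scott":
            warnings.warn("Ignoring bandwidth choice; upgrade scipy.",
                          UserWarning)
    # kde.factor is a scale factor; multiply by the sample std to get
    # the kernel bandwidth in data units.
    return kde, kde.factor * x.std(ddof=1)

kde, bw_used = fit_kde(np.random.RandomState(0).randn(100), bw="silverman")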
5,297 |
def _lv_box_ends(self, vals, k_depth='proportion', outlier_prop=None):
"""Get the number of data points and calculate `depth` of
letter-value plot."""
vals = np.asarray(vals)
vals = vals[np.isfinite(vals)]
n = len(vals)
# If outlier_prop is not given, fall back to the conventional proportion
if not outlier_prop:
# Conventional boxplots assume this proportion of the data are
# outliers.
p = 0.007
else:
if ((outlier_prop > 1.) or (outlier_prop < 0.)):
raise ValueError('outlier_prop not in range [0, 1]!')
p = outlier_prop
# Select the depth, i.e. number of boxes to draw, based on the method
k_dict = {'proportion': (np.log2(n)) - int(np.log2(n*p)) + 1,
'tukey': (np.log2(n)) - 3,
'trustworthy': (np.log2(n) -
np.log2(2*stats.norm.ppf((1-p))**2)) + 1}
k = k_dict[k_depth]
try:
k = int(k)
except __HOLE__:
k = 1
# If the depth happens to be less than 1, set k to 1
if k < 1.:
k = 1
# Calculate the upper box ends
upper = [100*(1 - 0.5**(i+2)) for i in range(k, -1, -1)]
# Calculate the lower box ends
lower = [100*(0.5**(i+2)) for i in range(k, -1, -1)]
# Stitch the box ends together
percentile_ends = [(i, j) for i, j in zip(lower, upper)]
box_ends = [np.percentile(vals, q) for q in percentile_ends]
return box_ends, k
|
ValueError
|
dataset/ETHPy150Open mwaskom/seaborn/seaborn/categorical.py/_LVPlotter._lv_box_ends
|
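The percentile arithmetic in _lv_box_ends is easier to see with a fixed depth: box i covers the (0.5**(i+2), 1 - 0.5**(i+2)) quantile pair. A worked sketch with invented data:

import numpy as np

k = 2
upper = [100 * (1 - 0.5 ** (i + 2)) for i in range(k, -1, -1)]
lower = [100 * (0.5 ** (i + 2)) for i in range(k, -1, -1)]
percentile_ends = list(zip(lower, upper))

vals = np.arange(101)
box_ends = [np.percentile(vals, q) for q in percentile_ends]
print(percentile_ends)   # [(6.25, 93.75), (12.5, 87.5), (25.0, 75.0)]
print(box_ends)          # each entry is the matching pair of data values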
5,298 |
def factorplot(x=None, y=None, hue=None, data=None, row=None, col=None,
col_wrap=None, estimator=np.mean, ci=95, n_boot=1000,
units=None, order=None, hue_order=None, row_order=None,
col_order=None, kind="point", size=4, aspect=1,
orient=None, color=None, palette=None,
legend=True, legend_out=True, sharex=True, sharey=True,
margin_titles=False, facet_kws=None, **kwargs):
# Handle some deprecated arguments
if "hline" in kwargs:
kwargs.pop("hline")
warnings.warn("The `hline` parameter has been removed", UserWarning)
if "dropna" in kwargs:
kwargs.pop("dropna")
warnings.warn("The `dropna` parameter has been removed", UserWarning)
if "x_order" in kwargs:
order = kwargs.pop("x_order")
warnings.warn("The `x_order` parameter has been renamed `order`",
UserWarning)
# Determine the plotting function
try:
plot_func = globals()[kind + "plot"]
except __HOLE__:
err = "Plot kind '{}' is not recognized".format(kind)
raise ValueError(err)
# Alias the input variables to determine categorical order and palette
# correctly in the case of a count plot
if kind == "count":
if x is None and y is not None:
x_, y_, orient = y, y, "h"
elif y is None and x is not None:
x_, y_, orient = x, x, "v"
else:
raise ValueError("Either `x` or `y` must be None for count plots")
else:
x_, y_ = x, y
# Determine the order for the whole dataset, which will be used in all
# facets to ensure representation of all data in the final plot
p = _CategoricalPlotter()
p.establish_variables(x_, y_, hue, data, orient, order, hue_order)
order = p.group_names
hue_order = p.hue_names
# Determine the palette to use
# (FacetGrid will pass a value for ``color`` to the plotting function
# so we need to define ``palette`` to get default behavior for the
# categorical functions)
p.establish_colors(color, palette, 1)
if kind != "point" or hue is not None:
palette = p.colors
# Determine keyword arguments for the facets
facet_kws = {} if facet_kws is None else facet_kws
facet_kws.update(
data=data, row=row, col=col,
row_order=row_order, col_order=col_order,
col_wrap=col_wrap, size=size, aspect=aspect,
sharex=sharex, sharey=sharey,
legend_out=legend_out, margin_titles=margin_titles,
dropna=False,
)
# Determine keyword arguments for the plotting function
plot_kws = dict(
order=order, hue_order=hue_order,
orient=orient, color=color, palette=palette,
)
plot_kws.update(kwargs)
if kind in ["bar", "point"]:
plot_kws.update(
estimator=estimator, ci=ci, n_boot=n_boot, units=units,
)
# Initialize the facets
g = FacetGrid(**facet_kws)
# Draw the plot onto the facets
g.map_dataframe(plot_func, x, y, hue, **plot_kws)
# Special case axis labels for a count type plot
if kind == "count":
if x is None:
g.set_axis_labels(x_var="count")
if y is None:
g.set_axis_labels(y_var="count")
if legend and (hue is not None) and (hue not in [x, row, col]):
hue_order = list(map(str, hue_order))
g.add_legend(title=hue, label_order=hue_order)
return g
|
KeyError
|
dataset/ETHPy150Open mwaskom/seaborn/seaborn/categorical.py/factorplot
|
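The KeyError in factorplot comes from the globals()[kind + "plot"] lookup and is re-raised as a friendlier ValueError. A sketch of that dispatch with a plain dict standing in for the module namespace; the plotter functions here are placeholders:

def barplot(**kws):
    pass

def boxplot(**kws):
    pass

_plotters = {"barplot": barplot, "boxplot": boxplot}

def resolve_kind(kind):
    try:
        return _plotters[kind + "plot"]
    except KeyError:
        raise ValueError("Plot kind '{}' is not recognized".format(kind))

resolve_kind("bar")      # returns barplot
# resolve_kind("pie")    # raises ValueError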
5,299 |
def emit(self, record):
"""
Emit a record.
Output the record to the file, catering for rollover as described
in doRollover().
"""
try:
if self.shouldRollover(record):
self.doRollover()
logging.FileHandler.emit(self, record)
except (__HOLE__, SystemExit):
raise
except:
self.handleError(record)
|
KeyboardInterrupt
|
dataset/ETHPy150Open babble/babble/include/jython/Lib/logging/handlers.py/BaseRotatingHandler.emit
|
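The guard in emit lets control-flow exceptions escape while routing everything else to handleError, so a logging failure never kills the process. A minimal sketch of the same pattern on a stand-in handler (not the stdlib class):

import logging

class SafeHandler(logging.StreamHandler):
    """Stand-in handler illustrating the same guard as emit() above."""

    def emit(self, record):
        try:
            super(SafeHandler, self).emit(record)
        except (KeyboardInterrupt, SystemExit):
            # Let control-flow exceptions propagate untouched.
            raise
        except Exception:
            # Everything else goes to the standard error hook.
            self.handleError(record)

logging.getLogger("demo").addHandler(SafeHandler())
logging.getLogger("demo").warning("rollover-safe emit")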