Each row pairs a Python function containing a masked exception handler (`__HOLE__`) with the exception class that fills the hole (the label column) and the source file it was drawn from (the info column).

Unnamed: 0 (int64, 0–10k) | function (stringlengths 79–138k) | label (stringclasses 20 values) | info (stringlengths 42–261)
---|---|---|---|
7,300 | def test_PortageInstaller():
    if not is_gentoo():
        print("Skipping not Gentoo")
        return
    from rosdep2.platforms.gentoo import PortageInstaller
    @patch.object(PortageInstaller, 'get_packages_to_install')
    def test(mock_method):
        installer = PortageInstaller()
        mock_method.return_value = []
        assert [] == installer.get_install_command(['fake'])
        mock_method.return_value = ['a', 'b']
        expected = [['sudo', '-H', 'emerge', 'a'],
                    ['sudo', '-H', 'emerge', 'b']]
        val = installer.get_install_command(['whatever'], interactive=False)
        assert val == expected, val
        expected = [['sudo', '-H', 'emerge', '-a', 'a'],
                    ['sudo', '-H', 'emerge', '-a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=True)
        assert val == expected, val
    try:
        test()
    except __HOLE__:
        traceback.print_exc()
        raise | AssertionError | dataset/ETHPy150Open ros-infrastructure/rosdep/test/test_rosdep_gentoo.py/test_PortageInstaller |
7,301 | @classmethod
def deserialize(cls, fd, **kw):
    try:
        return cls._deserialize(fd, **kw)
    except (struct.error, __HOLE__) as err:
        raise cls.exception(err) | IOError | dataset/ETHPy150Open chrippa/livestreamer/src/livestreamer/packages/flashmedia/packet.py/Packet.deserialize |
7,302 | @classmethod
def deserialize_from(cls, buf, offset, **kw):
    try:
        return cls._deserialize_from(buf, offset, **kw)
    except (struct.error, __HOLE__) as err:
        raise cls.exception(err) | IOError | dataset/ETHPy150Open chrippa/livestreamer/src/livestreamer/packages/flashmedia/packet.py/Packet.deserialize_from |
7,303 | def ready(self):
self.responding = False
self.synchronised = False
self.get_cpu_average()
try:
self._proxy.getinfo()
self.responding = True
except (__HOLE__, socket.error, httplib.CannotSendRequest) as e:
# print "daemon offline"
pass
except JSONRPCException as e:
# "loading block index"
# print str(e.error['message'])
pass
try:
resp = self._proxy.masternode('debug')
if 'Node just started' not in resp:
self.synchronised = True
except (ValueError, socket.error, httplib.CannotSendRequest) as e:
# print "daemon offline"
pass
except JSONRPCException as e:
resp = str(e.error['message'])
if 'masternode' in resp:
if self.get_cpu_average() < 50:
self.synchronised = True
logmsg = self.responding and 'responding, ' or 'not responding, '
logmsg += self.synchronised and 'synchronised, ' or 'not synchronised, '
logmsg += 'cpu: ' + "{0:.2f}".format(self.get_cpu_average())
info(logmsg)
return (self.responding and self.synchronised)
# doesn't work due to
# name = "%s.%s" % (self.__service_name, name)
# return AuthServiceProxy(self.__service_url, name, connection=self.__conn) # noqa
# def __getattr__(self, attr):
# if getattr(self._proxy, attr):
# getattr(self._proxy, attr)()
# else:
# raise AttributeError | ValueError | dataset/ETHPy150Open moocowmoo/dashvend/bin/dashvend/dashrpc.py/DashRPC.ready |
7,304 | @stockholm.sniffer()
def _stockholm_sniffer(fh):
    # Smells a Stockholm file if the following conditions are met:
    # - File isn't empty
    # - File contains correct header
    try:
        line = next(fh)
    except __HOLE__:
        return False, {}
    if _is_header(line):
        return True, {}
    return False, {} | StopIteration | dataset/ETHPy150Open biocore/scikit-bio/skbio/io/format/stockholm.py/_stockholm_sniffer |
7,305 | @stockholm.reader(TabularMSA)
def _stockholm_to_tabular_msa(fh, constructor=None):
# Checks that user has passed required constructor parameter
if constructor is None:
raise ValueError("Must provide `constructor` parameter indicating the "
"type of sequences in the alignment. `constructor` "
"must be a subclass of `GrammaredSequence` "
"(e.g., `DNA`, `RNA`, `Protein`).")
# Checks that contructor parameter is supported
elif not issubclass(constructor, GrammaredSequence):
raise TypeError("`constructor` must be a subclass of "
"`GrammaredSequence`.")
# Checks that the file isn't empty
try:
line = next(fh)
except __HOLE__:
raise StockholmFormatError("File is empty.")
# Checks that the file follows basic format (includes the required header)
if not _is_header(line):
raise StockholmFormatError("File missing required Stockholm header "
"line.")
msa_data = _MSAData()
for line in fh:
if line.isspace():
continue
line = line.rstrip('\n')
if _is_sequence_line(line):
seq_name, seq_data = _parse_sequence_line(line)
msa_data.add_sequence(seq_name, seq_data)
elif line.startswith("#=GF"):
feature_name, feature_data = _parse_gf_line(line)
msa_data.add_gf_metadata(feature_name, feature_data)
elif line.startswith("#=GS"):
seq_name, feature_name, feature_data = _parse_gs_line(line)
msa_data.add_gs_metadata(seq_name, feature_name, feature_data)
elif line.startswith("#=GR"):
seq_name, feature_name, feature_data = _parse_gr_line(line)
msa_data.add_gr_metadata(seq_name, feature_name, feature_data)
elif line.startswith('#=GC'):
feature_name, feature_data = _parse_gc_line(line)
msa_data.add_gc_metadata(feature_name, feature_data)
elif _is_footer(line):
break
else:
raise StockholmFormatError("Unrecognized line: %r" % line)
if not _is_footer(line):
raise StockholmFormatError('Final line does not conform to Stockholm '
'format. Must contain only "//".')
return msa_data.build_tabular_msa(constructor)
# For storing intermediate data used to construct a Sequence object. | StopIteration | dataset/ETHPy150Open biocore/scikit-bio/skbio/io/format/stockholm.py/_stockholm_to_tabular_msa |
7,306 | def import_module(module_name):
    """
    Given a dotted Python path, imports & returns the module.
    If not found, raises ``UnknownModuleError``.
    Ex::
        mod = import_module('random')
    :param module_name: The dotted Python path
    :type module_name: string
    :returns: module
    """
    try:
        return importlib.import_module(module_name)
    except __HOLE__ as err:
        raise UnknownModuleError(str(err)) | ImportError | dataset/ETHPy150Open toastdriven/alligator/alligator/utils.py/import_module |
7,307 | def import_attr(module_name, attr_name):
    """
    Given a dotted Python path & an attribute name, imports the module &
    returns the attribute.
    If not found, raises ``UnknownCallableError``.
    Ex::
        choice = import_attr('random', 'choice')
    :param module_name: The dotted Python path
    :type module_name: string
    :param attr_name: The attribute name
    :type attr_name: string
    :returns: attribute
    """
    module = import_module(module_name)
    try:
        return getattr(module, attr_name)
    except __HOLE__ as err:
        raise UnknownCallableError(str(err)) | AttributeError | dataset/ETHPy150Open toastdriven/alligator/alligator/utils.py/import_attr |
7,308 | @require_POST
@csrf_exempt
def import_submission_for_form(request, username, id_string):
    """ Retrieve and process submission from SMSSync Request """
    sms_identity = request.POST.get('from_number', '').strip()
    sms_text = request.POST.get('content', '').strip()
    now_timestamp = datetime.datetime.now().strftime('%s')
    sent_timestamp = request.POST.get('time_created', now_timestamp).strip()
    try:
        sms_time = datetime.datetime.fromtimestamp(float(sent_timestamp))
    except __HOLE__:
        sms_time = datetime.datetime.now()
    return process_message_for_telerivet(username=username,
                                         sms_identity=sms_identity,
                                         sms_text=sms_text,
                                         sms_time=sms_time,
                                         id_string=id_string) | ValueError | dataset/ETHPy150Open kobotoolbox/kobocat/onadata/apps/sms_support/providers/telerivet.py/import_submission_for_form |
7,309 | def valid_host_source(value):
    try:
        section = SECTIONS["hosts:" + value]
    except __HOLE__:
        raise ValueError("invalid host source: %r" % value)
    section.required = True
    return value | KeyError | dataset/ETHPy150Open reddit/push/push/config.py/HostsConfig.valid_host_source |
7,310 | @register.filter
def intcomma(value, use_l10n=True):
    """
    Converts an integer to a string containing commas every three digits.
    For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
    """
    if settings.USE_L10N and use_l10n:
        try:
            if not isinstance(value, float):
                value = int(value)
        except (__HOLE__, ValueError):
            return intcomma(value, False)
        else:
            return jinja2.Markup(number_format(value, force_grouping=True))
    orig = force_unicode(value)
    new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
    if orig == new:
        return jinja2.Markup(new)
    else:
        return intcomma(new, use_l10n) | TypeError | dataset/ETHPy150Open crate-archive/crate-site/crateweb/apps/jhumanize/helpers.py/intcomma |
7,311 | def is_numeric(x):
    try:
        int(x)
        return True
    except __HOLE__:
        return False | ValueError | dataset/ETHPy150Open kootenpv/sky/sky/view/view.py/is_numeric |
7,312 | def _jump_to_in_window(self, filename, line_number=None, column_number=None, transient=False):
""" Opens a new window and jumps to declaration if possible
:param filename: string or int
:param line_number: int
:param column_number: int
:param transient: bool
If transient is True, opens a transient view
"""
active_window = self.view.window()
# restore saved location
try:
if self.view.sel()[0] != self.point:
self.view.sel().clear()
self.view.sel().add(self.point)
except __HOLE__:
# called without setting self.point
pass
# If the file was selected from a drop down list
if isinstance(filename, int):
if filename == -1: # cancelled
# restore view
active_window.focus_view(self.view)
self.view.show(self.point)
return
filename, line_number, column_number = self.options[filename]
flags = self.prepare_layout(active_window, transient, filename)
active_window.open_file('%s:%s:%s' % (filename, line_number or 0,
column_number or 0), flags) | AttributeError | dataset/ETHPy150Open srusskih/SublimeJEDI/sublime_jedi/go_to.py/BaseLookUpJediCommand._jump_to_in_window |
7,313 | def coerce_put_post(request):
    """
    Django doesn't particularly understand REST.
    In case we send data over PUT, Django won't
    actually look at the data and load it. We need
    to twist its arm here.
    The try/except abominiation here is due to a bug
    in mod_python. This should fix it.
    Function from django-piston project.
    """
    if request.method == "PUT":
        try:
            request.method = "POST"
            request._load_post_and_files()
            request.method = "PUT"
        except __HOLE__:
            request.META['REQUEST_METHOD'] = 'POST'
            request._load_post_and_files()
            request.META['REQUEST_METHOD'] = 'PUT'
        request.PUT = request.POST | AttributeError | dataset/ETHPy150Open benoitc/dj-revproxy/revproxy/util.py/coerce_put_post |
7,314 | def import_conn_manager(module):
    parts = module.rsplit(":", 1)
    if len(parts) == 1:
        raise ImportError("can't import handler '%s'" % module)
    module, obj = parts[0], parts[1]
    try:
        __import__(module)
    except __HOLE__:
        if module.endswith(".py") and os.path.exists(module):
            raise ImportError("Failed to find manager, did "
                              "you mean '%s:%s'?" % (module.rsplit(".",1)[0], obj))
    mod = sys.modules[module]
    mgr = eval(obj, mod.__dict__)
    if mgr is None:
        raise ImportError("Failed to find manager object: %r" % mgr)
    return mgr | ImportError | dataset/ETHPy150Open benoitc/dj-revproxy/revproxy/util.py/import_conn_manager |
7,315 | @classmethod
def _init_dependencies(cls):
    global db
    if db is not None:
        return
    try:
        db = __import__('google.appengine.ext.db').appengine.ext.db
    except __HOLE__:
        raise InvalidCacheBackendError("Datastore cache backend requires the "
                                       "'google.appengine.ext' library") | ImportError | dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/beaker/ext/google.py/GoogleNamespaceManager._init_dependencies |
7,316 | def do_open(self, flags, replace):
    # If we already loaded the data, don't bother loading it again
    if self.loaded:
        self.flags = flags
        return
    item = self.cache.get_by_key_name(self.namespace)
    if not item:
        self._is_new = True
        self.hash = {}
    else:
        self._is_new = False
        try:
            self.hash = cPickle.loads(str(item.data))
        except (IOError, __HOLE__, EOFError, cPickle.PickleError):
            if self.log_debug:
                log.debug("Couln't load pickle data, creating new storage")
            self.hash = {}
            self._is_new = True
    self.flags = flags
    self.loaded = True | OSError | dataset/ETHPy150Open goFrendiAsgard/kokoropy/kokoropy/packages/beaker/ext/google.py/GoogleNamespaceManager.do_open |
7,317 | def authorize(self, username, password, hosting_url, *args, **kwargs):
"""Authorize the Review Board Gateway repository.
Review Board Gateway uses HTTP Basic Auth, so this will store the
provided password, encrypted, for use in later API requests.
Similar to GitLab's API, Review Board Gateway will return a private
token on session authentication.
"""
try:
data, headers = self.client.json_post(
url=hosting_url + '/session',
username=username,
password=password)
except __HOLE__ as e:
if e.code == 404:
raise HostingServiceError(
ugettext('A Review Board Gateway server was not found at '
'the provided URL.'))
elif e.code == 401:
raise AuthorizationError(
ugettext('The username or password is incorrect.'))
else:
logging.warning('Failed authorization at %s: %s',
hosting_url + '/session', e, exc_info=1)
raise
self.account.data['private_token'] = \
encrypt_password(data['private_token'])
self.account.save() | HTTPError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/hostingsvcs/rbgateway.py/ReviewBoardGateway.authorize |
7,318 | def get_file(self, repository, path, revision, base_commit_id, *args,
**kwargs):
"""Get a file from ReviewBoardGateway.
This will perform an API request to fetch the contents of a file.
"""
url = self._get_file_url(repository, revision, base_commit_id, path)
try:
data, is_new = self._api_get(url)
return data
except (__HOLE__, URLError) as e:
if e.code == 404:
raise FileNotFoundError(path, revision)
else:
logging.warning('Failed to get file from %s: %s',
url, e, exc_info=1)
raise SCMError(six.text_type(e)) | HTTPError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/hostingsvcs/rbgateway.py/ReviewBoardGateway.get_file |
7,319 | def get_file_exists(self, repository, path, revision, base_commit_id,
*args, **kwargs):
"""Check whether a file exists in ReviewBoardGateway.
This will perform an API request to fetch the meta_data of a file.
"""
url = self._get_file_url(repository, revision, base_commit_id, path)
try:
self._api_head(url)
return True
except (__HOLE__, URLError) as e:
if e.code == 404:
return False
else:
logging.warning('Failed to get file exists from %s: %s',
url, e, exc_info=1)
raise SCMError(six.text_type(e)) | HTTPError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/hostingsvcs/rbgateway.py/ReviewBoardGateway.get_file_exists |
7,320 | def _api_get(self, url):
"""Make a GET request to the Review Board Gateway API.
Delegate to the client's http_get function but first add a
PRIVATE-TOKEN in the header for authentication.
"""
try:
data, headers = self.client.http_get(
url,
headers={
'PRIVATE-TOKEN': self._get_private_token(),
})
return data, headers
except __HOLE__ as e:
if e.code == 401:
raise AuthorizationError(
ugettext('The login or password is incorrect.'))
elif e.code == 404:
raise
else:
logging.warning('Failed to execute a GET request at %s: %s',
url, e, exc_info=1)
raise | HTTPError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/hostingsvcs/rbgateway.py/ReviewBoardGateway._api_get |
7,321 | def _api_head(self, url):
"""Make a HEAD request to the Review Board Gateway API.
Delegate to the client's http_request function using the method
HEAD but first add a PRIVATE-TOKEN in the header for authentication.
"""
try:
data, headers = self.client.http_request(
url,
headers={
'PRIVATE-TOKEN': self._get_private_token(),
},
method='HEAD')
return headers
except __HOLE__ as e:
if e.code == 401:
raise AuthorizationError(
ugettext('The login or password is incorrect.'))
elif e.code == 404:
raise
else:
logging.warning('Failed to execute a HEAD request at %s: %s',
url, e, exc_info=1)
raise | HTTPError | dataset/ETHPy150Open reviewboard/reviewboard/reviewboard/hostingsvcs/rbgateway.py/ReviewBoardGateway._api_head |
7,322 | def _cast_to_float(self, value):
    try:
        return float(value)
    except (__HOLE__, ValueError):
        raise TypeError("Non-numerical value: %r" % value) | TypeError | dataset/ETHPy150Open openstack/rally/rally/common/streaming_algorithms.py/StreamingAlgorithm._cast_to_float |
7,323 | @positional(1)
def get_package_for_module(module):
"""Get package name for a module.
Helper calculates the package name of a module.
Args:
module: Module to get name for. If module is a string, try to find
module in sys.modules.
Returns:
If module contains 'package' attribute, uses that as package name.
Else, if module is not the '__main__' module, the module __name__.
Else, the base name of the module file name. Else None.
"""
if isinstance(module, basestring):
try:
module = sys.modules[module]
except __HOLE__:
return None
try:
return unicode(module.package)
except AttributeError:
if module.__name__ == '__main__':
try:
file_name = module.__file__
except AttributeError:
pass
else:
base_name = os.path.basename(file_name)
split_name = os.path.splitext(base_name)
if len(split_name) == 1:
return unicode(base_name)
else:
return u'.'.join(split_name[:-1])
return unicode(module.__name__) | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/protorpc/protorpc/util.py/get_package_for_module |
7,324 | def ModularIntegerFactory(_mod, _dom, _sym, parent):
"""Create custom class for specific integer modulus."""
try:
_mod = _dom.convert(_mod)
except CoercionFailed:
ok = False
else:
ok = True
if not ok or _mod < 1:
raise ValueError("modulus must be a positive integer, got %s" % _mod)
key = _mod, _dom, _sym
try:
cls = _modular_integer_cache[key]
except __HOLE__:
class cls(ModularInteger):
mod, dom, sym = _mod, _dom, _sym
_parent = parent
if _sym:
cls.__name__ = "SymmetricModularIntegerMod%s" % _mod
else:
cls.__name__ = "ModularIntegerMod%s" % _mod
_modular_integer_cache[key] = cls
return cls | KeyError | dataset/ETHPy150Open sympy/sympy/sympy/polys/domains/modularinteger.py/ModularIntegerFactory |
7,325 | def has_capability(self, *args):
    caps = self.capabilities
    try:
        for arg in args:
            caps = caps[arg]
        # If only part of a capability path is specified, we don't want
        # to evaluate to True just because it has contents. We want to
        # only say we have a capability if it is indeed 'True'.
        return caps is True
    except (TypeError, __HOLE__):
        # The server either doesn't support the capability,
        # or returned no capabilities at all.
        return False | KeyError | dataset/ETHPy150Open reviewboard/rbtools/rbtools/api/capabilities.py/Capabilities.has_capability |
7,326 | def _ensure_is_connected(self):
    if not self._is_connected:
        try:
            port = int(self.settings_dict['PORT'])
        except __HOLE__:
            raise ImproperlyConfigured("PORT must be an integer")
        self.db_name = self.settings_dict['NAME']
        self._connection = ES("%s:%s" % (self.settings_dict['HOST'], port),
                              decoder=Decoder,
                              encoder=Encoder,
                              autorefresh=True,
                              default_indices=[self.db_name])
        self._db_connection = self._connection
        #auto index creation: check if to remove
        try:
            self._connection.create_index(self.db_name)
        except:
            pass
        # We're done!
        self._is_connected = True | ValueError | dataset/ETHPy150Open aparo/django-elasticsearch/django_elasticsearch/base.py/DatabaseWrapper._ensure_is_connected |
7,327 | def process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id):
form = linedict['form_parser']
## Mark memo-ized rows as being superceded by an amendment.
try:
if linedict['memo_code']=='X':
linedict['superceded_by_amendment'] = True
except __HOLE__:
pass
#print "processing form type: %s" % (form)
if form=='SchA':
skeda_from_skedadict(linedict, filingnum, header_id, is_amended, cd)
elif form=='SchB':
skedb_from_skedbdict(linedict, filingnum, header_id, is_amended, cd)
elif form=='SchE':
skede_from_skededict(linedict, filingnum, header_id, is_amended, cd)
# Treat 48-hour contribution notices like sked A.
# Requires special handling for amendment, since these are superceded
# by regular F3 forms.
elif form=='F65':
skeda_from_f65(linedict, filingnum, header_id, is_amended, cd)
# disclosed donor to non-commmittee. Sorta rare, but..
elif form=='F56':
skeda_from_f56(linedict, filingnum, header_id, is_amended, cd)
# disclosed electioneering donor
elif form=='F92':
skeda_from_f92(linedict, filingnum, header_id, is_amended, cd)
# inaugural donors
elif form=='F132':
skeda_from_f132(linedict, filingnum, header_id, is_amended, cd)
#inaugural refunds
elif form=='F133':
skeda_from_f133(linedict, filingnum, header_id, is_amended, cd)
# IE's disclosed by non-committees. Note that they use this for * both * quarterly and 24- hour notices. There's not much consistency with this--be careful with superceding stuff.
elif form=='F57':
skede_from_f57(linedict, filingnum, header_id, is_amended, cd)
# Its another kind of line. Just dump it in Other lines.
else:
otherline_from_line(linedict, filingnum, header_id, is_amended, cd, filer_id) | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/formdata/utils/filing_body_processor_fix_hack.py/process_body_row |
7,328 | def process_filing_body(filingnum, fp=None, logger=None):
#It's useful to pass the form parser in when running in bulk so we don't have to keep creating new ones.
if not fp:
fp = form_parser()
if not logger:
logger=fec_logger()
msg = "process_filing_body: Starting # %s" % (filingnum)
#print msg
logger.info(msg)
connection = get_connection()
cursor = connection.cursor()
cmd = "select fec_id, is_superceded, data_is_processed from fec_alerts_new_filing where filing_number=%s" % (filingnum)
cursor.execute(cmd)
cd = CSV_dumper(connection)
result = cursor.fetchone()
if not result:
msg = 'process_filing_body: Couldn\'t find a new_filing for filing %s' % (filingnum)
logger.error(msg)
raise FilingHeaderDoesNotExist(msg)
# will throw a TypeError if it's missing.
header_id = 1
is_amended = result[1]
is_already_processed = result[2]
if is_already_processed:
msg = 'process_filing_body: This filing has already been entered.'
logger.error(msg)
raise FilingHeaderAlreadyProcessed(msg)
#print "Processing filing %s" % (filingnum)
try:
f1 = filing(filingnum)
except:
print "*** couldn't handle filing %s" % (filingnum)
return False
form = f1.get_form_type()
version = f1.get_version()
filer_id = f1.get_filer_id()
# only parse forms that we're set up to read
if not fp.is_allowed_form(form):
if verbose:
msg = "process_filing_body: Not a parseable form: %s - %s" % (form, filingnum)
# print msg
logger.error(msg)
return None
linenum = 0
while True:
linenum += 1
row = f1.get_body_row()
if not row:
break
#print "row is %s" % (row)
#print "\n\n\nForm is %s" % form
try:
linedict = fp.parse_form_line(row, version)
if linedict['form_type'].upper().startswith('SE'):
print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
# make sure the transaction isn't already there before entering.
try:
SkedE.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
except SkedE.DoesNotExist:
process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)
elif linedict['form_type'].upper().startswith('SA'):
print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
# make sure the transaction isn't already there before entering.
try:
SkedA.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
print "Already present! %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
except SkedA.DoesNotExist:
process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)
elif linedict['form_type'].upper().startswith('SB'):
print "\n\n\nfiling %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
# make sure the transaction isn't already there before entering.
try:
SkedB.objects.get(filing_number=filingnum, transaction_id=linedict['transaction_id'])
print "Already present! %s form is %s transaction_id is: %s" % (filingnum, linedict['form_type'], linedict['transaction_id'])
except SkedB.DoesNotExist:
process_body_row(linedict, filingnum, header_id, is_amended, cd, filer_id)
except ParserMissingError:
msg = 'process_filing_body: Unknown line type in filing %s line %s: type=%s Skipping.' % (filingnum, linenum, row[0])
logger.warn(msg)
continue
except __HOLE__:
"missing form type? in filing %s" % (filingnum)
# commit all the leftovers
cd.commit_all()
cd.close()
counter = cd.get_counter()
total_rows = 0
for i in counter:
total_rows += counter[i]
msg = "process_filing_body: Filing # %s Total rows: %s Tally is: %s" % (filingnum, total_rows, counter)
# print msg
logger.info(msg)
# don't commit during testing of fix
# this data has been moved here. At some point we should pick a single location for this data.
header_data = dict_to_hstore(counter)
cmd = "update fec_alerts_new_filing set lines_present='%s'::hstore where filing_number=%s" % (header_data, filingnum)
cursor.execute(cmd)
# mark file as having been entered.
cmd = "update fec_alerts_new_filing set data_is_processed = True where filing_number=%s" % (filingnum)
cursor.execute(cmd)
# flag this filer as one who has changed.
cmd = "update summary_data_committee_overlay set is_dirty=True where fec_id='%s'" % (filer_id)
cursor.execute(cmd)
# | KeyError | dataset/ETHPy150Open sunlightlabs/read_FEC/fecreader/formdata/utils/filing_body_processor_fix_hack.py/process_filing_body |
7,329 | def test_notifymail(self):
utils.subscribe(self.user2_settings, self.TEST_KEY)
utils.subscribe(self.user1_settings, self.TEST_KEY)
utils.notify("This notification goes out by mail!", self.TEST_KEY)
call_command("notifymail", cron=True)
# No un-mailed notifications can be left!
self.assertEqual(models.Notification.objects.filter(is_emailed=False).count(), 0)
# Test that calling it again works but nothing gets sent
call_command("notifymail", cron=True)
# Now try the daemon
pid_file = NamedTemporaryFile(delete=False)
# Close it so its available for the other command
pid_file.close()
try:
call_command('notifymail', daemon=True, pid=pid_file.name, no_sys_exit=False)
except __HOLE__:
# It's normal for this command to exit
pass
pid = open(pid_file.name, 'r').read()
os.unlink(pid_file.name)
# Give it a second to start
time.sleep(1)
os.kill(int(pid), signal.SIGTERM) | SystemExit | dataset/ETHPy150Open benjaoming/django-nyt/django_nyt/tests/test_management.py/CommandTest.test_notifymail |
7,330 | @classmethod
def detect_process(cls, headers):
    """Returns tuple of process, legacy or None, None if not process originating."""
    try:
        if 'Libprocess-From' in headers:
            return PID.from_string(headers['Libprocess-From']), False
        elif 'User-Agent' in headers and headers['User-Agent'].startswith('libprocess/'):
            return PID.from_string(headers['User-Agent'][len('libprocess/'):]), True
    except __HOLE__ as e:
        log.error('Failed to detect process: %r' % e)
        pass
    return None, None | ValueError | dataset/ETHPy150Open wickman/compactor/compactor/httpd.py/WireProtocolMessageHandler.detect_process |
7,331 | def convert_code_to_value(M_c, cidx, code):
    """
    For a column with categorical data, this function takes the 'code':
    the integer used to represent a specific value, and returns the corresponding
    raw value (e.g. 'Joe' or 234.23409), which is always encoded as a string.
    Note that the underlying store 'value_to_code' is unfortunately named backwards.
    TODO: fix the backwards naming.
    """
    if M_c['column_metadata'][cidx]['modeltype'] == 'normal_inverse_gamma':
        return float(code)
    else:
        try:
            return M_c['column_metadata'][cidx]['value_to_code'][int(code)]
        except __HOLE__:
            return M_c['column_metadata'][cidx]['value_to_code'][str(int(code))] | KeyError | dataset/ETHPy150Open probcomp/crosscat/src/utils/data_utils.py/convert_code_to_value |
7,332 | def get_can_cast_to_float(column_data):
    can_cast = True
    try:
        [float(datum) for datum in column_data]
    except __HOLE__ as e:
        can_cast = False
    return can_cast | ValueError | dataset/ETHPy150Open probcomp/crosscat/src/utils/data_utils.py/get_can_cast_to_float |
7,333 | def add(self, l, st, ptr):
    try:
        ctr = st[id(self)]
    except __HOLE__:
        ctr = st[id(self)] = 0
    if ctr >= self.a and (self.b is None or ctr <= self.b):
        st2 = st.copy()
        count = st2.pop(id(self)) # record loop exit statistics and reset counter
        try:
            st2[self.name] = st2[self.name] + (count,)
        except KeyError:
            st2[self.name] = (count,)
        self.out.add(l, st2, ptr)
    ctr += 1
    st[id(self)] = ctr
    if self.b is None or ctr <= self.b:
        self.out2.add(l, st, ptr) | KeyError | dataset/ETHPy150Open blackberry/ALF/alf/fuzz/grammr2_crack.py/_rstate.add |
7,334 | def _update(self, **data):
# at the moment, the timestamps seem to be naive so they have no time zone and operate on UTC time.
# we can use this to our advantage to use strptime instead of a complicated parsing routine.
# example timestamp: 2015-08-21T12:03:45.782000+00:00
# sometimes the .%f modifier is missing
self.edited_timestamp = utils.parse_time(data.get('edited_timestamp'))
self.timestamp = utils.parse_time(data.get('timestamp'))
self.tts = data.get('tts')
self.content = data.get('content')
self.mention_everyone = data.get('mention_everyone')
self.embeds = data.get('embeds')
self.id = data.get('id')
self.channel = data.get('channel')
self.author = User(**data.get('author', {}))
self.nonce = data.get('nonce')
self.attachments = data.get('attachments')
self._handle_upgrades(data.get('channel_id'))
self._handle_mentions(data.get('mentions', []), data.get('mention_roles', []))
# clear the cached properties
cached = filter(lambda attr: attr[0] == '_', self.__slots__)
for attr in cached:
try:
delattr(self, attr)
except __HOLE__:
pass | AttributeError | dataset/ETHPy150Open Rapptz/discord.py/discord/message.py/Message._update |
7,335 | def run_and_read(view, cmd):
    out, err = subprocess.Popen(['cmd.exe', '/c', cmd],
                                stdout=PIPE,
                                stderr=PIPE,
                                shell=True,
                                startupinfo=get_startup_info()).communicate()
    try:
        return (out or err).decode(get_oem_cp()).replace('\r\n', '\n')
    except __HOLE__:
        return '' | AttributeError | dataset/ETHPy150Open guillermooo/Vintageous/ex/plat/windows.py/run_and_read |
7,336 | def _scrub(self, path):
"""Remove all tags from a file.
"""
for cls in self._mutagen_classes():
# Try opening the file with this type, but just skip in the
# event of any error.
try:
f = cls(util.syspath(path))
except Exception:
continue
if f.tags is None:
continue
# Remove the tag for this type.
try:
f.delete()
except NotImplementedError:
# Some Mutagen metadata subclasses (namely, ASFTag) do not
# support .delete(), presumably because it is impossible to
# remove them. In this case, we just remove all the tags.
for tag in f.keys():
del f[tag]
f.save()
except __HOLE__ as exc:
self._log.error(u'could not scrub {0}: {1}',
util.displayable_path(path), exc) | IOError | dataset/ETHPy150Open beetbox/beets/beetsplug/scrub.py/ScrubPlugin._scrub |
7,337 | def _scrub_item(self, item, restore=True):
"""Remove tags from an Item's associated file and, if `restore`
is enabled, write the database's tags back to the file.
"""
# Get album art if we need to restore it.
if restore:
try:
mf = mediafile.MediaFile(util.syspath(item.path),
config['id3v23'].get(bool))
except __HOLE__ as exc:
self._log.error(u'could not open file to scrub: {0}',
exc)
art = mf.art
# Remove all tags.
self._scrub(item.path)
# Restore tags, if enabled.
if restore:
self._log.debug(u'writing new tags after scrub')
item.try_write()
if art:
self._log.debug(u'restoring art')
mf = mediafile.MediaFile(util.syspath(item.path),
config['id3v23'].get(bool))
mf.art = art
mf.save() | IOError | dataset/ETHPy150Open beetbox/beets/beetsplug/scrub.py/ScrubPlugin._scrub_item |
7,338 | @cronjobs.register
def send_weekly_ready_for_review_digest():
"""Sends out the weekly "Ready for review" digest email."""
# If this is stage, do nothing.
if settings.STAGE:
return
@email_utils.safe_translation
def _send_mail(locale, user, context):
subject = _('[Reviews Pending: %s] SUMO needs your help!') % locale
mail = email_utils.make_mail(
subject=subject,
text_template='wiki/email/ready_for_review_weekly_digest.ltxt',
html_template='wiki/email/ready_for_review_weekly_digest.html',
context_vars=context,
from_email=settings.TIDINGS_FROM_ADDRESS,
to_email=user.email)
email_utils.send_messages([mail])
# Get the list of revisions ready for review
categories = (HOW_TO_CATEGORY, TROUBLESHOOTING_CATEGORY,
TEMPLATES_CATEGORY)
revs = Revision.objects.filter(reviewed=None, document__is_archived=False,
document__category__in=categories)
revs = revs.filter(Q(document__current_revision_id__lt=F('id')) |
Q(document__current_revision_id=None))
locales = revs.values_list('document__locale', flat=True).distinct()
products = Product.objects.all()
for l in locales:
docs = revs.filter(document__locale=l).values_list(
'document', flat=True).distinct()
docs = Document.objects.filter(id__in=docs)
try:
leaders = Locale.objects.get(locale=l).leaders.all()
reviewers = Locale.objects.get(locale=l).reviewers.all()
users = list(set(chain(leaders, reviewers)))
except __HOLE__:
# Locale does not exist, so skip to the next locale
continue
for u in users:
docs_list = []
for p in products:
product_docs = docs.filter(Q(parent=None, products__in=[p]) |
Q(parent__products__in=[p]))
if product_docs:
docs_list.append(dict(
product=pgettext('DB: products.Product.title', p.title),
docs=product_docs))
product_docs = docs.filter(Q(parent=None, products=None) |
Q(parent__products=None))
if product_docs:
docs_list.append(dict(product=_('Other products'),
docs=product_docs))
_send_mail(l, u, {
'host': Site.objects.get_current().domain,
'locale': l,
'recipient': u,
'docs_list': docs_list,
'products': products
})
statsd.incr('wiki.cron.weekly-digest-mail') | ObjectDoesNotExist | dataset/ETHPy150Open mozilla/kitsune/kitsune/wiki/cron.py/send_weekly_ready_for_review_digest |
7,339 | def testNaming4(self):
    exc_raised = False
    try:
        values = ftest(1, c=2)
    except __HOLE__, t:
        exc_raised = True
    self.assertTrue(exc_raised, "TypeError 'c' unexpected arg not raised") | TypeError | dataset/ETHPy150Open pyjs/pyjs/examples/libtest/ArgsTest.py/ArgsTest.testNaming4 |
7,340 | def testNaming5(self):
    exc_raised = False
    try:
        values = ftest()
    except __HOLE__, t:
        exc_raised = True
    self.assertTrue(exc_raised, "TypeError 'ftest() takes exactly 2 arguments (0 given)' not raised") | TypeError | dataset/ETHPy150Open pyjs/pyjs/examples/libtest/ArgsTest.py/ArgsTest.testNaming5 |
7,341 | def testStarArgs(self):
args = (1,2)
res = aArgs(*args)
self.assertEquals(args, res)
args = "123"
try:
res = aArgs(*args)
called = True
exc = None
except TypeError, e:
called = False
exc = e
# weird one: a string is a sequence, so it gets away with being
# called on its own as *args! eeugh.
self.assertTrue(called,
"exception not expected but function called:" + repr(res) + repr(exc))
self.assertEquals(res, ("1", "2", "3"))
args = 1
try:
res = aArgs(*args)
called = True
except __HOLE__:
called = False
self.assertFalse(called,
"exception expected but not raised - TypeError: aArgs() argument after * must be a sequence")
args = (1,)
res = aArgs(*args)
self.assertEquals(args, res)
args = (1,)
res = aArgs(args)
self.assertEquals((args,), res) | TypeError | dataset/ETHPy150Open pyjs/pyjs/examples/libtest/ArgsTest.py/ArgsTest.testStarArgs |
7,342 | def testKwArgsRecurse(self):
kwa = kw_args(x=5, y=6)
if kwa:
self.assertEquals(kwa.get('x'), 5)
self.assertEquals(kwa.get('y'), 6)
kwa = kw_args2(x=5, y=6)
if kwa:
self.assertEquals(kwa.get('x'), 5)
self.assertEquals(kwa.get('y'), 6)
values = varargs_kwargs(1,2,3,4,c=3)
self.assertEquals(values[0], 1)
self.assertEquals(values[1], 2)
self.assertEquals(values[2], (3,4))
self.assertEquals(values[3]['c'], 3)
values = varargs_kwargs2(1,2,3,4,c=3)
self.assertEquals(values[0], 1)
self.assertEquals(values[1], 2)
self.assertEquals(values[2], (3,4))
self.assertEquals(values[3]['c'], 3)
values = varargs_kwargs2(1)
self.assertEquals(values[0], 1)
self.assertEquals(values[1], 3)
values = varargs_kwargs2(1, {'a':1}, {})
self.assertEquals(values[0], 1)
self.assertEquals(values[1]['a'], 1)
values = varargs_kwargs2(1, {'a':1})
self.assertEquals(values[0], 1)
try:
self.assertEquals(values[1], {'a':1})
except __HOLE__, e:
self.fail("Last arg in *args,**kwargs is dict problem") | TypeError | dataset/ETHPy150Open pyjs/pyjs/examples/libtest/ArgsTest.py/ArgsTest.testKwArgsRecurse |
7,343 | def testGetattr(self):
instance = ArgsTestClass()
foo = instance.foo
values = foo(1, 2, 3)
self.assertEquals(values[0], 1)
self.assertEquals(values[1], 2)
self.assertEquals(values[2], 3)
values = foo(*(1, 2, 3))
self.assertEquals(values[0], 1)
self.assertEquals(values[1], 2)
self.assertEquals(values[2], 3)
try:
values = foo(*(1, 2), **dict(c=3))
self.assertEquals(values[0], 1)
self.assertEquals(values[1], 2)
self.assertEquals(values[2], 3)
except __HOLE__:
self.fail('foo() takes exactly 4 arguments (5 given), bug #503') | TypeError | dataset/ETHPy150Open pyjs/pyjs/examples/libtest/ArgsTest.py/ArgsTest.testGetattr |
7,344 | @classmethod
def _create(cls, *args, **kwargs):
branched_from = kwargs.get('branched_from')
initiator = kwargs.get('initiator')
registration_schema = kwargs.get('registration_schema')
registration_metadata = kwargs.get('registration_metadata')
if not branched_from:
project_params = {}
if initiator:
project_params['creator'] = initiator
branched_from = ProjectFactory(**project_params)
initiator = branched_from.creator
try:
registration_schema = registration_schema or MetaSchema.find()[0]
except __HOLE__:
ensure_schemas()
registration_metadata = registration_metadata or {}
draft = DraftRegistration.create_from_node(
branched_from,
user=initiator,
schema=registration_schema,
data=registration_metadata,
)
return draft | IndexError | dataset/ETHPy150Open CenterForOpenScience/osf.io/tests/factories.py/DraftRegistrationFactory._create |
7,345 | def ReadManifest(jar_file_name):
    """Read and parse the manifest out of the given jar.
    Args:
        jar_file_name: the name of the jar from which the manifest is to be read.
    Returns:
        A parsed Manifest object, or None if the jar has no manifest.
    Raises:
        IOError: if the jar does not exist or cannot be read.
    """
    with zipfile.ZipFile(jar_file_name) as jar:
        try:
            manifest_string = jar.read(_MANIFEST_NAME)
        except __HOLE__:
            return None
        return _ParseManifest(manifest_string, jar_file_name) | KeyError | dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/jarfile.py/ReadManifest |
7,346 | def _ParseManifestSection(section, jar_file_name):
    """Parse a dict out of the given manifest section string.
    Args:
        section: a str or unicode that is the manifest section. It looks something
            like this (without the >):
            > Name: section-name
            > Some-Attribute: some value
            > Another-Attribute: another value
        jar_file_name: a str that is the path of the jar, for use in exception
            messages.
    Returns:
        A dict where the keys are the attributes (here, 'Name', 'Some-Attribute',
        'Another-Attribute'), and the values are the corresponding attribute values.
    Raises:
        InvalidJarError: if the manifest section is not well-formed.
    """
    section = section.replace('\n ', '').rstrip('\n')
    if not section:
        return {}
    try:
        return dict(line.split(': ', 1) for line in section.split('\n'))
    except __HOLE__:
        raise InvalidJarError('%s: Invalid manifest %r' % (jar_file_name, section)) | ValueError | dataset/ETHPy150Open GoogleCloudPlatform/python-compat-runtime/appengine-compat/exported_appengine_sdk/google/appengine/tools/jarfile.py/_ParseManifestSection |
7,347 | def extractor(self, fname):
    fname = os.path.abspath(fname)
    outfile = os.path.splitext(fname)[0]
    try:
        fpout = open(outfile, "wb")
        gz = gzip.GzipFile(fname, "rb")
        while True:
            data = gz.read(self.BLOCK_SIZE)
            if data:
                fpout.write(data)
            else:
                break
        gz.close()
        fpout.close()
    except __HOLE__ as e:
        raise e
    except Exception as e:
        return False
    return True | KeyboardInterrupt | dataset/ETHPy150Open devttys0/binwalk/src/binwalk/plugins/gzipextract.py/GzipExtractPlugin.extractor |
7,348 | @staticmethod
def validate (obj):
    import foam.events
    ev = foam.events.Event
    try:
        getattr(ev, obj)
        return True
    except __HOLE__:
        return False | AttributeError | dataset/ETHPy150Open fp7-ofelia/ocf/ofam/src/src/foam/types/trigger_type.py/TriggerType.validate |
7,349 | def run():
    """Thin wrapper for main() that catches KeyboardInterrupts."""
    try:
        main()
    except __HOLE__:
        print("Stopped by user.") | KeyboardInterrupt | dataset/ETHPy150Open earwig/git-repo-updater/gitup/script.py/run |
7,350 | def snake_case_dict(_dict):
    raw_dict = _dict.copy()
    result = {}
    try:
        while 1:
            key, value = raw_dict.popitem()
            result[snake_case(key)] = value
    except __HOLE__:
        return result | KeyError | dataset/ETHPy150Open puentesarrin/asyncflux/asyncflux/util.py/snake_case_dict |
7,351 | def test_long_integers(self):
if 12L + 24L != 36L: self.fail('long op')
if 12L + (-24L) != -12L: self.fail('long op')
if (-12L) + 24L != 12L: self.fail('long op')
if (-12L) + (-24L) != -36L: self.fail('long op')
if not 12L < 24L: self.fail('long op')
if not -24L < -12L: self.fail('long op')
x = sys.maxint
if int(long(x)) != x: self.fail('long op')
try: y = int(long(x)+1L)
except OverflowError: self.fail('long op')
if not isinstance(y, long): self.fail('long op')
x = -x
if int(long(x)) != x: self.fail('long op')
x = x-1
if int(long(x)) != x: self.fail('long op')
try: y = int(long(x)-1L)
except OverflowError: self.fail('long op')
if not isinstance(y, long): self.fail('long op')
try: 5 << -5
except ValueError: pass
else: self.fail('int negative shift <<')
try: 5L << -5L
except ValueError: pass
else: self.fail('long negative shift <<')
try: 5 >> -5
except ValueError: pass
else: self.fail('int negative shift >>')
try: 5L >> -5L
except __HOLE__: pass
else: self.fail('long negative shift >>') | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_types.py/TypesTests.test_long_integers |
7,352 | @unittest.skipIf(is_jython, "No buffer on Jython")
def test_buffers(self):
self.assertRaises(ValueError, buffer, 'asdf', -1)
cmp(buffer("abc"), buffer("def")) # used to raise a warning: tp_compare didn't return -1, 0, or 1
self.assertRaises(TypeError, buffer, None)
a = buffer('asdf')
hash(a)
b = a * 5
if a == b:
self.fail('buffers should not be equal')
if str(b) != ('asdf' * 5):
self.fail('repeated buffer has wrong content')
if str(a * 0) != '':
self.fail('repeated buffer zero times has wrong content')
if str(a + buffer('def')) != 'asdfdef':
self.fail('concatenation of buffers yields wrong content')
if str(buffer(a)) != 'asdf':
self.fail('composing buffers failed')
if str(buffer(a, 2)) != 'df':
self.fail('specifying buffer offset failed')
if str(buffer(a, 0, 2)) != 'as':
self.fail('specifying buffer size failed')
if str(buffer(a, 1, 2)) != 'sd':
self.fail('specifying buffer offset and size failed')
self.assertRaises(ValueError, buffer, buffer('asdf', 1), -1)
if str(buffer(buffer('asdf', 0, 2), 0)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 0, 5000)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 0, -1)) != 'as':
self.fail('composing length-specified buffer failed')
if str(buffer(buffer('asdf', 0, 2), 1, 2)) != 's':
self.fail('composing length-specified buffer failed')
try: a[1] = 'g'
except TypeError: pass
else: self.fail("buffer assignment should raise TypeError")
try: a[0:1] = 'g'
except TypeError: pass
else: self.fail("buffer slice assignment should raise TypeError")
# array.array() returns an object that does not implement a char buffer,
# something which int() uses for conversion.
import array
try: int(buffer(array.array('c')))
except __HOLE__: pass
else: self.fail("char buffer (at C level) not working") | TypeError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_types.py/TypesTests.test_buffers |
7,353 | def __eq__(self, o):
    try:
        return self.id == o.id and self.cls == o.cls and self.attrs == o.attrs
    except __HOLE__:
        return False | AttributeError | dataset/ETHPy150Open skyostil/tracy/src/analyzer/Trace.py/Object.__eq__ |
7,354 | @celery_app.task(name='scripts.refresh_box_tokens')
def run_main(days=None, dry_run=True):
    init_app(set_backends=True, routes=False)
    try:
        days = int(days)
    except (ValueError, __HOLE__):
        days = 60 - 7 # refresh tokens that expire this week
    delta = relativedelta(days=days)
    if not dry_run:
        scripts_utils.add_file_logger(logger, __file__)
    main(delta, dry_run=dry_run) | TypeError | dataset/ETHPy150Open CenterForOpenScience/osf.io/scripts/refresh_box_tokens.py/run_main |
7,355 | def verify_node_settings_document(document, provider):
    try:
        assert('_id' in document)
        assert('{}_list_id'.format(provider) in document)
    except __HOLE__:
        return False
    return True | AssertionError | dataset/ETHPy150Open CenterForOpenScience/osf.io/scripts/migration/migrate_citation_addons_list_id.py/verify_node_settings_document |
7,356 | def _executeQuery(self, command, args=None):
"""execute the provided command on the database.
args are specified as a dictionary. error checking on the
results is performed here and the returned value is a list of
lists with each list representing a row of the returned table.
"""
try:
argument_string = "&".join(["%s=%s" % (x, args[x]) for x in args])
command_string = "&".join([command, argument_string])
except __HOLE__:
command_string = command
query = "%s?cmd=%s" % (self.url, command_string)
if len(query) > 2048:
if "gene_list" in args:
genes = args['gene_list'].split(",")
if len(genes) < 2:
raise ValueError("Request too long")
args['gene_list'] = ",".join(genes[(len(genes) / 2):])
query1 = self._executeQuery(command, args)
warnings = self.last_warnings
args['gene_list'] = ",".join(genes[:(len(genes) / 2)])
query2 = self._executeQuery(command, args)
self.last_warnings = warnings + self.last_warnings
return query1 + query2
data = urllib2.urlopen(query)
line = data.readline()
self.last_query = query
self.last_status = line
self.last_warnings = []
self.last_header = [self.last_status]
return_table = []
while re.match("^#", line):
if re.match("^# Warning: (.+)", line):
self.last_warnings.append(
re.match("^# Warning: (.+)", line).groups(1)[0])
self.last_header.append(line)
line = data.readline()
continue
elif re.match("^#", line):
self.last_header.append(line)
line = data.readline()
continue
if re.match("^Error: (.+)", line):
self.last_header.append(line)
raise CDGSError(re.match("^Error: (.+)", line).groups(1)[0], query)
line = line.strip()
headers = line.split("\t")
for line in data:
if re.match("^# Warning: (.+)", line):
self.last_warnings.append(
re.match("^# Warning: (.+)", line).groups(1)[0])
self.last_header.append(line)
line = data.readline()
continue
line = line.strip()
return_table.append(odict(zip(headers, line.split("\t"))))
return return_table | TypeError | dataset/ETHPy150Open CGATOxford/cgat/CGAT/CBioPortal.py/CBioPortal._executeQuery |
7,357 | def _RetrieveURL(self, url, payload, method, headers, request, response,
follow_redirects=True, deadline=_API_CALL_DEADLINE):
"""Retrieves a URL.
Args:
url: String containing the URL to access.
payload: Request payload to send, if any; None if no payload.
method: HTTP method to use (e.g., 'GET')
headers: List of additional header objects to use for the request.
request: Request object from original request.
response: Response object to populate with the response data.
follow_redirects: optional setting (defaulting to True) for whether or not
we should transparently follow redirects (up to MAX_REDIRECTS)
deadline: Number of seconds to wait for the urlfetch to finish.
Raises:
Raises an apiproxy_errors.ApplicationError exception with FETCH_ERROR
in cases where:
- MAX_REDIRECTS is exceeded
- The protocol of the redirected URL is bad or missing.
"""
last_protocol = ''
last_host = ''
for redirect_number in xrange(MAX_REDIRECTS + 1):
parsed = urlparse.urlparse(url)
protocol, host, path, parameters, query, fragment = parsed
port = urllib.splitport(urllib.splituser(host)[1])[1]
if port not in PORTS_ALLOWED_IN_PRODUCTION:
logging.warning(
'urlfetch received %s ; port %s is not allowed in production!' %
(url, port))
if protocol and not host:
logging.error('Missing host on redirect; target url is %s' % url)
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR)
if not host and not protocol:
host = last_host
protocol = last_protocol
adjusted_headers = {
'User-Agent':
'AppEngine-Google; (+http://code.google.com/appengine)',
'Host': host,
'Accept-Encoding': 'gzip',
}
if payload is not None:
adjusted_headers['Content-Length'] = len(payload)
if method == 'POST' and payload:
adjusted_headers['Content-Type'] = 'application/x-www-form-urlencoded'
for header in headers:
if header.key().title().lower() == 'user-agent':
adjusted_headers['User-Agent'] = (
'%s %s' %
(header.value(), adjusted_headers['User-Agent']))
else:
adjusted_headers[header.key().title()] = header.value()
logging.debug('Making HTTP request: host = %s, '
'url = %s, payload = %s, headers = %s',
host, url, payload, adjusted_headers)
try:
if protocol == 'http':
connection = httplib.HTTPConnection(host)
elif protocol == 'https':
connection = httplib.HTTPSConnection(host)
else:
error_msg = 'Redirect specified invalid protocol: "%s"' % protocol
logging.error(error_msg)
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, error_msg)
last_protocol = protocol
last_host = host
if query != '':
full_path = path + '?' + query
else:
full_path = path
orig_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(deadline)
connection.request(method, full_path, payload, adjusted_headers)
http_response = connection.getresponse()
if method == 'HEAD':
http_response_data = ''
else:
http_response_data = http_response.read()
finally:
socket.setdefaulttimeout(orig_timeout)
connection.close()
except (httplib.error, socket.error, __HOLE__), e:
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, str(e))
if http_response.status in REDIRECT_STATUSES and follow_redirects:
url = http_response.getheader('Location', None)
if url is None:
error_msg = 'Redirecting response was missing "Location" header'
logging.error(error_msg)
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, error_msg)
else:
response.set_statuscode(http_response.status)
if http_response.getheader('content-encoding') == 'gzip':
gzip_stream = StringIO.StringIO(http_response_data)
gzip_file = gzip.GzipFile(fileobj=gzip_stream)
http_response_data = gzip_file.read()
response.set_content(http_response_data[:MAX_RESPONSE_SIZE])
for header_key, header_value in http_response.getheaders():
if (header_key.lower() == 'content-encoding' and
header_value == 'gzip'):
continue
if header_key.lower() == 'content-length':
header_value = str(len(response.content()))
header_proto = response.add_header()
header_proto.set_key(header_key)
header_proto.set_value(header_value)
if len(http_response_data) > MAX_RESPONSE_SIZE:
response.set_contentwastruncated(True)
if request.url() != url:
response.set_finalurl(url)
break
else:
error_msg = 'Too many repeated redirects'
logging.error(error_msg)
raise apiproxy_errors.ApplicationError(
urlfetch_service_pb.URLFetchServiceError.FETCH_ERROR, error_msg) | IOError | dataset/ETHPy150Open CollabQ/CollabQ/.google_appengine/google/appengine/api/urlfetch_stub.py/URLFetchServiceStub._RetrieveURL |
7,358 | def from_map(self, schema, inobjs, newdb):
"""Initalize the dictionary of types by converting the input map
:param schema: schema owning the types
:param inobjs: YAML map defining the schema objects
:param newdb: collection of dictionaries defining the database
"""
for k in inobjs:
(objtype, spc, key) = k.partition(' ')
if spc != ' ' or not objtype in ['domain', 'type']:
raise KeyError("Unrecognized object type: %s" % k)
if objtype == 'domain':
self[(schema.name, key)] = domain = Domain(
schema=schema.name, name=key)
indomain = inobjs[k]
if not indomain:
raise ValueError("Domain '%s' has no specification" % k)
for attr, val in list(indomain.items()):
setattr(domain, attr, val)
if 'oldname' in indomain:
domain.oldname = indomain['oldname']
newdb.constraints.from_map(domain, indomain, 'd')
if 'description' in indomain:
domain.description = indomain['description']
elif objtype == 'type':
intype = inobjs[k]
if 'labels' in intype:
self[(schema.name, key)] = dtype = Enum(
schema=schema.name, name=key)
dtype.labels = intype['labels']
elif 'attributes' in intype:
self[(schema.name, key)] = dtype = Composite(
schema=schema.name, name=key)
try:
newdb.columns.from_map(dtype, intype['attributes'])
except __HOLE__ as exc:
exc.args = ("Type '%s' has no attributes" % key, )
raise
elif 'input' in intype:
self[(schema.name, key)] = dtype = BaseType(
schema=schema.name, name=key)
for attr, val in list(intype.items()):
setattr(dtype, attr, val)
if 'oldname' in intype:
dtype.oldname = intype['oldname']
if 'description' in intype:
dtype.description = intype['description']
else:
raise KeyError("Unrecognized object type: %s" % k) | KeyError | dataset/ETHPy150Open perseas/Pyrseas/pyrseas/dbobject/dbtype.py/TypeDict.from_map |
7,359 | def __getitem__(self, name):
    "Returns a BoundField with the given name."
    try:
        field = self.fields[name]
    except __HOLE__:
        raise KeyError('Key %r not found in Form' % name)
    return BoundField(self, field, name) | KeyError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/forms/forms.py/BaseForm.__getitem__ |
7,360 | def _clean_fields(self):
    for name, field in self.fields.items():
        # value_from_datadict() gets the data from the data dictionaries.
        # Each widget type knows how to retrieve its own data, because some
        # widgets split data over several HTML fields.
        value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
        try:
            if isinstance(field, FileField):
                initial = self.initial.get(name, field.initial)
                value = field.clean(value, initial)
            else:
                value = field.clean(value)
            self.cleaned_data[name] = value
            if hasattr(self, 'clean_%s' % name):
                value = getattr(self, 'clean_%s' % name)()
                self.cleaned_data[name] = value
        except __HOLE__, e:
            self._errors[name] = self.error_class(e.messages)
            if name in self.cleaned_data:
                del self.cleaned_data[name] | ValidationError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/forms/forms.py/BaseForm._clean_fields |
7,361 | def _clean_form(self):
    try:
        self.cleaned_data = self.clean()
    except __HOLE__, e:
        self._errors[NON_FIELD_ERRORS] = self.error_class(e.messages) | ValidationError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.2/django/forms/forms.py/BaseForm._clean_form |
7,362 | def mps2_set_board_image_file(self, disk, images_cfg_path, image0file_path, image_name='images.txt'):
""" This function will alter image cfg file
@details Main goal of this function is to change number of images to 1, comment all
existing image entries and append at the end of file new entry with test path
@return True when all steps were successful
"""
MBED_SDK_TEST_STAMP = 'test suite entry'
image_path = join(disk, images_cfg_path, image_name)
new_file_lines = [] # New configuration file lines (entries)
# Check each line of the image configuration file
try:
with open(image_path, 'r') as file:
for line in file:
if re.search('^TOTALIMAGES', line):
# Check number of total images, should be 1
new_file_lines.append(re.sub('^TOTALIMAGES:[\t ]*[\d]+', 'TOTALIMAGES: 1', line))
elif re.search('; - %s[\n\r]*$'% MBED_SDK_TEST_STAMP, line):
# Look for test suite entries and remove them
pass # Omit all test suite entries
elif re.search('^IMAGE[\d]+FILE', line):
# Check all image entries and mark the ';'
new_file_lines.append(';' + line) # Comment non test suite lines
else:
# Append line to new file
new_file_lines.append(line)
except __HOLE__ as e:
return False
# Add new image entry with proper commented stamp
new_file_lines.append('IMAGE0FILE: %s ; - %s\r\n'% (image0file_path, MBED_SDK_TEST_STAMP))
# Write all lines to file
try:
with open(image_path, 'w') as file:
for line in new_file_lines:
file.write(line),
except IOError:
return False
return True | IOError | dataset/ETHPy150Open ARMmbed/htrun/mbed_host_tests/host_tests_plugins/module_copy_mps2.py/HostTestPluginCopyMethod_MPS2.mps2_set_board_image_file |
7,363 | @must_be_valid_project # injects project
@must_have_permission('write')
@must_not_be_registration
def project_add_tag(auth, node, **kwargs):
    data = request.get_json()
    tag = data['tag']
    if tag:
        try:
            node.add_tag(tag=tag, auth=auth)
            return {'status': 'success'}, http.CREATED
        except __HOLE__:
            return {'status': 'error'}, http.BAD_REQUEST | ValidationError | dataset/ETHPy150Open CenterForOpenScience/osf.io/website/project/views/tag.py/project_add_tag |
7,364 | def __init__(self, imagePath):
    try:
        self.image = Image.open(imagePath)
    except __HOLE__:
        print 'Could not open image. Are you sure you entered the correct path?\n'
        sys.exit(-1)
    self.image = self.image.resize((_HEIGHT, _WIDTH),Image.BILINEAR)
    self.image = self.image.convert("L") # convert to mono | IOError | dataset/ETHPy150Open the-xkcd-community/the-red-spider-project/src/randomascii.py/AsciiGenerator.__init__ |
7,365 | def __str__(self):
asciiString = ''
for height in xrange(0, self.image.size[1]):
for width in xrange(0, self.image.size[0]):
lum = 255 - self.image.getpixel((width, height))
row = bisect(zonebounds, lum)
try:
possibles = greyscale[row]
except __HOLE__:
continue
asciiString = asciiString + possibles[random.randint(0, len(possibles) - 1)]
asciiString = asciiString + '\n'
return asciiString | IndexError | dataset/ETHPy150Open the-xkcd-community/the-red-spider-project/src/randomascii.py/AsciiGenerator.__str__ |
7,366 | def load_module(self, fullname):
if fullname in sys.modules:
return self
extname = fullname.split(self.prefix)[1]
module = self.find_module_for_extension(extname)
realname = module.__name__
try:
__import__(realname)
except __HOLE__:
raise sys.exc_info()
module = sys.modules[fullname] = sys.modules[realname]
if '.' not in extname:
setattr(sys.modules[self.extension_module], extname, module)
return module | ImportError | dataset/ETHPy150Open pecan/pecan/pecan/extensions.py/PecanExtensionImporter.load_module |
7,367 | @weblab_api.route_webclient('/locales.json')
def locales():
lang = request.args.get('lang')
if not lang:
# Default language is English
lang = 'en'
try:
babel.Locale.parse(lang)
except (babel.core.UnknownLocaleError, __HOLE__) as e:
# Avoid storing fake languages
return "Invalid language", 400
lang_contents = PER_LANG.get(lang)
if lang_contents is None:
#
# If the language has not been previously calculated, it is calculated as follows:
# - first, check the modified date of the locales.json and messages.mo file
# - then, generate the file using render_template
# - store it in the local cache, and then check the etag and so on
#
fname = data_filename('weblab/core/templates/webclient/locales.json')
try:
modification_time = os.stat(fname).st_mtime
last_modified = datetime.datetime.fromtimestamp(modification_time)
except Exception as e:
print("Could not calculate the time for %s" % fname)
traceback.print_exc()
last_modified = datetime.datetime.now()
messages_directory = data_filename('weblab/core/translations')
messages_file = data_filename('weblab/core/translations/{0}/LC_MESSAGES/messages.mo'.format(lang))
if os.path.exists(messages_file):
try:
messages_modification_time = os.stat(fname).st_mtime
messages_last_modified = datetime.datetime.fromtimestamp(messages_modification_time)
except Exception as e:
messages_last_modified = datetime.datetime.now()
last_modified = max(last_modified, messages_last_modified)
def ng_gettext(text):
"""Wrapper of gettext. It uses the messages_file to load particular translations (e.g. if 'es' is requested, it uses the translations for Spanish)."""
translation = babel.support.Translations.load(messages_directory, lang)
translated_text = translation.gettext(text)
return translated_text
contents = render_template('webclient/locales.json', ng_gettext=ng_gettext)
etag = hashlib.new('sha1', contents).hexdigest()
lang_contents = {
'last_modified' : last_modified.replace(microsecond=0),
'contents' : contents,
'etag' : etag,
}
PER_LANG[lang] = lang_contents
# At this point, lang_contents exists, and contains:
# last_modified: pointing at the latest point where the contents where modified
# contents: the string with the processed contents
# etag: with the hash of the contents
# First we check etag (and return 304 if the contents were not changed)
if request.if_none_match is not None and request.if_none_match.contains(lang_contents['etag']):
return Response(status=304)
# Then the date (and return 304 if the date is correct)
if request.if_modified_since is not None and request.if_modified_since >= lang_contents['last_modified']:
return Response(status=304)
# Otherwise, we create the response
response = Response(lang_contents['contents'])
response.mimetype = 'application/json'
response.last_modified = lang_contents['last_modified']
response.set_etag(lang_contents['etag'])
return response | ValueError | dataset/ETHPy150Open weblabdeusto/weblabdeusto/server/src/weblab/core/webclient/view_i18n.py/locales |
7,368 | @staticmethod
def are_concurrent(*lines):
"""Is a sequence of linear entities concurrent?
Two or more linear entities are concurrent if they all
intersect at a single point.
Parameters
==========
lines : a sequence of linear entities.
Returns
=======
True : if the set of linear entities are concurrent,
False : otherwise.
Notes
=====
Simply take the first two lines and find their intersection.
If there is no intersection, then the first two lines were
parallel and had no intersection so concurrency is impossible
amongst the whole set. Otherwise, check to see if the
intersection point of the first two lines is a member on
the rest of the lines. If so, the lines are concurrent.
See Also
========
sympy.geometry.util.intersection
Examples
========
>>> from sympy import Point, Line, Line3D
>>> p1, p2 = Point(0, 0), Point(3, 5)
>>> p3, p4 = Point(-2, -2), Point(0, 2)
>>> l1, l2, l3 = Line(p1, p2), Line(p1, p3), Line(p1, p4)
>>> Line.are_concurrent(l1, l2, l3)
True
>>> l4 = Line(p2, p3)
>>> Line.are_concurrent(l2, l3, l4)
False
"""
# Concurrency requires intersection at a single point; One linear
# entity cannot be concurrent.
if len(lines) <= 1:
return False
try:
# Get the intersection (if parallel)
p = lines[0].intersection(lines[1])
if len(p) == 0:
return False
# Make sure the intersection is on every linear entity
for line in lines[2:]:
if p[0] not in line:
return False
return True
except __HOLE__:
return False | AttributeError | dataset/ETHPy150Open sympy/sympy/sympy/geometry/line.py/LinearEntity.are_concurrent |
7,369 | def is_parallel(l1, l2):
"""Are two linear entities parallel?
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
True : if l1 and l2 are parallel,
False : otherwise.
See Also
========
coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2 = Point(0, 0), Point(1, 1)
>>> p3, p4 = Point(3, 4), Point(6, 7)
>>> l1, l2 = Line(p1, p2), Line(p3, p4)
>>> Line.is_parallel(l1, l2)
True
>>> p5 = Point(6, 6)
>>> l3 = Line(p3, p5)
>>> Line.is_parallel(l1, l3)
False
"""
try:
a1, b1, c1 = l1.coefficients
a2, b2, c2 = l2.coefficients
return bool(simplify(a1*b2 - b1*a2) == 0)
except __HOLE__:
return False | AttributeError | dataset/ETHPy150Open sympy/sympy/sympy/geometry/line.py/LinearEntity.is_parallel |
7,370 | def is_perpendicular(l1, l2):
"""Are two linear entities perpendicular?
Parameters
==========
l1 : LinearEntity
l2 : LinearEntity
Returns
=======
True : if l1 and l2 are perpendicular,
False : otherwise.
See Also
========
coefficients
Examples
========
>>> from sympy import Point, Line
>>> p1, p2, p3 = Point(0, 0), Point(1, 1), Point(-1, 1)
>>> l1, l2 = Line(p1, p2), Line(p1, p3)
>>> l1.is_perpendicular(l2)
True
>>> p4 = Point(5, 3)
>>> l3 = Line(p1, p4)
>>> l1.is_perpendicular(l3)
False
"""
try:
a1, b1, c1 = l1.coefficients
a2, b2, c2 = l2.coefficients
return bool(simplify(a1*a2 + b1*b2) == 0)
except __HOLE__:
return False | AttributeError | dataset/ETHPy150Open sympy/sympy/sympy/geometry/line.py/LinearEntity.is_perpendicular |
7,371 | def __new__(cls, p1, pt=None, slope=None, **kwargs):
if isinstance(p1, LinearEntity):
p1, pt = p1.args
else:
p1 = Point(p1)
if pt is not None and slope is None:
try:
p2 = Point(pt)
except __HOLE__:
raise ValueError('The 2nd argument was not a valid Point. '
'If it was a slope, enter it with keyword "slope".')
elif slope is not None and pt is None:
slope = sympify(slope)
if slope.is_finite is False:
# when infinite slope, don't change x
dx = 0
dy = 1
else:
# go over 1 up slope
dx = 1
dy = slope
# XXX avoiding simplification by adding to coords directly
p2 = Point(p1.x + dx, p1.y + dy)
else:
raise ValueError('A 2nd Point or keyword "slope" must be used.')
return LinearEntity.__new__(cls, p1, p2, **kwargs) | NotImplementedError | dataset/ETHPy150Open sympy/sympy/sympy/geometry/line.py/Line.__new__ |
7,372 | def __new__(cls, p1, pt=None, angle=None, **kwargs):
p1 = Point(p1)
if pt is not None and angle is None:
try:
p2 = Point(pt)
except __HOLE__:
from sympy.utilities.misc import filldedent
raise ValueError(filldedent('''
The 2nd argument was not a valid Point; if
it was meant to be an angle it should be
given with keyword "angle".'''))
if p1 == p2:
raise ValueError('A Ray requires two distinct points.')
elif angle is not None and pt is None:
# we need to know if the angle is an odd multiple of pi/2
c = pi_coeff(sympify(angle))
p2 = None
if c is not None:
if c.is_Rational:
if c.q == 2:
if c.p == 1:
p2 = p1 + Point(0, 1)
elif c.p == 3:
p2 = p1 + Point(0, -1)
elif c.q == 1:
if c.p == 0:
p2 = p1 + Point(1, 0)
elif c.p == 1:
p2 = p1 + Point(-1, 0)
if p2 is None:
c *= S.Pi
else:
c = angle % (2*S.Pi)
if not p2:
m = 2*c/S.Pi
left = And(1 < m, m < 3) # is it in quadrant 2 or 3?
x = Piecewise((-1, left), (Piecewise((0, Eq(m % 1, 0)), (1, True)), True))
y = Piecewise((-tan(c), left), (Piecewise((1, Eq(m, 1)), (-1, Eq(m, 3)), (tan(c), True)), True))
p2 = p1 + Point(x, y)
else:
raise ValueError('A 2nd point or keyword "angle" must be used.')
return LinearEntity.__new__(cls, p1, p2, **kwargs) | NotImplementedError | dataset/ETHPy150Open sympy/sympy/sympy/geometry/line.py/Ray.__new__ |
7,373 | @description(valediction="OK!")
def _choose_new_hosts(self):
def get_connection_name(conn, number):
name = self._get_sa_connection_name(conn)
return '%s (#%d)' % (name, number)
def get_connections_names():
return (', '.join(get_connection_name(c, i) for i, c in enumerate(self._sa_connections))
or 'There are no more connections!')
if not self._sa_connections:
self._logger.log("There are no new connections on ServerAuditor's servers.")
self._valediction()
sys.exit(0)
self._logger.log("The following new hosts have been founded on ServerAuditor's servers:", sleep=0)
self._logger.log(get_connections_names(), color='blue')
prompt = "You may confirm this list (press 'Enter') or remove host (enter its number): "
while len(self._sa_connections):
number = p_input(prompt).strip()
if number == '':
break
try:
number = int(number)
if number >= len(self._sa_connections) or number < 0:
raise IndexError
except (__HOLE__, IndexError):
self._logger.log("Incorrect index!", color='red', file=sys.stderr)
else:
self._sa_connections.pop(number)
self._logger.log(get_connections_names(), color='blue')
if not self._sa_connections:
self._valediction()
sys.exit(0)
return | ValueError | dataset/ETHPy150Open Crystalnix/serverauditor-sshconfig/serverauditor_sshconfig/sa_import.py/ImportSSHConfigApplication._choose_new_hosts |
7,374 | def main():
app = ImportSSHConfigApplication(api=API(), ssh_config=SSHConfig(), cryptor=RNCryptor(), logger=PrettyLogger())
try:
app.run()
except (__HOLE__, EOFError):
sys.exit(1)
return | KeyboardInterrupt | dataset/ETHPy150Open Crystalnix/serverauditor-sshconfig/serverauditor_sshconfig/sa_import.py/main |
7,375 | @error_handler
def execute_query(request, design_id=None):
response = {'status': -1, 'message': ''}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
app_name = get_app_name(request)
query_type = beeswax_models.SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
try:
form = get_query_form(request, design_id)
if form.is_valid():
design = save_design(request, SaveForm(), form, query_type, design)
query = SQLdesign(form, query_type=query_type)
query_server = dbms.get_query_server_config(request.POST.get('server'))
db = dbms.get(request.user, query_server)
query_history = db.execute_query(query, design)
query_history.last_state = beeswax_models.QueryHistory.STATE.expired.index
query_history.save()
try:
db.use(form.cleaned_data['database'])
datatable = db.execute_and_wait(query)
results = db.client.create_result(datatable)
response['status'] = 0
response['results'] = results_to_dict(results)
response['design'] = design.id
except Exception, e:
response['status'] = -1
response['message'] = str(e)
else:
response['message'] = _('There was an error with your query.')
response['errors'] = form.errors
except __HOLE__, e:
response['message']= str(e)
return JsonResponse(response, encoder=ResultEncoder) | RuntimeError | dataset/ETHPy150Open cloudera/hue/apps/rdbms/src/rdbms/api.py/execute_query |
7,376 | @error_handler
def explain_query(request):
response = {'status': -1, 'message': ''}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
app_name = get_app_name(request)
query_type = beeswax_models.SavedQuery.TYPES_MAPPING[app_name]
try:
form = get_query_form(request)
if form.is_valid():
query = SQLdesign(form, query_type=query_type)
query_server = dbms.get_query_server_config(request.POST.get('server'))
db = dbms.get(request.user, query_server)
try:
db.use(form.cleaned_data['database'])
datatable = db.explain(query)
results = db.client.create_result(datatable)
response['status'] = 0
response['results'] = results_to_dict(results)
except Exception, e:
response['status'] = -1
response['message'] = str(e)
else:
response['message'] = _('There was an error with your query.')
response['errors'] = form.errors
except __HOLE__, e:
response['message']= str(e)
return JsonResponse(response) | RuntimeError | dataset/ETHPy150Open cloudera/hue/apps/rdbms/src/rdbms/api.py/explain_query |
7,377 | @error_handler
def save_query(request, design_id=None):
response = {'status': -1, 'message': ''}
if request.method != 'POST':
response['message'] = _('A POST request is required.')
app_name = get_app_name(request)
query_type = beeswax_models.SavedQuery.TYPES_MAPPING[app_name]
design = safe_get_design(request, query_type, design_id)
try:
save_form = SaveForm(request.POST.copy())
query_form = get_query_form(request, design_id)
if query_form.is_valid() and save_form.is_valid():
design = save_design(request, save_form, query_form, query_type, design, True)
response['design_id'] = design.id
response['status'] = 0
else:
response['errors'] = query_form.errors
except __HOLE__, e:
response['message'] = str(e)
return JsonResponse(response) | RuntimeError | dataset/ETHPy150Open cloudera/hue/apps/rdbms/src/rdbms/api.py/save_query |
7,378 | def application_json(self):
'''Handler for application/json media-type'''
self.decode_body()
try:
pybody = json.loads(self.body)
except __HOLE__:
pybody = self.body
return pybody | ValueError | dataset/ETHPy150Open jpaugh/agithub/agithub/base.py/ResponseBody.application_json |
7,379 | @classmethod
def OpenFileSystem(cls, path_spec, resolver_context=None):
"""Opens a file system object defined by path specification.
Args:
path_spec: the path specification (instance of PathSpec).
resolver_context: the optional resolver context (instance of
resolver.Context). The default is None which will use
the built in context which is not multi process safe.
Returns:
The file system object (instance of vfs.FileSystem) or None if the path
specification could not be resolved or has no file system object.
Raises:
AccessError: if the access to open the file system was denied.
BackEndError: if the file system cannot be opened.
KeyError: if resolver helper object is not set for the corresponding
type indicator.
PathSpecError: if the path specification is incorrect.
"""
if resolver_context is None:
resolver_context = cls._resolver_context
if path_spec.type_indicator == definitions.TYPE_INDICATOR_MOUNT:
if path_spec.HasParent():
raise errors.PathSpecError(
u'Unsupported mount path specification with parent.')
mount_point = getattr(path_spec, u'identifier', None)
if not mount_point:
raise errors.PathSpecError(
u'Unsupported path specification without mount point identifier.')
path_spec = mount_manager.MountPointManager.GetMountPoint(mount_point)
if not path_spec:
raise errors.MountPointError(
u'No such mount point: {0:s}'.format(mount_point))
file_system = resolver_context.GetFileSystem(path_spec)
if not file_system:
if path_spec.type_indicator not in cls._resolver_helpers:
raise KeyError((
u'Resolver helper object not set for type indicator: '
u'{0:s}.').format(path_spec.type_indicator))
resolver_helper = cls._resolver_helpers[path_spec.type_indicator]
file_system = resolver_helper.NewFileSystem(resolver_context)
try:
file_system.Open(path_spec)
except (errors.AccessError, errors.PathSpecError):
raise
except (IOError, __HOLE__) as exception:
raise errors.BackEndError(
u'Unable to open file system with error: {0:s}'.format(exception))
return file_system | ValueError | dataset/ETHPy150Open log2timeline/dfvfs/dfvfs/resolver/resolver.py/Resolver.OpenFileSystem |
7,380 | def _osUrandom(self, nbytes):
"""
Wrapper around C{os.urandom} that cleanly manage its absence.
"""
try:
return os.urandom(nbytes)
except (AttributeError, __HOLE__), e:
raise SourceNotAvailable(e) | NotImplementedError | dataset/ETHPy150Open nlloyd/SubliminalCollaborator/libs/twisted/python/randbytes.py/RandomFactory._osUrandom |
7,381 | def _match(self, pattern, string):
"""Same as :func:`re.match`, except the regex is compiled and cached,
then reused on subsequent matches with the same pattern."""
try:
compiled = self._compiled[pattern]
except __HOLE__:
compiled = self._compiled[pattern] = re.compile(pattern, re.U)
return compiled.match(string) | KeyError | dataset/ETHPy150Open celery/kombu/kombu/transport/virtual/exchange.py/TopicExchange._match |
7,382 | def get_controller(equipment, logfile=None):
try:
del os.environ["TERM"]
except __HOLE__:
pass
ssh = sshlib.get_ssh(equipment["hostname"], equipment["user"],
equipment["password"], prompt=equipment["prompt"],
logfile=logfile)
ctor = ShellController(ssh)
ctor.host = equipment["hostname"]
ctor.user = equipment["user"]
ctor.password = equipment["password"]
return ctor | KeyError | dataset/ETHPy150Open kdart/pycopia/QA/pycopia/QA/ssh_controller.py/get_controller |
7,383 | def get_next_sibling(self):
if hasattr(self, '_parent'):
if self._parent:
siblings = list(self._parent.get_children())
else:
siblings = [self]
try:
return siblings[siblings.index(self) + 1]
except __HOLE__:
return None
return super(Node, self).get_next_sibling() | IndexError | dataset/ETHPy150Open fusionbox/django-widgy/widgy/models/base.py/Node.get_next_sibling |
7,384 | @staticmethod
def fetch_content_instances(nodes):
"""
Given a list of nodes, efficiently get all of their content instances.
The structure returned looks like this::
{
content_type_id: {
content_id: content_instance,
content_id: content_instance,
},
content_type_id: {
content_id: content_instance,
},
}
"""
# Build a mapping of content_types -> ids
contents = defaultdict(set)
for node in nodes:
contents[node.content_type_id].add(node.content_id)
# Convert that mapping to content_types -> Content instances
for content_type_id, content_ids in contents.items():
try:
ct = ContentType.objects.get_for_id(content_type_id)
model_class = ct.model_class()
except __HOLE__:
# get_for_id raises AttributeError when there's no model_class in django < 1.6.
model_class = None
if model_class:
contents[content_type_id] = ct.model_class().objects.in_bulk(content_ids)
else:
ct = ContentType.objects.get(id=content_type_id)
contents[content_type_id] = dict((id, UnknownWidget(ct, id)) for id in content_ids)
# Warn about using an UnknownWidget. It doesn't matter which instance we use.
next(iter(contents[content_type_id].values()), UnknownWidget(ct, None)).warn()
return contents | AttributeError | dataset/ETHPy150Open fusionbox/django-widgy/widgy/models/base.py/Node.fetch_content_instances |
7,385 | @property
def node(self):
"""
Settable property used by Node.prefetch_tree to optimize tree
rendering
"""
if hasattr(self, '_node'):
return self._node
try:
return self._nodes.all()[0]
except __HOLE__:
raise Node.DoesNotExist | IndexError | dataset/ETHPy150Open fusionbox/django-widgy/widgy/models/base.py/Content.node |
7,386 | @classmethod
def get_templates_hierarchy(cls, **kwargs):
templates = kwargs.get('hierarchy', (
'widgy/{app_label}/{model_name}/{template_name}{extension}',
'widgy/{app_label}/{template_name}{extension}',
'widgy/{template_name}{extension}',
))
kwargs.setdefault('extension', '.html')
ret = []
for template in templates:
for parent_cls in cls.__mro__:
try:
ret.extend(
template.format(**i) for i in parent_cls.get_template_kwargs(**kwargs)
)
except __HOLE__:
pass
# This must return a list or tuple because
# django.template.render_to_string does a typecheck.
return ret | AttributeError | dataset/ETHPy150Open fusionbox/django-widgy/widgy/models/base.py/Content.get_templates_hierarchy |
7,387 | def xxinv(mul):
""" Y * X * X.I -> Y """
factor, matrices = mul.as_coeff_matrices()
for i, (X, Y) in enumerate(zip(matrices[:-1], matrices[1:])):
try:
if X.is_square and Y.is_square and X == Y.inverse():
I = Identity(X.rows)
return newmul(factor, *(matrices[:i] + [I] + matrices[i+2:]))
except __HOLE__: # Y might not be invertible
pass
return mul | ValueError | dataset/ETHPy150Open sympy/sympy/sympy/matrices/expressions/matmul.py/xxinv |
7,388 | def _readable(self, watcher, events):
"""Called by the pyev watcher (self.read_watcher) whenever the socket
is readable.
This means either the socket has been closed or there is a new
client connection waiting.
"""
protocol = self.factory.build(self.loop)
try:
sock, address = self.sock.accept()
connection = Connection(self.loop, sock, address, protocol, self)
self.connections.add(connection)
connection.make_connection()
logger.debug("added connection")
except __HOLE__ as e:
self.shutdown(e) | IOError | dataset/ETHPy150Open bfrog/whizzer/whizzer/server.py/SocketServer._readable |
7,389 | def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except __HOLE__ as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
f.__name__ = name
return f | AssertionError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/_groupby_function |
7,390 | def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except __HOLE__:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple([f(n) for f, n in zip(converters, name)])
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names] | KeyError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/_GroupBy._get_indices |
7,391 | def _cython_transform(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how)
except __HOLE__ as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names) | AssertionError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/_GroupBy._cython_transform |
7,392 | def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except __HOLE__ as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names) | AssertionError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/_GroupBy._cython_agg_general |
7,393 | def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except __HOLE__:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output) | TypeError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/_GroupBy._python_agg_general |
7,394 | def _cython_operation(self, kind, values, how, axis):
assert kind in ['transform', 'aggregate']
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError("arity of more than 1 is not "
"supported for the 'how' argument")
out_shape = (self.ngroups,) + values.shape[1:]
is_numeric = is_numeric_dtype(values.dtype)
if is_datetime_or_timedelta_dtype(values.dtype):
values = values.view('int64')
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _algos.ensure_float64(values)
elif com.is_integer_dtype(values):
values = values.astype('int64', copy=False)
elif is_numeric and not com.is_complex_dtype(values):
values = _algos.ensure_float64(values)
else:
values = values.astype(object)
try:
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
except __HOLE__:
if is_numeric:
values = _algos.ensure_float64(values)
func, dtype_str = self._get_cython_function(
kind, how, values, is_numeric)
else:
raise
if is_numeric:
out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = 'object'
labels, _, _ = self.group_info
if kind == 'aggregate':
result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),
fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(
result, counts, values, labels, func, is_numeric)
elif kind == 'transform':
result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
fill_value=np.nan)
# temporary storage for running-total type transforms
accum = np.empty(out_shape, dtype=out_dtype)
result = self._transform(
result, accum, values, labels, func, is_numeric)
if com.is_integer_dtype(result):
if len(result[result == tslib.iNaT]) > 0:
result = result.astype('float64')
result[result == tslib.iNaT] = np.nan
if kind == 'aggregate' and \
self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
com._ensure_object(result),
(counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names | NotImplementedError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/BaseGrouper._cython_operation |
7,395 | def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notnull(group)]
except __HOLE__:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered | ValueError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/SeriesGroupBy.filter |
7,396 | def nunique(self, dropna=True):
""" Returns number of unique elements in the group """
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except __HOLE__: # catches object dtypes
assert val.dtype == object, \
'val.dtype must be object, got %s' % val.dtype
val, _ = algos.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
isnull = lambda a: a == -1
else:
isnull = com.isnull
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = isnull(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
res = out if ids[0] != -1 else out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids] = out
return Series(res,
index=ri,
name=self.name) | TypeError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/SeriesGroupBy.nunique |
7,397 | def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors = None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except __HOLE__ as e:
cannot_agg.append(item)
errors = e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns) | TypeError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/NDFrameGroupBy._aggregate_item_by_item |
7,398 | def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
if com._count_not_none(*values) != len(values):
try:
v = next(v for v in values if v is not None)
except StopIteration:
# If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.tools.merge import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys, values, not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = set(v.name for v in values)
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if isinstance(v.index,
MultiIndex) or key_index is None:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values, index=key_index,
columns=index)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.tools.merge import concat
result = concat(values, keys=key_index,
names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values.T, index=v.index,
columns=key_index)
except (ValueError, __HOLE__):
# GH1738: values is list of arrays of unequal lengths fall
# through to the outer else clause
return Series(values, index=key_index, name=self.name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if (so.ndim == 2 and so.dtypes.isin(_DATELIKE_DTYPES).any()):
result = result._convert(numeric=True)
date_cols = self._selected_obj.select_dtypes(
include=list(_DATELIKE_DTYPES)).columns
date_cols = date_cols.intersection(result.columns)
result[date_cols] = (result[date_cols]
._convert(datetime=True,
coerce=True))
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
else:
# only coerce dates if we find at least 1 datetime
coerce = True if any([isinstance(x, Timestamp)
for x in values]) else False
return (Series(values, index=key_index, name=self.name)
._convert(datetime=True,
coerce=coerce))
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same) | AttributeError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/NDFrameGroupBy._wrap_applied_output |
7,399 | def _transform_general(self, func, *args, **kwargs):
from pandas.tools.merge import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except __HOLE__:
return self._transform_item_by_item(obj, fast_path)
except ValueError:
msg = 'transform must return a scalar value for each group'
raise ValueError(msg)
else:
res = path(group)
# broadcasting
if isinstance(res, Series):
if res.index.is_(obj.index):
group.T.values[:] = res
else:
group.values[:] = res
applied.append(group)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
return self._set_result_index_ordered(concatenated) | TypeError | dataset/ETHPy150Open pydata/pandas/pandas/core/groupby.py/NDFrameGroupBy._transform_general |