Unnamed: 0 (int64, 0–10k) | function (string, lengths 79–138k) | label (string, 20 classes) | info (string, lengths 42–261)
---|---|---|---|
6,800 | def assertIsDecorated(self, function, decorator_name):
try:
decorator_list = function.__decorators__
except __HOLE__:
decorator_list = []
self.assertIn(
decorator_name,
decorator_list,
msg="'{}' method should be decorated with 'module_required'".format(function.__name__)
) | AttributeError | dataset/ETHPy150Open reverse-shell/routersploit/routersploit/test/test_interpreter.py/RoutersploitInterpreterTest.assertIsDecorated |
6,801 | def __init__(self, shard_spec):
"""
:param string shard_spec: A string of the form M/N where M, N are ints and 0 <= M < N.
"""
def ensure_int(s):
try:
return int(s)
except __HOLE__:
raise self.InvalidShardSpec(shard_spec)
if shard_spec is None:
raise self.InvalidShardSpec('None')
shard_str, _, nshards_str = shard_spec.partition('/')
self._shard = ensure_int(shard_str)
self._nshards = ensure_int(nshards_str)
if self._shard < 0 or self._shard >= self._nshards:
raise self.InvalidShardSpec(shard_spec) | ValueError | dataset/ETHPy150Open pantsbuild/pants/src/python/pants/base/hash_utils.py/Sharder.__init__ |
6,802 | def test_run_with_no_which(self):
try:
which_backup = shutil.which
except __HOLE__: # pragma: no cover
return
del shutil.which
try:
self._test(*GetExcecutableTests.empty)
self.assertFalse(hasattr(shutil, 'which'))
self._test(*GetExcecutableTests.in_path_2)
self.assertFalse(hasattr(shutil, 'which'))
finally:
shutil.which = which_backup | AttributeError | dataset/ETHPy150Open epsy/clize/clize/tests/test_runner.py/GetExcecutableTests.test_run_with_no_which |
6,803 | def assert_systemexit(self, __code, __func, *args, **kwargs):
try:
__func(*args, **kwargs)
except __HOLE__ as e:
self.assertEqual(e.code, __code)
else:
self.fail('SystemExit not raised') | SystemExit | dataset/ETHPy150Open epsy/clize/clize/tests/test_runner.py/RunnerTests.assert_systemexit |
6,804 | def _script_to_name(script):
try:
return unicode_data.human_readable_script_name(script)
except __HOLE__:
return script | KeyError | dataset/ETHPy150Open googlei18n/nototools/nototools/mti_cmap_data.py/_script_to_name |
6,805 | def _parse_cgroup_file(self, stat_file):
"""Parses a cgroup pseudo file for key/values."""
self.log.debug("Opening cgroup file: %s" % stat_file)
try:
with open(stat_file, 'r') as fp:
return dict(map(lambda x: x.split(), fp.read().splitlines()))
except __HOLE__:
# It is possible that the container got stopped between the API call and now
self.log.info("Can't open %s. Metrics for this container are skipped." % stat_file) | IOError | dataset/ETHPy150Open serverdensity/sd-agent/checks.d/docker.py/Docker._parse_cgroup_file |
6,806 | def _conditional_import_module(self, module_name):
"""Import a module and return a reference to it or None on failure."""
try:
exec('import '+module_name)
except __HOLE__, error:
if self._warn_on_extension_import:
warnings.warn('Did a C extension fail to compile? %s' % error)
return locals().get(module_name) | ImportError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_hashlib.py/HashLibTestCase._conditional_import_module |
6,807 | def test_unknown_hash(self):
try:
hashlib.new('spam spam spam spam spam')
except __HOLE__:
pass
else:
self.assertTrue(0 == "hashlib didn't reject bogus hash name") | ValueError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_hashlib.py/HashLibTestCase.test_unknown_hash |
6,808 | def test_get_builtin_constructor(self):
get_builtin_constructor = hashlib.__dict__[
'__get_builtin_constructor']
self.assertRaises(ValueError, get_builtin_constructor, 'test')
try:
import _md5
except __HOLE__:
pass
# This forces an ImportError for "import _md5" statements
sys.modules['_md5'] = None
try:
self.assertRaises(ValueError, get_builtin_constructor, 'md5')
finally:
if '_md5' in locals():
sys.modules['_md5'] = _md5
else:
del sys.modules['_md5'] | ImportError | dataset/ETHPy150Open azoft-dev-team/imagrium/env/Lib/test/test_hashlib.py/HashLibTestCase.test_get_builtin_constructor |
6,809 | def from_envvars(conf, prefix=None, envvars=None, as_json=True):
"""Load environment variables as Flask configuration settings.
Values are parsed as JSON. If parsing fails with a ValueError,
values are instead used as verbatim strings.
:param conf: Configuration dict, whose values should be loaded from ENVVARs.
:param prefix: If ``None`` is passed as envvars, all variables from
``environ`` starting with this prefix are imported. The
prefix is stripped upon import.
:param envvars: A dictionary of mappings of environment-variable-names
to Flask configuration names. If a list is passed
instead, names are mapped 1:1. If ``None``, see prefix
argument.
:param as_json: If False, values will not be parsed as JSON first.
"""
if prefix is None and envvars is None:
raise RuntimeError('Must either give prefix or envvars argument')
# if it's a list, convert to dict
if isinstance(envvars, list):
envvars = {k: None for k in envvars}
if not envvars:
envvars = {k: k[len(prefix):] for k in os.environ.keys()
if k.startswith(prefix)}
for env_name, name in envvars.items():
if name is None:
name = env_name
if not env_name in os.environ:
continue
if as_json:
try:
conf[name] = json.loads(os.environ[env_name])
except __HOLE__:
conf[name] = os.environ[env_name]
else:
conf[name] = os.environ[env_name] | ValueError | dataset/ETHPy150Open mbr/flask-appconfig/flask_appconfig/env.py/from_envvars |
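A minimal, self-contained sketch of the prefix-based loading described in the docstring above, assuming a hypothetical `MYAPP_` prefix and variable names (not part of the dataset row); it mirrors the JSON-parse-with-string-fallback behaviour that the masked `ValueError` handler implements:

```python
import json
import os

os.environ['MYAPP_DEBUG'] = 'true'         # valid JSON -> bool True
os.environ['MYAPP_SECRET_KEY'] = 's3cr3t'  # not JSON -> kept verbatim

conf = {}
prefix = 'MYAPP_'
for key, raw in os.environ.items():
    if not key.startswith(prefix):
        continue
    name = key[len(prefix):]
    try:
        conf[name] = json.loads(raw)   # same parse-or-fallback as above
    except ValueError:
        conf[name] = raw

print(conf['DEBUG'], conf['SECRET_KEY'])   # True s3cr3t
```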
6,810 | def on_timer(self):
"""Callback function triggered by SidTimer every 'log_interval' seconds"""
# current_index is the position in the buffer calculated from current UTC time
current_index = self.timer.data_index
utc_now = self.timer.utc_now
# clear the View to prepare for new data display
self.viewer.clear()
# Get new data and pass them to the View
message = "%s [%d] Capturing data..." % (self.timer.get_utc_now(), current_index)
self.viewer.status_display(message, level=1)
try:
data = self.sampler.capture_1sec() # return a list of 1 second signal strength
Pxx, freqs = self.psd(data, self.sampler.NFFT, self.sampler.audio_sampling_rate)
except __HOLE__ as idxerr:
print("Index Error:", idxerr)
print("Data len:", len(data))
signal_strengths = []
for binSample in self.sampler.monitored_bins:
signal_strengths.append(Pxx[binSample])
# ensure that one thread at the time accesses the sid_file's' buffers
with self.timer.lock:
# Save signal strengths into memory buffers ; prepare message for status bar
message = self.timer.get_utc_now() + " [%d] " % current_index
message += "%d" % (self.scan_end_time - self.timer.time_now)
for station, strength in zip(self.config.stations, signal_strengths):
station['raw_buffer'][current_index] = strength
self.logger.sid_file.timestamp[current_index] = utc_now
# did we complete the expected scanning duration?
if self.timer.time_now >= self.scan_end_time:
fileName = "scanner_buffers.raw.ext.%s.csv" % (self.logger.sid_file.sid_params['utc_starttime'][:10])
fsaved = self.save_current_buffers(filename=fileName, log_type='raw', log_format='supersid_extended')
print(fsaved,"saved.")
self.close()
exit(0)
# end of this thread/need to handle to View to display captured data & message
self.viewer.status_display(message, level=2) | IndexError | dataset/ETHPy150Open ericgibert/supersid/supersid/supersid_scanner.py/SuperSID_scanner.on_timer |
6,811 | def run(self, wx_app = None):
"""Start the application as infinite loop accordingly to need"""
self.__class__.running = True
if self.config['viewer'] == 'wx':
wx_app.MainLoop()
elif self.config['viewer'] == 'text':
try:
while(self.__class__.running):
sleep(1)
except (__HOLE__, SystemExit):
pass | KeyboardInterrupt | dataset/ETHPy150Open ericgibert/supersid/supersid/supersid_scanner.py/SuperSID_scanner.run |
6,812 | def __call__(self, date, ctx):
# get the node values from the context
values = [ctx.get_value(node) for node in self.nodes]
# get the writer from the context, or create it if it's not been
# created already.
ctx_id = ctx.get_id()
try:
writer = self.writers[ctx_id]
except __HOLE__:
fh = self.fh
if isinstance(fh, MDFNode):
fh = ctx.get_value(fh)
if isinstance(fh, basestring):
fh = open(fh, "wb")
self.open_fhs.append(fh)
writer = self.writers[ctx_id] = csv.writer(fh)
# figure out how to handle them and what to write in the header
if self.handlers is None:
header = ["date"]
self.handlers = []
for node, value, column in zip(self.nodes, values, self.columns):
if isinstance(column, MDFNode):
column = ctx.get_value(column)
header.extend(_get_labels(node, column, value))
if isinstance(value, (basestring, int, float, bool, datetime.date)):
self.handlers.append(self._write_basetype)
elif isinstance(value, (list, tuple, np.ndarray, pa.Index, pa.core.generic.NDFrame)):
self.handlers.append(self._write_list)
elif isinstance(value, pa.Series):
self.handlers.append(self._write_series)
else:
raise Exception("Unhandled type %s for node %s" % (type(value), node))
# write the header
writer.writerow(header)
# format the values and write the row
row = [date]
for handler, value in zip(self.handlers, values):
handler(value, row)
writer.writerow(row) | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/builders/basic.py/CSVWriter.__call__ |
6,813 | def get_dataframe(self, ctx, dtype=None, sparse_fill_value=None):
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
# if the builder's been finalized and there's a dataframe cached
# return that without trying to convert it to a sparse dataframe
# or changing the dtypes (dtype and sparse_fill_value are only
# hints).
try:
return self._cached_dataframes[ctx_id]
except __HOLE__:
pass
if dtype is None:
dtype = self.dtype
result_df = self._build_dataframe(ctx_id, dtype)
if sparse_fill_value is None:
sparse_fill_value = self.sparse_fill_value
if sparse_fill_value is not None:
# this doesn't always work depending on the actual dtype
# the dataframe ends up being
try:
result_df = result_df.to_sparse(fill_value=sparse_fill_value)
except TypeError:
pass
# try and infer types for any that are currently set to object
return result_df.convert_objects() | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/builders/basic.py/DataFrameBuilder.get_dataframe |
6,814 | def get_columns(self, node, ctx):
"""
returns the sub-set of columns in the dataframe returned
by get_dataframe that relate to a particular node
"""
ctx_id = ctx if isinstance(ctx, int) else ctx.get_id()
try:
return self._cached_columns[(ctx_id, node)]
except __HOLE__:
pass
handler_dict = self.context_handler_dict[ctx_id]
# the ctx is the root context passed to __call__, which may be
# different from the shifted contexts that the node was actually
# evaluated in.
# Get all the columns for this node in all sub-contexts.
columns = []
ctx_ids = []
for (node_name, short_name, sub_ctx_id), handler in handler_dict.items():
if node_name == node.name \
and short_name == node.short_name:
columns.append(handler.get_columns())
ctx_ids.append(sub_ctx_id)
# re-label in case the same node was evaluated in multiple sub-contexts
columns = _relabel(columns,
[node.name] * len(columns),
[node.short_name] * len(columns),
ctx_ids)
return reduce(operator.add, columns, []) | KeyError | dataset/ETHPy150Open manahl/mdf/mdf/builders/basic.py/DataFrameBuilder.get_columns |
6,815 | def pytest_runtest_logreport(self, report):
try:
scenario = report.scenario
except __HOLE__:
# skip reporting for non-bdd tests
return
if not scenario["steps"] or report.when != "call":
# skip if there isn't a result or scenario has no steps
return
def stepmap(step):
error_message = False
if step['failed'] and not scenario.setdefault('failed', False):
scenario['failed'] = True
error_message = True
return {
"keyword": step['keyword'],
"name": step['name'],
"line": step['line_number'],
"match": {
"location": "",
},
"result": self._get_result(step, report, error_message),
}
if scenario["feature"]["filename"] not in self.features:
self.features[scenario["feature"]["filename"]] = {
"keyword": "Feature",
"uri": scenario["feature"]["rel_filename"],
"name": scenario["feature"]["name"] or scenario["feature"]["rel_filename"],
"id": scenario["feature"]["rel_filename"].lower().replace(" ", "-"),
"line": scenario['feature']["line_number"],
"description": scenario["feature"]["description"],
"tags": self._serialize_tags(scenario["feature"]),
"elements": [],
}
self.features[scenario["feature"]["filename"]]["elements"].append({
"keyword": "Scenario",
"id": report.item["name"],
"name": scenario["name"],
"line": scenario["line_number"],
"description": "",
"tags": self._serialize_tags(scenario),
"type": "scenario",
"steps": [stepmap(step) for step in scenario["steps"]],
}) | AttributeError | dataset/ETHPy150Open pytest-dev/pytest-bdd/pytest_bdd/cucumber_json.py/LogBDDCucumberJSON.pytest_runtest_logreport |
6,816 | def _get_revision(self, revision):
"""
Gets an ID revision given as str. This will always return a full
40 char revision number
:param revision: str or int or None
"""
if self._empty:
raise EmptyRepositoryError("There are no changesets yet")
if revision in [-1, 'tip', None]:
revision = 'tip'
try:
revision = hex(self._repo.lookup(revision))
except (IndexError, __HOLE__, RepoLookupError, TypeError):
raise ChangesetDoesNotExistError("Revision %s does not "
"exist for this repository"
% (revision))
return revision | ValueError | dataset/ETHPy150Open codeinn/vcs/vcs/backends/hg/repository.py/MercurialRepository._get_revision |
6,817 | def get_or_assign_number(self):
"""
Set a unique number to identify this Order object. The first 4 digits represent the
current year. The last five digits represent a zero-padded incremental counter.
"""
if self.number is None:
epoch = datetime.now().date()
epoch = epoch.replace(epoch.year, 1, 1)
aggr = Order.objects.filter(number__isnull=False, created_at__gt=epoch).aggregate(models.Max('number'))
try:
epoc_number = int(str(aggr['number__max'])[4:]) + 1
self.number = int('{0}{1:05d}'.format(epoch.year, epoc_number))
except (KeyError, __HOLE__):
# the first order this year
self.number = int('{0}00001'.format(epoch.year))
return self.get_number() | ValueError | dataset/ETHPy150Open awesto/django-shop/shop/models/defaults/order.py/Order.get_or_assign_number |
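The numbering scheme spelled out in the docstring above (4-digit year plus a zero-padded 5-digit counter) can be checked in isolation; the year and counter below are made-up values:

```python
year, counter = 2016, 42
number = int('{0}{1:05d}'.format(year, counter))
assert number == 201600042

# the aggregate branch recovers the counter from the stored number like this
assert int(str(number)[4:]) + 1 == 43
```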
6,818 | def GetOutboundGatewaySettings(self):
"""Get Outbound Gateway Settings
Args:
None
Returns:
A dict {smartHost, smtpMode}"""
uri = self._serviceUrl('email/gateway')
try:
return self._GetProperties(uri)
except gdata.service.RequestError as e:
raise AppsForYourDomainException(e.args[0])
except __HOLE__:
#if no outbound gateway is set, we get a TypeError,
#catch it and return nothing...
return {'smartHost': None, 'smtpMode': None} | TypeError | dataset/ETHPy150Open kuri65536/python-for-android/python3-alpha/python-libs/gdata/apps/adminsettings/service.py/AdminSettingsService.GetOutboundGatewaySettings |
6,819 | def callback(request):
try:
client = get_evernote_client()
client.get_access_token(
request.session['oauth_token'],
request.session['oauth_token_secret'],
request.GET.get('oauth_verifier', '')
)
except __HOLE__:
return redirect('/')
note_store = client.get_note_store()
notebooks = note_store.listNotebooks()
return render_to_response('oauth/callback.html', {'notebooks': notebooks}) | KeyError | dataset/ETHPy150Open evernote/evernote-sdk-python/sample/django/oauth/views.py/callback |
6,820 | def _dynamic_mul(self, dimensions, other, keys):
"""
Implements dynamic version of overlaying operation overlaying
DynamicMaps and HoloMaps where the key dimensions of one is
a strict superset of the other.
"""
# If either is a HoloMap compute Dimension values
if not isinstance(self, DynamicMap) or not isinstance(other, DynamicMap):
keys = sorted((d, v) for k in keys for d, v in k)
grouped = dict([(g, [v for _, v in group])
for g, group in groupby(keys, lambda x: x[0])])
dimensions = [d(values=grouped[d.name]) for d in dimensions]
mode = 'bounded'
map_obj = None
elif (isinstance(self, DynamicMap) and isinstance(other, DynamicMap) and
self.mode != other.mode):
raise ValueError("Cannot overlay DynamicMaps with mismatching mode.")
else:
map_obj = self if isinstance(self, DynamicMap) else other
mode = map_obj.mode
def dynamic_mul(*key):
key = key[0] if mode == 'open' else key
layers = []
try:
if isinstance(self, DynamicMap):
_, self_el = util.get_dynamic_item(self, dimensions, key)
if self_el is not None:
layers.append(self_el)
else:
layers.append(self[key])
except __HOLE__:
pass
try:
if isinstance(other, DynamicMap):
_, other_el = util.get_dynamic_item(other, dimensions, key)
if other_el is not None:
layers.append(other_el)
else:
layers.append(other[key])
except KeyError:
pass
return Overlay(layers)
if map_obj:
return map_obj.clone(callback=dynamic_mul, shared_data=False,
kdims=dimensions)
else:
return DynamicMap(callback=dynamic_mul, kdims=dimensions) | KeyError | dataset/ETHPy150Open ioam/holoviews/holoviews/core/spaces.py/HoloMap._dynamic_mul |
6,821 | def __getitem__(self, key):
"""
Return an element for any chosen key (in 'bounded' mode) or
for a previously generated key that is still in the cache
(for one of the 'open' modes)
"""
tuple_key = util.wrap_tuple(key)
# Validation for bounded mode
if self.mode == 'bounded':
# DynamicMap(...)[:] returns a new DynamicMap with the same cache
if key == slice(None, None, None):
return self.clone(self)
slices = [el for el in tuple_key if isinstance(el, slice)]
if any(el.step for el in slices):
raise Exception("Slices cannot have a step argument "
"in DynamicMap bounded mode ")
if len(slices) not in [0, len(tuple_key)]:
raise Exception("Slices must be used exclusively or not at all")
if slices:
return self._slice_bounded(tuple_key)
# Cache lookup
try:
cache = super(DynamicMap,self).__getitem__(key)
# Return selected cache items in a new DynamicMap
if isinstance(cache, DynamicMap) and self.mode=='open':
cache = self.clone(cache)
except __HOLE__ as e:
cache = None
if self.mode == 'open' and len(self.data)>0:
raise KeyError(str(e) + " Note: Cannot index outside "
"available cache in open interval mode.")
# If the key expresses a cross product, compute the elements and return
product = self._cross_product(tuple_key, cache.data if cache else {})
if product is not None:
return product
# Not a cross product and nothing cached so compute element.
if cache: return cache
val = self._execute_callback(*tuple_key)
if self.call_mode == 'counter':
val = val[1]
self._cache(tuple_key, val)
return val | KeyError | dataset/ETHPy150Open ioam/holoviews/holoviews/core/spaces.py/DynamicMap.__getitem__ |
6,822 | def get_current_status(self):
p = Popen("%s freeze | grep ^%s==" % (self.pip_binary_path, self.resource.package_name), stdout=PIPE, stderr=STDOUT, shell=True)
out = p.communicate()[0]
res = p.wait()
if res != 0:
self.current_version = None
else:
try:
self.current_version = out.split("==", 1)[1].strip()
except __HOLE__:
raise Fail("pip could not determine installed package version.") | IndexError | dataset/ETHPy150Open samuel/kokki/kokki/cookbooks/pip/libraries/providers.py/PipPackageProvider.get_current_status |
6,823 | def assert_graph_equal(self, g1, g2):
try:
return self.assertSetEqual(set(g1), set(g2))
except __HOLE__:
# python2.6 does not have assertSetEqual
assert set(g1) == set(g2) | AttributeError | dataset/ETHPy150Open RDFLib/rdflib/test/test_auditable.py/BaseTestAuditableStore.assert_graph_equal |
6,824 | def _render_sources(dataset, tables):
"""Render the source part of a query.
Parameters
----------
dataset : str
The data set to fetch log data from.
tables : Union[dict, list]
The tables to fetch log data from
Returns
-------
str
A string that represents the "from" part of a query.
"""
if isinstance(tables, dict):
if tables.get('date_range', False):
try:
dataset_table = '.'.join([dataset, tables['table']])
return "FROM (TABLE_DATE_RANGE([{}], TIMESTAMP('{}'),"\
" TIMESTAMP('{}'))) ".format(dataset_table,
tables['from_date'],
tables['to_date'])
except __HOLE__ as exp:
logger.warn(
'Missing parameter %s in selecting sources' % (exp))
else:
return "FROM " + ", ".join(
["[%s.%s]" % (dataset, table) for table in tables]) | KeyError | dataset/ETHPy150Open tylertreat/BigQuery-Python/bigquery/query_builder.py/_render_sources |
6,825 | def parse_response(self, result, command_name, **options):
try:
return self.response_callbacks[command_name.upper()](
result, **options)
except __HOLE__:
return result | KeyError | dataset/ETHPy150Open coleifer/walrus/walrus/tusks/rlite.py/WalrusLite.parse_response |
6,826 | def cost(self):
"""Return the size (in bytes) that the bytecode for this takes up"""
assert self.cost_map != None
try:
try:
return self.__cost
except __HOLE__:
self.__cost = sum([self.cost_map[t] for t in self.value()])
return self.__cost
except:
raise Exception('Translated token not recognized') | AttributeError | dataset/ETHPy150Open googlei18n/compreffor/compreffor/pyCompressor.py/CandidateSubr.cost |
6,827 | @staticmethod
def process_subrs(glyph_set_keys, encodings, fdlen, fdselect, substrings, rev_keymap, subr_limit, nest_limit, verbose=False):
post_time = time.time()
def mark_reachable(cand_subr, fdidx):
try:
if fdidx not in cand_subr._fdidx:
cand_subr._fdidx.append(fdidx)
except __HOLE__:
cand_subr._fdidx = [fdidx]
for it in cand_subr._encoding:
mark_reachable(it[1], fdidx)
if fdselect != None:
for g, enc in zip(glyph_set_keys, encodings):
sel = fdselect(g)
for it in enc:
mark_reachable(it[1], sel)
else:
for encoding in encodings:
for it in encoding:
mark_reachable(it[1], 0)
subrs = [s for s in substrings if s.usages() > 0 and hasattr(s, '_fdidx') and bool(s._fdidx) and s.subr_saving(use_usages=True, true_cost=True) > 0]
bad_substrings = [s for s in substrings if s.usages() == 0 or not hasattr(s, '_fdidx') or not bool(s._fdidx) or s.subr_saving(use_usages=True, true_cost=True) <= 0]
if verbose:
print("%d substrings unused or negative saving subrs" % len(bad_substrings))
def set_flatten(s): s._flatten = True
map(set_flatten, bad_substrings)
gsubrs = []
lsubrs = [[] for _ in xrange(fdlen)]
subrs.sort(key=lambda s: s.subr_saving(use_usages=True, true_cost=True))
while subrs and (any(len(s) < subr_limit for s in lsubrs) or
len(gsubrs) < subr_limit):
subr = subrs[-1]
del subrs[-1]
if len(subr._fdidx) == 1:
lsub_index = lsubrs[subr._fdidx[0]]
if len(gsubrs) < subr_limit:
if len(lsub_index) < subr_limit:
# both have space
gcost = Compreffor.test_call_cost(subr, gsubrs)
lcost = Compreffor.test_call_cost(subr, lsub_index)
if gcost < lcost:
Compreffor.insert_by_usage(subr, gsubrs)
subr._global = True
else:
Compreffor.insert_by_usage(subr, lsub_index)
else:
# just gsubrs has space
Compreffor.insert_by_usage(subr, gsubrs)
subr._global = True
elif len(lsub_index) < subr_limit:
# just lsubrs has space
Compreffor.insert_by_usage(subr, lsub_index)
else:
# we must skip :(
bad_substrings.append(subr)
else:
if len(gsubrs) < subr_limit:
# we can put it in globals
Compreffor.insert_by_usage(subr, gsubrs)
subr._global = True
else:
# no room for this one
bad_substrings.append(subr)
bad_substrings.extend([s[1] for s in subrs]) # add any leftover subrs to bad_substrings
map(set_flatten, bad_substrings)
# fix any nesting issues
Compreffor.calc_nesting(gsubrs)
map(Compreffor.calc_nesting, lsubrs)
too_nested = [s for s in itertools.chain(*lsubrs) if s._max_call_depth > nest_limit]
too_nested.extend([s for s in gsubrs if s._max_call_depth > nest_limit])
map(set_flatten, too_nested)
bad_substrings.extend(too_nested)
lsubrs = [[s for s in lsubrarr if s._max_call_depth <= nest_limit] for lsubrarr in lsubrs]
gsubrs = [s for s in gsubrs if s._max_call_depth <= nest_limit]
too_nested = len(too_nested)
if verbose:
print("%d substrings nested too deep" % too_nested)
print("%d substrings being flattened" % len(bad_substrings))
# reorganize to minimize call cost of most frequent subrs
def update_position(idx, subr): subr._position = idx
gbias = psCharStrings.calcSubrBias(gsubrs)
lbias = [psCharStrings.calcSubrBias(s) for s in lsubrs]
for subr_arr, bias in zip(itertools.chain([gsubrs], lsubrs),
itertools.chain([gbias], lbias)):
subr_arr.sort(key=lambda s: s.usages(), reverse=True)
if bias == 1131:
subr_arr[:] = subr_arr[216:1240] + subr_arr[0:216] + subr_arr[1240:]
elif bias == 32768:
subr_arr[:] = (subr_arr[2264:33901] + subr_arr[216:1240] +
subr_arr[0:216] + subr_arr[1240:2264] + subr_arr[33901:])
map(update_position, range(len(subr_arr)), subr_arr)
for subr in sorted(bad_substrings, key=lambda s: len(s)):
# NOTE: it is important this is run in order so shorter
# substrings are run before longer ones
if hasattr(subr, '_fdidx') and len(subr._fdidx) > 0:
program = [rev_keymap[tok] for tok in subr.value()]
Compreffor.update_program(program, subr.encoding(), gbias, lbias, None)
Compreffor.expand_hintmask(program)
subr._program = program
for subr_arr, sel in zip(itertools.chain([gsubrs], lsubrs),
itertools.chain([None], xrange(fdlen))):
for subr in subr_arr:
program = [rev_keymap[tok] for tok in subr.value()]
if program[-1] not in ("endchar", "return"):
program.append("return")
Compreffor.update_program(program, subr.encoding(), gbias, lbias, sel)
Compreffor.expand_hintmask(program)
subr._program = program
if verbose:
print("POST-TIME: %gs" % (time.time() - post_time))
return (gsubrs, lsubrs) | AttributeError | dataset/ETHPy150Open googlei18n/compreffor/compreffor/pyCompressor.py/Compreffor.process_subrs |
6,828 | def _remove_array(self, key):
"""Private function that removes the cached array. Do not
call this unless you know what you are doing."""
try:
del self._cache[key]
except __HOLE__:
pass
######################################################################
# Setup a global `_array_cache`. The array object cache caches all the
# converted numpy arrays that are not copied. This prevents the user
# from deleting or resizing the numpy array after it has been sent down
# to VTK.
###################################################################### | KeyError | dataset/ETHPy150Open enthought/mayavi/tvtk/array_handler.py/ArrayCache._remove_array |
6,829 | def get_vtk_array_type(numeric_array_type):
"""Returns a VTK typecode given a numpy array."""
# This is a Mapping from numpy array types to VTK array types.
_arr_vtk = {numpy.dtype(numpy.character):vtkConstants.VTK_UNSIGNED_CHAR,
numpy.dtype(numpy.uint8):vtkConstants.VTK_UNSIGNED_CHAR,
numpy.dtype(numpy.uint16):vtkConstants.VTK_UNSIGNED_SHORT,
numpy.dtype(numpy.int8):vtkConstants.VTK_CHAR,
numpy.dtype(numpy.int16):vtkConstants.VTK_SHORT,
numpy.dtype(numpy.int32):vtkConstants.VTK_INT,
numpy.dtype(numpy.uint32):vtkConstants.VTK_UNSIGNED_INT,
numpy.dtype(numpy.float32):vtkConstants.VTK_FLOAT,
numpy.dtype(numpy.float64):vtkConstants.VTK_DOUBLE,
numpy.dtype(numpy.complex64):vtkConstants.VTK_FLOAT,
numpy.dtype(numpy.complex128):vtkConstants.VTK_DOUBLE,
}
_extra = {numpy.dtype(ID_TYPE_CODE):vtkConstants.VTK_ID_TYPE,
numpy.dtype(ULONG_TYPE_CODE):vtkConstants.VTK_UNSIGNED_LONG,
numpy.dtype(LONG_TYPE_CODE):vtkConstants.VTK_LONG,
}
for t in _extra:
if t not in _arr_vtk:
_arr_vtk[t] = _extra[t]
try:
return _arr_vtk[numeric_array_type]
except __HOLE__:
for key in _arr_vtk:
if numpy.issubdtype(numeric_array_type, key):
return _arr_vtk[key]
raise TypeError("Couldn't translate array's type to VTK") | KeyError | dataset/ETHPy150Open enthought/mayavi/tvtk/array_handler.py/get_vtk_array_type |
6,830 | def array2vtkCellArray(num_array, vtk_array=None):
"""Given a nested Python list or a numpy array, this method
creates a vtkCellArray instance and returns it.
A variety of input arguments are supported as described in the
Parameter documentation. If numpy arrays are given, this method
is highly efficient. This function is most efficient if the
passed numpy arrays have a typecode `ID_TYPE_CODE`. Otherwise a
typecast is necessary and this involves an extra copy. This
method *always copies* the input data.
An alternative and more efficient way to build the connectivity
list is to create a vtkIdTypeArray having data of the form
(npts,p0,p1,...p(npts-1), repeated for each cell) and then call
<vtkCellArray_instance>.SetCells(n_cell, id_list).
Parameters
----------
- num_array : numpy array or Python list/tuple
Valid values are:
1. A Python list of 1D lists. Each 1D list can contain one
cell connectivity list. This is very slow and is to be
used only when efficiency is of no consequence.
2. A 2D numpy array with the cell connectivity list.
3. A Python list of 2D numpy arrays. Each numeric array can
have a different shape. This makes it easy to generate a
cell array having cells of different kinds.
- vtk_array : `vtkCellArray` (default: `None`)
If an optional `vtkCellArray` instance, is passed as an argument
then a new array is not created and returned. The passed array
is itself modified and returned.
Example
-------
>>> a = [[0], [1, 2], [3, 4, 5], [6, 7, 8, 9]]
>>> cells = array_handler.array2vtkCellArray(a)
>>> a = numpy.array([[0,1,2], [3,4,5], [6,7,8]], 'l')
>>> cells = array_handler.array2vtkCellArray(a)
>>> l_a = [a[:,:1], a[:2,:2], a]
>>> cells = array_handler.array2vtkCellArray(l_a)
"""
if vtk_array:
cells = vtk_array
else:
cells = vtk.vtkCellArray()
assert cells.GetClassName() == 'vtkCellArray', \
'Second argument must be a `vtkCellArray` instance.'
if len(num_array) == 0:
return cells
########################################
# Internal functions.
def _slow_array2cells(z, cells):
cells.Reset()
vtk_ids = vtk.vtkIdList()
for i in z:
vtk_ids.Reset()
for j in i:
vtk_ids.InsertNextId(j)
cells.InsertNextCell(vtk_ids)
def _get_tmp_array(arr):
try:
tmp_arr = numpy.asarray(arr, ID_TYPE_CODE)
except __HOLE__:
tmp_arr = arr.astype(ID_TYPE_CODE)
return tmp_arr
def _set_cells(cells, n_cells, id_typ_arr):
vtk_arr = vtk.vtkIdTypeArray()
array2vtk(id_typ_arr, vtk_arr)
cells.SetCells(n_cells, vtk_arr)
########################################
msg = "Invalid argument. Valid types are a Python list of lists,"\
" a Python list of numpy arrays, or a numpy array."
if issubclass(type(num_array), (list, tuple)):
assert len(num_array[0]) > 0, "Input array must be 2D."
tp = type(num_array[0])
if issubclass(tp, list): # Pure Python list.
_slow_array2cells(num_array, cells)
return cells
elif issubclass(tp, numpy.ndarray): # List of arrays.
# Check shape of array and find total size.
tot_size = 0
n_cells = 0
for arr in num_array:
assert len(arr.shape) == 2, "Each array must be 2D"
shp = arr.shape
tot_size += shp[0]*(shp[1] + 1)
n_cells += shp[0]
# Create an empty array.
id_typ_arr = numpy.empty((tot_size,), ID_TYPE_CODE)
# Now populate it with the ids.
count = 0
for arr in num_array:
tmp_arr = _get_tmp_array(arr)
shp = arr.shape
sz = shp[0]*(shp[1] + 1)
set_id_type_array(tmp_arr, id_typ_arr[count:count+sz])
count += sz
# Now set them cells.
_set_cells(cells, n_cells, id_typ_arr)
return cells
else:
raise TypeError(msg)
elif issubclass(type(num_array), numpy.ndarray):
assert len(num_array.shape) == 2, "Input array must be 2D."
tmp_arr = _get_tmp_array(num_array)
shp = tmp_arr.shape
id_typ_arr = numpy.empty((shp[0]*(shp[1] + 1),), ID_TYPE_CODE)
set_id_type_array(tmp_arr, id_typ_arr)
_set_cells(cells, shp[0], id_typ_arr)
return cells
else:
raise TypeError(msg) | TypeError | dataset/ETHPy150Open enthought/mayavi/tvtk/array_handler.py/array2vtkCellArray |
6,831 | def convert_array(arr, vtk_typ=None):
"""Convert the given array to the optional type specified by
`vtk_typ`.
Parameters
----------
- arr : numpy array/list.
- vtk_typ : `string` or `None`
represents the type the array is to be converted to.
"""
if vtk_typ:
conv = {'vtkCellArray': array2vtkCellArray,
'vtkPoints': array2vtkPoints,
'vtkIdList': array2vtkIdList}
if vtk_typ in conv.keys():
vtk_arr = getattr(vtk, vtk_typ)()
return conv[vtk_typ](arr, vtk_arr)
elif vtk_typ.find('Array') > -1:
try:
vtk_arr = getattr(vtk, vtk_typ)()
except __HOLE__: # vtk_typ == 'vtkDataArray'
return array2vtk(arr)
else:
return array2vtk(arr, vtk_arr)
else:
return arr
else:
return array2vtk(arr) | TypeError | dataset/ETHPy150Open enthought/mayavi/tvtk/array_handler.py/convert_array |
6,832 | @property
def coordinates(self):
try:
from haystack.utils.geo import Point
except __HOLE__:
return None
else:
return Point(self.longitude, self.latitude, srid=4326) | ImportError | dataset/ETHPy150Open inonit/drf-haystack/tests/mockapp/models.py/MockLocation.coordinates |
6,833 | def deactivate(self):
"""Remove this response from the dependency graph and remove
its pseudocomp from the scoping object.
"""
if self._pseudo is not None:
scope = self.scope
try:
getattr(scope, self._pseudo.name)
except __HOLE__:
pass
else:
scope.remove(self._pseudo.name) | AttributeError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/hasresponses.py/Response.deactivate |
6,834 | def evaluate(self, scope=None):
"""Use the value in the u vector if it exists instead of pulling
the value from scope.
"""
if self.pcomp_name:
scope = self._get_updated_scope(scope)
try:
system = getattr(scope, self.pcomp_name)._system
vname = self.pcomp_name + '.out0'
if scope._var_meta[vname].get('scalar'):
return system.vec['u'][scope.name2collapsed[vname]][0]
else:
return system.vec['u'][scope.name2collapsed[vname]]
except (__HOLE__, AttributeError):
pass
return super(Response, self).evaluate(scope) | KeyError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/hasresponses.py/Response.evaluate |
6,835 | def add_response(self, expr, name=None, scope=None):
"""Adds a response to the driver.
expr: string
String containing the response expression.
name: string (optional)
Name to be used to refer to the response in place of the expression
string.
scope: object (optional)
The object to be used as the scope when evaluating the expression.
"""
expr = _remove_spaces(expr)
if expr in self._responses:
self.parent.raise_exception("Trying to add response '%s' to"
" driver, but it's already there"
% expr, AttributeError)
if name is not None and name in self._responses:
self.parent.raise_exception("Trying to add response '%s' to"
" driver using name '%s', but name is"
" already used" % (expr, name),
AttributeError)
scope = self._get_scope(scope)
try:
expreval = Response(expr, scope)
unresolved_vars = expreval.get_unresolved()
except __HOLE__:
unresolved_vars = [expr]
if unresolved_vars:
msg = "Can't add response '{0}' because of invalid variables {1}"
error = ConnectedExprEvaluator._invalid_expression_error(unresolved_vars,
expr, msg)
self.parent.raise_exception(str(error), type(error))
name = expr if name is None else name
#expreval.activate(self.parent)
self._responses[name] = expreval
self.parent.config_changed() | AttributeError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/hasresponses.py/HasResponses.add_response |
6,836 | def _get_scope(self, scope=None):
if scope is None:
try:
return self.parent.get_expr_scope()
except __HOLE__:
pass
return scope | AttributeError | dataset/ETHPy150Open OpenMDAO/OpenMDAO-Framework/openmdao.main/src/openmdao/main/hasresponses.py/HasResponses._get_scope |
6,837 | def _as_dataset_variable(name, var):
"""Prepare a variable for adding it to a Dataset
"""
try:
var = as_variable(var, key=name)
except __HOLE__:
raise TypeError('variables must be given by arrays or a tuple of '
'the form (dims, data[, attrs, encoding])')
if name in var.dims:
# convert the variable into an Index
if var.ndim != 1:
raise ValueError('the variable %r has the same name as one of its '
'dimensions %r, but it is not 1-dimensional and '
'thus it is not a valid index' % (name, var.dims))
var = var.to_coord()
return var | TypeError | dataset/ETHPy150Open pydata/xarray/xarray/core/merge.py/_as_dataset_variable |
6,838 | def run(self, deployer, state_persister):
"""
Run the system ``mount`` tool to mount this change's volume's block
device. The volume must be attached to this node.
"""
# Create the directory where a device will be mounted.
# The directory's parent's permissions will be set to only allow access
# by owner, to limit access by other users on the node.
try:
self.mountpoint.makedirs()
except __HOLE__ as e:
if e.errno != EEXIST:
return fail()
self.mountpoint.parent().chmod(S_IRWXU)
# This should be asynchronous. FLOC-1797
deployer.block_device_manager.mount(self.device_path, self.mountpoint)
# Remove lost+found to ensure filesystems always start out empty.
# Mounted filesystem is also made world
# writeable/readable/executable since we can't predict what user a
# container will run as. We make sure we change mounted
# filesystem's root directory permissions, so we only do this
# after the filesystem is mounted. If other files exist we don't
# bother with either change, since at that point user has modified
# the volume and we don't want to undo their changes by mistake
# (e.g. postgres doesn't like world-writeable directories once
# it's initialized).
# A better way is described in
# https://clusterhq.atlassian.net/browse/FLOC-2074
lostfound = self.mountpoint.child(b"lost+found")
if self.mountpoint.children() == [lostfound]:
lostfound.remove()
self.mountpoint.chmod(S_IRWXU | S_IRWXG | S_IRWXO)
self.mountpoint.restat()
return succeed(None) | OSError | dataset/ETHPy150Open ClusterHQ/flocker/flocker/node/agents/blockdevice.py/MountBlockDevice.run |
6,839 | def detach_volume(self, blockdevice_id):
"""
Clear the cached device path, if it was cached.
"""
try:
del self._device_paths[blockdevice_id]
except __HOLE__:
pass
return self._api.detach_volume(blockdevice_id) | KeyError | dataset/ETHPy150Open ClusterHQ/flocker/flocker/node/agents/blockdevice.py/ProcessLifetimeCache.detach_volume |
6,840 | def _canvas_route(self, *args, **kwargs):
""" Decorator for canvas route
"""
def outer(view_fn):
@self.route(*args, **kwargs)
def inner(*args, **kwargs):
fn_args = getargspec(view_fn)
try:
idx = fn_args.args.index(_ARG_KEY)
except ValueError:
idx = -1
if idx > -1:
if 'error' in flask_request.args:
return redirect('%s?error=%s' % (
self.config.get('CANVAS_ERROR_URI', '/'),
flask_request.args.get('error')))
if 'signed_request' not in flask_request.form:
self.logger.error('signed_request not in request.form')
abort(403)
try:
_, decoded_data = _decode_signed_user(
*flask_request.form['signed_request'].split('.'))
except __HOLE__ as e:
self.logger.error(e.message)
abort(403)
if 'oauth_token' not in decoded_data:
app.logger.info('unauthorized user, redirecting')
return _authorize()
user = User(**decoded_data)
if not app.config.get('CANVAS_SKIP_AUTH_CHECK', False) \
and not user.has_permissions():
self.logger.info(
'user does not have the required permission set.')
return _authorize()
self.logger.info('all required permissions have been granted')
args = args[:idx - 1] + (user,) + args[idx:]
return view_fn(*args, **kwargs)
return inner
return outer | ValueError | dataset/ETHPy150Open demianbrecht/flask-canvas/flask_canvas.py/_canvas_route |
6,841 | def date_trunc_sql(self, lookup_type, field_name):
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s') # Use double percents to escape.
format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
try:
i = fields.index(lookup_type) + 1
except __HOLE__:
sql = field_name
else:
format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
return sql | ValueError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/django-1.5/django/db/backends/mysql/base.py/DatabaseOperations.date_trunc_sql |
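The format string assembled above keeps the leading date parts for the requested granularity and pads the rest from a fixed epoch; the same assembly can be reproduced without a database (output shown for two lookup types):

```python
fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
fmt = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')
fmt_def = ('0000-', '01', '-01', ' 00:', '00', ':00')

def trunc_format(lookup_type):
    i = fields.index(lookup_type) + 1
    return ''.join(fmt[:i] + fmt_def[i:])

print(trunc_format('month'))  # %%Y-%%m-01 00:00:00
print(trunc_format('hour'))   # %%Y-%%m-%%d %%H:00:00
```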
6,842 | def db_sync(engine, abs_path, version=None, init_version=0):
"""Upgrade or downgrade a database.
Function runs the upgrade() or downgrade() functions in change scripts.
:param engine: SQLAlchemy engine instance for a given database
:param abs_path: Absolute path to migrate repository.
:param version: Database will upgrade/downgrade until this version.
If None - database will update to the latest
available version.
:param init_version: Initial database version
"""
if version is not None:
try:
version = int(version)
except __HOLE__:
raise exception.DbMigrationError(
message=_("version should be an integer"))
current_version = db_version(engine, abs_path, init_version)
repository = _find_migrate_repo(abs_path)
_db_schema_sanity_check(engine)
if version is None or version > current_version:
return versioning_api.upgrade(engine, repository, version)
else:
return versioning_api.downgrade(engine, repository,
version) | ValueError | dataset/ETHPy150Open openstack/rack/rack/openstack/common/db/sqlalchemy/migration.py/db_sync |
6,843 | def startElementNS(self, tag, qname, attrs):
if tag in self.triggers:
self.parse = True
if self.doc._parsing != "styles.xml" and tag == (OFFICENS, 'font-face-decls'):
self.parse = False
if self.parse == False:
return
self.level = self.level + 1
# Add any accumulated text content
content = ''.join(self.data)
if len(content.strip()) > 0:
self.parent.addText(content, check_grammar=False)
self.data = []
# Create the element
attrdict = {}
for (att,value) in attrs.items():
attrdict[att] = value
try:
e = Element(qname = tag, qattributes=attrdict, check_grammar=False)
self.curr = e
except __HOLE__, v:
print "Error: %s" % v
if tag == (OFFICENS, 'automatic-styles'):
e = self.doc.automaticstyles
elif tag == (OFFICENS, 'body'):
e = self.doc.body
elif tag == (OFFICENS, 'master-styles'):
e = self.doc.masterstyles
elif tag == (OFFICENS, 'meta'):
e = self.doc.meta
elif tag == (OFFICENS,'scripts'):
e = self.doc.scripts
elif tag == (OFFICENS,'settings'):
e = self.doc.settings
elif tag == (OFFICENS,'styles'):
e = self.doc.styles
elif self.doc._parsing == "styles.xml" and tag == (OFFICENS, 'font-face-decls'):
e = self.doc.fontfacedecls
elif hasattr(self,'parent'):
self.parent.addElement(e, check_grammar=False)
self.parent = e | AttributeError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf/load.py/LoadParser.startElementNS |
6,844 | def get_password_validators(validator_config):
validators = []
for validator in validator_config:
try:
klass = import_string(validator['NAME'])
except __HOLE__:
msg = "The module in NAME could not be imported: %s. Check your AUTH_PASSWORD_VALIDATORS setting."
raise ImproperlyConfigured(msg % validator['NAME'])
validators.append(klass(**validator.get('OPTIONS', {})))
return validators | ImportError | dataset/ETHPy150Open orcasgit/django-password-validation/password_validation/validation.py/get_password_validators |
6,845 | def validate_password(password, user=None, password_validators=None):
"""
Validate whether the password meets all validator requirements.
If the password is valid, return ``None``.
If the password is invalid, raise ValidationError with all error messages.
"""
errors = []
if password_validators is None:
password_validators = get_default_password_validators()
for validator in password_validators:
try:
validator.validate(password, user)
except __HOLE__ as error:
errors.append(error)
if errors:
raise ValidationError(errors) | ValidationError | dataset/ETHPy150Open orcasgit/django-password-validation/password_validation/validation.py/validate_password |
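The collect-then-raise pattern used by `validate_password` above is independent of Django; a small stand-alone sketch with hypothetical `min_length`/`has_digit` validators (not the library's own) shows the same aggregation of errors:

```python
class ValidationError(Exception):
    def __init__(self, errors):
        Exception.__init__(self, errors)
        self.errors = errors

def min_length(pw):
    if len(pw) < 8:
        raise ValidationError(['too short'])

def has_digit(pw):
    if not any(c.isdigit() for c in pw):
        raise ValidationError(['needs a digit'])

def validate(pw, validators=(min_length, has_digit)):
    errors = []
    for validator in validators:
        try:
            validator(pw)
        except ValidationError as e:   # the masked exception type in the row above
            errors.extend(e.errors)
    if errors:
        raise ValidationError(errors)

try:
    validate('abc')
except ValidationError as e:
    print(e.errors)  # ['too short', 'needs a digit']
```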
6,846 | def __init__(self, password_list_path=DEFAULT_PASSWORD_LIST_PATH):
try:
common_passwords_lines = gzip.open(password_list_path).read().decode('utf-8').splitlines()
except __HOLE__:
common_passwords_lines = open(password_list_path).readlines()
self.passwords = {p.strip() for p in common_passwords_lines} | IOError | dataset/ETHPy150Open orcasgit/django-password-validation/password_validation/validation.py/CommonPasswordValidator.__init__ |
6,847 | def lookup(self, service, timeout=1):
if self.client.state != KazooState.CONNECTED:
return service
service_name = service.name
result = self.client.get_children_async(
path='/%s/%s' % (SERVICE_NAMESPACE, service_name, ),
watch=functools.partial(self.on_service_name_watch, service),
)
try:
instance_ids = result.get(timeout=timeout)
except NoNodeError:
raise LookupFailure("failed to resolve %s", service.name)
except KazooException as e:
logger.warning("zookeeper lookup failure: %s", e)
return service
logger.info("lookup %s %r", service_name, instance_ids)
old_instance_ids = {instance.id for instance in service}
for instance_id in instance_ids:
kwargs = self._get_service_znode(service, service_name, instance_id)
service.update(instance_id, **kwargs)
try:
old_instance_ids.remove(instance_id)
except __HOLE__:
pass
for instance_id in old_instance_ids:
service.remove(instance_id)
return service | KeyError | dataset/ETHPy150Open deliveryhero/lymph/lymph/discovery/zookeeper.py/ZookeeperServiceRegistry.lookup |
6,848 | def __init__(self, params, offset=0):
agents.Agent.__init__(self, params, offset)
try:
self.maxprice = self.args[0]
except (__HOLE__, IndexError):
raise MissingParameter, 'maxprice'
try:
self.maxbuy = self.args[1]
except IndexError:
raise MissingParameter, 'maxbuy'
del self.args
# {BUY: [prevprice, success], SELL: [prevprice, success]}
self.prevorder = [self.maxprice/2., False] | AttributeError | dataset/ETHPy150Open jcbagneris/fms/fms/contrib/coleman/agents/probeadjustbstrader.py/ProbeAdjustBSTrader.__init__ |
6,849 | def main ():
options = docopt.docopt(usage, help=False)
major = int(sys.version[0])
minor = int(sys.version[2])
if major != 2 or minor < 5:
sys.exit('This program can not work (is not tested) with your python version (< 2.5 or >= 3.0)')
if options["--version"]:
print 'ExaBGP : %s' % version
print 'Python : %s' % sys.version.replace('\n',' ')
print 'Uname : %s' % ' '.join(platform.uname()[:5])
sys.exit(0)
if options["--folder"]:
folder = os.path.realpath(os.path.normpath(options["--folder"]))
elif sys.argv[0].endswith('/bin/exabgp'):
folder = sys.argv[0][:-len('/bin/exabgp')] + '/etc/exabgp'
elif sys.argv[0].endswith('/sbin/exabgp'):
folder = sys.argv[0][:-len('/sbin/exabgp')] + '/etc/exabgp'
else:
folder = '/etc/exabgp'
os.environ['EXABGP_ETC'] = folder # This is not most pretty
if options["--run"]:
sys.argv = sys.argv[sys.argv.index('--run')+1:]
if sys.argv[0] == 'healthcheck':
from exabgp.application import run_healthcheck
run_healthcheck()
elif sys.argv[0] == 'cli':
from exabgp.application import run_cli
run_cli()
else:
print(usage)
sys.exit(0)
return
envfile = 'exabgp.env' if not options["--env"] else options["--env"]
if not envfile.startswith('/'):
envfile = '%s/%s' % (folder, envfile)
from exabgp.configuration.setup import environment
try:
env = environment.setup(envfile)
except environment.Error,exc:
print usage
print '\nconfiguration issue,', str(exc)
sys.exit(1)
# Must be done before setting the logger as it modify its behaviour
if options["--debug"]:
env.log.all = True
env.log.level = syslog.LOG_DEBUG
logger = Logger()
named_pipe = os.environ.get('NAMED_PIPE','')
if named_pipe:
from exabgp.application.control import main as control
control(named_pipe)
sys.exit(0)
if options["--decode"]:
decode = ''.join(options["--decode"]).replace(':','').replace(' ','')
if not is_bgp(decode):
print usage
print 'Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default())
print ""
print "The BGP message must be an hexadecimal string."
print ""
print "All colons or spaces are ignored, for example:"
print ""
print " --decode 001E0200000007900F0003000101"
print " --decode 001E:02:0000:0007:900F:0003:0001:01"
print " --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF001E0200000007900F0003000101"
print " --decode FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF:001E:02:0000:0007:900F:0003:0001:01"
print " --decode 'FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF 001E02 00000007900F0003000101'"
sys.exit(1)
else:
decode = ''
# Make sure our child has a named pipe name
if env.api.file:
os.environ['NAMED_PIPE'] = env.api.file
duration = options["--signal"]
if duration and duration.isdigit():
pid = os.fork()
if pid:
import time
import signal
try:
time.sleep(int(duration))
os.kill(pid,signal.SIGUSR1)
except KeyboardInterrupt:
pass
try:
pid,code = os.wait()
sys.exit(code)
except KeyboardInterrupt:
try:
pid,code = os.wait()
sys.exit(code)
except Exception:
sys.exit(0)
if options["--help"]:
print(usage)
print 'Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default())
sys.exit(0)
if options["--decode"]:
env.log.parser = True
env.debug.route = decode
env.tcp.bind = ''
if options["--profile"]:
env.profile.enable = True
if options["--profile"].lower() in ['1','true']:
env.profile.file = True
elif options["--profile"].lower() in ['0','false']:
env.profile.file = False
else:
env.profile.file = options["--profile"]
if envfile and not os.path.isfile(envfile):
comment = 'environment file missing\ngenerate it using "exabgp --fi > %s"' % envfile
else:
comment = ''
if options["--full-ini"] or options["--fi"]:
for line in environment.iter_ini():
print line
sys.exit(0)
if options["--full-env"] or options["--fe"]:
print
for line in environment.iter_env():
print line
sys.exit(0)
if options["--diff-ini"] or options["--di"]:
for line in environment.iter_ini(True):
print line
sys.exit(0)
if options["--diff-env"] or options["--de"]:
for line in environment.iter_env(True):
print line
sys.exit(0)
if options["--once"]:
env.tcp.once = True
if options["--pdb"]:
# The following may fail on old version of python (but is required for debug.py)
os.environ['PDB'] = 'true'
env.debug.pdb = True
if options["--test"]:
env.debug.selfcheck = True
env.log.parser = True
if options["--memory"]:
env.debug.memory = True
configurations = []
# check the file only once that we have parsed all the command line options and allowed them to run
if options["<configuration>"]:
for f in options["<configuration>"]:
normalised = os.path.realpath(os.path.normpath(f))
if os.path.isfile(normalised):
configurations.append(normalised)
continue
if f.startswith('etc/exabgp'):
normalised = os.path.join(folder,f[11:])
if os.path.isfile(normalised):
configurations.append(normalised)
continue
logger.configuration('one of the arguments passed as configuration is not a file (%s)' % f,'error')
sys.exit(1)
else:
print(usage)
print 'Environment values are:\n' + '\n'.join(' - %s' % _ for _ in environment.default())
print '\nno configuration file provided'
sys.exit(1)
from exabgp.bgp.message.update.attribute import Attribute
Attribute.caching = env.cache.attributes
if env.debug.rotate or len(configurations) == 1:
run(env,comment,configurations)
if not (env.log.destination in ('syslog','stdout','stderr') or env.log.destination.startswith('host:')):
logger.configuration('can not log to files when running multiple configuration (as we fork)','error')
sys.exit(1)
try:
# run each configuration in its own process
pids = []
for configuration in configurations:
pid = os.fork()
if pid == 0:
run(env,comment,[configuration],os.getpid())
else:
pids.append(pid)
# If we get a ^C / SIGTERM, ignore just continue waiting for our child process
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
# wait for the forked processes
for pid in pids:
os.waitpid(pid,0)
except __HOLE__,exc:
logger.reactor('Can not fork, errno %d : %s' % (exc.errno,exc.strerror),'critical')
sys.exit(1) | OSError | dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/application/bgp.py/main |
6,850 | def run (env, comment, configurations, pid=0):
logger = Logger()
if comment:
logger.configuration(comment)
if not env.profile.enable:
ok = Reactor(configurations).run()
__exit(env.debug.memory,0 if ok else 1)
try:
import cProfile as profile
except __HOLE__:
import profile
if not env.profile.file or env.profile.file == 'stdout':
ok = profile.run('Reactor(configurations).run()')
__exit(env.debug.memory,0 if ok else 1)
if pid:
profile_name = "%s-pid-%d" % (env.profile.file,pid)
else:
profile_name = env.profile.file
notice = ''
if os.path.isdir(profile_name):
notice = 'profile can not use this filename as output, it is not a directory (%s)' % profile_name
if os.path.exists(profile_name):
notice = 'profile can not use this filename as output, it already exists (%s)' % profile_name
if not notice:
logger.reactor('profiling ....')
profiler = profile.Profile()
profiler.enable()
try:
ok = Reactor(configurations).run()
except Exception:
raise
finally:
profiler.disable()
kprofile = lsprofcalltree.KCacheGrind(profiler)
with open(profile_name, 'w+') as write:
kprofile.output(write)
__exit(env.debug.memory,0 if ok else 1)
else:
logger.reactor("-"*len(notice))
logger.reactor(notice)
logger.reactor("-"*len(notice))
Reactor(configurations).run()
__exit(env.debug.memory,1) | ImportError | dataset/ETHPy150Open Exa-Networks/exabgp/lib/exabgp/application/bgp.py/run |
6,851 | def ndintegrate(data, unit_conv, limits, unit='ppm', noise_limits=None):
"""
Integrate one nD data array within limits given in units. Data points must
be equally spaced. Can only integrate one region per function call.
The integration error due to baseline noise is calculated as:
.. math::
error = \sigma_{vol} = \sigma \sqrt{\prod_i^{d} n_{i}},
if the noise_limits are set.
Where:
sigma is the standard deviation of the baseline noise. n is the number
of bins in the integration range for each d dimensions.
See integrate for more information.
Parameters
----------
data: array like
1d array of intensity
unit_convs: [`fileiobase.unit_conversion`, ] list
list of unit_conversion object associated with each dim of data.
limits: array like
With shape (2,) or (d, 2). 1D Array with lower and upper integration
limits for 1D . Or array with d rows of lower and upper integration
limits for each dimension.
noise_limits: Optional[array like]
With shape(2, ). Array with lower and upper limits to section of data
with only noise. A larger range will likely yield a more accurate
estimate. It is unwise to use the very end of the spectrum for the
noise range.
Returns
-------
array
[value, ...] integration values
if noise_limits is given:
array
[[value, error], ...] where error a one sigma estimate of the error
only from the spectrum noise
"""
# determine the dimensionality of the data.
d = np.ndim(data)
try:
iter(unit_conv)
except __HOLE__:
unit_conv = [unit_conv, ]
if d != len(unit_conv):
mesg = 'A unit_conversion object is needed for each dimension.'
raise ValueError(mesg)
limits = np.array(limits)
if limits.ndim == 1:
limits = np.expand_dims(limits, axis=0)
if limits.shape[0] != d and limits.shape[1] != 2:
mesg = 'A lower and upper limit is needed for each dimension.'
raise ValueError(mesg)
inds = [(uc(x[0], unit), uc(x[1], unit))
for (uc, x) in zip(unit_conv, limits)]
inds = [sorted(x) for x in inds]
# the integrate_nd needs to be scaled by the bin width in ppm
ppm_scales = [x.ppm_scale() for x in unit_conv]
dx = np.prod(np.array([abs(x[1]-x[0]) for x in ppm_scales]))
slice_sum = (data[[slice(x[0], x[1])for x in np.flipud(inds)]]).sum()
value = slice_sum * dx
if noise_limits is None:
return value
else:
noise_limits = np.array(noise_limits)
if noise_limits.size == 2:
noise_limits = np.expand_dims(noise_limits, axis=0)
if noise_limits.shape[0] != d and noise_limits.shape[1] != 2:
mesg = 'If given, a noise limit is needed for each dimension.'
raise ValueError(mesg)
noise_inds = [(uc(x[0], unit), uc(x[1], unit))
for (uc, x) in zip(unit_conv, noise_limits)]
noise_inds = [sorted(x) for x in noise_inds]
# see docstring of integrate
nm = np.prod(np.array([abs(x[1]-x[0]) for x in noise_inds]))
std = np.std(data[[slice(x[0], x[1])for x in np.flipud(noise_inds)]])
error = std * np.sqrt(nm)
return np.hstack((value, error)) | TypeError | dataset/ETHPy150Open jjhelmus/nmrglue/nmrglue/analysis/integration.py/ndintegrate |
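The error estimate quoted in the docstring above, error = sigma * sqrt(prod_i n_i), only needs the baseline noise level and the number of bins inside the integration limits; a toy check with made-up numbers:

```python
import numpy as np

sigma = 0.05       # assumed std. dev. of the baseline noise
bins = (40, 25)    # bins covered by the limits in each of the 2 dimensions
error = sigma * np.sqrt(np.prod(bins))
print(round(error, 3))  # 1.581
```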
6,852 | def setSWJPins(self, output, pin, wait=0):
cmd = []
cmd.append(COMMAND_ID['DAP_SWJ_PINS'])
try:
p = PINS[pin]
except __HOLE__:
logging.error('cannot find %s pin', pin)
return
cmd.append(output & 0xff)
cmd.append(p)
cmd.append(wait & 0xff)
cmd.append((wait >> 8) & 0xff)
cmd.append((wait >> 16) & 0xff)
cmd.append((wait >> 24) & 0xff)
self.interface.write(cmd)
resp = self.interface.read()
if resp[0] != COMMAND_ID['DAP_SWJ_PINS']:
# Response is to a different command
raise DAPAccessIntf.DeviceError()
return resp[1] | KeyError | dataset/ETHPy150Open mbedmicro/pyOCD/pyOCD/pyDAPAccess/cmsis_dap_core.py/CMSIS_DAP_Protocol.setSWJPins |
6,853 | @method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
self.topic = self.get_topic(**kwargs)
if request.GET.get('first-unread'):
if request.user.is_authenticated():
read_dates = []
try:
read_dates.append(TopicReadTracker.objects.get(user=request.user, topic=self.topic).time_stamp)
except TopicReadTracker.DoesNotExist:
pass
try:
read_dates.append(ForumReadTracker.objects.get(user=request.user, forum=self.topic.forum).time_stamp)
except ForumReadTracker.DoesNotExist:
pass
read_date = read_dates and max(read_dates)
if read_date:
try:
first_unread_topic = self.topic.posts.filter(created__gt=read_date).order_by('created', 'id')[0]
except __HOLE__:
first_unread_topic = self.topic.last_post
else:
first_unread_topic = self.topic.head
return HttpResponseRedirect(reverse('pybb:post', kwargs={'pk': first_unread_topic.id}))
return super(TopicView, self).dispatch(request, *args, **kwargs) | IndexError | dataset/ETHPy150Open hovel/pybbm/pybb/views.py/TopicView.dispatch |
6,854 | def form_valid(self, form):
success = True
save_attachments = False
save_poll_answers = False
self.object, topic = form.save(commit=False)
if perms.may_attach_files(self.request.user):
aformset = self.get_attachment_formset_class()(
self.request.POST, self.request.FILES, instance=self.object
)
if aformset.is_valid():
save_attachments = True
else:
success = False
else:
aformset = None
if perms.may_create_poll(self.request.user):
pollformset = self.get_poll_answer_formset_class()()
if getattr(self, 'forum', None) or topic.head == self.object:
if topic.poll_type != Topic.POLL_TYPE_NONE:
pollformset = self.get_poll_answer_formset_class()(
self.request.POST, instance=topic
)
if pollformset.is_valid():
save_poll_answers = True
else:
success = False
else:
topic.poll_question = None
topic.poll_answers.all().delete()
else:
pollformset = None
if success:
try:
topic.save()
except __HOLE__ as e:
success = False
errors = form._errors.setdefault('name', ErrorList())
errors += e.error_list
else:
self.object.topic = topic
self.object.save()
if save_attachments:
aformset.save()
if save_poll_answers:
pollformset.save()
return HttpResponseRedirect(self.get_success_url())
return self.render_to_response(self.get_context_data(form=form,
aformset=aformset,
pollformset=pollformset)) | ValidationError | dataset/ETHPy150Open hovel/pybbm/pybb/views.py/PostEditMixin.form_valid |
6,855 | @method_decorator(csrf_protect)
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated():
self.user = request.user
else:
if defaults.PYBB_ENABLE_ANONYMOUS_POST:
self.user, new = User.objects.get_or_create(**{username_field: defaults.PYBB_ANONYMOUS_USERNAME})
else:
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(request.get_full_path())
self.forum = None
self.topic = None
if 'forum_id' in kwargs:
self.forum = get_object_or_404(perms.filter_forums(request.user, Forum.objects.all()), pk=kwargs['forum_id'])
if not perms.may_create_topic(self.user, self.forum):
raise PermissionDenied
elif 'topic_id' in kwargs:
self.topic = get_object_or_404(perms.filter_topics(request.user, Topic.objects.all()), pk=kwargs['topic_id'])
if not perms.may_create_post(self.user, self.topic):
raise PermissionDenied
self.quote = ''
if 'quote_id' in request.GET:
try:
quote_id = int(request.GET.get('quote_id'))
except __HOLE__:
raise Http404
else:
post = get_object_or_404(Post, pk=quote_id)
if not perms.may_view_post(request.user, post):
raise PermissionDenied
profile = util.get_pybb_profile(post.user)
self.quote = util._get_markup_quoter(defaults.PYBB_MARKUP)(post.body, profile.get_display_name())
if self.quote and request.is_ajax():
return HttpResponse(self.quote)
return super(AddPostView, self).dispatch(request, *args, **kwargs) | TypeError | dataset/ETHPy150Open hovel/pybbm/pybb/views.py/AddPostView.dispatch |
6,856 | def get(self, name):
if not may(READ):
raise Forbidden()
try:
item = current_app.storage.openwrite(name)
except (OSError, IOError) as e:
if e.errno == errno.ENOENT:
raise NotFound()
raise
with item as item:
complete = item.meta['complete']
if not complete and not may(ADMIN):
error = 'Upload incomplete. Try again later.'
return render_template('error.html', heading=item.meta['filename'], body=error), 409
if item.meta['locked'] and not may(ADMIN):
raise Forbidden()
if delete_if_lifetime_over(item, name):
raise NotFound()
def read_data(item):
# reading the item for rendering is registered like a download
data = item.data.read(item.data.size, 0)
item.meta['timestamp-download'] = int(time.time())
return data
size = item.meta['size']
ct = item.meta['type']
try:
get_lexer_for_mimetype(ct)
use_pygments = True
ct_pygments = ct
except NoPygmentsLexer:
if ct.startswith('text/'):
# seems like we found a text type not supported by pygments
# use text/plain so we get a display with line numbers
use_pygments = True
ct_pygments = 'text/plain'
else:
use_pygments = False
if rendering_allowed(ct, size, use_pygments, complete):
if ct.startswith('text/x-bepasty-'):
# special bepasty items - must be first, don't feed to pygments
if ct == 'text/x-bepasty-list':
names = read_data(item).decode('utf-8').splitlines()
files = sorted(file_infos(names), key=lambda f: f['filename'])
rendered_content = Markup(render_template('filelist_tableonly.html', files=files))
else:
rendered_content = u"Can't render this content type."
elif ct.startswith('image/'):
src = url_for('bepasty.download', name=name)
rendered_content = Markup(u'<img src="%s" alt="the image" width="800">' % src)
elif ct.startswith('audio/'):
src = url_for('bepasty.download', name=name)
alt_msg = u'html5 audio element not supported by your browser.'
rendered_content = Markup(u'<audio controls src="%s">%s</audio>' % (src, alt_msg))
elif ct.startswith('video/'):
src = url_for('bepasty.download', name=name)
alt_msg = u'html5 video element not supported by your browser.'
rendered_content = Markup(u'<video controls src="%s">%s</video>' % (src, alt_msg))
elif ct in ['application/pdf', 'application/x-pdf', ]:
src = url_for('bepasty.inline', name=name)
link_txt = u'Click to see PDF'
rendered_content = Markup(u'<a href="%s">%s</a>' % (src, link_txt))
elif use_pygments:
text = read_data(item)
# TODO we don't have the coding in metadata
try:
text = text.decode('utf-8')
except __HOLE__:
# well, it is not utf-8 or ascii, so we can only guess...
text = text.decode('iso-8859-1')
lexer = get_lexer_for_mimetype(ct_pygments)
formatter = CustomHtmlFormatter(linenos='table', lineanchors="L",
lineparagraphs="L", anchorlinenos=True)
rendered_content = Markup(highlight(text, lexer, formatter))
else:
rendered_content = u"Can't render this content type."
else:
if not complete:
rendered_content = u"Rendering not allowed (not complete). Is it still being uploaded?"
else:
rendered_content = u"Rendering not allowed (too big?). Try download"
return render_template('display.html', name=name, item=item,
rendered_content=rendered_content) | UnicodeDecodeError | dataset/ETHPy150Open bepasty/bepasty-server/bepasty/views/display.py/DisplayView.get |
6,857 | def data_available(dataset_name=None):
"""Check if the data set is available on the local machine already."""
try:
from itertools import izip_longest
except __HOLE__:
from itertools import zip_longest as izip_longest
dr = data_resources[dataset_name]
zip_urls = (dr['files'], )
if 'save_names' in dr: zip_urls += (dr['save_names'], )
else: zip_urls += ([],)
for file_list, save_list in izip_longest(*zip_urls, fillvalue=[]):
for f, s in izip_longest(file_list, save_list, fillvalue=None):
if s is not None: f=s # If there is a save_name given, use that one
if not os.path.exists(os.path.join(data_path, dataset_name, f)):
return False
return True | ImportError | dataset/ETHPy150Open SheffieldML/GPy/GPy/util/datasets.py/data_available |
6,858 | def hapmap3(data_set='hapmap3'):
"""
The HapMap phase three SNP dataset - 1184 samples out of 11 populations.
SNP_matrix (A) encoding [see Paschou et all. 2007 (PCA-Correlated SNPs...)]:
Let (B1,B2) be the alphabetically sorted bases, which occur in the j-th SNP, then
/ 1, iff SNPij==(B1,B1)
Aij = | 0, iff SNPij==(B1,B2)
\ -1, iff SNPij==(B2,B2)
The SNP data and the meta information (such as iid, sex and phenotype) are
stored in the dataframe datadf, index is the Individual ID,
with following columns for metainfo:
* family_id -> Family ID
* paternal_id -> Paternal ID
* maternal_id -> Maternal ID
* sex -> Sex (1=male; 2=female; other=unknown)
* phenotype -> Phenotype (-9, or 0 for unknown)
* population -> Population string (e.g. 'ASW' - 'YRI')
* rest are SNP rs (ids)
More information is given in infodf:
* Chromosome:
- autosomal chromosemes -> 1-22
- X X chromosome -> 23
- Y Y chromosome -> 24
- XY Pseudo-autosomal region of X -> 25
- MT Mitochondrial -> 26
* Relative Positon (to Chromosome) [base pairs]
"""
try:
from pandas import read_pickle, DataFrame
from sys import stdout
import bz2
except __HOLE__ as i:
raise i("Need pandas for hapmap dataset, make sure to install pandas (http://pandas.pydata.org/) before loading the hapmap dataset")
dir_path = os.path.join(data_path,'hapmap3')
hapmap_file_name = 'hapmap3_r2_b36_fwd.consensus.qc.poly'
unpacked_files = [os.path.join(dir_path, hapmap_file_name+ending) for ending in ['.ped', '.map']]
unpacked_files_exist = reduce(lambda a, b:a and b, map(os.path.exists, unpacked_files))
if not unpacked_files_exist and not data_available(data_set):
download_data(data_set)
preprocessed_data_paths = [os.path.join(dir_path,hapmap_file_name + file_name) for file_name in \
['.snps.pickle',
'.info.pickle',
'.nan.pickle']]
if not reduce(lambda a,b: a and b, map(os.path.exists, preprocessed_data_paths)):
if not overide_manual_authorize and not prompt_user("Preprocessing requires ~25GB "
"of memory and can take a (very) long time, continue? [Y/n]"):
print("Preprocessing required for further usage.")
return
status = "Preprocessing data, please be patient..."
print(status)
def write_status(message, progress, status):
stdout.write(" "*len(status)); stdout.write("\r"); stdout.flush()
status = r"[{perc: <{ll}}] {message: <13s}".format(message=message, ll=20,
perc="="*int(20.*progress/100.))
stdout.write(status); stdout.flush()
return status
if not unpacked_files_exist:
status=write_status('unpacking...', 0, '')
curr = 0
for newfilepath in unpacked_files:
if not os.path.exists(newfilepath):
filepath = newfilepath + '.bz2'
file_size = os.path.getsize(filepath)
with open(newfilepath, 'wb') as new_file, open(filepath, 'rb') as f:
decomp = bz2.BZ2Decompressor()
file_processed = 0
buffsize = 100 * 1024
for data in iter(lambda : f.read(buffsize), b''):
new_file.write(decomp.decompress(data))
file_processed += len(data)
status=write_status('unpacking...', curr+12.*file_processed/(file_size), status)
curr += 12
status=write_status('unpacking...', curr, status)
os.remove(filepath)
status=write_status('reading .ped...', 25, status)
# Preprocess data:
snpstrnp = np.loadtxt(unpacked_files[0], dtype=str)
status=write_status('reading .map...', 33, status)
mapnp = np.loadtxt(unpacked_files[1], dtype=str)
status=write_status('reading relationships.txt...', 42, status)
# and metainfo:
infodf = DataFrame.from_csv(os.path.join(dir_path,'./relationships_w_pops_121708.txt'), header=0, sep='\t')
infodf.set_index('IID', inplace=1)
status=write_status('filtering nan...', 45, status)
snpstr = snpstrnp[:,6:].astype('S1').reshape(snpstrnp.shape[0], -1, 2)
inan = snpstr[:,:,0] == '0'
status=write_status('filtering reference alleles...', 55, status)
ref = np.array(map(lambda x: np.unique(x)[-2:], snpstr.swapaxes(0,1)[:,:,:]))
status=write_status('encoding snps...', 70, status)
# Encode the information for each gene in {-1,0,1}:
status=write_status('encoding snps...', 73, status)
snps = (snpstr==ref[None,:,:])
status=write_status('encoding snps...', 76, status)
snps = (snps*np.array([1,-1])[None,None,:])
status=write_status('encoding snps...', 78, status)
snps = snps.sum(-1)
status=write_status('encoding snps...', 81, status)
snps = snps.astype('i8')
status=write_status('marking nan values...', 88, status)
# put in nan values (masked as -128):
snps[inan] = -128
status=write_status('setting up meta...', 94, status)
# get meta information:
metaheader = np.r_[['family_id', 'iid', 'paternal_id', 'maternal_id', 'sex', 'phenotype']]
metadf = DataFrame(columns=metaheader, data=snpstrnp[:,:6])
metadf.set_index('iid', inplace=1)
metadf = metadf.join(infodf.population)
metadf.to_pickle(preprocessed_data_paths[1])
# put everything together:
status=write_status('setting up snps...', 96, status)
snpsdf = DataFrame(index=metadf.index, data=snps, columns=mapnp[:,1])
with open(preprocessed_data_paths[0], 'wb') as f:
pickle.dump(f, snpsdf, protocoll=-1)
status=write_status('setting up snps...', 98, status)
inandf = DataFrame(index=metadf.index, data=inan, columns=mapnp[:,1])
inandf.to_pickle(preprocessed_data_paths[2])
status=write_status('done :)', 100, status)
print('')
else:
print("loading snps...")
snpsdf = read_pickle(preprocessed_data_paths[0])
print("loading metainfo...")
metadf = read_pickle(preprocessed_data_paths[1])
print("loading nan entries...")
inandf = read_pickle(preprocessed_data_paths[2])
snps = snpsdf.values
populations = metadf.population.values.astype('S3')
hapmap = dict(name=data_set,
description='The HapMap phase three SNP dataset - '
'1184 samples out of 11 populations. inan is a '
'boolean array, containing wheather or not the '
'given entry is nan (nans are masked as '
'-128 in snps).',
snpsdf=snpsdf,
metadf=metadf,
snps=snps,
inan=inandf.values,
inandf=inandf,
populations=populations)
return hapmap | ImportError | dataset/ETHPy150Open SheffieldML/GPy/GPy/util/datasets.py/hapmap3 |
6,859 | def get_template(self, uri):
"""Return a :class:`.Template` object corresponding to the given
``uri``.
.. note:: The ``relativeto`` argument is not supported here at the moment.
"""
try:
if self.filesystem_checks:
return self._check(uri, self._collection[uri])
else:
return self._collection[uri]
except __HOLE__:
u = re.sub(r'^\/+', '', uri)
for dir in self.directories:
srcfile = posixpath.normpath(posixpath.join(dir, u))
if os.path.isfile(srcfile):
return self._load(srcfile, uri)
else:
raise exceptions.TopLevelLookupException(
"Cant locate template for uri %r" % uri) | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Mako-0.8.1/mako/lookup.py/TemplateLookup.get_template |
6,860 | def filename_to_uri(self, filename):
"""Convert the given ``filename`` to a URI relative to
this :class:`.TemplateCollection`."""
try:
return self._uri_cache[filename]
except __HOLE__:
value = self._relativeize(filename)
self._uri_cache[filename] = value
return value | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Mako-0.8.1/mako/lookup.py/TemplateLookup.filename_to_uri |
6,861 | def _load(self, filename, uri):
self._mutex.acquire()
try:
try:
# try returning from collection one
# more time in case concurrent thread already loaded
return self._collection[uri]
except __HOLE__:
pass
try:
if self.modulename_callable is not None:
module_filename = self.modulename_callable(filename, uri)
else:
module_filename = None
self._collection[uri] = template = Template(
uri=uri,
filename=posixpath.normpath(filename),
lookup=self,
module_filename=module_filename,
**self.template_args)
return template
except:
# if compilation fails etc, ensure
# template is removed from collection,
# re-raise
self._collection.pop(uri, None)
raise
finally:
self._mutex.release() | KeyError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Mako-0.8.1/mako/lookup.py/TemplateLookup._load |
6,862 | def _check(self, uri, template):
if template.filename is None:
return template
try:
template_stat = os.stat(template.filename)
if template.module._modified_time < \
template_stat[stat.ST_MTIME]:
self._collection.pop(uri, None)
return self._load(template.filename, uri)
else:
return template
except __HOLE__:
self._collection.pop(uri, None)
raise exceptions.TemplateLookupException(
"Cant locate template for uri %r" % uri) | OSError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Mako-0.8.1/mako/lookup.py/TemplateLookup._check |
6,863 | def __repr__(self):
try:
return "<%s - %s>" % (self._object_type.encode('utf-8'), self.title.encode('utf-8'))
except __HOLE__:
# the title is None
return "< Track >" | AttributeError | dataset/ETHPy150Open echonest/pyechonest/pyechonest/track.py/Track.__repr__ |
6,864 | def dns_sweep(self,file_with_ips,file_prefix):
logging.info("Finding misconfigured DNS servers that might allow zone transfers among live ips ..")
self.shell.shell_exec("nmap -PN -n -sS -p 53 -iL "+file_with_ips+" -oA "+file_prefix)
# Step 2 - Extract IPs
dns_servers=file_prefix+".dns_server.ips"
self.shell.shell_exec("grep \"53/open/tcp\" "+file_prefix+".gnmap | cut -f 2 -d \" \" > "+dns_servers)
file = FileOperations.open(dns_servers)
domain_names=file_prefix+".domain_names"
self.shell.shell_exec("rm -f "+domain_names)
num_dns_servers = 0
for line in file:
if line.strip('\n'):
dns_server = line.strip('\n')
self.shell.shell_exec("host "+dns_server+" "+dns_server+" | grep 'domain name' | cut -f 5 -d' ' | cut -f 2,3,4,5,6,7 -d. | sed 's/\.$//' >> "+domain_names)
num_dns_servers = num_dns_servers+1
try:
file = FileOperations.open(domain_names, owtf_clean=False)
except __HOLE__:
return
for line in file:
domain = line.strip('\n')
raw_axfr=file_prefix+"."+dns_server+"."+domain+".axfr.raw"
self.shell.shell_exec("host -l "+domain+" "+dns_server+" | grep "+domain+" > "+raw_axfr)
success=self.shell.shell_exec("wc -l "+raw_axfr+" | cut -f 1 -d ' '")
if success > 3:
logging.info("Attempting zone transfer on $dns_server using domain "+domain+".. Success!")
axfr=file_prefix+"."+dns_server+"."+domain+".axfr"
self.shell.shell_exec("rm -f "+axfr)
logging.info(self.shell.shell_exec("grep 'has address' "+raw_axfr+" | cut -f 1,4 -d ' ' | sort -k 2 -t ' ' | sed 's/ /#/g'"))
else:
logging.info("Attempting zone transfer on $dns_server using domain "+domain+" .. Success!")
self.shell.shell_exec("rm -f "+raw_axfr)
if num_dns_servers==0:
return | IOError | dataset/ETHPy150Open owtf/owtf/framework/plugin/scanner.py/Scanner.dns_sweep |
6,865 | def mkdir(self):
"write a directory for the node"
if getattr(self, 'cache_isdir', None):
return
self.parent.mkdir()
if self.name:
try:
os.mkdir(self.abspath())
except __HOLE__:
e = extract_exception()
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(self.abspath()):
raise IOError('%s is not a directory' % self)
try:
self.children
except:
self.children = {}
self.cache_isdir = True | OSError | dataset/ETHPy150Open cournape/Bento/bento/core/node.py/Node.mkdir |
6,866 | def find_node(self, lst):
"read the file system, make the nodes as needed"
if is_string(lst):
lst = [x for x in split_path(lst) if x and x != '.']
cur = self
for x in lst:
if x == '..':
cur = cur.parent
continue
try:
if x in cur.children:
cur = cur.children[x]
continue
except:
cur.children = {}
# optimistic: create the node first then look if it was correct to do so
cur = self.__class__(x, cur)
try:
os.stat(cur.abspath())
except:
del cur.parent.children[x]
return None
ret = cur
try:
while not getattr(cur.parent, 'cache_isdir', None):
cur = cur.parent
cur.cache_isdir = True
except __HOLE__:
pass
return ret | AttributeError | dataset/ETHPy150Open cournape/Bento/bento/core/node.py/Node.find_node |
6,867 | def find_dir(self, lst):
"""
search a folder in the filesystem
create the corresponding mappings source <-> build directories
"""
if isinstance(lst, str):
lst = [x for x in split_path(lst) if x and x != '.']
node = self.find_node(lst)
try:
os.path.isdir(node.abspath())
except __HOLE__:
return None
return node | OSError | dataset/ETHPy150Open cournape/Bento/bento/core/node.py/Node.find_dir |
6,868 | def pytest_configure():
from django.conf import settings
settings.configure(
DEBUG_PROPAGATE_EXCEPTIONS=True,
DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'}},
SITE_ID=1,
SECRET_KEY='HitCounts Rock!',
DEBUG=True,
TEMPLATE_DEBUG=True,
ALLOWED_HOSTS=[],
USE_I18N=True,
USE_L10N=True,
STATIC_URL='/static/',
MIDDLEWARE_CLASSES=(
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
),
INSTALLED_APPS=(
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'blog',
'hitcount',
'tests',
),
ROOT_URLCONF='example_project.urls',
SESSION_ENGINE='django.contrib.sessions.backends.file',
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
],
# HitCount Variables (default values)
HITCOUNT_KEEP_HIT_ACTIVE={'days': 7},
HITCOUNT_HITS_PER_IP_LIMIT=0,
HITCOUNT_EXCLUDE_USER_GROUP=(),
HITCOUNT_KEEP_HIT_IN_DATABASE={'days': 30},
)
try:
import django
django.setup()
except __HOLE__:
pass
# so we can reuse this function when testing directly from Django
# via: ./runtests.py --django
return settings | AttributeError | dataset/ETHPy150Open thornomad/django-hitcount/tests/conftest.py/pytest_configure |
6,869 | def run_setup_marathon_job(context):
update_context_marathon_config(context)
with contextlib.nested(
mock.patch.object(SystemPaastaConfig, 'get_zk_hosts', autospec=True, return_value=context.zk_hosts),
mock.patch('paasta_tools.setup_marathon_job.parse_args', autospec=True),
mock.patch.object(MarathonServiceConfig, 'format_marathon_app_dict', autospec=True,
return_value=context.marathon_complete_config),
mock.patch('paasta_tools.setup_marathon_job.monitoring_tools.send_event', autospec=True),
) as (
mock_get_zk_hosts,
mock_parse_args,
_,
_,
):
mock_parse_args.return_value = mock.Mock(
soa_dir=context.soa_dir,
service_instance=context.job_id,
)
try:
setup_marathon_job.main()
except (__HOLE__, MarathonHttpError):
pass | SystemExit | dataset/ETHPy150Open Yelp/paasta/paasta_itests/steps/setup_marathon_job_steps.py/run_setup_marathon_job |
6,870 | def get(self):
''' worker '''
nsdata = {'nsname': self.get_argument('name'),
'profile_number': int(self.get_argument('nsid')),
'ns_platform': self.get_argument('type')}
try:
PGLOT.nodeservers.start_server(**nsdata)
self.send_json()
except __HOLE__ as err:
self.send_json(message=err.args[0], status=400) | ValueError | dataset/ETHPy150Open UniversalDevicesInc/Polyglot/polyglot/element_manager/api.py/ServersAddHandler.get |
6,871 | def digattr(obj, attr, default=None):
'''Perform template-style dotted lookup'''
steps = attr.split('.')
for step in steps:
try: # dict lookup
obj = obj[step]
except (TypeError, AttributeError, KeyError):
try: # attribute lookup
obj = getattr(obj, step)
except (TypeError, AttributeError):
try: # list index lookup
obj = obj[int(step)]
except (__HOLE__, ValueError, KeyError, TypeError):
return default
if callable(obj) and not getattr(obj, 'do_not_call_in_templates', False):
obj = obj()
return obj | IndexError | dataset/ETHPy150Open funkybob/django-nap/nap/utils/__init__.py/digattr |
6,872 | def _sorted_from_obj(self, data):
# data is a list of the type generated by parse_qsl
if isinstance(data, list):
items = data
else:
# complex objects:
try:
# django.http.QueryDict,
items = [(i[0], j) for i in data.lists() for j in i[1]]
except __HOLE__:
# webob.multidict.MultiDict
# werkzeug.datastructures.MultiDict
items = iteritems(data)
return sorted(items, key=lambda p: p[0]) | AttributeError | dataset/ETHPy150Open circuits/circuits/circuits/web/parsers/querystring.py/QueryStringParser._sorted_from_obj |
6,873 | def process(self, pair):
key = pair[0]
value = pair[1]
# faster than invoking a regex
try:
key.index("[")
self.parse(key, value)
return
except __HOLE__:
pass
try:
key.index(".")
self.parse(key, value)
return
except ValueError:
pass
self.result[key] = value | ValueError | dataset/ETHPy150Open circuits/circuits/circuits/web/parsers/querystring.py/QueryStringParser.process |
6,874 | def parse(self, key, value):
ref = self.result
tokens = self.tokens(key)
for token in tokens:
token_type, key = token
if token_type == QueryStringToken.ARRAY:
if key not in ref:
ref[key] = []
ref = ref[key]
elif token_type == QueryStringToken.OBJECT:
if key not in ref:
ref[key] = {}
ref = ref[key]
elif token_type == QueryStringToken.KEY:
try:
ref = ref[key]
next(tokens)
# TypeError is for pet[]=lucy&pet[]=ollie
# if the array key is empty a type error will be raised
except (IndexError, __HOLE__, TypeError):
# the index didn't exist
# so we look ahead to see what we are setting
# there is not a next token
# set the value
try:
next_token = next(tokens)
if next_token[0] == QueryStringToken.ARRAY:
ref.append([])
ref = ref[key]
elif next_token[0] == QueryStringToken.OBJECT:
try:
ref[key] = {}
except IndexError:
ref.append({})
ref = ref[key]
except StopIteration:
try:
ref.append(value)
except AttributeError:
ref[key] = value
return | KeyError | dataset/ETHPy150Open circuits/circuits/circuits/web/parsers/querystring.py/QueryStringParser.parse |
6,875 | def tokens(self, key):
buf = ""
for char in key:
if char == "[":
yield QueryStringToken.ARRAY, buf
buf = ""
elif char == ".":
yield QueryStringToken.OBJECT, buf
buf = ""
elif char == "]":
try:
yield QueryStringToken.KEY, int(buf)
buf = ""
except __HOLE__:
yield QueryStringToken.KEY, None
else:
buf = buf + char
if len(buf) > 0:
yield QueryStringToken.KEY, buf
else:
raise StopIteration() | ValueError | dataset/ETHPy150Open circuits/circuits/circuits/web/parsers/querystring.py/QueryStringParser.tokens |
6,876 | @register.simple_tag
def user_avatar(user, secure=False, size=256, rating='pg', default=''):
try:
profile = user.get_profile()
if profile.avatar:
return profile.avatar.url
except SiteProfileNotAvailable:
pass
except __HOLE__:
pass
except AttributeError:
pass
base_url = (secure and 'https://secure.gravatar.com' or
'http://www.gravatar.com')
m = hashlib.md5(user.email)
return '%(base_url)s/avatar/%(hash)s?%(params)s' % dict(
base_url=base_url, hash=m.hexdigest(),
params=urllib.urlencode(dict(
s=size, d=default, r=rating
))
) | ObjectDoesNotExist | dataset/ETHPy150Open mozilla/django-badger/badger/templatetags/badger_tags.py/user_avatar |
6,877 | def scale_image(img_upload, img_max_size):
"""Crop and scale an image file."""
try:
img = Image.open(img_upload)
except __HOLE__:
return None
src_width, src_height = img.size
src_ratio = float(src_width) / float(src_height)
dst_width, dst_height = img_max_size
dst_ratio = float(dst_width) / float(dst_height)
if dst_ratio < src_ratio:
crop_height = src_height
crop_width = crop_height * dst_ratio
x_offset = int(float(src_width - crop_width) / 2)
y_offset = 0
else:
crop_width = src_width
crop_height = crop_width / dst_ratio
x_offset = 0
y_offset = int(float(src_height - crop_height) / 2)
img = img.crop((x_offset, y_offset,
x_offset + int(crop_width), y_offset + int(crop_height)))
img = img.resize((dst_width, dst_height), Image.ANTIALIAS)
if img.mode != "RGB":
img = img.convert("RGB")
new_img = StringIO()
img.save(new_img, "PNG")
img_data = new_img.getvalue()
return ContentFile(img_data) | IOError | dataset/ETHPy150Open lmorchard/badg.us/badgus/profiles/models.py/scale_image |
6,878 | def is_vouched_mozillian(self):
"""Check whether this profile is associated with a vouched
mozillians.org profile"""
MOZILLIANS_API_BASE_URL = constance.config.MOZILLIANS_API_BASE_URL
MOZILLIANS_API_APPNAME = constance.config.MOZILLIANS_API_APPNAME
MOZILLIANS_API_KEY = constance.config.MOZILLIANS_API_KEY
MOZILLIANS_API_CACHE_KEY_PREFIX = constance.config.MOZILLIANS_API_CACHE_KEY_PREFIX
MOZILLIANS_API_CACHE_TIMEOUT = constance.config.MOZILLIANS_API_CACHE_TIMEOUT
if not MOZILLIANS_API_KEY:
logging.warning("'MOZILLIANS_API_KEY' not set up.")
return False
email = self.user.email
# /api/v1/users/?app_name=foobar&app_key=12345&[email protected]
url = '%s/users/?%s' % (MOZILLIANS_API_BASE_URL, urllib.urlencode({
'app_name': MOZILLIANS_API_APPNAME,
'app_key': MOZILLIANS_API_KEY,
'email': email
}))
# Cache the HTTP request to the API to minimize hits
cache_key = '%s:%s' % (MOZILLIANS_API_CACHE_KEY_PREFIX,
hashlib.md5(url.encode('utf-8')).hexdigest())
content = cache.get(cache_key)
if not content:
resp = requests.get(url)
if not resp.status_code == 200:
logging.error("Failed request to mozillians.org API: %s" %
resp.status_code)
return False
else:
content = resp.content
cache.set(cache_key, content, MOZILLIANS_API_CACHE_TIMEOUT)
try:
content = json.loads(content)
except __HOLE__:
logging.error("Failed parsing mozillians.org response")
return False
for obj in content.get('objects', []):
if obj['email'].lower() == email.lower():
return obj['is_vouched']
return False | ValueError | dataset/ETHPy150Open lmorchard/badg.us/badgus/profiles/models.py/UserProfile.is_vouched_mozillian |
6,879 | def estimate_u_multiple_tries(sumLogPi=None, nDoc=0, gamma=1.0, alpha0=1.0,
initu=None, initU=None, approx_grad=False,
fList=[1e7, 1e8, 1e10], **kwargs):
''' Estimate 2K-vector "u" via gradient descent,
gracefully using multiple restarts with progressively weaker tolerances
until one succeeds
Returns
--------
u : 2K-vector of positive parameters
fofu : scalar value of minimization objective
Info : dict
Raises
--------
ValueError with FAILURE in message if all restarts fail
'''
K = sumLogPi.size - 1
if initU is not None:
initu = initU
if initu is not None and not np.allclose(initu[-K:], alpha0):
uList = [initu, None]
else:
uList = [None]
nOverflow = 0
u = None
Info = dict()
msg = ''
for trial, myTuple in enumerate(itertools.product(uList, fList)):
initu, factr = myTuple
try:
u, fofu, Info = estimate_u(sumLogPi, nDoc, gamma, alpha0,
initu=initu, factr=factr, approx_grad=approx_grad)
Info['nRestarts'] = trial
Info['factr'] = factr
Info['msg'] = Info['task']
del Info['grad']
del Info['task']
break
except __HOLE__ as err:
if str(err).count('FAILURE') == 0:
raise err
msg = str(err)
if str(err).count('overflow') > 0:
nOverflow += 1
if u is None:
raise ValueError("FAILURE! " + msg)
Info['nOverflow'] = nOverflow
return u, fofu, Info | ValueError | dataset/ETHPy150Open daeilkim/refinery/refinery/bnpy/bnpy-dev/bnpy/allocmodel/admix/OptimizerForHDPFullVarModel.py/estimate_u_multiple_tries |
6,880 | def estimate_u(sumLogPi=None, nDoc=0, gamma=1.0, alpha0=1.0, initu=None, approx_grad=False, factr=1.0e7, **kwargs):
''' Run gradient optimization to estimate best v for specified problem
Returns
--------
vhat : K-vector of values, 0 < v < 1
fofvhat: objective function value at vhat
Info : dict with info about estimation algorithm
Raises
--------
ValueError on an overflow, any detection of NaN, or failure to converge
'''
sumLogPi = np.squeeze(np.asarray(sumLogPi, dtype=np.float64))
assert sumLogPi.ndim == 1
K = sumLogPi.size - 1
if initu is None:
initu = np.hstack( [np.ones(K), alpha0*np.ones(K)])
assert initu.size == 2*K
initc = np.log(initu)
myFunc = lambda c: objFunc_c(c, sumLogPi, nDoc, gamma, alpha0)
myGrad = lambda c: objGrad_c(c, sumLogPi, nDoc, gamma, alpha0)
with warnings.catch_warnings():
warnings.filterwarnings('error', category=RuntimeWarning,
message='overflow')
try:
chat, fhat, Info = scipy.optimize.fmin_l_bfgs_b(myFunc, initc,
fprime=myGrad, disp=None,
approx_grad=approx_grad,
factr=factr,
**kwargs)
except RuntimeWarning:
raise ValueError("FAILURE: overflow!" )
except __HOLE__:
raise ValueError("FAILURE: NaN found!")
if Info['warnflag'] > 1:
raise ValueError("FAILURE: " + Info['task'])
uhat = np.exp(chat)
Info['initu'] = initu
Info['objFunc'] = lambda u: objFunc_u(u, sumLogPi, nDoc, gamma, alpha0)
Info['gradFunc'] = lambda u: objGrad_u(u, sumLogPi, nDoc, gamma, alpha0)
return uhat, fhat, Info
########################################################### Objective/gradient
########################################################### in terms of u | AssertionError | dataset/ETHPy150Open daeilkim/refinery/refinery/bnpy/bnpy-dev/bnpy/allocmodel/admix/OptimizerForHDPFullVarModel.py/estimate_u |
6,881 | def get_default_if():
""" Returns the default interface """
f = open ('/proc/net/route', 'r')
for line in f:
words = line.split()
dest = words[1]
try:
if (int (dest) == 0):
interf = words[0]
break
except __HOLE__:
pass
return interf | ValueError | dataset/ETHPy150Open rlisagor/pynetlinux/pynetlinux/route.py/get_default_if |
6,882 | def get_default_gw():
""" Returns the default gateway """
octet_list = []
gw_from_route = None
f = open ('/proc/net/route', 'r')
for line in f:
words = line.split()
dest = words[1]
try:
if (int (dest) == 0):
gw_from_route = words[2]
break
except __HOLE__:
pass
if not gw_from_route:
return None
for i in range(8, 1, -2):
octet = gw_from_route[i-2:i]
octet = int(octet, 16)
octet_list.append(str(octet))
gw_ip = ".".join(octet_list)
return gw_ip | ValueError | dataset/ETHPy150Open rlisagor/pynetlinux/pynetlinux/route.py/get_default_gw |
6,883 | def Observer(request):
try:
observer_name = Config().observer_name
except __HOLE__:
observer_name = ''
diag = Diag.objects.filter(name=observer_name).first()
if not diag:
return HttpResponse(json.dumps({"health": ":-X", "time": time.time(), "comp": 0}))
t = time.time()
d = json.loads(diag.backend_register)
comp = d['last_run'] + d['last_duration']*2 + 300
if comp>t:
d['health'] = ':-)'
else:
d['health'] = ':-X'
d['time'] = t
d['comp'] = comp
return HttpResponse(json.dumps(d)) | AttributeError | dataset/ETHPy150Open open-cloud/xos/xos/core/views/observer.py/Observer |
6,884 | def __init__(self, content, attrs=None, filter_type=None, filename=None, **kwargs):
# It looks like there is a bug in django-compressor because it expects
# us to accept attrs.
super(DjangoScssFilter, self).__init__(content, filter_type, filename, **kwargs)
try:
# this is a link tag which means there is an SCSS file being
# referenced.
href = attrs['href']
except __HOLE__:
# this is a style tag which means this is inline SCSS.
self.filename = None
else:
self.filename = href.replace(settings.STATIC_URL, '') | KeyError | dataset/ETHPy150Open fusionbox/django-pyscss/django_pyscss/compressor.py/DjangoScssFilter.__init__ |
6,885 | def _is_valid_email(self, email):
"""
Given an email address, make sure that it is well-formed.
:param str email: The email address to validate.
:rtype: bool
:returns: True if the email address is valid, False if not.
"""
try:
validate_email(email)
return True
except __HOLE__:
return False | ValidationError | dataset/ETHPy150Open duointeractive/sea-cucumber/seacucumber/management/commands/ses_address.py/Command._is_valid_email |
6,886 | def add_color_info(info):
color_map = {
'red': (0xcc, 0x33, 0x33),
'green': (0x33, 0x99, 0x33),
'blue': (0x33, 0x66, 0x99),
'yellow': (0xcc, 0xcc, 0x33),
}
color_name = info.get('default_image_color', 'blue')
try:
info['_color'] = color_map[color_name]
except __HOLE__:
sys.exit("Error: color '%s' not defined" % color_name) | KeyError | dataset/ETHPy150Open conda/constructor/constructor/imaging.py/add_color_info |
6,887 | def IsSubclass(candidate, parent_class):
"""Calls issubclass without raising an exception.
Args:
candidate: A candidate to check if a subclass.
parent_class: A class or tuple of classes representing a potential parent.
Returns:
A boolean indicating whether or not candidate is a subclass of parent_class.
"""
try:
return issubclass(candidate, parent_class)
except __HOLE__:
return False | TypeError | dataset/ETHPy150Open GoogleCloudPlatform/endpoints-proto-datastore/endpoints_proto_datastore/utils.py/IsSubclass |
6,888 | def DatetimeValueFromString(value):
"""Converts a serialized datetime string to the native type.
Args:
value: The string value to be deserialized.
Returns:
A datetime.datetime/date/time object that was deserialized from the string.
Raises:
TypeError: if the value can not be deserialized to one of the three
datetime types.
"""
try:
return datetime.datetime.strptime(value, TIME_STRING_FORMAT).time()
except __HOLE__:
pass
try:
return datetime.datetime.strptime(value, DATE_STRING_FORMAT).date()
except ValueError:
pass
try:
return datetime.datetime.strptime(value, DATETIME_STRING_FORMAT)
except ValueError:
pass
raise TypeError('Could not deserialize timestamp: %s.' % (value,)) | ValueError | dataset/ETHPy150Open GoogleCloudPlatform/endpoints-proto-datastore/endpoints_proto_datastore/utils.py/DatetimeValueFromString |
6,889 | def old_changelist_view(self, request, extra_context=None):
"The 'change list' admin view for this model."
from django.contrib.admin.views.main import ERROR_FLAG
from django.core.exceptions import PermissionDenied
from django.utils.encoding import force_text
from django.utils.translation import ungettext
opts = self.model._meta
app_label = opts.app_label
if not self.has_change_permission(request, None):
raise PermissionDenied
# Check actions to see if any are available on this changelist
actions = self.get_actions(request)
# Remove action checkboxes if there aren't any actions available.
list_display = list(self.list_display)
if not actions:
try:
list_display.remove('action_checkbox')
except __HOLE__:
pass
try:
if django.VERSION[1] < 4:
params = (
request, self.model, list_display,
self.list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_editable, self)
else:
params = (
request, self.model, list_display,
self.list_display_links, self.list_filter, self.date_hierarchy,
self.search_fields, self.list_select_related,
self.list_per_page, self.list_max_show_all,
self.list_editable, self)
cl = TreeChangeList(*params)
except IncorrectLookupParameters:
# Wacky lookup parameters were given, so redirect to the main
# changelist page, without parameters, and pass an 'invalid=1'
# parameter via the query string. If wacky parameters were given and
# the 'invalid=1' parameter was already in the query string, something
# is screwed up with the database, so display an error page.
if ERROR_FLAG in list(request.GET.keys()):
return render_to_response(
'admin/invalid_setup.html', {'title': _('Database error')})
return HttpResponseRedirect(request.path + '?' + ERROR_FLAG + '=1')
# If the request was POSTed, this might be a bulk action or a bulk edit.
# Try to look up an action first, but if this isn't an action the POST
# will fall through to the bulk edit check, below.
if actions and request.method == 'POST':
response = self.response_action(request, queryset=cl.get_queryset())
if response:
return response
# If we're allowing changelist editing, we need to construct a formset
# for the changelist given all the fields to be edited. Then we'll
# use the formset to validate/process POSTed data.
formset = cl.formset = None
# Handle POSTed bulk-edit data.
if request.method == "POST" and self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(
request.POST, request.FILES, queryset=cl.result_list
)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
form.save_m2m()
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
if changecount == 1:
name = force_text(opts.verbose_name)
else:
name = force_text(opts.verbose_name_plural)
msg = ungettext(
"%(count)s %(name)s was changed successfully.",
"%(count)s %(name)s were changed successfully.",
changecount) % {'count': changecount,
'name': name,
'obj': force_text(obj)}
self.message_user(request, msg)
return HttpResponseRedirect(request.get_full_path())
# Handle GET -- construct a formset for display.
elif self.list_editable:
FormSet = self.get_changelist_formset(request)
formset = cl.formset = FormSet(queryset=cl.result_list)
# Build the list of media to be used by the formset.
if formset:
media = self.media + formset.media
else:
media = self.media
# Build the action form and populate it with available actions.
if actions:
action_form = self.action_form(auto_id=None)
action_form.fields['action'].choices = self.get_action_choices(request)
else:
action_form = None
context = {
'title': cl.title,
'is_popup': cl.is_popup,
'cl': cl,
'media': media,
'has_add_permission': self.has_add_permission(request),
'app_label': app_label,
'action_form': action_form,
'actions_on_top': self.actions_on_top,
'actions_on_bottom': self.actions_on_bottom,
}
if django.VERSION[1] < 4:
context['root_path'] = self.admin_site.root_path
else:
selection_note_all = ungettext('%(total_count)s selected', 'All %(total_count)s selected', cl.result_count)
context.update({
'module_name': force_text(opts.verbose_name_plural),
'selection_note': _('0 of %(cnt)s selected') % {'cnt': len(cl.result_list)},
'selection_note_all': selection_note_all % {'total_count': cl.result_count},
})
context.update(extra_context or {})
context_instance = template.RequestContext(
request,
context
)
return render_to_response(self.change_list_template or [
'admin/%s/%s/change_list.html' % (app_label, opts.object_name.lower()),
'admin/%s/change_list.html' % app_label,
'admin/change_list.html'
], context=context_instance) | ValueError | dataset/ETHPy150Open callowayproject/django-categories/categories/editor/tree_editor.py/TreeEditor.old_changelist_view |
6,890 | def arch():
"""
Return the system's architecture according to dpkg or rpm.
"""
try:
p = subprocess.Popen(['dpkg', '--print-architecture'],
close_fds=True, stdout=subprocess.PIPE)
except __HOLE__ as e:
p = subprocess.Popen(['rpm', '--eval', '%_arch'],
close_fds=True, stdout=subprocess.PIPE)
stdout, stderr = p.communicate()
if 0 != p.returncode:
return None
return stdout.rstrip() | OSError | dataset/ETHPy150Open devstructure/blueprint/blueprint/util.py/arch |
6,891 | def lsb_release_codename():
"""
Return the OS release's codename.
"""
if hasattr(lsb_release_codename, '_cache'):
return lsb_release_codename._cache
try:
p = subprocess.Popen(['lsb_release', '-c'], stdout=subprocess.PIPE)
except __HOLE__:
lsb_release_codename._cache = None
return lsb_release_codename._cache
stdout, stderr = p.communicate()
if 0 != p.returncode:
lsb_release_codename._cache = None
return lsb_release_codename._cache
match = re.search(r'\t(\w+)$', stdout)
if match is None:
lsb_release_codename._cache = None
return lsb_release_codename._cache
lsb_release_codename._cache = match.group(1)
return lsb_release_codename._cache
# Patterns for determining which Upstart services should be included, based
# on the events used to start them. | OSError | dataset/ETHPy150Open devstructure/blueprint/blueprint/util.py/lsb_release_codename |
6,892 | def parse_service(pathname):
"""
Parse a potential service init script or config file into the
manager and service name or raise `ValueError`. Use the Upstart
"start on" stanzas and SysV init's LSB headers to restrict services to
only those that start at boot and run all the time.
"""
dirname, basename = os.path.split(pathname)
if '/etc/init' == dirname:
service, ext = os.path.splitext(basename)
# Ignore extraneous files in /etc/init.
if '.conf' != ext:
raise ValueError('not an Upstart config')
# Ignore services that don't operate on the (faked) main runlevels.
try:
content = open(pathname).read()
except __HOLE__:
raise ValueError('not a readable Upstart config')
if not (pattern_upstart_1.search(content) \
or pattern_upstart_2.search(content)):
raise ValueError('not a running service')
return ('upstart', service)
elif '/etc/init.d' == dirname or '/etc/rc.d/init.d' == dirname:
# Let Upstart handle its services.
if os.path.islink(pathname) \
and '/lib/init/upstart-job' == os.readlink(pathname):
raise ValueError('proxy for an Upstart config')
# Ignore services that don't operate on the main runlevels.
try:
content = open(pathname).read()
except IOError:
raise ValueError('not a readable SysV init script')
if not re.search(r'(?:Default-Start|chkconfig):\s*[2345]', content):
raise ValueError('not a running service')
return ('sysvinit', basename)
else:
raise ValueError('not a service') | IOError | dataset/ETHPy150Open devstructure/blueprint/blueprint/util.py/parse_service |
6,893 | def unicodeme(s):
if isinstance(s, unicode):
return s
for encoding in ('utf_8', 'latin_1'):
try:
return unicode(s, encoding)
except __HOLE__:
pass
# TODO Issue a warning?
return s | UnicodeDecodeError | dataset/ETHPy150Open devstructure/blueprint/blueprint/util.py/unicodeme |
6,894 | def add_request_data(self, issue, request_data):
"""Add parsed request data to the node
:param issue: Issue as XML document
:param request_data: HTTP request data
"""
request = HTTPRequestParser(request_data)
request.parse_data()
request.set_http_headers()
headers = request.get_headers()
# Add request attributes method like method
try:
xml_request_node = issue.xpath("TestProbe/HTTP/Request")[0]
xml_request_node.attrib['method'] = request.get_method()
xml_request_node.attrib['version'] = request.get_request_version()
except __HOLE__:
log.error("Index error")
# Add parsed data
try:
xml_parsed_traffic = issue.xpath("TestProbe/HTTP/Request/Parsed")[0]
except IndexError:
Log.error("Index error")
# Iterate through headers and create new XML nodes
for h in headers.keys():
for v in headers[h]:
# Create new sub-element
header_node = etree.Element('Header', name=h, value=v)
xml_parsed_traffic.append(header_node)
# Add request data node
request_data_node = etree.Element('Data')
request_data_node.text = etree.CDATA(request.get_request_data())
xml_parsed_traffic.append(request_data_node) | IndexError | dataset/ETHPy150Open dorneanu/appvulnms/src/core/parser/AppVulnXMLParser.py/AppVulnXMLParser.add_request_data |
6,895 | def add_response_data(self, issue, response_data, binary_data=False):
"""Add parsed response data to the node
:param issue: Issue as XML document
:param response_data: HTTP response data
:param binary_data: Flag indicating whether responde_data is binary
"""
response = HTTPResponseParser(response_data, binary_data)
response.parse_data()
response.set_http_headers()
headers = response.get_headers()
# Add response metadata
try:
xml_response_node = issue.xpath("TestProbe/HTTP/Response")[0]
xml_response_node.attrib['version'] = response.get_response_version()
xml_response_node.attrib['status'] = response.get_status()
xml_response_node.attrib['reason'] = response.get_reason()
except __HOLE__:
log.error("Index error")
# Add response data
try:
xml_parsed_traffic = issue.xpath("TestProbe/HTTP/Response/Parsed")[0]
except IndexError:
Log.error("Index error")
# Iterate through headers and create new XML nodes
for h in headers.keys():
for v in headers[h]:
# Create new sub-element
header_node = etree.Element('Header', name=h, value=v)
xml_parsed_traffic.append(header_node)
# Add request data node
request_data_node = etree.Element('Data')
request_data_node.text = etree.CDATA(response.get_response_data())
request_data_node.attrib['base64'] = str(binary_data)
xml_parsed_traffic.append(request_data_node) | IndexError | dataset/ETHPy150Open dorneanu/appvulnms/src/core/parser/AppVulnXMLParser.py/AppVulnXMLParser.add_response_data |
6,896 | @classmethod
def ensure_value_type(cls, value_type, parameter=None):
"""Raises a :exc:`TypeError` if the given ``value_type`` is not
an instance of nor a subclass of the class.
.. sourcecode:: pycon
>>> Integer.ensure_value_type(Bulk
... ) # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: expected a subtype of sider.types.Integer,
but sider.types.Bulk was passed
>>> Integer.ensure_value_type(UnicodeString()
... ) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Traceback (most recent call last):
...
TypeError: expected an instance of sider.types.Integer,
but <sider.types.UnicodeString object at ...>
was passed
>>> Bulk.ensure_value_type(1)
Traceback (most recent call last):
...
TypeError: expected a type, not 1
Otherwise it simply returns an instance of
the given ``value_type``.
.. sourcecode:: pycon
>>> Bulk.ensure_value_type(Bulk) # doctest: +ELLIPSIS
<sider.types.Bulk object at ...>
>>> Bulk.ensure_value_type(ByteString) # doctest: +ELLIPSIS
<sider.types.ByteString object at ...>
>>> ByteString.ensure_value_type(ByteString
... ) # doctest: +ELLIPSIS
<sider.types.ByteString object at ...>
>>> bytestr = ByteString()
>>> ByteString.ensure_value_type(bytestr) # doctest: +ELLIPSIS
<sider.types.ByteString object at ...>
If an optional ``parameter`` name has present, the error message
becomes better.
.. sourcecode:: pycon
>>> Integer.ensure_value_type(Bulk,
... parameter='argname') # doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
TypeError: argname must be a subtype of sider.types.Integer,
but sider.types.Bulk was passed
>>> Integer.ensure_value_type(UnicodeString(),
... parameter='argname'
... ) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Traceback (most recent call last):
...
TypeError: argname must be an instance of sider.types.Integer,
but <sider.types.UnicodeString object at ...>
was passed
>>> Bulk.ensure_value_type(1, parameter='argname')
Traceback (most recent call last):
...
TypeError: argname must be a type, not 1
:param value_type: a type expected to be a subtype of the class
:type value_type: :class:`Value`, :class:`type`
:param parameter: an optional parameter name.
if present the error message becomes better
:type parameter: :class:`str`
:raises exceptions.TypeError:
if the given ``subtype`` is not a subclass of the class
"""
typename = '.'.join((cls.__module__, cls.__name__))
if isinstance(value_type, type):
subname = '.'.join((value_type.__module__, value_type.__name__))
if issubclass(value_type, cls):
try:
return value_type()
except __HOLE__ as e:
raise TypeError(
'{0} must implement the constructor which takes '
'no arguments; {1}'.format(subname, e)
)
else:
if parameter:
msg = '{0} must be a subtype of {1}, but {2} was passed'
else:
msg = 'expected a subtype of {1}, but {2} was passed'
raise TypeError(msg.format(parameter, typename, subname))
elif isinstance(value_type, Value):
if isinstance(value_type, cls):
return value_type
else:
if parameter:
msg = '{0} must be an instance of {1}, ' \
'but {2!r} was passed'
else:
msg = 'expected an instance of {1}, but {2!r} was passed'
raise TypeError(msg.format(parameter, typename, value_type))
else:
if parameter:
msg = '{0} must be a type, not {1!r}'
else:
msg = 'expected a type, not {1!r}'
raise TypeError(msg.format(parameter, value_type)) | TypeError | dataset/ETHPy150Open dahlia/sider/sider/types.py/Value.ensure_value_type |
6,897 | def delete_file(self, name):
try:
self.fs.delete_file(name+"_test.pst")
except __HOLE__:
pass | OSError | dataset/ETHPy150Open dokipen/whoosh/tests/test_postings.py/TestReadWrite.delete_file |
6,898 | def _load_yaml_mapping(filename):
try:
f = open(filename)
try:
yaml_versions = yaml_load(f)
finally:
f.close()
except __HOLE__:
yaml_versions = { }
return yaml_versions
####################################################################################################################### | IOError | dataset/ETHPy150Open turbulenz/turbulenz_local/turbulenz_local/helpers.py/_load_yaml_mapping |
6,899 | def validate(self, value):
try:
return int(value)
except (ValueError, __HOLE__):
raise ValidationError(_('Not an integer')) | TypeError | dataset/ETHPy150Open fiam/wapi/validators.py/IntegerValidator.validate |