Unnamed: 0 (int64, 0-10k) | function (string, lengths 79-138k) | label (string, 20 classes) | info (string, lengths 42-261) |
---|---|---|---|
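Each row below pairs a Python function in which the caught exception type has been masked as `__HOLE__` (column `function`) with the exception class that fills the hole (column `label`) and the originating repository path (column `info`). As a minimal sketch of how such an export might be inspected, assuming a CSV dump with these four columns (the filename is hypothetical):

```python
# Minimal sketch (assumption: the table was exported to CSV; the filename
# is made up, only the four column names come from the header above).
import pandas as pd

df = pd.read_csv("ethpy150_exception_dataset.csv")
for _, row in df.head(3).iterrows():
    # 'function' is source code containing a masked __HOLE__ token,
    # 'label' is the exception class that belongs in the hole,
    # 'info' points at the originating repository file.
    print(row["label"], "->", row["info"])
```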
2,000 | def _ParseManifest(self, node_list):
for node in itertools.chain(*node_list):
if node.nodeName == 'remote':
remote = self._ParseRemote(node)
if remote:
if remote.name in self._remotes:
if remote != self._remotes[remote.name]:
raise ManifestParseError(
'remote %s already exists with different attributes' %
(remote.name))
else:
self._remotes[remote.name] = remote
for node in itertools.chain(*node_list):
if node.nodeName == 'default':
new_default = self._ParseDefault(node)
if self._default is None:
self._default = new_default
elif new_default != self._default:
raise ManifestParseError('duplicate default in %s' %
(self.manifestFile))
if self._default is None:
self._default = _Default()
for node in itertools.chain(*node_list):
if node.nodeName == 'notice':
if self._notice is not None:
raise ManifestParseError(
'duplicate notice in %s' %
(self.manifestFile))
self._notice = self._ParseNotice(node)
for node in itertools.chain(*node_list):
if node.nodeName == 'manifest-server':
url = self._reqatt(node, 'url')
if self._manifest_server is not None:
raise ManifestParseError(
'duplicate manifest-server in %s' %
(self.manifestFile))
self._manifest_server = url
def recursively_add_projects(project):
projects = self._projects.setdefault(project.name, [])
if project.relpath is None:
raise ManifestParseError(
'missing path for %s in %s' %
(project.name, self.manifestFile))
if project.relpath in self._paths:
raise ManifestParseError(
'duplicate path %s in %s' %
(project.relpath, self.manifestFile))
self._paths[project.relpath] = project
projects.append(project)
for subproject in project.subprojects:
recursively_add_projects(subproject)
for node in itertools.chain(*node_list):
if node.nodeName == 'project':
project = self._ParseProject(node)
recursively_add_projects(project)
if node.nodeName == 'extend-project':
name = self._reqatt(node, 'name')
if name not in self._projects:
raise ManifestParseError('extend-project element specifies non-existent '
'project: %s' % name)
path = node.getAttribute('path')
groups = node.getAttribute('groups')
if groups:
groups = self._ParseGroups(groups)
for p in self._projects[name]:
if path and p.relpath != path:
continue
if groups:
p.groups.extend(groups)
if node.nodeName == 'repo-hooks':
# Get the name of the project and the (space-separated) list of enabled.
repo_hooks_project = self._reqatt(node, 'in-project')
enabled_repo_hooks = self._reqatt(node, 'enabled-list').split()
# Only one project can be the hooks project
if self._repo_hooks_project is not None:
raise ManifestParseError(
'duplicate repo-hooks in %s' %
(self.manifestFile))
# Store a reference to the Project.
try:
repo_hooks_projects = self._projects[repo_hooks_project]
except __HOLE__:
raise ManifestParseError(
'project %s not found for repo-hooks' %
(repo_hooks_project))
if len(repo_hooks_projects) != 1:
raise ManifestParseError(
'internal error parsing repo-hooks in %s' %
(self.manifestFile))
self._repo_hooks_project = repo_hooks_projects[0]
# Store the enabled hooks in the Project object.
self._repo_hooks_project.enabled_repo_hooks = enabled_repo_hooks
if node.nodeName == 'remove-project':
name = self._reqatt(node, 'name')
if name not in self._projects:
raise ManifestParseError('remove-project element specifies non-existent '
'project: %s' % name)
for p in self._projects[name]:
del self._paths[p.relpath]
del self._projects[name]
# If the manifest removes the hooks project, treat it as if it deleted
# the repo-hooks element too.
if self._repo_hooks_project and (self._repo_hooks_project.name == name):
self._repo_hooks_project = None | KeyError | dataset/ETHPy150Open esrlabs/git-repo/manifest_xml.py/XmlManifest._ParseManifest |
2,001 | def _ParseProject(self, node, parent = None, **extra_proj_attrs):
"""
reads a <project> element from the manifest file
"""
name = self._reqatt(node, 'name')
if parent:
name = self._JoinName(parent.name, name)
remote = self._get_remote(node)
if remote is None:
remote = self._default.remote
if remote is None:
raise ManifestParseError("no remote for project %s within %s" %
(name, self.manifestFile))
revisionExpr = node.getAttribute('revision') or remote.revision
if not revisionExpr:
revisionExpr = self._default.revisionExpr
if not revisionExpr:
raise ManifestParseError("no revision for project %s within %s" %
(name, self.manifestFile))
path = node.getAttribute('path')
if not path:
path = name
if path.startswith('/'):
raise ManifestParseError("project %s path cannot be absolute in %s" %
(name, self.manifestFile))
rebase = node.getAttribute('rebase')
if not rebase:
rebase = True
else:
rebase = rebase.lower() in ("yes", "true", "1")
sync_c = node.getAttribute('sync-c')
if not sync_c:
sync_c = False
else:
sync_c = sync_c.lower() in ("yes", "true", "1")
sync_s = node.getAttribute('sync-s')
if not sync_s:
sync_s = self._default.sync_s
else:
sync_s = sync_s.lower() in ("yes", "true", "1")
clone_depth = node.getAttribute('clone-depth')
if clone_depth:
try:
clone_depth = int(clone_depth)
if clone_depth <= 0:
raise ValueError()
except __HOLE__:
raise ManifestParseError('invalid clone-depth %s in %s' %
(clone_depth, self.manifestFile))
dest_branch = node.getAttribute('dest-branch') or self._default.destBranchExpr
upstream = node.getAttribute('upstream')
groups = ''
if node.hasAttribute('groups'):
groups = node.getAttribute('groups')
groups = self._ParseGroups(groups)
if parent is None:
relpath, worktree, gitdir, objdir = self.GetProjectPaths(name, path)
else:
relpath, worktree, gitdir, objdir = \
self.GetSubprojectPaths(parent, name, path)
default_groups = ['all', 'name:%s' % name, 'path:%s' % relpath]
groups.extend(set(default_groups).difference(groups))
if self.IsMirror and node.hasAttribute('force-path'):
if node.getAttribute('force-path').lower() in ("yes", "true", "1"):
gitdir = os.path.join(self.topdir, '%s.git' % path)
project = Project(manifest = self,
name = name,
remote = remote.ToRemoteSpec(name),
gitdir = gitdir,
objdir = objdir,
worktree = worktree,
relpath = relpath,
revisionExpr = revisionExpr,
revisionId = None,
rebase = rebase,
groups = groups,
sync_c = sync_c,
sync_s = sync_s,
clone_depth = clone_depth,
upstream = upstream,
parent = parent,
dest_branch = dest_branch,
**extra_proj_attrs)
for n in node.childNodes:
if n.nodeName == 'copyfile':
self._ParseCopyFile(project, n)
if n.nodeName == 'linkfile':
self._ParseLinkFile(project, n)
if n.nodeName == 'annotation':
self._ParseAnnotation(project, n)
if n.nodeName == 'project':
project.subprojects.append(self._ParseProject(n, parent = project))
return project | ValueError | dataset/ETHPy150Open esrlabs/git-repo/manifest_xml.py/XmlManifest._ParseProject |
2,002 | def build_config(self, config):
try:
config.setdefaults('sound', {
'musicOption': '0',
'soundsOption': '1'
})
config.setdefaults('game', {
'hardcoreOption': '0'
})
except __HOLE__:
config.setdefaults('sound', {
'musicOption': False,
'soundsOption': True
})
config.setdefaults('game', {
'hardcoreOption': False
}) | TypeError | dataset/ETHPy150Open oddbitdev/hexTap/main.py/HexTap.build_config |
2,003 | def sample_length(self, prob_end):
"""
Sample length of a game.
This is using inverse random sample on a probability density function
<https://en.wikipedia.org/wiki/Probability_density_function> given by:
f(n) = p_end * (1 - p_end) ^ (n - 1)
(So the probability of length n is given by f(n))
Which gives cumulative distribution function
<https://en.wikipedia.org/wiki/Cumulative_distribution_function>:
F(n) = 1 - (1 - p_end) ^ n
(So the probability of length less than or equal to n is given by F(n))
Which gives for given x = F(n) (ie the random sample) gives n:
n = ceil((ln(1-x)/ln(1-p_end)))
This approach of sampling from a distribution is called inverse
transform sampling
<https://en.wikipedia.org/wiki/Inverse_transform_sampling>.
Note that this corresponds to sampling at the end of every turn whether
or not the Match ends.
"""
try:
x = random.random()
return int(ceil(log(1 - x) / log(1 - self.prob_end)))
except ZeroDivisionError:
return float("inf")
except __HOLE__:
return 1 | ValueError | dataset/ETHPy150Open Axelrod-Python/Axelrod/axelrod/match_generator.py/ProbEndRoundRobinMatches.sample_length |
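Aside on the row above (not part of the dataset): the docstring's inverse-transform derivation can be checked empirically, because the sampled lengths follow a geometric distribution whose mean is 1/p_end. A standalone sketch with invented names:

```python
# Standalone re-implementation of the sampling step, for illustration only.
import random
from math import ceil, log

def sample_match_length(prob_end):
    """Draw a match length via inverse transform sampling."""
    x = random.random()
    try:
        return int(ceil(log(1 - x) / log(1 - prob_end)))
    except ZeroDivisionError:   # prob_end == 0: log(1 - 0) == 0
        return float("inf")
    except ValueError:          # prob_end == 1: log(0) is undefined
        return 1

# A geometric distribution with parameter p has mean 1/p, so the empirical
# mean below should come out close to 4.
samples = [sample_match_length(0.25) for _ in range(100_000)]
print(sum(samples) / len(samples))
```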
2,004 | def setup_module(module):
from nose import SkipTest
try:
import sklearn
except __HOLE__:
raise SkipTest("scikit-learn is not installed") | ImportError | dataset/ETHPy150Open nltk/nltk/nltk/classify/scikitlearn.py/setup_module |
2,005 | def v1_deprecated(warning=None):
"""Shows a warning if ENABLE_V1_WARNINGS is True.
Function decorator used to mark methods used in v1 classes which
may be removed in future versions of the library.
"""
warning = warning or ''
# This closure is what is returned from the deprecated function.
def mark_deprecated(f):
# The deprecated_function wraps the actual call to f.
def optional_warn_function(*args, **kwargs):
if ENABLE_V1_WARNINGS:
warnings.warn(warning, DeprecationWarning, stacklevel=2)
return f(*args, **kwargs)
# Preserve the original name to avoid masking all decorated functions as
# 'deprecated_function'
try:
optional_warn_function.func_name = f.func_name
except __HOLE__:
pass # In Python2.3 we can't set the func_name
return optional_warn_function
return mark_deprecated | TypeError | dataset/ETHPy150Open acil-bwh/SlicerCIP/Scripted/attic/PicasaSnap/atom/__init__.py/v1_deprecated |
2,006 | def deprecated(warning=None):
"""Decorator to raise warning each time the function is called.
Args:
warning: The warning message to be displayed as a string (optional).
"""
warning = warning or ''
# This closure is what is returned from the deprecated function.
def mark_deprecated(f):
# The deprecated_function wraps the actual call to f.
def deprecated_function(*args, **kwargs):
warnings.warn(warning, DeprecationWarning, stacklevel=2)
return f(*args, **kwargs)
# Preserve the original name to avoid masking all decorated functions as
# 'deprecated_function'
try:
deprecated_function.func_name = f.func_name
except __HOLE__:
# Setting the func_name is not allowed in Python2.3.
pass
return deprecated_function
return mark_deprecated | TypeError | dataset/ETHPy150Open acil-bwh/SlicerCIP/Scripted/attic/PicasaSnap/atom/__init__.py/deprecated |
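Editorial aside on the two decorator rows above: the try/except around `func_name` exists only for very old Python 2 releases. On modern Python the same warn-on-call decorator is usually written with `functools.wraps`, which copies `__name__`, `__doc__`, and other metadata automatically. A sketch:

```python
import functools
import warnings

def deprecated(warning=''):
    """Warn each time the decorated function is called."""
    def mark_deprecated(f):
        @functools.wraps(f)  # preserves the wrapped function's metadata
        def deprecated_function(*args, **kwargs):
            warnings.warn(warning, DeprecationWarning, stacklevel=2)
            return f(*args, **kwargs)
        return deprecated_function
    return mark_deprecated
```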
2,007 | def update_machine(self, name_or_id, chassis_uuid=None, driver=None,
driver_info=None, name=None, instance_info=None,
instance_uuid=None, properties=None):
"""Update a machine with new configuration information
A user-friendly method to perform updates of a machine, in whole or
part.
:param string name_or_id: A machine name or UUID to be updated.
:param string chassis_uuid: Assign a chassis UUID to the machine.
NOTE: As of the Kilo release, this value
cannot be changed once set. If a user
attempts to change this value, then the
Ironic API, as of Kilo, will reject the
request.
:param string driver: The driver name for controlling the machine.
:param dict driver_info: The dictionary defining the configuration
that the driver will utilize to control
the machine. Permutations of this are
dependent upon the specific driver utilized.
:param string name: A human relatable name to represent the machine.
:param dict instance_info: A dictionary of configuration information
that conveys to the driver how the host
is to be configured when deployed to the machine.
:param string instance_uuid: A UUID value representing the instance
that the deployed machine represents.
:param dict properties: A dictionary defining the properties of a
machine.
:raises: OpenStackCloudException on operation error.
:returns: Dictionary containing a machine sub-dictionary consisting
of the updated data returned from the API update operation,
and a list named changes which contains all of the API paths
that received updates.
"""
machine = self.get_machine(name_or_id)
if not machine:
raise OpenStackCloudException(
"Machine update failed to find Machine: %s. " % name_or_id)
machine_config = {}
new_config = {}
try:
if chassis_uuid:
machine_config['chassis_uuid'] = machine['chassis_uuid']
new_config['chassis_uuid'] = chassis_uuid
if driver:
machine_config['driver'] = machine['driver']
new_config['driver'] = driver
if driver_info:
machine_config['driver_info'] = machine['driver_info']
new_config['driver_info'] = driver_info
if name:
machine_config['name'] = machine['name']
new_config['name'] = name
if instance_info:
machine_config['instance_info'] = machine['instance_info']
new_config['instance_info'] = instance_info
if instance_uuid:
machine_config['instance_uuid'] = machine['instance_uuid']
new_config['instance_uuid'] = instance_uuid
if properties:
machine_config['properties'] = machine['properties']
new_config['properties'] = properties
except __HOLE__ as e:
self.log.debug(
"Unexpected machine response missing key %s [%s]" % (
e.args[0], name_or_id))
raise OpenStackCloudException(
"Machine update failed - machine [%s] missing key %s. "
"Potential API issue."
% (name_or_id, e.args[0]))
try:
patch = jsonpatch.JsonPatch.from_diff(machine_config, new_config)
except Exception as e:
raise OpenStackCloudException(
"Machine update failed - Error generating JSON patch object "
"for submission to the API. Machine: %s Error: %s"
% (name_or_id, str(e)))
with _utils.shade_exceptions(
"Machine update failed - patch operation failed on Machine "
"{node}".format(node=name_or_id)
):
if not patch:
return dict(
node=machine,
changes=None
)
else:
machine = self.patch_machine(machine['uuid'], list(patch))
change_list = []
for change in list(patch):
change_list.append(change['path'])
return dict(
node=machine,
changes=change_list
) | KeyError | dataset/ETHPy150Open openstack-infra/shade/shade/operatorcloud.py/OperatorCloud.update_machine |
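A small illustration of the patch-generation step in the row above, using the same third-party `jsonpatch` package that the code imports (the example dicts are invented): diffing the old and new configuration yields the list of API paths the update would touch.

```python
import jsonpatch

machine_config = {'driver': 'ipmi', 'properties': {'ram': 1024}}
new_config = {'driver': 'ipmi', 'properties': {'ram': 2048}}

# JsonPatch.from_diff builds an RFC 6902 patch; iterating it yields the
# individual operations, each carrying the JSON pointer in 'path'.
patch = jsonpatch.JsonPatch.from_diff(machine_config, new_config)
print([change['path'] for change in patch])   # e.g. ['/properties/ram']
```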
2,008 | @split( PARAMS["filename_vcf"], "*.pileup.gz" )
def buildPileups( infile, outfile ):
'''build samtools pileup formatted files from vcf formatted files.
The column to strain mapping are determined dynamically.
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT 129P2 129S1 129S5 AKR A_J BALB C3H C57BL CAST CBA DBA LP_J NOD NZO PWK SPRET WSB
#CHROM POS ID REF ALT QUAL FILTER INFO FORMAT 129P2 129S5 AKR C3H CAST DBA NOD PWK WSB 129S1 A_J BALB C57BL CBA LP_J NZO SPRET
The parser is incomplete (only looks at unphased data, etc.)
IT ALSO IGNORES HETEROZYGOUS CALLS.
Both vcf and pileup employ 1-based coordinate systems. Adds "chr" prefix.
This method applies two potential filters:
1. filename_refseq_filter:
remove all variants not in refseq regions
2. filename_snp_filter:
remove all SNPs in a blacklist. Note that the code
assumes that the blacklist is not too large.
'''
outfiles = IOTools.FilePool( "mouse%s.pileup.gz" )
if "filename_snp_filter" in PARAMS:
def _defdict(): return collections.defaultdict( list )
filter_snps = collections.defaultdict( _defdict )
x = 0
for line in IOTools.openFile(PARAMS["filename_snp_filter"], "r" ):
if line.startswith("track"):continue
if line.startswith("#"):continue
data = line[:-1].split("\t")
track, contig, pos = data[0], data[1], int(data[2])
track = track[len("mouse"):]
filter_snps[track][contig].append( pos )
x += 1
E.info("removing %i false positive SNPs" % x )
else:
filter_snps = None
if "filename_refseq_filter" in PARAMS:
E.info( "reading segment filter")
intervals = GTF.readAndIndex( GTF.iterator( IOTools.openFile(PARAMS["filename_refseq_filter"], "r") ) )
E.info( "read segment filter")
else:
intervals = None
inf = gzip.open(infile,"r")
headers = []
ninput = 0
counts = E.Counter()
for line in inf:
data = line[:-1].split("\t")
if line.startswith("#CHROM"):
if not headers: headers = data[9:]
continue
elif line.startswith("#"):
continue
contig, pos, ref = data[0], data[1], data[3]
pos = int(pos)
variants = [ref]
variants.extend( data[4].split(",") )
counts.input += 1
contig = "chr%s" % contig
if intervals:
if not intervals.contains( contig, pos-1, pos ):
counts.filter += 1
continue
for h, genotype_info in zip(headers, data[9:]):
# no variant for this strain - skip
if genotype_info == "." or genotype_info.startswith("./."): continue
# determine the genotype base - this is a hard-coded order
# revise if input file formats change.
consensus_quality, genotype_quality, read_depth = "0", "0", "0"
dd = genotype_info.split(":")
if len(dd) == 5:
genotype, mapping_quality, hcg, genotype_quality, read_depth = dd
if hcg != "1": continue
elif len(dd) == 4:
genotype, mapping_quality, genotype_quality, read_depth = dd
elif len(dd) == 2:
genotype, genotype_quality = dd
elif len(dd) == 1:
genotype = dd[0]
else:
raise ValueError( "parsing error for %s: line=%s" % (genotype_info, line) )
genotype = genotype.split("/")
if filter_snps:
if pos-1 in filter_snps[h][contig]:
counts.filtered_snps += 1
continue
# ignore heterozygous calls
if len(set(genotype)) != 1: continue
genotype = [ variants[int(x)] for x in genotype ]
lengths = [len(x) for x in genotype] + [len(ref)]
is_snp = len( set(lengths) ) == 1 and lengths[0] == 1
# skip genotypes for which no call can be made
if "." in genotype: continue
if is_snp:
genotype = "".join(genotype)
# skip wild type
if genotype == "%s%s" % (ref,ref):
continue
outfiles.write( h,
"\t".join( map(str, (
contig,
pos,
ref,
Genomics.encodeGenotype( genotype ),
consensus_quality,
genotype_quality,
mapping_quality,
read_depth,
genotype,
"<" * len(genotype) ) ) ) + "\n" )
else:
def getPrefix( s1, s2 ):
'''get common prefix of strings s1 and s2.'''
n = min( len( s1), len( s2 ) )
predix = []
for x in range( n ):
if s1[x] != s2[x]: return s1[:x]
return s1[:n]
def getSuffix( s1, s2 ):
'''get common suffix of strings s1 and s2.'''
n = min( len( s1), len( s2 ) )
predix = []
if s1[-1] != s2[-1]: return ""
for x in range( -2, -n - 1, -1 ):
if s1[x] != s2[x]: return s1[x+1:]
return s1[-n:]
def getGenotype( variant, ref ):
if variant == ref: return "*", 0
if len(ref) > len(variant):
# is a deletion
if ref.startswith(variant):
return "-%s" % ref[len(variant):], len(variant) - 1
elif ref.endswith( variant ):
return "-%s" % ref[:-len(variant)], -1
else:
prefix = getPrefix( ref, variant )
suffix = getSuffix( ref, variant )
shared = len(prefix) + len(suffix) - len(variant)
# print "-", prefix, suffix, ref, variant, shared, len(prefix), len(suffix), len(ref)
if shared < 0:
raise ValueError()
return "-%s" % ref[len(prefix):-(len(suffix)-shared)], len(prefix) - 1
elif len(ref) < len(variant):
# is an insertion
if variant.startswith(ref):
return "+%s" % variant[len(ref):], len(ref) - 1
elif variant.endswith(ref):
return "+%s" % variant[:len(ref)], 0
else:
prefix = getPrefix( ref, variant )
suffix = getSuffix( ref, variant )
shared = len(prefix) + len(suffix) - len(ref)
if shared < 0:
raise ValueError()
return "+%s" % variant[len(prefix):-(len(suffix)-shared)], len(prefix)
else:
assert 0, "snp?"
# in pileup, the position refers to the base
# after the coordinate, hence subtract 1
#pos -= 1
genotypes, offsets = [], []
is_error = True
for variant in genotype:
try:
g, offset = getGenotype( variant, ref )
except __HOLE__:
break
assert len(g) > 1, "incomplete genotype %s" % g
genotypes.append( g )
offsets.append( offset )
else:
is_error = False
if is_error:
print line,
counts.errors += 1
continue
assert len(set(offsets )) == 1
offset = offsets[0]
genotypes = "/".join( genotypes )
outfiles.write( h,
"\t".join( map(str, (
contig,
pos + offset,
"*",
genotypes,
"0",
"0",
"0",
"0",
genotypes,
"<" * len(genotype),
"0",
"0",
"0") ) ) + "\n" )
counts.output += 1
outfiles.close()
E.info("%s" % str(counts)) | ValueError | dataset/ETHPy150Open CGATOxford/cgat/obsolete/pipeline_snps.py/buildPileups |
2,009 | @files( ((None, "mouseC57BL.recalls.gz" ), ) )
def recallGenomicSNPs( infiles, outfile ):
'''validate the genomic SNP calls for C57BL6 using the
same methodology as for the RNASeq calls.
This tool requires a file
xgenome.fa/xgenome.fa.fai in the build directory with
samtools conform contig names (i.e. without the ``chr``
prefix).
'''
filename_bam = "ftp://ftp.sanger.ac.uk/pub/mouse_genomes/current_bams/C57BL.bam"
dbhandle = sqlite3.connect( PARAMS["database"] )
cc = dbhandle.cursor()
statement = "SELECT contig, pos, reference, genotype, status, genotypes FROM mouseC57BL_validated"
samfile = pysam.Samfile( filename_bam, "rb")
fastafile = pysam.Fastafile( "xgenome.fa" )
i = samfile.pileup( select = "snpcalls", fastafile = fastafile )
caller = pysam.SNPCaller( i )
outf= IOTools.openFile( outfile, "w" )
outf.write( "\t".join( ("contig", "pos", "ref",
"orig_genotype", "rnaseq_status", "rnaseq_genotypes",
"recall_genotype", "recall_consensus_quality",
"recall_snp_quality", "recall_mapping_quality", "recall_coverage" ) ) + "\n" )
for contig, pos, ref, genotype, status, genotypes in cc.execute(statement):
contig = re.sub("chr", "", contig)
try:
call = caller.call( contig, pos )
except __HOLE__, msg:
E.warn( "could not call %s:%i: msg=%s" % (contig, pos, msg) )
outf.write( "\t".join( map(str, (contig, pos, ref, genotype, status, genotypes,
call.genotype,
call.consensus_quality,
call.snp_quality,
call.mapping_quality,
call.coverage ) ) ) + "\n" )
outf.flush()
outf.close()
###################################################################
###################################################################
###################################################################
## MAIN PIPELINE
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
###################################################################
## Targets for prepare
###################################################################
###################################################################
###################################################################
################################################################### | ValueError | dataset/ETHPy150Open CGATOxford/cgat/obsolete/pipeline_snps.py/recallGenomicSNPs |
2,010 | @transform( buildPolyphenInput, suffix(".input"), ".features")
def buildPolyphenFeatures( infile, outfile ):
'''run polyphen on the cluster.
To do this, first send uniref to all nodes:
python ~/cgat/cluster_distribute.py
--collection=andreas
/net/cpp-group/tools/polyphen-2.0.18/nrdb/uniref100*.{pin,psd,psi,phr,psq,pal}
'''
nsnps = len([ x for x in open(infile)])
to_cluster = True
stepsize = max( int(nsnps / 200000.0), 1000 )
job_array=(0, nsnps, stepsize)
E.info("running array jobs on %i snps" % nsnps )
scratchdir = os.path.join(os.path.abspath("."), "scratch")
try:
os.mkdir( scratchdir )
except OSError:
pass
resultsdir = outfile + ".dir"
try:
os.mkdir( resultsdir )
except __HOLE__:
pass
statement = '''
/net/cpp-group/tools/polyphen-2.0.18/bin/run_pph_cpp.pl
-s %(peptides)s
-b %(polyphen_blastdb)s
-d %(scratchdir)s
%(infile)s > %(resultsdir)s/%(outfile)s.$SGE_TASK_ID 2> %(resultsdir)s/%(outfile)s.err.$SGE_TASK_ID
'''
P.run()
to_cluster = False
job_array=None
statement = '''find %(resultsdir)s -name "*.err.*" -exec cat {} \; > %(outfile)s.log'''
P.run()
statement = '''find %(resultsdir)s -not -name "*.err.*" -exec cat {} \; > %(outfile)s'''
P.run()
###################################################################
###################################################################
################################################################### | OSError | dataset/ETHPy150Open CGATOxford/cgat/obsolete/pipeline_snps.py/buildPolyphenFeatures |
2,011 | @follows( buildGeneMatrixConsequences,
buildGeneMatrixAlleles,
buildGeneMatrixEffects,
buildGeneMatrixStructuralVariants)
@files( [ ((x,y), "%s_vs_%s.gla" % (re.sub(".genematrix", "", x), \
re.sub("assignments.", "", y) ) )\
for x,y in \
itertools.product( \
glob.glob("*.genematrix" ),
glob.glob("assignments.*") )
if not y.endswith(".log") ] )
def runGeneListAnalysis( infiles, outfile):
'''run a gene list analysis.'''
genematrix, assignments = infiles
to_cluster = True
try:
options = "--qvalue-lambda=%(genelist_analysis_qvalue_lambda)f" % PARAMS
except __HOLE__:
options = ""
statement = '''
python %(scriptsdir)s/genelist_analysis.py
--format=matrix
--filename-assignments=%(assignments)s
--fdr
--qvalue-method=%(genelist_analysis_qvalue_method)s
--log=%(outfile)s.log
%(options)s
< %(genematrix)s
> %(outfile)s
'''
P.run()
########################################################################### | TypeError | dataset/ETHPy150Open CGATOxford/cgat/obsolete/pipeline_snps.py/runGeneListAnalysis |
2,012 | @unit_testing_only
def spoof_submission(submit_url, body):
client = Client()
f = StringIO(body.encode('utf-8'))
f.name = 'form.xml'
response = client.post(submit_url, {
'xml_submission_file': f,
})
try:
return response['X-CommCareHQ-FormID']
except __HOLE__:
return None | KeyError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/ex-submodules/couchforms/util.py/spoof_submission |
2,013 | def _fetch_project(self):
"""
Clones the project if necessary, fetches from the remote repo and resets to the requested commit
"""
# If shallow_clones is set to True, then we need to specify the --depth=1 argument to all git fetch
# and clone invocations.
git_clone_fetch_depth_arg = ''
if Configuration['shallow_clones']:
git_clone_fetch_depth_arg = '--depth=1'
existing_repo_is_shallow = os.path.isfile(os.path.join(self._repo_directory, '.git', 'shallow'))
# If we disable shallow clones, but the existing repo is shallow, we must re-clone non-shallowly.
if not Configuration['shallow_clones'] and existing_repo_is_shallow and os.path.exists(self._repo_directory):
shutil.rmtree(self._repo_directory)
fs.create_dir(self._repo_directory, self.DIRECTORY_PERMISSIONS)
# Clone the repo if it doesn't exist
try:
self._execute_git_command_in_repo_and_raise_on_failure('rev-parse') # rev-parse succeeds if repo exists
except __HOLE__:
self._logger.notice('No valid repo in "{}". Cloning fresh from "{}".', self._repo_directory, self._url)
self._execute_git_command_in_repo_and_raise_on_failure(
git_command='clone {} {} {}'. format(git_clone_fetch_depth_arg, self._url, self._repo_directory),
error_msg='Could not clone repo.'
)
# Must add the --update-head-ok in the scenario that the current branch of the working directory
# is equal to self._branch, otherwise the git fetch will exit with a non-zero exit code.
self._execute_git_command_in_repo_and_raise_on_failure(
git_command='fetch {} --update-head-ok {} {}'.format(git_clone_fetch_depth_arg, self._remote, self._branch),
error_msg='Could not fetch specified branch "{}" from remote "{}".'.format(self._branch, self._remote)
)
# Validate and convert the user-specified hash/refspec to a full git hash
fetch_head_hash = self._execute_git_command_in_repo_and_raise_on_failure(
git_command='rev-parse FETCH_HEAD',
error_msg='Could not rev-parse FETCH_HEAD of {} to a commit hash.'.format(self._branch)
).strip()
# Save this hash as a local ref. Named local refs are necessary for slaves to fetch correctly from the master.
# The local ref will be passed on to slaves instead of the user-specified branch.
self._local_ref = 'refs/clusterrunner/{}'.format(fetch_head_hash)
self._execute_git_command_in_repo_and_raise_on_failure(
git_command='update-ref {} {}'.format(self._local_ref, fetch_head_hash),
error_msg='Could not update local ref.'
)
# The '--' argument acts as a delimiter to differentiate values that can be "tree-ish" or a "path"
self._execute_git_command_in_repo_and_raise_on_failure(
git_command='reset --hard {} --'.format(fetch_head_hash),
error_msg='Could not reset Git repo.'
)
self._execute_git_command_in_repo_and_raise_on_failure(
git_command='clean -dfx',
error_msg='Could not clean Git repo.'
) | RuntimeError | dataset/ETHPy150Open box/ClusterRunner/app/project_type/git.py/Git._fetch_project |
2,014 | def remove_handler(self, fd):
"""Stop listening for events on fd."""
self._handlers.pop(fd, None)
self._events.pop(fd, None)
try:
self._impl.unregister(fd)
except (OSError, __HOLE__):
logging.debug("Error deleting fd from IOLoop", exc_info=True) | IOError | dataset/ETHPy150Open felipecruz/zmqpy/zmqpy/eventloop/ioloop.py/IOLoop.remove_handler |
2,015 | def start(self):
"""Starts the I/O loop.
The loop will run until one of the I/O handlers calls stop(), which
will make the loop stop after the current event iteration completes.
"""
if self._stopped:
self._stopped = False
return
self._thread_ident = thread_get_ident()
self._running = True
while True:
poll_timeout = 3600.0
# Prevent IO event starvation by delaying new callbacks
# to the next iteration of the event loop.
with self._callback_lock:
callbacks = self._callbacks
self._callbacks = []
for callback in callbacks:
self._run_callback(callback)
if self._timeouts:
now = time.time()
while self._timeouts:
if self._timeouts[0].callback is None:
# the timeout was cancelled
heapq.heappop(self._timeouts)
elif self._timeouts[0].deadline <= now:
timeout = heapq.heappop(self._timeouts)
self._run_callback(timeout.callback)
else:
seconds = self._timeouts[0].deadline - now
poll_timeout = min(seconds, poll_timeout)
break
if self._callbacks:
# If any callbacks or timeouts called add_callback,
# we don't want to wait in poll() before we run them.
poll_timeout = 0.0
if not self._running:
break
if self._blocking_signal_threshold is not None:
# clear alarm so it doesn't fire while poll is waiting for
# events.
signal.setitimer(signal.ITIMER_REAL, 0, 0)
try:
event_pairs = self._impl.poll(poll_timeout)
except Exception as e:
# Depending on python version and IOLoop implementation,
# different exception types may be thrown and there are
# two ways EINTR might be signaled:
# * e.errno == errno.EINTR
# * e.args is like (errno.EINTR, 'Interrupted system call')
if (getattr(e, 'errno', None) == errno.EINTR or
(isinstance(getattr(e, 'args', None), tuple) and
len(e.args) == 2 and e.args[0] == errno.EINTR)):
continue
# elif getattr(e, 'errno', None) == ETERM:
# # This happens when the zmq Context is closed; we should just exit.
# self._running = False
# self._stopped = True
# break
else:
raise
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL,
self._blocking_signal_threshold, 0)
# Pop one fd at a time from the set of pending fds and run
# its handler. Since that handler may perform actions on
# other file descriptors, there may be reentrant calls to
# this IOLoop that update self._events
self._events.update(event_pairs)
while self._events:
fd, events = self._events.popitem()
try:
self._handlers[fd](fd, events)
except (__HOLE__, IOError) as e:
if e.args[0] == errno.EPIPE:
# Happens when the client closes the connection
pass
else:
logging.error("Exception in I/O handler for fd %s",
fd, exc_info=True)
except Exception:
logging.error("Exception in I/O handler for fd %s",
fd, exc_info=True)
# reset the stopped flag so another start/stop pair can be issued
self._stopped = False
if self._blocking_signal_threshold is not None:
signal.setitimer(signal.ITIMER_REAL, 0, 0) | OSError | dataset/ETHPy150Open felipecruz/zmqpy/zmqpy/eventloop/ioloop.py/IOLoop.start |
2,016 | def register_namespace(self, prefix, uri):
"""Registers a namespace,, making it available for use when adding subsequent fields to the entry.
Registration will also affect the XML export, adding in the xmlns:prefix="url" attribute when required."""
try:
etree.register_namespace(prefix, uri)
except __HOLE__ as e:
# the etree implementation we're using doesn't support register_namespace
# (probably lxml)
pass
self.add_ns.append(prefix)
if prefix not in NS.keys():
NS[prefix] = "{%s}%%s" % uri
# we also have to handle namespaces internally, for etree implementations which
# don't support register_namespace
if prefix not in self.nsmap.keys():
self.nsmap[prefix] = uri | AttributeError | dataset/ETHPy150Open swordapp/python-client-sword2/sword2/atom_objects.py/Entry.register_namespace |
2,017 | def find_all_commands(management_dir):
"""
Find all valid commands in a directory
management_dir : directory path
return - List of commands
"""
try:
#Find all commands in the directory that are not __init__.py and end in .py. Then, remove the trailing .py
return [f[:-3] for f in os.listdir(management_dir) if f.endswith('.py') and not f.startswith("__")]
except __HOLE__:
#If nothing is found, return empty
return [] | OSError | dataset/ETHPy150Open VikParuchuri/percept/percept/management/base.py/find_all_commands |
2,018 | def find_commands_module(app_name):
"""
Find the commands module in each app (if it exists) and return the path
app_name : The name of an app in the INSTALLED_APPS setting
return - path to the app
"""
parts = app_name.split('.')
parts.append('commands')
parts.reverse()
part = parts.pop()
path = None
#Load the module if needed
try:
f, path, descr = imp.find_module(part, path)
except __HOLE__ as e:
if os.path.basename(os.getcwd()) != part:
raise e
else:
try:
if f:
f.close()
except UnboundLocalError:
log.error("Could not import module {0} at path {1}. Sys.path is {2}".format(part, path, sys.path))
#Go down level by and level and try to load the module at each level
while parts:
part = parts.pop()
f, path, descr = imp.find_module(part, [path] if path else None)
if f:
f.close()
return path | ImportError | dataset/ETHPy150Open VikParuchuri/percept/percept/management/base.py/find_commands_module |
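Editorial aside, not from the dataset: the `imp` module used above is deprecated on Python 3. An equivalent lookup of an app's `commands` package directory can be sketched with `importlib.util.find_spec` (the helper name below is invented):

```python
import importlib.util

def find_commands_dir(app_name):
    """Return the app's commands/ directory, or None if it has none."""
    try:
        spec = importlib.util.find_spec(app_name + '.commands')
    except ModuleNotFoundError:      # the app itself is not importable
        return None
    if spec is None or not spec.submodule_search_locations:
        return None
    return list(spec.submodule_search_locations)[0]
```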
2,019 | def get_commands():
"""
Get all valid commands
return - all valid commands in dictionary form
"""
commands = {}
#Try to load the settings file (settings can be specified on the command line) and get the INSTALLED_APPS
try:
from percept.conf.base import settings
apps = settings.INSTALLED_APPS
except __HOLE__:
apps = []
#For each app, try to find the command module (command folder in the app)
#Then, try to load all commands in the directory
for app_name in apps:
try:
path = find_commands_module(app_name)
commands.update(dict([(name, app_name) for name in find_all_commands(path)]))
except ImportError as e:
pass
return commands | KeyError | dataset/ETHPy150Open VikParuchuri/percept/percept/management/base.py/get_commands |
2,020 | def execute(self):
"""
Run the command with the command line arguments
"""
#Initialize the option parser
parser = LaxOptionParser(
usage="%prog subcommand [options] [args]",
option_list=BaseCommand.option_list #This will define what is allowed input to the parser (ie --settings=)
)
#Parse the options
options, args = parser.parse_args(self.argv)
#Handle --settings and --pythonpath properly
options = handle_default_options(options)
try:
#Get the name of the subcommand
subcommand = self.argv[1]
except __HOLE__:
#If the subcommand name cannot be found, set it to help
subcommand = 'help'
#If the subcommand is help, print the usage of the parser, and available command names
if subcommand == 'help':
if len(args) <= 2:
parser.print_help()
sys.stdout.write(self.help_text + '\n')
else:
#Otherwise, run the given command
self.fetch_command(subcommand).run_from_argv(self.argv) | IndexError | dataset/ETHPy150Open VikParuchuri/percept/percept/management/base.py/Management.execute |
2,021 | def fetch_command(self, subcommand):
"""
Gets a given command
"""
try:
app_name = get_commands()[subcommand]
except __HOLE__:
sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % \
(subcommand, self.prog_name))
sys.exit(1)
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, subcommand)
return klass | KeyError | dataset/ETHPy150Open VikParuchuri/percept/percept/management/base.py/Management.fetch_command |
2,022 | def get_extra_routes_data(self):
try:
extraroutes = getattr(self.tab_group.kwargs['router'], 'routes')
except __HOLE__:
extraroutes = []
return [api.RouterStaticRoute(r) for r in extraroutes] | AttributeError | dataset/ETHPy150Open openstack/horizon/openstack_dashboard/dashboards/project/routers/extensions/extraroutes/tabs.py/ExtraRoutesTab.get_extra_routes_data |
2,023 | def deregister(self, identity):
try:
target = self._targets.pop(identity)
except __HOLE__:
pass
else:
# Just in case, set the barrier to unblock any worker...
target.barrier.set()
if LOG.isEnabledFor(logging.TRACE):
LOG.trace("Dispatched %s messages %s to target '%s' during"
" the lifetime of its existence in the dispatcher",
sum(six.itervalues(target.dispatched)),
dict(target.dispatched), target) | KeyError | dataset/ETHPy150Open openstack/taskflow/taskflow/engines/action_engine/executor.py/_Dispatcher.deregister |
2,024 | def _dispatch(self, message):
if LOG.isEnabledFor(logging.TRACE):
LOG.trace("Dispatching message %s (it took %s seconds"
" for it to arrive for processing after being"
" sent)", message,
timeutils.delta_seconds(message['sent_on'],
timeutils.utcnow()))
try:
kind = message['kind']
sender = message['sender']
body = message['body']
except (__HOLE__, ValueError, TypeError):
LOG.warn("Badly formatted message %s received", message,
exc_info=True)
return
target = self._targets.get(sender['id'])
if target is None:
# Must have been removed...
return
if kind == _KIND_COMPLETE_ME:
target.dispatched[kind] += 1
target.barrier.set()
elif kind == _KIND_EVENT:
task = target.task
target.dispatched[kind] += 1
task.notifier.notify(body['event_type'], body['details'])
else:
LOG.warn("Unknown message '%s' found in message from sender"
" %s to target '%s'", kind, sender, target) | KeyError | dataset/ETHPy150Open openstack/taskflow/taskflow/engines/action_engine/executor.py/_Dispatcher._dispatch |
2,025 | def _submit_task(self, func, task, *args, **kwargs):
"""Submit a function to run the given task (with given args/kwargs).
NOTE(harlowja): Adjust all events to be proxies instead since we want
those callbacks to be activated in this process, not in the child,
also since typically callbacks are functors (or callables) we can
not pickle those in the first place...
To make sure people understand how this works, the following is a
lengthy description of what is going on here, read at will:
So to ensure that we are proxying task triggered events that occur
in the executed subprocess (which will be created and used by the
thing using the multiprocessing based executor) we need to establish
a link between that process and this process that ensures that when a
event is triggered in that task in that process that a corresponding
event is triggered on the original task that was requested to be ran
in this process.
To accomplish this we have to create a copy of the task (without
any listeners) and then reattach a new set of listeners that will
now instead of calling the desired listeners just place messages
for this process (a dispatcher thread that is created in this class)
to dispatch to the original task (using a common queue + per task
sender identity/target that is used and associated to know which task
to proxy back to, since it is possible that there may be *many*
subprocesses running at the same time, each running a different task
and using the same common queue to submit messages back to).
Once the subprocess task has finished execution, the executor will
then trigger a callback that will remove the task + target from the
dispatcher (which will stop any further proxying back to the original
task).
"""
progress_callback = kwargs.pop('progress_callback', None)
clone = task.copy(retain_listeners=False)
identity = uuidutils.generate_uuid()
target = _Target(task, self._manager.Event(), identity)
channel = _Channel(self._queue, identity)
self._rebind_task(task, clone, channel,
progress_callback=progress_callback)
def register():
if progress_callback is not None:
task.notifier.register(_UPDATE_PROGRESS, progress_callback)
self._dispatcher.register(identity, target)
def deregister():
if progress_callback is not None:
task.notifier.deregister(_UPDATE_PROGRESS, progress_callback)
self._dispatcher.deregister(identity)
register()
work = _WaitWorkItem(channel, target.barrier,
func, clone, *args, **kwargs)
try:
fut = self._executor.submit(work)
except __HOLE__:
with excutils.save_and_reraise_exception():
deregister()
fut.atom = task
fut.add_done_callback(lambda fut: deregister())
return fut | RuntimeError | dataset/ETHPy150Open openstack/taskflow/taskflow/engines/action_engine/executor.py/ParallelProcessTaskExecutor._submit_task |
2,026 | def populate_tree(self):
"""Create each item (and associated data) in the tree"""
if not self.stats:
warn_item = QTreeWidgetItem(self)
warn_item.setData(
0, Qt.DisplayRole,
_('No timings to display. '
'Did you forget to add @profile decorators ?')
.format(url=WEBSITE_URL))
warn_item.setFirstColumnSpanned(True)
warn_item.setTextAlignment(0, Qt.AlignCenter)
font = warn_item.font(0)
font.setStyle(QFont.StyleItalic)
warn_item.setFont(0, font)
return
try:
monospace_font = self.window().editor.get_plugin_font()
except __HOLE__: # If run standalone for testing
monospace_font = QFont("Courier New")
monospace_font.setPointSize(10)
for func_info, func_data in self.stats.items():
# Function name and position
filename, start_line_no, func_name = func_info
func_stats, func_peak_usage = func_data
func_item = QTreeWidgetItem(self)
func_item.setData(
0, Qt.DisplayRole,
_('{func_name} (peak {peak_usage:.3f} MiB) in file "{filename}", '
'line {line_no}').format(
filename=filename,
line_no=start_line_no,
func_name=func_name,
peak_usage=func_peak_usage))
func_item.setFirstColumnSpanned(True)
func_item.setData(COL_POS, Qt.UserRole,
(osp.normpath(filename), start_line_no))
# For sorting by time
func_item.setData(COL_USAGE, Qt.DisplayRole, func_peak_usage)
func_item.setData(COL_INCREMENT, Qt.DisplayRole,
func_peak_usage)
if self.parent().use_colors:
# Choose deteministic unique color for the function
md5 = hashlib.md5((filename + func_name).encode("utf8")).hexdigest()
hue = (int(md5[:2], 16) - 68) % 360 # avoid blue (unreadable)
func_color = QColor.fromHsv(hue, 200, 255)
else:
# Red color only
func_color = QColor.fromRgb(255, 0, 0)
# get max increment
max_increment = 0
for line_info in func_stats:
(line_no, code_line, usage, increment) = line_info
if increment is not None:
max_increment = max(max_increment, increment)
# Lines of code
for line_info in func_stats:
line_item = QTreeWidgetItem(func_item)
(line_no, code_line, usage, increment) = line_info
self.fill_item(
line_item, filename, line_no, code_line,
usage, increment)
# Color background
if increment is not None:
alpha = increment / max_increment if max_increment != 0 else 0
color = QColor(func_color)
color.setAlphaF(alpha) # Returns None
color = QBrush(color)
for col in range(self.columnCount()):
line_item.setBackground(col, color)
else:
for col in range(self.columnCount()):
line_item.setForeground(col, CODE_NOT_RUN_COLOR)
# Monospace font for code
line_item.setFont(COL_LINE, monospace_font) | AttributeError | dataset/ETHPy150Open Nodd/spyder_line_profiler/widgets/memoryprofilergui.py/MemoryProfilerDataTree.populate_tree |
2,027 | def django_init(self):
'''
Initializes the django engine. The root must have been set already."
'''
elems = os.path.split(self.root)[:-1]
parent = os.path.join(*elems)
sys.path.append(parent)
BASE_APP = []
try:
# Attempt to import the root folder. This is necessary to access
# the local templatetag libraries.
base = os.path.split(self.root)[-1]
logger.debug("importing app: %s" % base)
importlib.import_module(base)
BASE_APP = [ base ]
except __HOLE__ as exc:
logger.debug("app '{}' cannot be imported: {}".format(base, exc))
TEMPLATE_DIR = join(os.path.dirname(__file__), "templates")
dirs = [self.root, join(self.root, 'templates'), TEMPLATE_DIR]
logger.debug("template dirs: {}".format(dirs))
settings.configure(
DEBUG=True, TEMPLATE_DEBUG=True,
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': dirs,
'APP_DIRS': True,
'OPTIONS': {
'string_if_invalid': "Undefined: %s ",
'builtins': [
'pyblue.templatetags.pytags',
'django.contrib.humanize.templatetags.humanize',
],
}
}
],
INSTALLED_APPS=["pyblue", "django.contrib.humanize",
"django.contrib.staticfiles"] + BASE_APP,
STATIC_URL='/static/',
)
django.setup()
logger.debug("templatetags: %s" % ", ".join(get_installed_libraries())) | ImportError | dataset/ETHPy150Open ialbert/pyblue/pyblue/engine.py/PyBlue.django_init |
2,028 | def parse_metadata(path):
'''
Attempts to parse out metadata from django comments.
Each comment is assumed to be key = value where the value is a JSON object.
'''
# Match Django template comments.
PATTERN = re.compile(r'^{#\s?(?P<name>\w+)\s?=\s?(?P<value>[\S\s]+)\s?#}')
# Check only the start of the file.
lines = io.open(path, encoding='utf-8').read().splitlines()[:100]
lines = map(strip, lines)
meta = dict()
for line in lines:
m = PATTERN.search(line)
if m:
name, value = m.group('name'), m.group('value')
try:
obj = json.loads(value)
except __HOLE__ as exc:
obj = str(value)
meta[name] = obj
# logger.debug("path: {}, metadata: {}".format(path, meta))
return meta | ValueError | dataset/ETHPy150Open ialbert/pyblue/pyblue/engine.py/parse_metadata |
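To make the comment format parsed by the row above concrete, a small self-contained example using the same regular expression (the key and values are invented):

```python
import json
import re

# Same pattern as in parse_metadata: `{# key = JSON-value #}`.
PATTERN = re.compile(r'^{#\s?(?P<name>\w+)\s?=\s?(?P<value>[\S\s]+)\s?#}')

m = PATTERN.search('{# tags = ["blog", "python"] #}')
print(m.group('name'))               # tags
print(json.loads(m.group('value')))  # ['blog', 'python']
```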
2,029 | def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option(
"-g", "--gtf-file", dest="filename_gtf", type="string",
help="filename with gene models in gtf format [%default]")
parser.add_option(
"-m", "--filename-mismapped", dest="filename_mismapped", type="string",
help="output bam file for mismapped reads [%default]")
parser.add_option(
"-j", "--junctions-bed-file", dest="filename_junctions", type="string",
help="bam file with reads mapped across junctions [%default]")
parser.add_option(
"-r", "--filename-regions", dest="filename_regions", type="string",
help="filename with regions to remove in bed format [%default]")
parser.add_option(
"-t", "--transcripts-gtf-file", dest="filename_transcriptome",
type="string",
help="bam file with reads mapped against transcripts [%default]")
parser.add_option(
"-p", "--map-tsv-file", dest="filename_map", type="string",
help="filename mapping transcript numbers (used by "
"--filename-transciptome) to transcript names "
"(used by --filename-gtf) [%default]")
parser.add_option(
"-s", "--filename-stats", dest="filename_stats", type="string",
help="filename to output stats to [%default]")
parser.add_option(
"-o", "--colour",
dest="colour_mismatches", action="store_true",
help="mismatches will use colour differences (CM tag) [%default]")
parser.add_option(
"-i", "--ignore-mismatches",
dest="ignore_mismatches", action="store_true",
help="ignore mismatches [%default]")
parser.add_option(
"-c", "--remove-contigs", dest="remove_contigs", type="string",
help="','-separated list of contigs to remove [%default]")
parser.add_option(
"-f", "--force-output", dest="force", action="store_true",
help="force overwriting of existing files [%default]")
parser.add_option("-u", "--unique", dest="unique", action="store_true",
help="remove reads not matching uniquely [%default]")
parser.add_option("--output-sam", dest="output_sam", action="store_true",
help="output in sam format [%default]")
parser.set_defaults(
filename_gtf=None,
filename_mismapped=None,
filename_junctions=None,
filename_transcriptome=None,
filename_map=None,
remove_contigs=None,
force=False,
unique=False,
colour_mismatches=False,
ignore_mismatches=False,
output_sam=False,
filename_table=None,
)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
if len(args) != 1:
raise ValueError("please supply one bam file")
bamfile_genome = args[0]
genome_samfile = pysam.Samfile(bamfile_genome, "rb")
if options.remove_contigs:
options.remove_contigs = options.remove_contigs.split(",")
if options.filename_map:
E.info("reading map")
id_map = IOTools.readMap(
IOTools.openFile(options.filename_map), has_header=True)
id_map = dict([(y, x) for x, y in id_map.iteritems()])
else:
id_map = None
transcripts = {}
if options.filename_gtf:
E.info("indexing geneset")
mapped, missed = 0, 0
for gtf in GTF.transcript_iterator(
GTF.iterator(IOTools.openFile(options.filename_gtf))):
gtf.sort(key=lambda x: x.start)
transcript_id = gtf[0].transcript_id
if id_map:
try:
transcript_id = id_map[transcript_id]
mapped += 1
except __HOLE__:
missed += 1
continue
transcripts[transcript_id] = gtf
E.info("read %i transcripts from geneset (%i mapped, %i missed)" %
(len(transcripts), mapped, missed))
regions_to_remove = None
if options.filename_regions:
E.info("indexing regions")
regions_to_remove = IndexedGenome.Simple()
for bed in Bed.iterator(IOTools.openFile(options.filename_regions)):
regions_to_remove.add(bed.contig, bed.start, bed.end)
E.info("read %i regions" % len(regions_to_remove))
if options.filename_transcriptome:
transcripts_samfile = pysam.Samfile(options.filename_transcriptome,
"rb")
else:
transcripts_samfile = None
if options.output_sam:
output_samfile = pysam.Samfile("-", "wh", template=genome_samfile)
else:
output_samfile = pysam.Samfile("-", "wb", template=genome_samfile)
if options.filename_mismapped:
if not options.force and os.path.exists(options.filename_mismapped):
raise IOError("output file %s already exists" %
options.filename_mismapped)
output_mismapped = pysam.Samfile(options.filename_mismapped,
"wb",
template=genome_samfile)
else:
output_mismapped = None
if options.filename_junctions:
junctions_samfile = pysam.Samfile(options.filename_junctions,
"rb")
else:
junctions_samfile = None
c = _bams2bam.filter(genome_samfile,
output_samfile,
output_mismapped,
transcripts_samfile,
junctions_samfile,
transcripts,
regions=regions_to_remove,
unique=options.unique,
remove_contigs=options.remove_contigs,
colour_mismatches=options.colour_mismatches,
ignore_mismatches=options.ignore_mismatches,
ignore_transcripts=transcripts_samfile is None,
ignore_junctions=junctions_samfile is None)
if options.filename_stats:
outf = IOTools.openFile(options.filename_stats, "w")
outf.write("category\tcounts\n%s\n" % c.asTable())
outf.close()
if options.filename_transcriptome:
transcripts_samfile.close()
genome_samfile.close()
output_samfile.close()
if output_mismapped:
output_mismapped.close()
# write footer and output benchmark information.
E.Stop() | KeyError | dataset/ETHPy150Open CGATOxford/cgat/scripts/bams2bam.py/main |
2,030 | def stat(self):
try:
return SFTPAttributes.from_stat(os.fstat(self.readfile.fileno()))
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno) | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPHandle.stat |
2,031 | def chattr(self, attr):
# python doesn't have equivalents to fchown or fchmod, so we have to
# use the stored filename
try:
SFTPServer.set_file_attr(self.filename, attr)
return SFTP_OK
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno) | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPHandle.chattr |
2,032 | def list_folder(self, path):
path = self._realpath(path)
try:
out = []
flist = os.listdir(path)
for fname in flist:
attr = SFTPAttributes.from_stat(
os.lstat(os.path.join(path, fname))
)
attr.filename = fname.encode("utf-8")
out.append(attr)
return out
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno) | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.list_folder |
2,033 | def stat(self, path):
path = self._realpath(path)
try:
return SFTPAttributes.from_stat(os.stat(path))
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno) | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.stat |
2,034 | def lstat(self, path):
path = self._realpath(path)
try:
return SFTPAttributes.from_stat(os.lstat(path))
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno) | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.lstat |
2,035 | def open(self, path, flags, attr):
path = self._realpath(path)
try:
binary_flag = getattr(os, 'O_BINARY', 0)
flags |= binary_flag
mode = getattr(attr, 'st_mode', None)
if mode is not None:
fd = os.open(path, flags, mode)
else:
# os.open() defaults to 0777 which is
# an odd default mode for files
fd = os.open(path, flags, o666)
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno)
if (flags & os.O_CREAT) and (attr is not None):
attr._flags &= ~attr.FLAG_PERMISSIONS
SFTPServer.set_file_attr(path, attr)
if flags & os.O_WRONLY:
if flags & os.O_APPEND:
fstr = 'ab'
else:
fstr = 'wb'
elif flags & os.O_RDWR:
if flags & os.O_APPEND:
fstr = 'a+b'
else:
fstr = 'r+b'
else:
# O_RDONLY (== 0)
fstr = 'rb'
try:
f = os.fdopen(fd, fstr)
except OSError as e:
return SFTPServer.convert_errno(e.errno)
fobj = StubSFTPHandle(flags)
fobj.filename = path
fobj.readfile = f
fobj.writefile = f
return fobj | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.open |
2,036 | def remove(self, path):
path = self._realpath(path)
try:
os.remove(path)
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.remove |
2,037 | def rename(self, oldpath, newpath):
oldpath = self._realpath(oldpath)
newpath = self._realpath(newpath)
try:
os.rename(oldpath, newpath)
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.rename |
2,038 | def mkdir(self, path, attr):
path = self._realpath(path)
try:
os.mkdir(path)
if attr is not None:
SFTPServer.set_file_attr(path, attr)
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.mkdir |
2,039 | def rmdir(self, path):
path = self._realpath(path)
try:
os.rmdir(path)
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.rmdir |
2,040 | def chattr(self, path, attr):
path = self._realpath(path)
try:
SFTPServer.set_file_attr(path, attr)
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.chattr |
2,041 | def symlink(self, target_path, path):
path = self._realpath(path)
if (len(target_path) > 0) and (target_path[0] == '/'):
# absolute symlink
target_path = os.path.join(self.ROOT, target_path[1:])
try:
os.symlink(target_path, path)
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno)
return SFTP_OK | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.symlink |
2,042 | def readlink(self, path):
path = self._realpath(path)
try:
symlink = os.readlink(path)
except __HOLE__ as e:
return SFTPServer.convert_errno(e.errno)
# if it's absolute, remove the root
if os.path.isabs(symlink):
if symlink[:len(self.ROOT)] == self.ROOT:
symlink = symlink[len(self.ROOT):]
if (len(symlink) == 0) or (symlink[0] != '/'):
symlink = '/' + symlink
else:
symlink = '<error>'
return symlink | OSError | dataset/ETHPy150Open unbit/sftpclone/sftpclone/t/stub_sftp.py/StubSFTPServer.readlink |
2,043 | def demangle(name, disable_mask=0):
try:
demangled_name = idaapi.demangle_name2(name, disable_mask)
except __HOLE__:
# Backwards compatibility with IDA 6.6
demangled_name = idaapi.demangle_name(name, disable_mask)
if demangled_name:
return demangled_name
return name | AttributeError | dataset/ETHPy150Open tmr232/Sark/sark/code/base.py/demangle |
2,044 | def __new__(cls, *args, **kwargs):
"""
Initializes class attributes for subsequent constructor calls.
:note: *args and **kwargs are not explicitly used in this function,
but needed for Python 2 compatibility.
"""
if not cls.__initialized__:
cls.__initialized__ = True
try:
_meta = getattr(cls, 'Meta')
except AttributeError:
raise AttributeError(
'Missing Meta class in {0}.'.format(
cls.__name__))
for attr in ['series_name', 'fields']:
try:
setattr(cls, '_' + attr, getattr(_meta, attr))
except AttributeError:
raise AttributeError(
'Missing {0} in {1} Meta class.'.format(
attr,
cls.__name__))
cls._autocommit = getattr(_meta, 'autocommit', False)
cls._client = getattr(_meta, 'client', None)
if cls._autocommit and not cls._client:
raise AttributeError(
'In {0}, autocommit is set to True, but no client is set.'
.format(cls.__name__))
try:
cls._bulk_size = getattr(_meta, 'bulk_size')
if cls._bulk_size < 1 and cls._autocommit:
warn(
'Definition of bulk_size in {0} forced to 1, '
'was less than 1.'.format(cls.__name__))
cls._bulk_size = 1
except __HOLE__:
cls._bulk_size = -1
else:
if not cls._autocommit:
warn(
'Definition of bulk_size in {0} has no affect because'
' autocommit is false.'.format(cls.__name__))
cls._datapoints = defaultdict(list)
cls._type = namedtuple(cls.__name__, cls._fields)
return super(SeriesHelper, cls).__new__(cls) | AttributeError | dataset/ETHPy150Open influxdata/influxdb-python/influxdb/influxdb08/helper.py/SeriesHelper.__new__ |
2,045 | def ndb_get(self):
"""
A non-persistent store (ndb: NonDataBase). Everything stored
to this is guaranteed to be cleared when a server is shutdown.
Syntax is same as for the _get_db_holder() method and
property, e.g. obj.ndb.attr = value etc.
"""
try:
return self._ndb_holder
except __HOLE__:
self._ndb_holder = NDbHolder(self, "nattrhandler", manager_name="nattributes")
return self._ndb_holder
#@ndb.setter | AttributeError | dataset/ETHPy150Open evennia/evennia/evennia/server/serversession.py/ServerSession.ndb_get |
2,046 | @contextmanager
def alt_file(current_file):
"""
Create an alternate file next to an existing file.
"""
_alt_file = current_file + '-alt'
yield _alt_file
try:
shutil.move(_alt_file, current_file)
except __HOLE__:
# We didn't use an alt file.
pass | IOError | dataset/ETHPy150Open koenbok/Cactus/cactus/utils/filesystem.py/alt_file |
2,047 | @tornado.gen.coroutine
def resolve_dirty(self, ent):
"""Write back (or delete) a single dirty entry. Then mark it clean.
"""
dbname = ent.tup[0]
if dbname not in writable_collections:
# Maybe we should update the equivalent writable entry here,
# but we'll just skip it.
self.log.warning('Unable to update %s entry: %s', dbname, ent.key)
if ent.mutable:
ent.origval = deepcopy(ent.val)
return
if ent.found:
# Resolve update.
try:
checkwritable(ent.val)
except __HOLE__ as ex:
self.log.warning('Unable to update %s entry: %s', ent.key, ex)
### drop entry entirely?
return
newval = dict(ent.query)
newval['val'] = ent.val
yield motor.Op(self.app.mongodb[dbname].update,
ent.query, newval,
upsert=True)
if ent.mutable:
ent.origval = deepcopy(ent.val)
else:
# Resolve delete.
yield motor.Op(self.app.mongodb[dbname].remove,
ent.query)
ent.dirty = False | TypeError | dataset/ETHPy150Open erkyrath/tworld/lib/two/propcache.py/PropCache.resolve_dirty |
2,048 | def _convert_ip_address(self, ip, field_name):
try:
return netaddr.IPAddress(ip)
except (netaddr.AddrFormatError, __HOLE__):
msg = _('%(field_name)s: Invalid IP address '
'(value=%(ip)s)' % dict(
field_name=field_name, ip=ip))
raise forms.ValidationError(msg) | ValueError | dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/networks/workflows.py/CreateSubnetDetailAction._convert_ip_address |
2,049 | def _convert_ip_network(self, network, field_name):
try:
return netaddr.IPNetwork(network)
except (netaddr.AddrFormatError, __HOLE__):
msg = _('%(field_name)s: Invalid IP address '
'(value=%(network)s)' % dict(
field_name=field_name, network=network))
raise forms.ValidationError(msg) | ValueError | dataset/ETHPy150Open Havate/havate-openstack/proto-build/gui/horizon/Horizon_GUI/openstack_dashboard/dashboards/project/networks/workflows.py/CreateSubnetDetailAction._convert_ip_network |
2,050 | def exec_vnum_link(self):
path = self.outputs[0].abspath(self.env)
try:
os.remove(path)
except __HOLE__:
pass
try:
os.symlink(self.inputs[0].name, path)
except OSError:
return 1 | OSError | dataset/ETHPy150Open appcelerator-archive/poc-nodejs-desktop/Resources/nodejs/builds/linux/node/lib/node/wafadmin/Tools/ccroot.py/exec_vnum_link |
2,051 | def __new__(cls, *args, **kw):
if not args:
# python3k pickle seems to call this
return object.__new__(cls)
try:
name, metadata, args = args[0], args[1], args[2:]
except __HOLE__:
raise TypeError("Table() takes at least two arguments")
schema = kw.get('schema', None)
if schema is None:
schema = metadata.schema
keep_existing = kw.pop('keep_existing', False)
extend_existing = kw.pop('extend_existing', False)
if 'useexisting' in kw:
msg = "useexisting is deprecated. Use extend_existing."
util.warn_deprecated(msg)
if extend_existing:
msg = "useexisting is synonymous with extend_existing."
raise exc.ArgumentError(msg)
extend_existing = kw.pop('useexisting', False)
if keep_existing and extend_existing:
msg = "keep_existing and extend_existing are mutually exclusive."
raise exc.ArgumentError(msg)
mustexist = kw.pop('mustexist', False)
key = _get_table_key(name, schema)
if key in metadata.tables:
if not keep_existing and not extend_existing and bool(args):
raise exc.InvalidRequestError(
"Table '%s' is already defined for this MetaData "
"instance. Specify 'extend_existing=True' "
"to redefine "
"options and columns on an "
"existing Table object." % key)
table = metadata.tables[key]
if extend_existing:
table._init_existing(*args, **kw)
return table
else:
if mustexist:
raise exc.InvalidRequestError(
"Table '%s' not defined" % (key))
table = object.__new__(cls)
table.dispatch.before_parent_attach(table, metadata)
metadata._add_table(name, schema, table)
try:
table._init(name, metadata, *args, **kw)
table.dispatch.after_parent_attach(table, metadata)
return table
except:
metadata._remove_table(name, schema)
raise | IndexError | dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/sql/schema.py/Table.__new__ |
2,052 | def _make_proxy(self, selectable, name=None, key=None,
name_is_truncatable=False, **kw):
"""Create a *proxy* for this column.
This is a copy of this ``Column`` referenced by a different parent
(such as an alias or select statement). The column should
be used only in select scenarios, as its full DDL/default
information is not transferred.
"""
fk = [ForeignKey(f.column, _constraint=f.constraint)
for f in self.foreign_keys]
if name is None and self.name is None:
raise exc.InvalidRequestError(
"Cannot initialize a sub-selectable"
" with this Column object until its 'name' has "
"been assigned.")
try:
c = self._constructor(
_as_truncated(name or self.name) if
name_is_truncatable else (name or self.name),
self.type,
key=key if key else name if name else self.key,
primary_key=self.primary_key,
nullable=self.nullable,
_proxies=[self], *fk)
except __HOLE__:
util.raise_from_cause(
TypeError(
"Could not create a copy of this %r object. "
"Ensure the class includes a _constructor() "
"attribute or method which accepts the "
"standard Column constructor arguments, or "
"references the Column class itself." % self.__class__)
)
c.table = selectable
selectable._columns.add(c)
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns[c.key]
if self.primary_key:
selectable.primary_key.add(c)
c.dispatch.after_parent_attach(c, selectable)
return c | TypeError | dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/sql/schema.py/Column._make_proxy |
2,053 | def _maybe_wrap_callable(self, fn):
"""Wrap callables that don't accept a context.
This is to allow easy compatibility with default callables
that aren't specific to accepting of a context.
"""
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except __HOLE__:
return lambda ctx: fn()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
if positionals == 0:
return lambda ctx: fn()
elif positionals == 1:
return fn
else:
raise exc.ArgumentError(
"ColumnDefault Python function takes zero or one "
"positional arguments") | TypeError | dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/sql/schema.py/ColumnDefault._maybe_wrap_callable |
2,054 | @property
def table(self):
try:
if isinstance(self.parent, Table):
return self.parent
except __HOLE__:
pass
raise exc.InvalidRequestError(
"This constraint is not bound to a table. Did you "
"mean to call table.append_constraint(constraint) ?") | AttributeError | dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/sql/schema.py/Constraint.table |
2,055 | def _set_parent(self, table):
super(ForeignKeyConstraint, self)._set_parent(table)
self._validate_dest_table(table)
for col, fk in self._elements.items():
# string-specified column names now get
# resolved to Column objects
if isinstance(col, util.string_types):
try:
col = table.c[col]
except __HOLE__:
raise exc.ArgumentError(
"Can't create ForeignKeyConstraint "
"on table '%s': no column "
"named '%s' is present." % (table.description, col))
if not hasattr(fk, 'parent') or \
fk.parent is not col:
fk._set_parent_with_dispatch(col)
if self.use_alter:
def supports_alter(ddl, event, schema_item, bind, **kw):
return table in set(kw['tables']) and \
bind.dialect.supports_alter
event.listen(table.metadata, "after_create",
ddl.AddConstraint(self, on=supports_alter))
event.listen(table.metadata, "before_drop",
ddl.DropConstraint(self, on=supports_alter)) | KeyError | dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/sql/schema.py/ForeignKeyConstraint._set_parent |
2,056 | @util.dependencies("sqlalchemy.engine.url")
def _bind_to(self, url, bind):
"""Bind to a Connectable in the caller's thread."""
if isinstance(bind, util.string_types + (url.URL, )):
try:
self.context._engine = self.__engines[bind]
except __HOLE__:
e = sqlalchemy.create_engine(bind)
self.__engines[bind] = e
self.context._engine = e
else:
# TODO: this is squirrely. we shouldn't have to hold onto engines
# in a case like this
if bind not in self.__engines:
self.__engines[bind] = bind
self.context._engine = bind | KeyError | dataset/ETHPy150Open RoseOu/flasky/venv/lib/python2.7/site-packages/sqlalchemy/sql/schema.py/ThreadLocalMetaData._bind_to |
2,057 | def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
path = get_data_filepath(get_benchmark_filename(symbol))
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.Series.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new benchmark data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n"
"Downloading benchmark data for '{symbol}'.",
start=first_date,
end=last_date,
symbol=symbol,
path=path,
)
try:
data = get_benchmark_returns(
symbol,
first_date - trading_day,
last_date,
)
data.to_csv(path)
except (OSError, IOError, __HOLE__):
logger.exception('failed to cache the new benchmark returns')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data | HTTPError | dataset/ETHPy150Open quantopian/zipline/zipline/data/loader.py/ensure_benchmark_data |
2,058 | def ensure_treasury_data(bm_symbol, first_date, last_date, now):
"""
Ensure we have treasury data from treasury module associated with
`bm_symbol`.
Parameters
----------
bm_symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
bm_symbol, INDEX_MAPPING['^GSPC']
)
first_date = max(first_date, loader_module.earliest_possible_date())
path = get_data_filepath(filename)
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new treasury data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, __HOLE__, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(path)
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data | IOError | dataset/ETHPy150Open quantopian/zipline/zipline/data/loader.py/ensure_treasury_data |
2,059 | def __iter__(self):
yield self.car
try:
iterator = (i for i in self.cdr)
except __HOLE__:
if self.cdr is not None:
yield self.cdr
raise TypeError("Iteration on malformed cons")
else:
for i in iterator:
yield i | TypeError | dataset/ETHPy150Open hylang/hy/hy/models/cons.py/HyCons.__iter__ |
2,060 | def __virtual__():
try:
if salt.utils.is_proxy() and __opts__['proxy']['proxytype'] == 'fx2':
return __virtualname__
except __HOLE__:
pass
return False | KeyError | dataset/ETHPy150Open saltstack/salt/salt/grains/fx2.py/__virtual__ |
2,061 | def _find_credentials():
'''
Cycle through all the possible credentials and return the first one that
works
'''
usernames = []
usernames.append(__pillar__['proxy'].get('admin_username', 'root'))
if 'fallback_admin_username' in __pillar__.get('proxy'):
usernames.append(__pillar__['proxy'].get('fallback_admin_username'))
for user in usernames:
for pwd in __pillar__['proxy']['passwords']:
r = salt.modules.dracr.get_chassis_name(
host=__pillar__['proxy']['host'],
admin_username=user,
admin_password=pwd)
# Retcode will be present if the chassis_name call failed
try:
if r.get('retcode', None) is None:
__opts__['proxy']['admin_username'] = user
__opts__['proxy']['admin_password'] = pwd
return (user, pwd)
except __HOLE__:
# Then the above was a string, and we can return the username
# and password
__opts__['proxy']['admin_username'] = user
__opts__['proxy']['admin_password'] = pwd
return (user, pwd)
logger.debug('grains fx2.find_credentials found no valid credentials, using Dell default')
return ('root', 'calvin') | AttributeError | dataset/ETHPy150Open saltstack/salt/salt/grains/fx2.py/_find_credentials |
2,062 | def location():
if not GRAINS_CACHE:
GRAINS_CACHE.update(_grains())
try:
return {'location': GRAINS_CACHE.get('Chassis Information').get('Chassis Location')}
except __HOLE__:
return {'location': 'Unknown'} | AttributeError | dataset/ETHPy150Open saltstack/salt/salt/grains/fx2.py/location |
2,063 | def SmartSet(self, obj, attr_name, new_attr):
"""Replace obj.attr_name with new_attr. This method is smart and works
at the module, class, and instance level while preserving proper
inheritance. It will not stub out C types however unless that has been
explicitly allowed by the type.
This method supports the case where attr_name is a staticmethod or a
classmethod of obj.
Notes:
- If obj is an instance, then it is its class that will actually be
stubbed. Note that the method Set() does not do that: if obj is
an instance, it (and not its class) will be stubbed.
- The stubbing is using the builtin getattr and setattr. So, the __get__
and __set__ will be called when stubbing (TODO: A better idea would
probably be to manipulate obj.__dict__ instead of getattr() and
setattr()).
Raises AttributeError if the attribute cannot be found.
"""
if (inspect.ismodule(obj) or
(not inspect.isclass(obj) and obj.__dict__.has_key(attr_name))):
orig_obj = obj
orig_attr = getattr(obj, attr_name)
else:
if not inspect.isclass(obj):
mro = list(inspect.getmro(obj.__class__))
else:
mro = list(inspect.getmro(obj))
mro.reverse()
orig_attr = None
for cls in mro:
try:
orig_obj = cls
orig_attr = getattr(obj, attr_name)
except __HOLE__:
continue
if orig_attr is None:
raise AttributeError("Attribute not found.")
# Calling getattr() on a staticmethod transforms it to a 'normal' function.
# We need to ensure that we put it back as a staticmethod.
old_attribute = obj.__dict__.get(attr_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
orig_attr = staticmethod(orig_attr)
self.stubs.append((orig_obj, attr_name, orig_attr))
setattr(orig_obj, attr_name, new_attr) | AttributeError | dataset/ETHPy150Open AppScale/appscale/AppServer/lib/mox/stubout.py/StubOutForTesting.SmartSet |
2,064 | @classmethod
def get_user_data(cls, couch_user, property_names):
hubspot_properties = {}
hubspot_properties.update(get_subscription_properties_by_user(couch_user))
hubspot_properties.update(get_domain_membership_properties(couch_user))
hubspot_properties.update(get_ab_test_properties(couch_user))
try:
data = [{"property": prop, "value": hubspot_properties[prop]} for prop in property_names]
except __HOLE__:
raise CommandError("Property should be one of following\n{}".format(
hubspot_properties.keys()
))
user_data = {
"email": couch_user.email,
"properties": data
}
return user_data | KeyError | dataset/ETHPy150Open dimagi/commcare-hq/corehq/apps/analytics/management/commands/update_hubspot_properties.py/Command.get_user_data |
2,065 | def _create_streams(self, stream_type, parser, video):
try:
streams = parser(self.session, video["url"])
return streams.items()
except __HOLE__ as err:
self.logger.error("Failed to extract {0} streams: {1}",
stream_type, err) | IOError | dataset/ETHPy150Open chrippa/livestreamer/src/livestreamer/plugins/svtplay.py/SVTPlay._create_streams |
2,066 | def rescale(self, units):
'''
Return a copy of the :class:`IrregularlySampledSignal` converted to the
specified units
'''
to_dims = pq.quantity.validate_dimensionality(units)
if self.dimensionality == to_dims:
to_u = self.units
signal = np.array(self)
else:
to_u = pq.Quantity(1.0, to_dims)
from_u = pq.Quantity(1.0, self.dimensionality)
try:
cf = pq.quantity.get_conversion_factor(from_u, to_u)
except __HOLE__:
raise ValueError('Unable to convert between units of "%s" \
and "%s"' % (from_u._dimensionality,
to_u._dimensionality))
signal = cf * self.magnitude
new = self.__class__(times=self.times, signal=signal, units=to_u)
new._copy_data_complement(self)
new.annotations.update(self.annotations)
return new | AssertionError | dataset/ETHPy150Open NeuralEnsemble/python-neo/neo/core/irregularlysampledsignal.py/IrregularlySampledSignal.rescale |
2,067 | def __init__(self, dspfile, source, env):
self.dspfile = str(dspfile)
try:
get_abspath = dspfile.get_abspath
except __HOLE__:
self.dspabs = os.path.abspath(dspfile)
else:
self.dspabs = get_abspath()
if 'variant' not in env:
raise SCons.Errors.InternalError("You must specify a 'variant' argument (i.e. 'Debug' or " +\
"'Release') to create an MSVSProject.")
elif SCons.Util.is_String(env['variant']):
variants = [env['variant']]
elif SCons.Util.is_List(env['variant']):
variants = env['variant']
if 'buildtarget' not in env or env['buildtarget'] == None:
buildtarget = ['']
elif SCons.Util.is_String(env['buildtarget']):
buildtarget = [env['buildtarget']]
elif SCons.Util.is_List(env['buildtarget']):
if len(env['buildtarget']) != len(variants):
raise SCons.Errors.InternalError("Sizes of 'buildtarget' and 'variant' lists must be the same.")
buildtarget = []
for bt in env['buildtarget']:
if SCons.Util.is_String(bt):
buildtarget.append(bt)
else:
buildtarget.append(bt.get_abspath())
else:
buildtarget = [env['buildtarget'].get_abspath()]
if len(buildtarget) == 1:
bt = buildtarget[0]
buildtarget = []
for _ in variants:
buildtarget.append(bt)
if 'outdir' not in env or env['outdir'] == None:
outdir = ['']
elif SCons.Util.is_String(env['outdir']):
outdir = [env['outdir']]
elif SCons.Util.is_List(env['outdir']):
if len(env['outdir']) != len(variants):
raise SCons.Errors.InternalError("Sizes of 'outdir' and 'variant' lists must be the same.")
outdir = []
for s in env['outdir']:
if SCons.Util.is_String(s):
outdir.append(s)
else:
outdir.append(s.get_abspath())
else:
outdir = [env['outdir'].get_abspath()]
if len(outdir) == 1:
s = outdir[0]
outdir = []
for v in variants:
outdir.append(s)
if 'runfile' not in env or env['runfile'] == None:
runfile = buildtarget[-1:]
elif SCons.Util.is_String(env['runfile']):
runfile = [env['runfile']]
elif SCons.Util.is_List(env['runfile']):
if len(env['runfile']) != len(variants):
raise SCons.Errors.InternalError("Sizes of 'runfile' and 'variant' lists must be the same.")
runfile = []
for s in env['runfile']:
if SCons.Util.is_String(s):
runfile.append(s)
else:
runfile.append(s.get_abspath())
else:
runfile = [env['runfile'].get_abspath()]
if len(runfile) == 1:
s = runfile[0]
runfile = []
for v in variants:
runfile.append(s)
self.sconscript = env['MSVSSCONSCRIPT']
cmdargs = env.get('cmdargs', '')
self.env = env
if 'name' in self.env:
self.name = self.env['name']
else:
self.name = os.path.basename(SCons.Util.splitext(self.dspfile)[0])
self.name = self.env.subst(self.name)
sourcenames = [
'Source Files',
'Header Files',
'Local Headers',
'Resource Files',
'Other Files']
self.sources = {}
for n in sourcenames:
self.sources[n] = []
self.configs = {}
self.nokeep = 0
if 'nokeep' in env and env['variant'] != 0:
self.nokeep = 1
if self.nokeep == 0 and os.path.exists(self.dspabs):
self.Parse()
for t in zip(sourcenames,self.srcargs):
if t[1] in self.env:
if SCons.Util.is_List(self.env[t[1]]):
for i in self.env[t[1]]:
if not i in self.sources[t[0]]:
self.sources[t[0]].append(i)
else:
if not self.env[t[1]] in self.sources[t[0]]:
self.sources[t[0]].append(self.env[t[1]])
for n in sourcenames:
#TODO 2.4: compat layer supports sorted(key=) but not sort(key=)
#TODO 2.4: self.sources[n].sort(key=lambda a: a.lower())
self.sources[n] = sorted(self.sources[n], key=lambda a: a.lower())
def AddConfig(self, variant, buildtarget, outdir, runfile, cmdargs, dspfile=dspfile):
config = Config()
config.buildtarget = buildtarget
config.outdir = outdir
config.cmdargs = cmdargs
config.runfile = runfile
match = re.match('(.*)\|(.*)', variant)
if match:
config.variant = match.group(1)
config.platform = match.group(2)
else:
config.variant = variant
config.platform = 'Win32'
self.configs[variant] = config
print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dspfile) + "'"
for i in range(len(variants)):
AddConfig(self, variants[i], buildtarget[i], outdir[i], runfile[i], cmdargs)
self.platforms = []
for key in self.configs.keys():
platform = self.configs[key].platform
if not platform in self.platforms:
self.platforms.append(platform) | AttributeError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_DSPGenerator.__init__ |
2,068 | def Parse(self):
try:
dspfile = open(self.dspabs,'r')
except __HOLE__:
return # doesn't exist yet, so can't add anything to configs.
line = dspfile.readline()
while line:
if line.find("# End Project") > -1:
break
line = dspfile.readline()
line = dspfile.readline()
datas = line
while line and line != '\n':
line = dspfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.configs.update(data)
data = None
line = dspfile.readline()
datas = line
while line and line != '\n':
line = dspfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
# it has a "# " in front of it, so we strip that.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.sources.update(data) | IOError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV6DSP.Parse |
2,069 | def Build(self):
try:
self.file = open(self.dspabs,'w')
except __HOLE__, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
else:
self.PrintHeader()
self.PrintProject()
self.file.close() | IOError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV6DSP.Build |
2,070 | def Parse(self):
try:
dspfile = open(self.dspabs,'r')
except IOError:
return # doesn't exist yet, so can't add anything to configs.
line = dspfile.readline()
while line:
if line.find('<!-- SCons Data:') > -1:
break
line = dspfile.readline()
line = dspfile.readline()
datas = line
while line and line != '\n':
line = dspfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.configs.update(data)
data = None
line = dspfile.readline()
datas = line
while line and line != '\n':
line = dspfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except __HOLE__:
raise
except:
return # unable to unpickle any data for some reason
self.sources.update(data) | KeyboardInterrupt | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV7DSP.Parse |
2,071 | def Build(self):
try:
self.file = open(self.dspabs,'w')
except __HOLE__, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
else:
self.PrintHeader()
self.PrintProject()
self.file.close() | IOError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV7DSP.Build |
2,072 | def PrintProject(self):
name = self.name
confkeys = sorted(self.configs.keys())
self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\n')
for kind in confkeys:
variant = self.configs[kind].variant
platform = self.configs[kind].platform
self.file.write(V10DSPPropertyGroupCondition % locals())
self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\n')
self.file.write('\t<ImportGroup Label="ExtensionSettings">\n')
self.file.write('\t</ImportGroup>\n')
for kind in confkeys:
variant = self.configs[kind].variant
platform = self.configs[kind].platform
self.file.write(V10DSPImportGroupCondition % locals())
self.file.write('\t<PropertyGroup Label="UserMacros" />\n')
self.file.write('\t<PropertyGroup>\n')
self.file.write('\t<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>\n')
for kind in confkeys:
variant = self.configs[kind].variant
platform = self.configs[kind].platform
outdir = self.configs[kind].outdir
buildtarget = self.configs[kind].buildtarget
runfile = self.configs[kind].runfile
cmdargs = self.configs[kind].cmdargs
env_has_buildtarget = 'MSVSBUILDTARGET' in self.env
if not env_has_buildtarget:
self.env['MSVSBUILDTARGET'] = buildtarget
starting = 'echo Starting SCons && '
if cmdargs:
cmdargs = ' ' + cmdargs
else:
cmdargs = ''
buildcmd = xmlify(starting + self.env.subst('$MSVSBUILDCOM', 1) + cmdargs)
rebuildcmd = xmlify(starting + self.env.subst('$MSVSREBUILDCOM', 1) + cmdargs)
cleancmd = xmlify(starting + self.env.subst('$MSVSCLEANCOM', 1) + cmdargs)
# This isn't perfect; CPPDEFINES and CPPPATH can contain $TARGET and $SOURCE,
# so they could vary depending on the command being generated. This code
# assumes they don't.
preprocdefs = xmlify(';'.join(processDefines(self.env.get('CPPDEFINES', []))))
includepath_Dirs = processIncludes(self.env.get('CPPPATH', []), self.env, None, None)
includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
if not env_has_buildtarget:
del self.env['MSVSBUILDTARGET']
self.file.write(V10DSPCommandLine % locals())
self.file.write('\t</PropertyGroup>\n')
#filter settings in MSVS 2010 are stored in separate file
self.filtersabs = self.dspabs + '.filters'
try:
self.filters_file = open(self.filtersabs, 'w')
except __HOLE__, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.filtersabs + '" for writing:' + str(detail))
self.filters_file.write('<?xml version="1.0" encoding="utf-8"?>\n'
'<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\n')
self.PrintSourceFiles()
self.filters_file.write('</Project>')
self.filters_file.close()
self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\n'
'\t<ImportGroup Label="ExtensionTargets">\n'
'\t</ImportGroup>\n'
'</Project>\n')
if self.nokeep == 0:
# now we pickle some data and add it to the file -- MSDEV will ignore it.
pdata = pickle.dumps(self.configs,1)
pdata = base64.encodestring(pdata)
self.file.write('<!-- SCons Data:\n' + pdata + '\n')
pdata = pickle.dumps(self.sources,1)
pdata = base64.encodestring(pdata)
self.file.write(pdata + '-->\n') | IOError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV10DSP.PrintProject |
2,073 | def Build(self):
try:
self.file = open(self.dspabs, 'w')
except __HOLE__, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
else:
self.PrintHeader()
self.PrintProject()
self.file.close() | IOError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV10DSP.Build |
2,074 | def Parse(self):
try:
dswfile = open(self.dswfile,'r')
except IOError:
return # doesn't exist yet, so can't add anything to configs.
line = dswfile.readline()
while line:
if line[:9] == "EndGlobal":
break
line = dswfile.readline()
line = dswfile.readline()
datas = line
while line:
line = dswfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except __HOLE__:
raise
except:
return # unable to unpickle any data for some reason
self.configs.update(data) | KeyboardInterrupt | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV7DSW.Parse |
2,075 | def Build(self):
try:
self.file = open(self.dswfile,'w')
except __HOLE__, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dswfile + '" for writing:' + str(detail))
else:
self.PrintSolution()
self.file.close() | IOError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV7DSW.Build |
2,076 | def Build(self):
try:
self.file = open(self.dswfile,'w')
except __HOLE__, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dswfile + '" for writing:' + str(detail))
else:
self.PrintWorkspace()
self.file.close() | IOError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/_GenerateV6DSW.Build |
2,077 | def GenerateProject(target, source, env):
# generate the dsp file, according to the version of MSVS.
builddspfile = target[0]
dspfile = builddspfile.srcnode()
# this detects whether or not we're using a VariantDir
if not dspfile is builddspfile:
try:
bdsp = open(str(builddspfile), "w+")
except __HOLE__, detail:
print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
raise
bdsp.write("This is just a placeholder file.\nThe real project file is here:\n%s\n" % dspfile.get_abspath())
GenerateDSP(dspfile, source, env)
if env.get('auto_build_solution', 1):
builddswfile = target[1]
dswfile = builddswfile.srcnode()
if not dswfile is builddswfile:
try:
bdsw = open(str(builddswfile), "w+")
except IOError, detail:
print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
raise
bdsw.write("This is just a placeholder file.\nThe real workspace file is here:\n%s\n" % dswfile.get_abspath())
GenerateDSW(dswfile, source, env) | IOError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/GenerateProject |
2,078 | def projectEmitter(target, source, env):
"""Sets up the DSP dependencies."""
# todo: Not sure what sets source to what user has passed as target,
# but this is what happens. When that is fixed, we also won't have
# to make the user always append env['MSVSPROJECTSUFFIX'] to target.
if source[0] == target[0]:
source = []
# make sure the suffix is correct for the version of MSVS we're running.
(base, suff) = SCons.Util.splitext(str(target[0]))
suff = env.subst('$MSVSPROJECTSUFFIX')
target[0] = base + suff
if not source:
source = 'prj_inputs:'
source = source + env.subst('$MSVSSCONSCOM', 1)
source = source + env.subst('$MSVSENCODING', 1)
# Project file depends on CPPDEFINES and CPPPATH
preprocdefs = xmlify(';'.join(processDefines(env.get('CPPDEFINES', []))))
includepath_Dirs = processIncludes(env.get('CPPPATH', []), env, None, None)
includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
source = source + "; ppdefs:%s incpath:%s"%(preprocdefs, includepath)
if 'buildtarget' in env and env['buildtarget'] != None:
if SCons.Util.is_String(env['buildtarget']):
source = source + ' "%s"' % env['buildtarget']
elif SCons.Util.is_List(env['buildtarget']):
for bt in env['buildtarget']:
if SCons.Util.is_String(bt):
source = source + ' "%s"' % bt
else:
try: source = source + ' "%s"' % bt.get_abspath()
except __HOLE__: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")
else:
try: source = source + ' "%s"' % env['buildtarget'].get_abspath()
except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")
if 'outdir' in env and env['outdir'] != None:
if SCons.Util.is_String(env['outdir']):
source = source + ' "%s"' % env['outdir']
elif SCons.Util.is_List(env['outdir']):
for s in env['outdir']:
if SCons.Util.is_String(s):
source = source + ' "%s"' % s
else:
try: source = source + ' "%s"' % s.get_abspath()
except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")
else:
try: source = source + ' "%s"' % env['outdir'].get_abspath()
except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")
if 'name' in env:
if SCons.Util.is_String(env['name']):
source = source + ' "%s"' % env['name']
else:
raise SCons.Errors.InternalError("name must be a string")
if 'variant' in env:
if SCons.Util.is_String(env['variant']):
source = source + ' "%s"' % env['variant']
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
if SCons.Util.is_String(variant):
source = source + ' "%s"' % variant
else:
raise SCons.Errors.InternalError("name must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be a string or a list of strings")
else:
raise SCons.Errors.InternalError("variant must be specified")
for s in _DSPGenerator.srcargs:
if s in env:
if SCons.Util.is_String(env[s]):
source = source + ' "%s' % env[s]
elif SCons.Util.is_List(env[s]):
for t in env[s]:
if SCons.Util.is_String(t):
source = source + ' "%s"' % t
else:
raise SCons.Errors.InternalError(s + " must be a string or a list of strings")
else:
raise SCons.Errors.InternalError(s + " must be a string or a list of strings")
source = source + ' "%s"' % str(target[0])
source = [SCons.Node.Python.Value(source)]
targetlist = [target[0]]
sourcelist = source
if env.get('auto_build_solution', 1):
env['projects'] = [env.File(t).srcnode() for t in targetlist]
t, s = solutionEmitter(target, target, env)
targetlist = targetlist + t
# Beginning with Visual Studio 2010 for each project file (.vcxproj) we have additional file (.vcxproj.filters)
if float(env['MSVS_VERSION']) >= 10.0:
targetlist.append(targetlist[0] + '.filters')
return (targetlist, sourcelist) | AttributeError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/projectEmitter |
2,079 | def generate(env):
"""Add Builders and construction variables for Microsoft Visual
Studio project files to an Environment."""
try:
env['BUILDERS']['MSVSProject']
except __HOLE__:
env['BUILDERS']['MSVSProject'] = projectBuilder
try:
env['BUILDERS']['MSVSSolution']
except KeyError:
env['BUILDERS']['MSVSSolution'] = solutionBuilder
env['MSVSPROJECTCOM'] = projectAction
env['MSVSSOLUTIONCOM'] = solutionAction
if SCons.Script.call_stack:
# XXX Need to find a way to abstract this; the build engine
# shouldn't depend on anything in SCons.Script.
env['MSVSSCONSCRIPT'] = SCons.Script.call_stack[0].sconscript
else:
global default_MSVS_SConscript
if default_MSVS_SConscript is None:
default_MSVS_SConscript = env.File('SConstruct')
env['MSVSSCONSCRIPT'] = default_MSVS_SConscript
env['MSVSSCONS'] = '"%s" -c "%s"' % (python_executable, getExecScriptMain(env))
env['MSVSSCONSFLAGS'] = '-C "${MSVSSCONSCRIPT.dir.abspath}" -f ${MSVSSCONSCRIPT.name}'
env['MSVSSCONSCOM'] = '$MSVSSCONS $MSVSSCONSFLAGS'
env['MSVSBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"'
env['MSVSREBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"'
env['MSVSCLEANCOM'] = '$MSVSSCONSCOM -c "$MSVSBUILDTARGET"'
# Set-up ms tools paths for default version
msvc_setup_env_once(env)
if 'MSVS_VERSION' in env:
version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
else:
(version_num, suite) = (7.0, None) # guess at a default
if 'MSVS' not in env:
env['MSVS'] = {}
if (version_num < 7.0):
env['MSVS']['PROJECTSUFFIX'] = '.dsp'
env['MSVS']['SOLUTIONSUFFIX'] = '.dsw'
elif (version_num < 10.0):
env['MSVS']['PROJECTSUFFIX'] = '.vcproj'
env['MSVS']['SOLUTIONSUFFIX'] = '.sln'
else:
env['MSVS']['PROJECTSUFFIX'] = '.vcxproj'
env['MSVS']['SOLUTIONSUFFIX'] = '.sln'
if (version_num >= 10.0):
env['MSVSENCODING'] = 'utf-8'
else:
env['MSVSENCODING'] = 'Windows-1252'
env['GET_MSVSPROJECTSUFFIX'] = GetMSVSProjectSuffix
env['GET_MSVSSOLUTIONSUFFIX'] = GetMSVSSolutionSuffix
env['MSVSPROJECTSUFFIX'] = '${GET_MSVSPROJECTSUFFIX}'
env['MSVSSOLUTIONSUFFIX'] = '${GET_MSVSSOLUTIONSUFFIX}'
env['SCONS_HOME'] = os.environ.get('SCONS_HOME') | KeyError | dataset/ETHPy150Open kayhayen/Nuitka/nuitka/build/inline_copy/lib/scons-2.3.2/SCons/Tool/msvs.py/generate |
2,080 | @register.filter
@stringfilter
def obfuscate_email(email):
obfuscated = ""
for char in email:
try:
output = (string.ascii_lowercase.index(char) + 97
if char in string.ascii_lowercase else None)
output = (string.ascii_uppercase.index(char) + 65
if char in string.ascii_uppercase else output)
except __HOLE__:
output = None
if output:
output = "&#%s;" % output
elif char == '@':
output = '@'
else:
output = char
obfuscated += output
return mark_safe(obfuscated) | ValueError | dataset/ETHPy150Open bueda/django-comrade/comrade/core/templatetags/obfuscation.py/obfuscate_email |
2,081 | def _AnalyzeKeywords(self, keywords):
start_time = rdfvalue.RDFDatetime().Now() - rdfvalue.Duration("180d")
end_time = rdfvalue.RDFDatetime(self.LAST_TIMESTAMP)
filtered_keywords = []
unversioned_keywords = []
for k in keywords:
if k.startswith(self.START_TIME_PREFIX):
try:
start_time = rdfvalue.RDFDatetime(k[self.START_TIME_PREFIX_LEN:])
except ValueError:
pass
elif k.startswith(self.END_TIME_PREFIX):
try:
time = rdfvalue.RDFDatetime()
time.ParseFromHumanReadable(k[self.END_TIME_PREFIX_LEN:], eoy=True)
end_time = time
except (__HOLE__, ValueError):
pass
elif k[0] == "+":
kw = k[1:]
filtered_keywords.append(kw)
unversioned_keywords.append(kw)
else:
filtered_keywords.append(k)
if not filtered_keywords:
filtered_keywords.append(".")
return start_time, end_time, filtered_keywords, unversioned_keywords | TypeError | dataset/ETHPy150Open google/grr/grr/lib/client_index.py/ClientIndex._AnalyzeKeywords |
2,082 | def parse_date(self, s):
try:
return dateutil.parser.parse(s)
except (__HOLE__, ValueError):
return None | TypeError | dataset/ETHPy150Open mollyproject/mollyproject/molly/apps/service_status/providers/rss_module.py/RSSModuleServiceStatusProvider.parse_date |
2,083 | def safe_parse(self, f, s):
try:
return f(s)
except (TypeError, __HOLE__):
return None | ValueError | dataset/ETHPy150Open mollyproject/mollyproject/molly/apps/service_status/providers/rss_module.py/RSSModuleServiceStatusProvider.safe_parse |
2,084 | def get_status(self):
services_feed = feedparser.parse(self.url)
try:
lastBuildDate = self.parse_date(services_feed.entries[0].get('ss_lastchecked'))
except __HOLE__, e:
try:
lastBuildDate = self.parse_date(services_feed.headers['last-modified'])
except Exception, e:
lastBuildDate = None
services = []
for service in services_feed.entries:
services.append({
'source': self.slug,
'source_name': self.name,
'name': service.title,
'responding': {'true':True,'false':False}.get(service.get('ss_responding')),
'lastChecked': self.parse_date(service.get('ss_lastchecked')),
'lastSeen': self.parse_date(service.get('ss_lastseen')),
'availability': self.safe_parse(int, service.get('ss_availability')),
'averageResponseTime': self.safe_parse(float, service.get('ss_averageresponsetime')),
'statusMessage': service.get('ss_statusmessage'),
})
services[-1]['status'] = {0: 'down', 100: 'up', None: {True: 'up', False: 'down', }.get(services[-1]['responding'], 'unknown')}.get(services[-1]['availability'], 'partial')
return {
'services': services,
'lastBuildDate': lastBuildDate,
} | IndexError | dataset/ETHPy150Open mollyproject/mollyproject/molly/apps/service_status/providers/rss_module.py/RSSModuleServiceStatusProvider.get_status |
2,085 | def stream_type_priority(stream_types, stream):
stream_type = type(stream[1]).shortname()
try:
prio = stream_types.index(stream_type)
except __HOLE__:
prio = 99
return prio | ValueError | dataset/ETHPy150Open chrippa/livestreamer/src/livestreamer/plugin/plugin.py/stream_type_priority |
2,086 | def streams(self, stream_types=None, sorting_excludes=None):
"""Attempts to extract available streams.
Returns a :class:`dict` containing the streams, where the key is
the name of the stream, most commonly the quality and the value
is a :class:`Stream` object.
The result can contain the synonyms **best** and **worst** which
points to the streams which are likely to be of highest and
lowest quality respectively.
If multiple streams with the same name are found, the order of
streams specified in *stream_types* will determine which stream
gets to keep the name while the rest will be renamed to
"<name>_<stream type>".
The synonyms can be fine tuned with the *sorting_excludes*
parameter. This can be either of these types:
- A list of filter expressions in the format
*[operator]<value>*. For example the filter ">480p" will
exclude streams ranked higher than "480p" from the list
used in the synonyms ranking. Valid operators are >, >=, <
and <=. If no operator is specified then equality will be
tested.
- A function that is passed to filter() with a list of
stream names as input.
:param stream_types: A list of stream types to return.
:param sorting_excludes: Specify which streams to exclude from
the best/worst synonyms.
.. versionchanged:: 1.4.2
Added *priority* parameter.
.. versionchanged:: 1.5.0
Renamed *priority* to *stream_types* and changed behaviour
slightly.
.. versionchanged:: 1.5.0
Added *sorting_excludes* parameter.
.. versionchanged:: 1.6.0
*sorting_excludes* can now be a list of filter expressions
or a function that is passed to filter().
"""
try:
ostreams = self._get_streams()
if isinstance(ostreams, dict):
ostreams = ostreams.items()
# Flatten the iterator to a list so we can reuse it.
if ostreams:
ostreams = list(ostreams)
except NoStreamsError:
return {}
except (IOError, OSError, __HOLE__) as err:
raise PluginError(err)
if not ostreams:
return {}
if stream_types is None:
stream_types = self.default_stream_types(ostreams)
# Add streams depending on stream type and priorities
sorted_streams = sorted(iterate_streams(ostreams),
key=partial(stream_type_priority,
stream_types))
streams = {}
for name, stream in sorted_streams:
stream_type = type(stream).shortname()
if stream_type not in stream_types:
continue
existing = streams.get(name)
if existing:
existing_stream_type = type(existing).shortname()
if existing_stream_type != stream_type:
name = "{0}_{1}".format(name, stream_type)
if name in streams:
name = "{0}_alt".format(name)
num_alts = len(list(filter(lambda n: n.startswith(name), streams.keys())))
# We shouldn't need more than 2 alt streams
if num_alts >= 2:
continue
elif num_alts > 0:
name = "{0}{1}".format(name, num_alts + 1)
# Validate stream name and discard the stream if it's bad.
match = re.match("([A-z0-9_+]+)", name)
if match:
name = match.group(1)
else:
self.logger.debug("The stream '{0}' has been ignored "
"since it is badly named.", name)
continue
# Force lowercase name and replace space with underscore.
streams[name.lower()] = stream
# Create the best/worst synonmys
stream_weight_only = lambda s: (self.stream_weight(s)[0] or
(len(streams) == 1 and 1))
stream_names = filter(stream_weight_only, streams.keys())
sorted_streams = sorted(stream_names, key=stream_weight_only)
if isinstance(sorting_excludes, list):
for expr in sorting_excludes:
filter_func = stream_sorting_filter(expr, self.stream_weight)
sorted_streams = list(filter(filter_func, sorted_streams))
elif callable(sorting_excludes):
sorted_streams = list(filter(sorting_excludes, sorted_streams))
if len(sorted_streams) > 0:
best = sorted_streams[-1]
worst = sorted_streams[0]
streams["best"] = streams[best]
streams["worst"] = streams[worst]
return streams | ValueError | dataset/ETHPy150Open chrippa/livestreamer/src/livestreamer/plugin/plugin.py/Plugin.streams |
2,087 | def MultithreadedMainSignalHandler(signal_num, cur_stack_frame):
"""Final signal handler for multi-threaded main process."""
if signal_num == signal.SIGINT:
if logging.getLogger().isEnabledFor(logging.DEBUG):
stack_trace = ''.join(traceback.format_list(traceback.extract_stack()))
err = ('DEBUG: Caught CTRL-C (signal %d) - Exception stack trace:\n'
' %s' % (signal_num, re.sub('\\n', '\n ', stack_trace)))
try:
sys.stderr.write(err.encode(UTF8))
except __HOLE__:
# Can happen when outputting invalid Unicode filenames.
sys.stderr.write(err)
else:
sys.stderr.write('Caught CTRL-C (signal %d) - exiting\n' % signal_num)
KillProcess(os.getpid()) | UnicodeDecodeError | dataset/ETHPy150Open GoogleCloudPlatform/gsutil/gslib/sig_handling.py/MultithreadedMainSignalHandler |
2,088 | def __setitem__(self, key, value):
content_type = None
#if value and Magic:
# content_type = self._magic.from_buffer(value)
spec = self._get_spec(filename=key, content_type=content_type)
try:
self.put(value, **spec)
except __HOLE__:
raise TypeError("GridFS value mus be string not %s" % type(value)) | TypeError | dataset/ETHPy150Open namlook/mongokit/mongokit/grid.py/FS.__setitem__ |
2,089 | def get_version(self, filename, version=-1, **kwargs):
"""Get a file from GridFS by ``"filename"`` or metadata fields.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
An index on ``{filename: 1, uploadDate: -1}`` will
automatically be created when this method is called the first
time.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 1.11
`filename` defaults to None;
.. versionadded:: 1.11
Accept keyword arguments to find files by custom metadata.
.. versionadded:: 1.9
"""
# This is took from pymongo source. We need to go a little deeper here
self._GridFS__files.ensure_index([("filename", ASCENDING),
("uploadDate", DESCENDING)])
########## Begin of MongoKit hack ##########
cursor = self._GridFS__files.find(self._get_spec(filename=filename, **kwargs))
########## end of MongoKit hack ############
if version < 0:
skip = abs(version) - 1
cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
else:
cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING)
try:
grid_file = cursor.next()
return GridOut(self._GridFS__collection, grid_file["_id"])
except __HOLE__:
raise NoFile("no version %d for filename %r" % (version, filename)) | StopIteration | dataset/ETHPy150Open namlook/mongokit/mongokit/grid.py/FS.get_version |
2,090 | def _decorate_toplevel(fn):
def decorate_render(render_fn):
def go(context, *args, **kw):
def y(*args, **kw):
return render_fn(context, *args, **kw)
try:
y.__name__ = render_fn.__name__[7:]
except __HOLE__:
# < Python 2.4
pass
return fn(y)(context, *args, **kw)
return go
return decorate_render | TypeError | dataset/ETHPy150Open cloudera/hue/desktop/core/ext-py/Mako-0.8.1/mako/runtime.py/_decorate_toplevel |
2,091 | def dframe(self):
"Creates a pandas DataFrame from the stored keys and data."
try:
import pandas
except __HOLE__:
raise Exception("Cannot build a DataFrame without the pandas library.")
labels = self.dimensions('key', True) + [self.group]
return pandas.DataFrame(
[dict(zip(labels, k + (v,))) for (k, v) in self.data.items()]) | ImportError | dataset/ETHPy150Open ioam/holoviews/holoviews/core/ndmapping.py/MultiDimensionalMapping.dframe |
2,092 | def get_credentials(self):
"""
See more: http://developer.oanda.com/rest-live/accounts/
"""
url = "{0}/{1}/accounts/{2}".format(
self.domain,
self.API_VERSION,
self.account_id
)
try:
response = self._Client__call(uri=url)
assert len(response) > 0
return response
except RequestException:
return False
except __HOLE__:
return False | AssertionError | dataset/ETHPy150Open toloco/pyoanda/pyoanda/client.py/Client.get_credentials |
2,093 | def __call(self, uri, params=None, method="get"):
"""Only returns the response, nor the status_code
"""
try:
resp = self.__get_response(uri, params, method, False)
rjson = resp.json(**self.json_options)
assert resp.ok
except __HOLE__:
msg = "OCode-{}: {}".format(resp.status_code, rjson["message"])
raise BadRequest(msg)
except Exception as e:
msg = "Bad response: {}".format(e)
log.error(msg, exc_info=True)
raise BadRequest(msg)
else:
return rjson | AssertionError | dataset/ETHPy150Open toloco/pyoanda/pyoanda/client.py/Client.__call |
2,094 | def __call_stream(self, uri, params=None, method="get"):
"""Returns an stream response
"""
try:
resp = self.__get_response(uri, params, method, True)
assert resp.ok
except __HOLE__:
raise BadRequest(resp.status_code)
except Exception as e:
log.error("Bad response: {}".format(e), exc_info=True)
else:
return resp | AssertionError | dataset/ETHPy150Open toloco/pyoanda/pyoanda/client.py/Client.__call_stream |
2,095 | def get_instruments(self):
"""
See more:
http://developer.oanda.com/rest-live/rates/#getInstrumentList
"""
url = "{0}/{1}/instruments".format(self.domain, self.API_VERSION)
params = {"accountId": self.account_id}
try:
response = self._Client__call(uri=url, params=params)
assert len(response) > 0
return response
except RequestException:
return False
except __HOLE__:
return False | AssertionError | dataset/ETHPy150Open toloco/pyoanda/pyoanda/client.py/Client.get_instruments |
2,096 | def get_prices(self, instruments, stream=True):
"""
See more:
http://developer.oanda.com/rest-live/rates/#getCurrentPrices
"""
url = "{0}/{1}/prices".format(
self.domain_stream if stream else self.domain,
self.API_VERSION
)
params = {"accountId": self.account_id, "instruments": instruments}
call = {"uri": url, "params": params, "method": "get"}
try:
if stream:
return self._Client__call_stream(**call)
else:
return self._Client__call(**call)
except RequestException:
return False
except __HOLE__:
return False | AssertionError | dataset/ETHPy150Open toloco/pyoanda/pyoanda/client.py/Client.get_prices |
2,097 | def get_instrument_history(self, instrument, candle_format="bidask",
granularity='S5', count=500,
daily_alignment=None, alignment_timezone=None,
weekly_alignment="Monday", start=None,
end=None):
"""
See more:
http://developer.oanda.com/rest-live/rates/#retrieveInstrumentHistory
"""
url = "{0}/{1}/candles".format(self.domain, self.API_VERSION)
params = {
"accountId": self.account_id,
"instrument": instrument,
"candleFormat": candle_format,
"granularity": granularity,
"count": count,
"dailyAlignment": daily_alignment,
"alignmentTimezone": alignment_timezone,
"weeklyAlignment": weekly_alignment,
"start": start,
"end": end,
}
try:
return self._Client__call(uri=url, params=params, method="get")
except RequestException:
return False
except __HOLE__:
return False | AssertionError | dataset/ETHPy150Open toloco/pyoanda/pyoanda/client.py/Client.get_instrument_history |
2,098 | def get_orders(self, instrument=None, count=50):
"""
See more:
http://developer.oanda.com/rest-live/orders/#getOrdersForAnAccount
"""
url = "{0}/{1}/accounts/{2}/orders".format(
self.domain,
self.API_VERSION,
self.account_id
)
params = {"instrument": instrument, "count": count}
try:
return self._Client__call(uri=url, params=params, method="get")
except RequestException:
return False
except __HOLE__:
return False | AssertionError | dataset/ETHPy150Open toloco/pyoanda/pyoanda/client.py/Client.get_orders |
2,099 | def get_order(self, order_id):
"""
See more:
http://developer.oanda.com/rest-live/orders/#getInformationForAnOrder
"""
url = "{0}/{1}/accounts/{2}/orders/{3}".format(
self.domain,
self.API_VERSION,
self.account_id,
order_id
)
try:
return self._Client__call(uri=url, method="get")
except RequestException:
return False
except __HOLE__:
return False | AssertionError | dataset/ETHPy150Open toloco/pyoanda/pyoanda/client.py/Client.get_order |