desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
def satisfies(self, other):
    """Check that providers of virtual specs are compatible.

    Two indexes are compatible when every virtual package they have in
    common yields a non-empty crossed provider map.
    """
    common = set(self.providers) & set(other.providers)
    # No shared virtual packages: nothing can conflict.
    if not common:
        return True

    # Cross every shared virtual's provider maps; record the ones that
    # produced a usable (truthy) result.
    crossed_maps = {}
    for vpkg in common:
        crossed = self._cross_provider_maps(self.providers[vpkg],
                                            other.providers[vpkg])
        if crossed:
            crossed_maps[vpkg] = crossed

    # Compatible only if every shared virtual crossed successfully.
    return all(vpkg in crossed_maps for vpkg in common)
|
def merge(self, other):
    """Merge `other` ProviderIndex into this one.

    Provider sets for packages present in both indexes are unioned;
    packages known only to ``other`` are adopted wholesale.
    """
    # Work on a copy so this index never aliases the other's sets.
    other = other.copy()

    for pkg, opdict in other.providers.items():
        if pkg not in self.providers:
            # Package unknown here: take the other index's entry as-is.
            self.providers[pkg] = opdict
            continue

        spdict = self.providers[pkg]
        for provided_spec, opset in opdict.items():
            if provided_spec in spdict:
                spdict[provided_spec] = spdict[provided_spec].union(opset)
            else:
                spdict[provided_spec] = opset
|
def remove_provider(self, pkg_name):
    """Remove a provider from the ProviderIndex.

    Drops every provider spec whose package is ``pkg_name``, then prunes
    provider sets and package entries that became empty.
    """
    emptied_pkgs = []
    for pkg, pkg_dict in self.providers.items():
        emptied_psets = []
        for provided, pset in pkg_dict.items():
            # Discard all providers that come from pkg_name.
            doomed = set(p for p in pset if p.fullname == pkg_name)
            pset.difference_update(doomed)
            if not pset:
                emptied_psets.append(provided)
        for provided in emptied_psets:
            del pkg_dict[provided]
        if not pkg_dict:
            emptied_pkgs.append(pkg)
    for pkg in emptied_pkgs:
        del self.providers[pkg]
|
def copy(self):
    """Deep copy of this ProviderIndex.

    Provider sets are rebuilt with copies of each member spec, so the
    clone shares no mutable state with this index.
    """
    duplicate = ProviderIndex()
    duplicate.providers = self._transform(
        lambda vpkg, pset: (vpkg, {p.copy() for p in pset}))
    return duplicate
|
def __init__(self):
    'IBM Blue Gene/Q system platform.'
    # Register this platform under the name 'bgq'.
    super(Bgq, self).__init__('bgq')
    # Register front-end and back-end targets under the names held in
    # self.front_end / self.back_end (presumably set by the base
    # Platform class -- TODO confirm).
    self.add_target(self.front_end, Target(self.front_end))
    self.add_target(self.back_end, Target(self.back_end))
    # Login nodes run a standard Linux distro; compute nodes run CNK.
    front_distro = LinuxDistro()
    back_distro = Cnk()
    self.front_os = str(front_distro)
    self.back_os = str(back_distro)
    # Builds default to the compute-node (back-end) OS.
    self.default_os = self.back_os
    self.add_operating_system(str(front_distro), front_distro)
    self.add_operating_system(str(back_distro), back_distro)
|
def __init__(self):
    """Create a Cray system platform.

    Target names should use craype target names but not include the
    'craype-' prefix. Uses first viable target from:
      self
      envars [SPACK_FRONT_END, SPACK_BACK_END]
      configuration file "targets.yaml" with keys 'front_end', 'back_end'
      scanning /etc/bash/bashrc.local for back_end only
    """
    super(Cray, self).__init__('cray')
    # Make all craype targets available. '-' is replaced by '_' so the
    # name is a valid identifier/attribute name.
    for target in self._avail_targets():
        name = target.replace('-', '_')
        self.add_target(name, Target(name, ('craype-%s' % target)))
    # Resolve front_end/back_end: an attribute already set on self wins,
    # then SPACK_FRONT_END / SPACK_BACK_END env vars, then (back_end
    # only) the default modules from a clean login shell.
    for name in ('front_end', 'back_end'):
        _target = getattr(self, name, None)
        if (_target is None):
            _target = os.environ.get(('SPACK_' + name.upper()))
        if ((_target is None) and (name == 'back_end')):
            _target = self._default_target_from_env()
        if (_target is not None):
            safe_name = _target.replace('-', '_')
            setattr(self, name, safe_name)
            # Alias 'front_end'/'back_end' to the concrete target.
            self.add_target(name, self.targets[safe_name])
    # A usable back end is mandatory; it becomes the default target.
    if (self.back_end is not None):
        self.default = self.back_end
        self.add_target('default', self.targets[self.back_end])
    else:
        raise NoPlatformError()
    # Compute nodes run CNL; login nodes present a Linux front end.
    front_distro = CrayFrontend()
    back_distro = Cnl()
    self.default_os = str(back_distro)
    self.back_os = self.default_os
    self.front_os = str(front_distro)
    self.add_operating_system(self.back_os, back_distro)
    self.add_operating_system(self.front_os, front_distro)
|
@classmethod
def setup_platform_environment(cls, pkg, env):
    """Change the linker to default dynamic to be more
    similar to linux/standard linker behavior.
    """
    env.set('CRAYPE_LINK_TYPE', 'dynamic')

    wrapper_dir = join_path(build_env_path, 'cray')
    if not os.path.isdir(wrapper_dir):
        return
    # Put the Cray compiler wrappers first on the search paths.
    for var in ('PATH', 'SPACK_ENV_PATH'):
        env.prepend_path(var, wrapper_dir)
|
def _default_target_from_env(self):
    """Set and return the default CrayPE target loaded in a clean login
    session.

    A bash subshell is launched with a wiped environment and the list of
    loaded modules is parsed for the first acceptable CrayPE target.
    """
    # Compute once and cache the answer on self.default.
    if (getattr(self, 'default', None) is None):
        # 'env -' runs the command with an emptied environment.
        env = which('env')
        env.add_default_arg('-')
        # Clean, non-interactive bash; 'module list -lt' prints the
        # modules loaded by the login profile, one per line.
        output = env(('USER=%s' % os.environ['USER']), ('HOME=%s' % os.environ['HOME']), '/bin/bash', '--noprofile', '--norc', '-c', '. /etc/profile; module list -lt', output=str, error=str)
        self._defmods = _get_modules_in_modulecmd_output(output)
        targets = []
        _fill_craype_targets_from_modules(targets, self._defmods)
        # First viable craype target wins; None when none were loaded.
        self.default = (targets[0] if targets else None)
        tty.debug('Found default modules:', *[(' %s' % mod) for mod in self._defmods])
    return self.default
|
def _avail_targets(self):
    """Return a list of available CrayPE CPU targets."""
    if getattr(self, '_craype_targets', None) is None:
        # Ask the module system for every craype-* module and parse the
        # target names out of the listing; cache the result on self.
        module = get_module_cmd()
        listing = module('avail', '-t', 'craype-', output=str, error=str)
        self._craype_targets = []
        _fill_craype_targets_from_modules(
            self._craype_targets,
            _get_modules_in_modulecmd_output(listing))
    return self._craype_targets
|
@property
def dotted(self):
    """The dotted representation of the version.

    Example:
        >>> version = Version('1-2-3b')
        >>> version.dotted
        Version('1.2.3b')

    Returns:
        Version: The version with separator characters replaced by dots
    """
    normalized = self.string.replace('_', '.').replace('-', '.')
    return Version(normalized)
|
@property
def underscored(self):
    """The underscored representation of the version.

    Example:
        >>> version = Version('1.2.3b')
        >>> version.underscored
        Version('1_2_3b')

    Returns:
        Version: The version with separator characters replaced by
            underscores
    """
    normalized = self.string.replace('-', '_').replace('.', '_')
    return Version(normalized)
|
@property
def dashed(self):
    """The dashed representation of the version.

    Example:
        >>> version = Version('1.2.3b')
        >>> version.dashed
        Version('1-2-3b')

    Returns:
        Version: The version with separator characters replaced by dashes
    """
    normalized = self.string.replace('_', '-').replace('.', '-')
    return Version(normalized)
|
@property
def joined(self):
    """The joined representation of the version.

    Example:
        >>> version = Version('1.2.3b')
        >>> version.joined
        Version('123b')

    Returns:
        Version: The version with separator characters removed
    """
    # Strip every separator character in a single pass.
    return Version(''.join(c for c in self.string if c not in '.-_'))
|
def up_to(self, index):
    """The version up to the specified component.

    Examples:
        >>> version = Version('1.23-4b')
        >>> version.up_to(2)
        Version('1.23')
        >>> version.up_to(4)
        Version('1.23-4b')
        >>> version.up_to(-1)
        Version('1.23-4')

    Returns:
        Version: The first index components of the version
    """
    # Equivalent to self[:index]; negative indexes count from the end.
    return self[slice(index)]
|
def isnumeric(self):
    """Tells if this version is numeric (vs. a non-numeric version). A
    version will be numeric as long as the first section of it is,
    even if it contains non-numeric portions.

    Some numeric versions:
        1, 1.1, 1.1a, 1.a.1b
    Some non-numeric versions:
        develop, system, myfavoritebranch
    """
    leading = self.version[0]
    return isinstance(leading, numbers.Integral)
|
def isdevelop(self):
    """Triggers on the special case of the `@develop` version."""
    return self.string == 'develop'
|
@coerced
def satisfies(self, other):
    """A Version 'satisfies' another if it is at least as specific and has
    a common prefix. e.g., we want [email protected] to satisfy a request for
    [email protected] so that when a user asks to build with [email protected], we can find
    a suitable compiler.
    """
    prefix_len = len(other.version)
    # Can't satisfy something more specific than ourselves.
    if prefix_len > len(self.version):
        return False
    # Satisfied when the other version is a prefix of this one.
    return self.version[:prefix_len] == other.version
|
'Compares two versions, knowing they\'re both numeric'
| def _numeric_lt(self, other):
| for (a, b) in zip(self.version, other.version):
if (a == b):
continue
elif (type(a) != type(b)):
return (type(b) == int)
else:
return (a < b)
return (len(self.version) < len(other.version))
|
@coerced
def __lt__(self, other):
    """Version comparison is designed for consistency with the way RPM
    does things. If you need more complicated versions in installed
    packages, you should override your package's version string to
    express it more sensibly.
    """
    if other is None:
        return False
    if self.version == other.version:
        return False

    # @develop sorts above every other version.
    if self.isdevelop():
        return False
    if other.isdevelop():
        return True

    # Numeric versions sort above non-numeric (branch-like) ones.
    if self.isnumeric():
        return self._numeric_lt(other) if other.isnumeric() else False
    if other.isnumeric():
        return True

    # Neither side is numeric: fall back to string ordering.
    return self.string < other.string
|
def is_predecessor(self, other):
    """True if the other version is the immediate predecessor of this one.

    That is, NO versions v exist such that:
    (self < v < other and v not in self).
    """
    if len(self.version) != len(other.version):
        return False

    mine = self.version[-1]
    theirs = other.version[-1]
    # Only integer final components can be adjacent.
    if type(mine) != int or type(theirs) != int:
        return False
    return theirs - mine == 1
|
@coerced
def __lt__(self, other):
    """Sort VersionRanges lexicographically so that they are ordered first
    by start and then by end. None denotes an open range, so None in
    the start position is less than everything except None, and None in
    the end position is greater than everything but None.
    """
    if other is None:
        return False

    if self.start != other.start:
        # An open start (None) sorts below any concrete start.
        if self.start is None:
            return True
        if other.start is None:
            return False
        return self.start < other.start

    # Starts tie: order by end. An open end (None) sorts above any
    # concrete end.
    if self.end != other.end and other.end is None:
        return True
    return (self.end is not None) and (self.end < other.end)
|
@coerced
def satisfies(self, other):
    """A VersionRange satisfies another if some version in this range
    would satisfy some version in the other range. To do this it must
    either:
      a) Overlap with the other range
      b) The start of this range satisfies the end of the other range.

    This is essentially the same as overlaps(), but overlaps assumes
    that its arguments are specific. That is, 4.7 is interpreted as
    4.7.0.0.0.0... . This function assumes that 4.7 would be satisfied
    by 4.7.3.5, etc.

    Rationale:
    If a user asks for [email protected]:4.7, and a package is only compatible with
    [email protected]:4.8, then that package should be able to build under the
    constraints. Just using overlaps() would not work here.

    Note that we don't need to check whether the end of this range
    would satisfy the start of the other range, because overlaps()
    already covers that case.

    Note further that overlaps() is a symmetric operation, while
    satisfies() is not.
    """
    # NOTE(review): the and/or chain returns the last operand evaluated,
    # so the result may be a falsy non-bool (e.g. None) rather than
    # False -- callers should rely on truthiness only.
    return (self.overlaps(other) or (self.start and other.end and self.start.satisfies(other.end)))
|
def lowest(self):
    """Get the lowest version in the list."""
    # Delegate to the first (smallest) element; empty list has no lowest.
    return self[0].lowest() if self else None
|
def highest(self):
    """Get the highest version in the list."""
    # Delegate to the last (largest) element; empty list has no highest.
    return self[-1].highest() if self else None
|
def to_dict(self):
    """Generate human-readable dict for YAML."""
    if self.concrete:
        # A concrete list is exactly one version.
        return syaml_dict([('version', str(self[0]))])
    return syaml_dict([('versions', [str(v) for v in self])])
|
@staticmethod
def from_dict(dictionary):
    """Parse dict from to_dict."""
    # 'versions' (a list) takes precedence over a single 'version'.
    if 'versions' in dictionary:
        return VersionList(dictionary['versions'])
    if 'version' in dictionary:
        return VersionList([dictionary['version']])
    raise ValueError("Dict must have 'version' or 'versions' in it.")
|
@coerced
def satisfies(self, other, strict=False):
    """A VersionList satisfies another if some version in the list
    would satisfy some version in the other list. This uses
    essentially the same algorithm as overlaps() does for
    VersionList, but it calls satisfies() on member Versions
    and VersionRanges.

    If strict is specified, this version list must lie entirely
    *within* the other in order to satisfy it.
    """
    if not other or not self:
        return False

    if strict:
        return self in other

    # Both lists are sorted: scan them in lockstep, advancing whichever
    # side is behind, until a satisfying pair is found.
    mine, theirs = 0, 0
    while mine < len(self) and theirs < len(other):
        if self[mine].satisfies(other[theirs]):
            return True
        if self[mine] < other[theirs]:
            mine += 1
        else:
            theirs += 1
    return False
|
@coerced
def intersect(self, other):
    """Intersect this spec's list with other.

    Return True if the spec changed as a result; False otherwise
    """
    merged = self.intersection(other)
    changed = merged.versions != self.versions
    self.versions = merged.versions
    return changed
|
def _valid_virtuals_and_externals(self, spec):
    """Returns a list of candidate virtual dep providers and external
    packages that could be used to concretize a spec.

    Preferred specs come first in the list.
    """
    candidates = [spec]
    pref_key = lambda spec: 0  # no-op preference key by default

    if spec.virtual:
        candidates = spack.repo.providers_for(spec)
        if not candidates:
            # BUG FIX: this used to raise with candidates[0] on an empty
            # list, masking the real problem with an IndexError. Report
            # the unsatisfiable virtual spec itself instead.
            raise UnsatisfiableProviderSpecError(spec, spec)

        # Find the nearest spec in the DAG with provider preferences for
        # this virtual, and order candidates by those preferences.
        spec_w_prefs = find_spec(
            spec,
            lambda p: PackagePrefs.has_preferred_providers(p.name, spec.name),
            spec)
        pref_key = PackagePrefs(spec_w_prefs.name, 'providers', spec.name)

    # Keep candidates that are buildable, plus any externals that
    # satisfy the requested spec.
    usable = OrderedDict()
    for cspec in candidates:
        if is_spec_buildable(cspec):
            usable[cspec] = True
        for ext in spec_externals(cspec):
            if ext.satisfies(spec):
                usable[ext] = True
    if not usable:
        raise NoBuildError(spec)

    # Externals first, then preference order, then name/version.
    return sorted(usable, key=lambda spec: (
        not spec.external,
        pref_key(spec),
        spec.name,
        reverse_order(spec.versions),
        spec))
|
def choose_virtual_or_external(self, spec):
    """Given a list of candidate virtual and external packages, try to
    find one that is most ABI compatible.
    """
    candidates = self._valid_virtuals_and_externals(spec)
    if not candidates:
        return candidates

    # Use the nearest spec with a compiler as the ABI exemplar.
    abi_exemplar = find_spec(spec, lambda x: x.compiler, spec.root)

    # Sort candidates from most to least compatible with the exemplar;
    # loose compatibility outranks strict compatibility in the key.
    def _abi_key(candidate):
        return (spack.abi.compatible(candidate, abi_exemplar, loose=True),
                spack.abi.compatible(candidate, abi_exemplar))

    return sorted(candidates, reverse=True, key=_abi_key)
|
def concretize_version(self, spec):
    """If the spec is already concrete, return. Otherwise take
    the preferred version from spackconfig, and default to the package's
    version if there are no available versions.

    TODO: In many cases we probably want to look for installed
          versions of each package and use an installed version
          if we can link to it. The policy implemented here will
          tend to rebuild a lot of stuff because it will prefer
          a compiler in the spec to any compiler already-
          installed things were built with. There is likely
          some better policy that finds some middle ground
          between these two extremes.
    """
    # Return if already concrete.
    if spec.versions.concrete:
        return False

    pkg = spec.package
    # Versions the package declares that also satisfy the spec's range.
    usable = [v for v in pkg.versions if any((v.satisfies(sv) for sv in spec.versions))]

    # Sort key: packages.yaml preference first, then the package's own
    # 'preferred' flag, then non-develop versions, then the version
    # itself (reverse sort puts the best candidate first).
    yaml_prefs = PackagePrefs(spec.name, 'version')
    keyfn = (lambda v: ((- yaml_prefs(v)), pkg.versions.get(Version(v)).get('preferred', False), (not v.isdevelop()), v))
    usable.sort(key=keyfn, reverse=True)

    if usable:
        spec.versions = ver([usable[0]])
    elif ((not spec.versions) or (spec.versions == VersionList([':']))):
        # The spec accepts any version but the package has none that
        # match: nothing sensible to pick.
        raise NoValidVersionError(spec)
    else:
        # The spec asked for a version the package doesn't list; trust
        # the spec and pin a concrete endpoint of its last range.
        last = spec.versions[(-1)]
        if isinstance(last, VersionRange):
            if last.end:
                spec.versions = ver([last.end])
            else:
                spec.versions = ver([last.start])
        else:
            spec.versions = ver([last])

    return True
|
def concretize_architecture(self, spec):
    """If the spec is empty provide the defaults of the platform. If the
    architecture is not a string type, then check if either the platform,
    target or operating system are concretized. If any of the fields are
    changed then return True. If everything is concretized (i.e the
    architecture attribute is a namedtuple of classes) then return False.
    If the target is a string type, then convert the string into a
    concretized architecture. If it has no architecture and the root of the
    DAG has an architecture, then use the root otherwise use the defaults
    on the platform.
    """
    root_arch = spec.root.architecture
    sys_arch = spack.spec.ArchSpec(spack.architecture.sys_type())
    spec_changed = False

    # Nothing set at all: start from the current system architecture.
    if (spec.architecture is None):
        spec.architecture = spack.spec.ArchSpec(sys_arch)
        spec_changed = True

    # Fill missing fields from the root's architecture first, then from
    # the system defaults; stop as soon as the spec is concrete.
    default_archs = list((x for x in [root_arch, sys_arch] if x))
    for arch in default_archs:
        if spec.architecture.concrete:
            break
        # Fields set on 'arch' but still unset on this spec.
        replacement_fields = [k for (k, v) in iteritems(arch.to_cmp_dict()) if (v and (not getattr(spec.architecture, k)))]
        for field in replacement_fields:
            setattr(spec.architecture, field, getattr(arch, field))
            spec_changed = True

    # If even the defaults couldn't complete it, give up loudly.
    if (not spec.architecture.concrete):
        raise InsufficientArchitectureInfoError(spec, default_archs)

    return spec_changed
|
def concretize_variants(self, spec):
    """If the spec already has variants filled in, return. Otherwise, add
    the user preferences from packages.yaml or the default variants from
    the package specification.
    """
    changed = False
    preferred = PackagePrefs.preferred_variants(spec.name)

    for name, variant in spec.package_class.variants.items():
        if name in spec.variants:
            continue  # already set on the spec
        changed = True
        if name in preferred:
            # User preference from packages.yaml wins.
            spec.variants[name] = preferred.get(name)
        else:
            spec.variants[name] = variant.make_default()

    return changed
|
def concretize_compiler(self, spec):
    """If the spec already has a compiler, we're done. If not, then take
    the compiler used for the nearest ancestor with a compiler
    spec and use that. If the ancestor's compiler is not
    concrete, then used the preferred compiler as specified in
    spackconfig.

    Intuition: Use the spackconfig default if no package that depends on
    this one has a strict compiler requirement. Otherwise, try to
    build with the compiler that will be used by libraries that
    link to this one, to maximize compatibility.
    """
    # Pass on concretizing the compiler until OS and target are known.
    if not (spec.architecture.platform_os and spec.architecture.target):
        return True

    def _proper_compiler_style(cspec, aspec):
        # A compiler is usable only if it is configured for this arch.
        return spack.compilers.compilers_for_spec(cspec, arch_spec=aspec)

    all_compiler_specs = spack.compilers.all_compiler_specs()
    if not all_compiler_specs:
        raise spack.compilers.NoCompilersError()

    # Already concrete and known: just validate the arch pairing.
    if (spec.compiler and spec.compiler.concrete and
            spec.compiler in all_compiler_specs):
        if not _proper_compiler_style(spec.compiler, spec.architecture):
            _compiler_concretization_failure(spec.compiler, spec.architecture)
        return False

    # Take our compiler constraint, or the nearest ancestor's (falling
    # back to the root, which find_spec guarantees to return).
    other_spec = (spec if spec.compiler
                  else find_spec(spec, lambda x: x.compiler, spec.root))
    # BUG FIX: assert before dereferencing -- the assert used to come
    # after other_spec.compiler had already been accessed, so it could
    # never fire.
    assert other_spec
    other_compiler = other_spec.compiler

    # Check if the compiler is already fully specified.
    if other_compiler in all_compiler_specs:
        spec.compiler = other_compiler.copy()
        if not _proper_compiler_style(spec.compiler, spec.architecture):
            _compiler_concretization_failure(spec.compiler, spec.architecture)
        return True

    # Filter available compilers down to those matching the constraint.
    compiler_list = (all_compiler_specs if not other_compiler
                     else spack.compilers.find(other_compiler))
    if not compiler_list:
        # No compiler with a satisfactory spec was found.
        raise UnavailableCompilerVersionError(other_compiler)

    # Sort by configured compiler preferences.
    ppk = PackagePrefs(other_spec.name, 'compiler')
    matches = sorted(compiler_list, key=ppk)

    # Pick the first preference that is usable on this architecture.
    try:
        spec.compiler = next(
            c for c in matches
            if _proper_compiler_style(c, spec.architecture)).copy()
    except StopIteration:
        # No compiler with a satisfactory spec supports this arch.
        _compiler_concretization_failure(other_compiler, spec.architecture)

    assert spec.compiler.concrete
    return True
|
def concretize_compiler_flags(self, spec):
    """The compiler flags are updated to match those of the spec whose
    compiler is used, defaulting to no compiler flags in the spec.
    Default specs set at the compiler level will still be added later.
    """
    # Pass until the spec's OS and target are concretized.
    if (not (spec.architecture.platform_os and spec.architecture.target)):
        return True

    # A parent's flags apply only if it uses the same compiler and
    # architecture as this spec.
    compiler_match = (lambda other: ((spec.compiler == other.compiler) and (spec.architecture == other.architecture)))

    ret = False
    for flag in spack.spec.FlagMap.valid_compiler_flags():
        if (flag not in spec.compiler_flags):
            spec.compiler_flags[flag] = list()
        try:
            # Inherit this flag from the nearest matching ancestor that
            # defines it (excluding this spec itself).
            nearest = next((p for p in spec.traverse(direction='parents') if (compiler_match(p) and (p is not spec) and (flag in p.compiler_flags))))
            nearest_flags = set(nearest.compiler_flags.get(flag, []))
            flags = set(spec.compiler_flags.get(flag, []))
            if (nearest_flags - flags):
                # Union in the ancestor flags we don't already have.
                spec.compiler_flags[flag] = list((nearest_flags | flags))
                ret = True
        except StopIteration:
            # No matching ancestor defines this flag.
            pass

    # Include the compiler flag defaults from the config files. This
    # ensures that spack will detect conflicts that stem from a change
    # in default compiler flags.
    compiler = spack.compilers.compiler_for_spec(spec.compiler, spec.architecture)
    for flag in compiler.flags:
        config_flags = set(compiler.flags.get(flag, []))
        flags = set(spec.compiler_flags.get(flag, []))
        spec.compiler_flags[flag] = list((config_flags | flags))
        if (config_flags - flags):
            ret = True

    return ret
|
def format_help_sections(self, level):
    """Format help on sections for a particular verbosity level.

    Args:
        level (str): 'short' or 'long' (more commands shown for long)
    """
    if (level not in levels):
        raise ValueError(('level must be one of: %s' % levels))

    # Commands are registered lazily; make sure all are present.
    add_all_commands(self)

    'Print help on subcommands in neatly formatted sections.'
    formatter = self._get_formatter()

    # Dig into argparse internals to get the subcommand actions.
    if (not hasattr(self, 'actions')):
        self.actions = self._subparsers._actions[(-1)]._get_subactions()

    # Commands not yet shown in any displayed section.
    remaining = set(spack.cmd.commands)

    def add_group(group):
        # Render one argparse group as a formatter section.
        formatter.start_section(group.title)
        formatter.add_text(group.description)
        formatter.add_arguments(group._group_actions)
        formatter.end_section()

    def add_subcommand_group(title, commands):
        'Add informational help group for a specific subcommand set.'
        cmd_set = set(commands)

        # Map from command name to parser action for this set.
        cmds = dict(((action.metavar, action) for action in self.actions if (action.metavar in cmd_set)))

        # Add commands to a group in order, and add the group to the
        # formatter, marking each command as displayed.
        group = argparse._ArgumentGroup(self, title=title)
        for name in commands:
            group._add_action(cmds[name])
            if (name in remaining):
                remaining.remove(name)
        add_group(group)

    # Optionally restrict which top-level options are shown at this
    # verbosity level.
    show_options = options_by_level[level]
    if (show_options != 'all'):
        opts = dict(((opt.option_strings[0].strip('-'), opt) for opt in self._optionals._group_actions))
        new_actions = [opts[letter] for letter in show_options]
        self._optionals._group_actions = new_actions

    # Usage line is built from the short option letters actually shown.
    options = ''.join((opt.option_strings[0].strip('-') for opt in self._optionals._group_actions))
    index = index_commands()

    formatter.add_text(('usage: %s [-%s] <command> [...]' % (self.prog, options)))
    formatter.add_text(self.description)
    formatter.add_text(intro_by_level[level])

    # Render each section for this level; 'help' is rendered last below.
    sections = index[level]
    for section in sorted(sections):
        if (section == 'help'):
            continue

        group_description = section_descriptions.get(section, section)
        to_display = sections[section]
        commands = []

        # Commands listed in section_order are displayed first, in that
        # order; the rest follow alphabetically.
        if (section in section_order):
            commands.extend((cmd for cmd in section_order[section] if (cmd in to_display)))
        commands.extend((cmd for cmd in sorted(sections[section]) if (cmd not in commands)))

        add_subcommand_group(group_description, commands)

    add_group(self._optionals)

    formatter.add_text('{help}:\n spack help -a list all available commands\n spack help <command> help on a specific command\n spack help --spec help on the spec syntax\n spack docs open http://spack.rtfd.io/ in a browser'.format(help=section_descriptions['help']))
    return formatter.format_help()
|
def add_command(self, name):
    """Add one subcommand to this parser."""
    name = spack.cmd.get_python_name(name)

    # Lazily create the subparser group on first use, dropping the
    # plain 'command' positional if one was added before subcommands.
    if not hasattr(self, 'subparsers'):
        if self._actions[-1].dest == 'command':
            self._remove_action(self._actions[-1])
        self.subparsers = self.add_subparsers(metavar='COMMAND',
                                              dest='command')

    module = spack.cmd.get_module(name)
    subparser = self.subparsers.add_parser(
        name.replace('_', '-'),
        help=module.description,
        description=module.description)
    module.setup_parser(subparser)
    return module
|
def __init__(self, command, fail_on_error=True):
    """Create a new SpackCommand that invokes ``command`` when called."""
    # Private argument parser with just this one command registered.
    self.parser = make_argument_parser()
    self.parser.add_command(command)

    self.command_name = command
    self.command = spack.cmd.get_command(command)
    self.fail_on_error = fail_on_error
|
def __call__(self, *argv, **kwargs):
    """Invoke this SpackCommand.

    Args:
        argv (list of str): command line arguments.

    Keyword Args:
        color (optional bool): force-disable or force-enable color

    Returns:
        (str, str): output and error as a strings

    On return, if ``fail_on_error`` is False, return value of command
    is set in ``returncode`` property. Otherwise, raise an error.
    """
    args, unknown = self.parser.parse_known_args(
        [self.command_name] + list(argv))

    out, err = sys.stdout, sys.stderr
    ofd, ofn = tempfile.mkstemp()
    efd, efn = tempfile.mkstemp()
    # BUG FIX: close the raw descriptors returned by mkstemp -- the
    # capture streams below are opened by name, so these fds used to
    # leak on every invocation.
    os.close(ofd)
    os.close(efd)

    try:
        # Redirect the command's stdout/stderr into the temp files.
        sys.stdout = open(ofn, 'w')
        sys.stderr = open(efn, 'w')
        self.returncode = _invoke_spack_command(
            self.command, self.parser, args, unknown)
    except SystemExit as e:
        self.returncode = e.code
    finally:
        # Always restore the real streams, flushing captured output.
        sys.stdout.flush()
        sys.stdout.close()
        sys.stderr.flush()
        sys.stderr.close()
        sys.stdout, sys.stderr = out, err

    # Read back the captured output (BUG FIX: close the read handles
    # promptly instead of leaking them) and remove the temp files.
    with open(ofn) as f:
        return_out = f.read()
    with open(efn) as f:
        return_err = f.read()
    os.unlink(ofn)
    os.unlink(efn)

    if self.fail_on_error and self.returncode != 0:
        raise SpackCommandError(
            ('Command exited with code %d: %s(%s)' % (
                self.returncode, self.command_name,
                ', '.join(("'%s'" % a) for a in argv))))

    return (return_out, return_err)
|
def architecture_compatible(self, parent, child):
    """Return true if parent and child have ABI compatible targets."""
    # An unset architecture on either side is compatible with anything.
    if not parent.architecture or not child.architecture:
        return True
    return parent.architecture == child.architecture
|
@memoized
def _gcc_get_libstdcxx_version(self, version):
    """Returns gcc ABI compatibility info by getting the library version of
    a compiler's libstdc++ or libgcc_s.
    """
    spec = CompilerSpec('gcc', version)
    compilers = spack.compilers.compilers_for_spec(spec)
    if not compilers:
        return None
    compiler = compilers[0]

    # Prefer the C++ compiler and libstdc++; fall back to the C
    # compiler and libgcc_s.
    if compiler.cxx:
        rungcc = Executable(compiler.cxx)
        libname = 'libstdc++.' + dso_suffix
    elif compiler.cc:
        rungcc = Executable(compiler.cc)
        libname = 'libgcc_s.' + dso_suffix
    else:
        return None

    try:
        # If this "gcc" is actually clang, it can't answer the query.
        if Clang.default_version(rungcc.exe[0]) != 'unknown':
            return None
        output = rungcc('--print-file-name=%s' % libname, output=str)
    except ProcessError:
        return None
    if not output:
        return None

    # The printed path is a symlink to the versioned library file; the
    # link target's basename carries the ABI version.
    libpath = os.readlink(output.strip())
    if not libpath:
        return None
    return os.path.basename(libpath)
|
@memoized
def _gcc_compiler_compare(self, pversion, cversion):
    """Returns true iff the gcc version pversion and cversion
    are ABI compatible.
    """
    plib = self._gcc_get_libstdcxx_version(pversion)
    clib = self._gcc_get_libstdcxx_version(cversion)
    # Unknown library version on either side: assume incompatible.
    if not (plib and clib):
        return False
    return plib == clib
|
'Returns true iff the intel version pversion and cversion
are ABI compatible'
| def _intel_compiler_compare(self, pversion, cversion):
| if ((len(pversion.version) < 2) or (len(cversion.version) < 2)):
return False
return (pversion.version[:2] == cversion.version[:2])
|
def compiler_compatible(self, parent, child, **kwargs):
    """Return true if compilers for parent and child are ABI compatible."""
    # No compiler constraint on either side: trivially compatible.
    if not parent.compiler or not child.compiler:
        return True
    # Different compiler families are assumed incompatible.
    if parent.compiler.name != child.compiler.name:
        return False
    # Loose mode: same family is good enough.
    if kwargs.get('loose', False):
        return True

    # Check every pair of versions; any compatible pair is enough.
    for pversion in parent.compiler.versions:
        for cversion in child.compiler.versions:
            if pversion.satisfies(cversion):
                return True
            if (parent.compiler.name == 'gcc' and
                    self._gcc_compiler_compare(pversion, cversion)):
                return True
            if (parent.compiler.name == 'intel' and
                    self._intel_compiler_compare(pversion, cversion)):
                return True
    return False
|
def compatible(self, parent, child, **kwargs):
    """Returns true iff a parent and child spec are ABI compatible."""
    loose = kwargs.get('loose', False)
    # Architectures must match first; then compilers.
    if not self.architecture_compatible(parent, child):
        return False
    return self.compiler_compatible(parent, child, loose=loose)
|
def __getattr__(self, name):
    """Getattr lazily loads modules if they're not already loaded."""
    submodule = ((self.__package__ + '.') + name)
    # NOTE(review): __import__ returns the *top-level* package, not the
    # submodule, and the import machinery may itself set this attribute
    # on the namespace during the import; the final getattr re-reads
    # whatever ended up there -- confirm before simplifying.
    setattr(self, name, __import__(submodule))
    return getattr(self, name)
|
def swap(self, other):
    """Convenience function to make swapping repositories easier.

    This is currently used by mock tests.
    TODO: Maybe there is a cleaner way.
    """
    # Exchange every piece of index state between the two repo paths.
    for attr in ('repos', 'by_namespace', 'by_path',
                 '_all_package_names', '_provider_index'):
        mine = getattr(self, attr)
        setattr(self, attr, getattr(other, attr))
        setattr(other, attr, mine)
|
'Add a repository to the namespace and path indexes.
Checks for duplicates -- two repos can\'t have the same root
directory, and they provide have the same namespace.'
| def _add(self, repo):
| if (repo.root in self.by_path):
raise DuplicateRepoError(("Duplicate repository: '%s'" % repo.root))
if (repo.namespace in self.by_namespace):
raise DuplicateRepoError(("Package repos '%s' and '%s' both provide namespace %s" % (repo.root, self.by_namespace[repo.namespace].root, repo.namespace)))
self.by_namespace[repo.full_namespace] = repo
self.by_path[repo.root] = repo
|
def put_first(self, repo):
    """Add repo first in the search path."""
    # Validate/index first so a duplicate never reaches the search path.
    self._add(repo)
    self.repos.insert(0, repo)
|
def put_last(self, repo):
    """Add repo last in the search path."""
    # Validate/index first so a duplicate never reaches the search path.
    self._add(repo)
    self.repos.append(repo)
|
def remove(self, repo):
    """Remove a repo from the search path."""
    try:
        self.repos.remove(repo)
    except ValueError:
        pass  # not on the search path; nothing to do
|
def get_repo(self, namespace, default=NOT_PROVIDED):
    """Get a repository by namespace.

    Arguments:
        namespace:
            Look up this namespace in the RepoPath, and return it if found.

    Optional Arguments:
        default:
            If default is provided, return it when the namespace
            isn't found. If not, raise an UnknownNamespaceError.
    """
    fullspace = '%s.%s' % (self.super_namespace, namespace)
    if fullspace not in self.by_namespace:
        # BUG FIX: compare the sentinel by identity ('is'), not
        # equality -- '==' could spuriously match a caller-supplied
        # default that happens to compare equal to the sentinel.
        if default is NOT_PROVIDED:
            raise UnknownNamespaceError(namespace)
        return default
    return self.by_namespace[fullspace]
|
def first_repo(self):
    """Get the first repo in precedence order."""
    if not self.repos:
        return None
    return self.repos[0]
|
def all_package_names(self):
    """Return all unique package names in all repositories."""
    if self._all_package_names is None:
        # Union of every repo's names, cached case-insensitively sorted.
        names = set()
        for repo in self.repos:
            names.update(repo.all_package_names())
        self._all_package_names = sorted(names, key=lambda n: n.lower())
    return self._all_package_names
|
@property
def provider_index(self):
    """Merged ProviderIndex from all Repos in the RepoPath."""
    if self._provider_index is None:
        # Build once: merge each repo's index, in reverse repo order.
        merged = ProviderIndex()
        for repo in reversed(self.repos):
            merged.merge(repo.provider_index)
        self._provider_index = merged
    return self._provider_index
|
def find_module(self, fullname, path=None):
    """Implements precedence for overlaid namespaces.

    Loop checks each namespace in self.repos for packages, and
    also handles loading empty containing namespaces.
    """
    namespace, _, module_name = fullname.rpartition('.')

    for repo in self.repos:
        if namespace == repo.full_namespace:
            # fullname names a package inside this repo's namespace.
            if repo.real_name(module_name):
                return repo
        elif fullname == repo.full_namespace:
            # fullname is this repo's namespace module itself.
            return repo

    # A prefix of a known namespace: serve an empty container module.
    if self.by_namespace.is_prefix(fullname):
        return self
    return None
|
def load_module(self, fullname):
    """Handles loading container namespaces when necessary.

    See ``Repo`` for how actual package modules are loaded.
    """
    # Already imported: hand back the cached module.
    if fullname in sys.modules:
        return sys.modules[fullname]

    if not self.by_namespace.is_prefix(fullname):
        raise ImportError('No such Spack repo: %s' % fullname)

    # Fabricate an empty namespace module and register it.
    module = SpackNamespace(fullname)
    module.__loader__ = self
    sys.modules[fullname] = module
    return module
|
@_autospec
def repo_for_pkg(self, spec):
    """Given a spec, get the repository for its package."""
    if spec.namespace:
        # An explicit namespace must resolve, or it's an error.
        fullspace = '%s.%s' % (self.super_namespace, spec.namespace)
        if fullspace not in self.by_namespace:
            raise UnknownNamespaceError(spec.namespace)
        return self.by_namespace[fullspace]

    # No namespace given: first repo containing the package wins;
    # fall back to the highest-precedence repo.
    for repo in self.repos:
        if spec.name in repo:
            return repo
    return self.first_repo()
|
@_autospec
def get(self, spec, new=False):
    """Find a repo that contains the supplied spec's package.

    Raises UnknownPackageError if not found.
    """
    # 'new' is accepted for interface compatibility but unused here.
    repo = self.repo_for_pkg(spec)
    return repo.get(spec)
|
def get_pkg_class(self, pkg_name):
    """Find a class for the spec's package and return the class object."""
    repo = self.repo_for_pkg(pkg_name)
    return repo.get_pkg_class(pkg_name)
|
@_autospec
def dump_provenance(self, spec, path):
    """Dump provenance information for a spec to a particular path.

    This dumps the package file and any associated patch files.
    Raises UnknownPackageError if not found.
    """
    repo = self.repo_for_pkg(spec)
    return repo.dump_provenance(spec, path)
|
def exists(self, pkg_name):
    """Whether a package with the given name exists in the path's repos.

    Note that virtual packages do not "exist".
    """
    for repo in self.repos:
        if repo.exists(pkg_name):
            return True
    return False
|
def is_virtual(self, pkg_name):
    """True if the package with this name is virtual, False otherwise."""
    return pkg_name in self.provider_index
|
def __init__(self, root, namespace=repo_namespace):
    """Instantiate a package repository from a filesystem path.

    Arguments:
        root        The root directory of the repository.

        namespace   A super-namespace that will contain the repo-defined
                    namespace (this is generally just `spack.pkg`). The
                    super-namespace is Spack's way of separating
                    repositories from other python namespaces.
    """
    # Root directory, containing the repo config and package dirs.
    self.root = canonicalize_path(root)

    # Super-namespace for all packages in this Repo.
    self.super_namespace = namespace

    def check(condition, msg):
        if not condition:
            raise BadRepoError(msg)

    # Validate the repository layout.
    self.config_file = join_path(self.root, repo_config_name)
    check(os.path.isfile(self.config_file),
          "No %s found in '%s'" % (repo_config_name, root))

    self.packages_path = join_path(self.root, packages_dir_name)
    # BUG FIX: the error message used to name repo_config_name instead
    # of the packages directory actually being checked here.
    check(os.path.isdir(self.packages_path),
          "No directory '%s' found in '%s'" % (packages_dir_name, root))

    # Read configuration and validate the declared namespace.
    config = self._read_config()
    check('namespace' in config,
          '%s must define a namespace.' % join_path(root, repo_config_name))

    self.namespace = config['namespace']
    check(re.match('[a-zA-Z][a-zA-Z0-9_.]+', self.namespace),
          ("Invalid namespace '%s' in repo '%s'. "
           % (self.namespace, self.root)) +
          "Namespaces must be valid python identifiers separated by '.'")

    # Set up 'full_namespace' to include the super-namespace.
    if self.super_namespace:
        self.full_namespace = '%s.%s' % (self.super_namespace,
                                         self.namespace)
    else:
        self.full_namespace = self.namespace

    # Keep name components around for checking prefixes.
    self._names = self.full_namespace.split('.')

    # Internal caches, filled lazily.
    self._modules = {}
    self._classes = {}
    self._instances = {}
    self._needs_update = []
    self._provider_index = None
    self._all_package_names = None

    # Make sure the namespace for packages in this repo exists.
    self._create_namespace()

    # Unique filename for the cache of virtual dependency providers.
    self._cache_file = 'providers/%s-index.yaml' % self.namespace
|
def _create_namespace(self):
    """Create this repo's namespace module chain in ``sys.modules``.

    Ensures that modules loaded via the repo have a home, and that
    we don't get runtime warnings from Python's module system.
    """
    parent = None
    for depth in range(1, len(self._names) + 1):
        ns = '.'.join(self._names[:depth])
        module = sys.modules.get(ns)
        if module is None:
            module = SpackNamespace(ns)
            module.__loader__ = self
            sys.modules[ns] = module
            # Hang the new child module off its parent namespace.
            if parent:
                setattr(parent, self._names[depth - 1], module)
        parent = module
|
def real_name(self, import_name):
    """Map a Python identifier to the Spack package name it refers to.

    A python identifier might map to many different Spack package
    names due to hyphen/underscore ambiguity, e.g.::

        num3proxy    -> 3proxy
        foo_bar      -> foo_bar, foo-bar
        foo_bar_baz  -> foo_bar_baz, foo-bar-baz, foo_bar-baz, foo-bar_baz

    Returns the matching package name, or None if no candidate exists
    in this repo.
    """
    # Exact match wins outright.
    if import_name in self:
        return import_name

    # Otherwise try each hyphen/underscore variant except the original.
    candidates = possible_spack_module_names(import_name)
    candidates.remove(import_name)
    matches = (name for name in candidates if name in self)
    return next(matches, None)
|
def is_prefix(self, fullname):
    """True if ``fullname`` is a prefix of this Repo's namespace."""
    components = fullname.split('.')
    # Compare against the same-length prefix of our namespace parts.
    return components == self._names[:len(components)]
|
def find_module(self, fullname, path=None):
    """Python ``find_module`` import hook.

    Returns this Repo if it can load the module; None if not.
    """
    # We can always serve namespace-prefix modules.
    if self.is_prefix(fullname):
        return self

    # Otherwise we only serve packages directly inside our namespace.
    parent_ns, _, module_name = fullname.rpartition('.')
    if parent_ns == self.full_namespace and self.real_name(module_name):
        return self

    return None
|
def load_module(self, fullname):
    """Python importer load hook.

    Tries to load the module; raises ImportError if it can't.
    """
    cached = sys.modules.get(fullname)
    if cached is not None:
        return cached

    parent_ns, _, module_name = fullname.rpartition('.')

    if self.is_prefix(fullname):
        # A namespace container module.
        module = SpackNamespace(fullname)
    elif parent_ns == self.full_namespace:
        # A package module directly inside this repo's namespace.
        actual = self.real_name(module_name)
        if not actual:
            raise ImportError('No module %s in %s' % (module_name, self))
        module = self._get_pkg_module(actual)
    else:
        raise ImportError('No module %s in %s' % (fullname, self))

    module.__loader__ = self
    sys.modules[fullname] = module

    # Attach the module to its parent namespace, if it has one.
    if parent_ns != fullname:
        parent = sys.modules[parent_ns]
        if not hasattr(parent, module_name):
            setattr(parent, module_name, module)

    return module
|
def _read_config(self):
    """Read and validate the YAML config file in this repo's root.

    Returns the ``repo`` sub-dictionary of the config; dies with an
    error if the file is missing, unreadable, or malformed.
    """
    try:
        with open(self.config_file) as reponame_file:
            # NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary objects from untrusted input —
            # consider yaml.safe_load if repo configs may be untrusted.
            yaml_data = yaml.load(reponame_file)

        malformed = (not yaml_data or
                     'repo' not in yaml_data or
                     not isinstance(yaml_data['repo'], dict))
        if malformed:
            tty.die('Invalid %s in repository %s' % (repo_config_name, self.root))

        return yaml_data['repo']
    except IOError:
        tty.die('Error reading %s when opening %s' % (self.config_file, self.root))
|
@_autospec
def dump_provenance(self, spec, path):
    """Dump provenance information for a spec to ``path``.

    This dumps the package file and any associated patch files.

    Raises:
        UnknownPackageError: if the spec is virtual or belongs to a
            different repository's namespace.
    """
    if spec.virtual:
        raise UnknownPackageError(spec.name)
    if spec.namespace and spec.namespace != self.namespace:
        raise UnknownPackageError(
            'Repository %s does not contain package %s.'
            % (self.namespace, spec.fullname))

    mkdirp(path)

    # Copy each existing patch file alongside the package file.
    # BUGFIX: the loop variable used to be named `spec`, shadowing the
    # method argument, so the package file below was looked up for the
    # last patch-condition spec rather than the requested spec.
    for when_spec, patches in spec.package.patches.items():
        for patch in patches:
            if patch.path:
                if os.path.exists(patch.path):
                    install(patch.path, path)
                else:
                    tty.warn('Patch file did not exist: %s' % patch.path)

    # Finally, copy the package.py file itself.
    install(self.filename_for_package_name(spec), path)
|
def purge(self):
    """Empty the entire package-instance cache in place."""
    self._instances.clear()
|
@property
def provider_index(self):
    """A provider index with names *specific* to this repo.

    Built lazily on first access and cached thereafter.
    """
    if self._provider_index is None:
        self._update_provider_index()
    return self._provider_index
|
def _check_namespace(self, spec):
    """Raise UnknownNamespaceError unless the spec's namespace (if
    any) matches this repository's."""
    ns = spec.namespace
    if ns and ns != self.namespace:
        raise UnknownNamespaceError(ns)
|
@_autospec
def dirname_for_package_name(self, spec):
    """Return the directory containing a package's ``package.py`` file."""
    self._check_namespace(spec)
    return join_path(self.packages_path, spec.name)
|
@_autospec
def filename_for_package_name(self, spec):
    """Return the module filename loaded for a particular package.

    Packages for a Repo live in ``$root/<package_name>/package.py``.

    This returns a proper ``package.py`` path even if the package
    doesn't exist yet, so callers must ensure the package exists
    before importing.
    """
    self._check_namespace(spec)
    package_dir = self.dirname_for_package_name(spec.name)
    return join_path(package_dir, package_file_name)
|
def _fast_package_check(self):
    """Scan the repo, listing packages and checking index freshness.

    Both operations require checking all ``package.py`` files, so we
    do them together: list the packages directory, record package
    names, and remember which package files are newer than the
    provider index (stored in ``self._needs_update``).

    The implementation minimizes filesystem calls — roughly one stat
    per package — and avoids importing any package modules, which is
    slow. Results are cached; repeat calls return the cached list.
    """
    if self._all_package_names is None:
        self._all_package_names = []

        # Modification time of the cached index, for staleness checks.
        index_mtime = spack.misc_cache.mtime(self._cache_file)

        for pkg_name in os.listdir(self.packages_path):
            pkg_dir = join_path(self.packages_path, pkg_name)

            # Names that aren't valid module names can't be packages.
            if not valid_module_name(pkg_name):
                msg = ("Skipping package at %s. "
                       "'%s' is not a valid Spack module name.")
                tty.warn(msg % (pkg_dir, pkg_name))
                continue

            pkg_file = join_path(self.packages_path, pkg_name,
                                 package_file_name)

            # One stat per package keeps this fast.
            try:
                sinfo = os.stat(pkg_file)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # No package.py here; not a package directory.
                    continue
                elif e.errno == errno.EACCES:
                    tty.warn("Can't read package file %s." % pkg_file)
                    continue
                raise e

            # package.py must be a regular file, not a directory.
            if stat.S_ISDIR(sinfo.st_mode):
                continue

            self._all_package_names.append(pkg_name)

            # Flag packages modified after the index was written.
            if sinfo.st_mtime > index_mtime:
                self._needs_update.append(pkg_name)

        self._all_package_names.sort()

    return self._all_package_names
|
def all_package_names(self):
    """Return a sorted list of all package names in the Repo."""
    # The fast check populates and caches the name list.
    return self._fast_package_check()
|
def all_packages(self):
    """Yield every package in the repository, one at a time.

    Use with care: loading package modules is slow.
    """
    for pkg_name in self.all_package_names():
        yield self.get(pkg_name)
|
def exists(self, pkg_name):
    """Whether a package with the supplied name exists in this repo."""
    if self._all_package_names:
        # Name list is sorted, so membership is a binary search away.
        names = self.all_package_names()
        idx = bisect_left(names, pkg_name)
        return idx < len(self._all_package_names) and \
            self._all_package_names[idx] == pkg_name

    # No cached name list yet; a single stat is cheaper than a scan.
    return os.path.exists(self.filename_for_package_name(pkg_name))
|
def is_virtual(self, pkg_name):
    """True if the named package is virtual, False otherwise."""
    index = self.provider_index
    return index.contains(pkg_name)
|
def _get_pkg_module(self, pkg_name):
    """Load (or fetch from cache) the module for a particular package.

    The module is cached within this Repo *instance* and is *not*
    added to ``sys.modules``, so multiple Repos can be constructed
    for testing while each loads the module only once.
    """
    cached = self._modules.get(pkg_name)
    if cached is not None:
        return cached

    file_path = self.filename_for_package_name(pkg_name)
    if not os.path.exists(file_path):
        raise UnknownPackageError(pkg_name, self)
    if not os.path.isfile(file_path):
        tty.die("Something's wrong. '%s' is not a file!" % file_path)
    if not os.access(file_path, os.R_OK):
        tty.die("Cannot read '%s'!" % file_path)

    # NOTE(review): imp.load_source is deprecated in Python 3; a
    # migration to importlib would be needed for modern interpreters.
    fullname = '%s.%s' % (self.full_namespace, pkg_name)
    module = imp.load_source(fullname, file_path)
    module.__package__ = self.full_namespace
    module.__loader__ = self

    self._modules[pkg_name] = module
    return module
|
def get_pkg_class(self, pkg_name):
    """Get the class for a package out of its module.

    First loads (or fetches from cache) a module for the package,
    then extracts the package class from the module according to
    Spack's naming convention.
    """
    namespace, _, pkg_name = pkg_name.rpartition('.')
    if namespace and namespace != self.namespace:
        raise InvalidNamespaceError(
            'Invalid namespace for %s repo: %s' % (self.namespace, namespace))

    class_name = mod_to_class(pkg_name)
    module = self._get_pkg_module(pkg_name)

    cls = getattr(module, class_name)
    if not inspect.isclass(cls):
        tty.die('%s.%s is not a class' % (pkg_name, class_name))
    return cls
|
def __init__(self, root):
    """Create a file cache object rooted at ``root``.

    The cache directory is created if it does not exist yet.
    """
    self.root = root.rstrip(os.path.sep)
    if not os.path.exists(self.root):
        mkdirp(self.root)

    # Lock objects per cache key, created on demand.
    self._locks = {}
|
def destroy(self):
    """Remove everything under the cache root."""
    for entry in os.listdir(self.root):
        path = join_path(self.root, entry)
        if os.path.isdir(path):
            # ignore_errors=True: best-effort removal.
            shutil.rmtree(path, True)
        else:
            os.remove(path)
|
def cache_path(self, key):
    """Path to the cache file for a particular key."""
    return join_path(self.root, key)
|
def _lock_path(self, key):
    """Path to the hidden lock file guarding the cache entry ``key``."""
    keydir, keyfile = os.path.split(key)
    return join_path(self.root, keydir, '.' + keyfile + '.lock')
|
def _get_lock(self, key):
    """Return the lock object for ``key``, creating it if necessary."""
    lock = self._locks.get(key)
    if lock is None:
        lock = Lock(self._lock_path(key))
        self._locks[key] = lock
    return lock
|
def init_entry(self, key):
    """Ensure we can access a cache file, creating its lock if needed.

    Returns whether the cache file exists yet or not.

    Raises:
        CacheError: if the entry exists but is not a plain readable/
            writable file, or its parent directory is inaccessible.
    """
    cache_path = self.cache_path(key)

    exists = os.path.exists(cache_path)
    if exists:
        # Existing entry must be a plain file we can read and write.
        if not os.path.isfile(cache_path):
            raise CacheError('Cache file is not a file: %s' % cache_path)
        if not os.access(cache_path, os.R_OK | os.W_OK):
            raise CacheError('Cannot access cache file: %s' % cache_path)
    else:
        # Entry doesn't exist yet: make sure its parent directory
        # exists and is writable so it can be created later.
        parent = os.path.dirname(cache_path)
        if parent.rstrip(os.path.sep) != self.root:
            mkdirp(parent)
        if not os.access(parent, os.R_OK | os.W_OK):
            raise CacheError('Cannot access cache directory: %s' % parent)

    # Ensure a lock object exists for this key.
    self._get_lock(key)
    return exists
|
def read_transaction(self, key):
    """Get a read transaction on a file cache item.

    Returns a ReadTransaction context manager that opens the cache
    file for reading::

        with file_cache_object.read_transaction(key) as cache_file:
            cache_file.read()
    """
    def opener():
        return open(self.cache_path(key))
    return ReadTransaction(self._get_lock(key), opener)
|
def write_transaction(self, key):
    """Get a write transaction on a file cache item.

    Returns a WriteTransaction context manager that opens a temporary
    file for writing. Once the context manager finishes, if nothing
    went wrong, it moves the file into place on top of the old file
    atomically; on error, the temporary file is removed.
    """
    class WriteContextManager(object):

        def __enter__(cm):
            # Open the existing file (if any) for the caller to read
            # old contents, and a .tmp sibling for the new contents.
            cm.orig_filename = self.cache_path(key)
            cm.orig_file = None
            if os.path.exists(cm.orig_filename):
                cm.orig_file = open(cm.orig_filename, 'r')

            cm.tmp_filename = self.cache_path(key) + '.tmp'
            cm.tmp_file = open(cm.tmp_filename, 'w')

            return cm.orig_file, cm.tmp_file

        def __exit__(cm, type, value, traceback):
            if cm.orig_file:
                cm.orig_file.close()
            cm.tmp_file.close()

            if value:
                # An exception occurred: discard the temp file.
                # BUGFIX: this used shutil.rmtree(tmp, True), which only
                # removes directory trees; applied to the regular .tmp
                # file it raised NotADirectoryError, silently swallowed
                # by ignore_errors, leaving the temp file behind.
                if os.path.exists(cm.tmp_filename):
                    os.remove(cm.tmp_filename)
            else:
                # Success: atomically replace the old cache file.
                os.rename(cm.tmp_filename, cm.orig_filename)

    return WriteTransaction(self._get_lock(key), WriteContextManager)
|
def mtime(self, key):
    """Return modification time of a cache file, or 0 if it is absent.

    Time is in the units returned by ``os.stat`` in the ``st_mtime``
    field, which is platform-dependent.
    """
    if not self.init_entry(key):
        return 0
    return os.stat(self.cache_path(key)).st_mtime
|
def possible_dependencies(self, transitive=True, visited=None):
    """Return the set of possible dependency names of this package.

    Args:
        transitive (bool): include all transitive dependencies if
            True, only direct dependencies if False.
        visited (set): accumulator of names seen so far; used for the
            recursive traversal (callers normally omit it).
    """
    if visited is None:
        visited = set()
    visited.add(self.name)

    for dep_name in self.dependencies:
        dep_spec = spack.spec.Spec(dep_name)

        if dep_spec.virtual:
            # Virtual dependencies expand to all their providers.
            for provider in spack.repo.providers_for(dep_spec):
                visited.add(provider.name)
                if transitive:
                    provider_pkg = spack.repo.get(provider.name)
                    provider_pkg.possible_dependencies(transitive, visited)
        else:
            visited.add(dep_name)
            if transitive:
                dep_pkg = spack.repo.get(dep_name)
                dep_pkg.possible_dependencies(transitive, visited)

    return visited
|
@property
def package_dir(self):
    """Directory where this package's ``package.py`` file lives."""
    module_file = self.module.__file__
    return os.path.dirname(module_file)
|
@property
def global_license_dir(self):
    """Directory where global license files for all packages are stored."""
    # Four levels up from this module is the Spack root.
    spack_root = ancestor(__file__, 4)
    return join_path(spack_root, 'etc', 'spack', 'licenses')
|
@property
def global_license_file(self):
    """Path where a global license file for this package should live.

    Returns None when the package declares no license files.
    """
    if not self.license_files:
        return None
    # Use the basename of the first declared license file.
    basename = os.path.basename(self.license_files[0])
    return join_path(self.global_license_dir, self.name, basename)
|
@memoized
def version_urls(self):
    """Map versions of this package to their explicitly-declared URLs.

    A version appears in the result only if it has an explicitly
    defined URL; keys are inserted in sorted version order.
    """
    return {
        v: self.versions[v]['url']
        for v in sorted(self.versions)
        if 'url' in self.versions[v]
    }
|
def nearest_url(self, version):
    """Find the URL for the next-lowest version with a URL.

    If there is no lower version with a URL, falls back to the
    package's ``url`` attribute; if that isn't set either, a
    *higher* version's URL is used.
    """
    version_urls = self.version_urls()
    # Start from the class-level default URL, if any.
    url = getattr(self.__class__, 'url', None)

    for v, v_url in version_urls.items():
        # Once past the target version, keep whatever we have —
        # unless we still have nothing at all.
        if v > version and url:
            break
        if v_url:
            url = v_url

    return url
|
def url_for_version(self, version):
    """Return a URL from which ``version`` of this package can be fetched.

    Args:
        version: a ``Version`` (or anything coercible to one).

    Raises:
        NoURLError: if the package declares neither a ``url``
            attribute nor any per-version URLs.
    """
    if not isinstance(version, Version):
        version = Version(version)

    cls = self.__class__
    if not (hasattr(cls, 'url') or self.version_urls()):
        raise NoURLError(cls)

    # Prefer an explicitly-declared URL for this exact version.
    version_urls = self.version_urls()
    if version in version_urls:
        return version_urls[version]

    # Otherwise extrapolate from the nearest version with a URL.
    return spack.url.substitute_version(
        self.nearest_url(version), self.url_version(version))
|
@stage.setter
def stage(self, stage):
    """Override the default stage with a caller-supplied stage object."""
    self._stage = stage
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.