def constrain(self, other):
    """Intersect self's versions with other.

    Return whether the CompilerSpec changed.
    """
    other = self._autospec(other)
    if not other.satisfies(self):
        raise UnsatisfiableCompilerSpecError(other, self)
    return self.versions.intersect(other.versions)

@property
def concrete(self):
    """A CompilerSpec is concrete if its versions are concrete and
    there is an available compiler with the right version.
    """
    return self.versions.concrete

def constrain(self, other):
    """Add all flags in other that aren't in self to self.

    Return whether the spec changed.
    """
    if other.spec and other.spec._concrete:
        for k in self:
            if k not in other:
                raise UnsatisfiableCompilerFlagSpecError(
                    self[k], '<absent>')

    changed = False
    for k in other:
        if k in self and not set(self[k]) <= set(other[k]):
            raise UnsatisfiableCompilerFlagSpecError(
                ' '.join(f for f in self[k]),
                ' '.join(f for f in other[k]))
        elif k not in self:
            self[k] = other[k]
            changed = True
    return changed

def __init__(self, attribute_name, default_handler=None):
    """Create a new descriptor.

    Parameters:
        attribute_name (str): name of the attribute to be searched
            for in the Package instance
        default_handler (callable, optional): default function to be
            called if the attribute was not found in the Package
            instance
    """
    self.attribute_name = attribute_name
    if default_handler is None:
        default_handler = lambda descriptor, spec, cls: None
    self.default = default_handler

def __get__(self, instance, cls):
    """Retrieves the property from Package using a well defined chain
    of responsibility.

    The order of call is:

    1. if the query was through the name of a virtual package try to
       search for the attribute `{virtual_name}_{attribute_name}` in
       Package
    2. try to search for attribute `{attribute_name}` in Package
    3. try to call the default handler

    The first call that produces a value will stop the chain. If no
    call can handle the request or a None value is produced, then
    AttributeError is raised.
    """
    pkg = instance.package
    try:
        query = instance.last_query
    except AttributeError:
        # No query yet: query the spec under its own name to set one.
        _ = instance[instance.name]
        query = instance.last_query

    callbacks_chain = []
    # First in the chain: a specialized attribute for virtual packages
    if query.isvirtual:
        specialized_name = '{0}_{1}'.format(
            query.name, self.attribute_name)
        callbacks_chain.append(lambda: getattr(pkg, specialized_name))
    # Then the generic attribute on Package
    callbacks_chain.append(lambda: getattr(pkg, self.attribute_name))
    # Final resort: the default handler
    callbacks_chain.append(lambda: self.default(self, instance, cls))

    # The first callback that produces a value wins.
    value = None
    for f in callbacks_chain:
        try:
            value = f()
            break
        except AttributeError:
            pass

    if value is None:
        fmt = "'{name}' package has no relevant attribute '{query}'\n"
        fmt += "\tspec : '{spec}'\n"
        fmt += "\tqueried as : '{spec.last_query.name}'\n"
        fmt += "\textra parameters : '{spec.last_query.extra_parameters}'\n"
        message = fmt.format(
            name=pkg.name, query=self.attribute_name, spec=instance)
        raise AttributeError(message)

    return value

def _add_version(self, version):
    """Called by the parser to add an allowable version."""
    self.versions.add(version)

def _add_flag(self, name, value):
    """Called by the parser to add a known flag.

    Known flags currently include "arch".
    """
    valid_flags = FlagMap.valid_compiler_flags()
    if name == 'arch' or name == 'architecture':
        parts = tuple(value.split('-'))
        plat, os, tgt = parts if len(parts) == 3 else (None, None, value)
        self._set_architecture(platform=plat, platform_os=os, target=tgt)
    elif name == 'platform':
        self._set_architecture(platform=value)
    elif name == 'os' or name == 'operating_system':
        self._set_architecture(platform_os=value)
    elif name == 'target':
        self._set_architecture(target=value)
    elif name in valid_flags:
        assert self.compiler_flags is not None
        self.compiler_flags[name] = value.split()
    elif str(value).upper() == 'TRUE' or str(value).upper() == 'FALSE':
        self.variants[name] = BoolValuedVariant(name, value)
    else:
        self.variants[name] = AbstractVariant(name, value)

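# Standalone sketch (not Spack code) of the 'arch=' parsing above: a full
# platform-os-target triple fills all three fields; any other value is
# treated as a bare target.
def _split_arch_value(value):
    parts = tuple(value.split('-'))
    return parts if len(parts) == 3 else (None, None, value)

assert _split_arch_value('linux-ubuntu16-x86_64') == ('linux', 'ubuntu16', 'x86_64')
assert _split_arch_value('x86_64') == (None, None, 'x86_64')
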
def _set_architecture(self, **kwargs):
    """Called by the parser to set the architecture."""
    arch_attrs = ['platform', 'platform_os', 'target']
    if self.architecture and self.architecture.concrete:
        raise DuplicateArchitectureError(
            "Spec for '%s' cannot have two architectures." % self.name)

    if not self.architecture:
        new_vals = tuple(kwargs.get(arg, None) for arg in arch_attrs)
        self.architecture = ArchSpec(*new_vals)
    else:
        new_attrvals = [(a, v) for a, v in iteritems(kwargs)
                        if a in arch_attrs]
        for new_attr, new_value in new_attrvals:
            if getattr(self.architecture, new_attr):
                raise DuplicateArchitectureError(
                    "Spec for '%s' cannot have two '%s' specified "
                    "for its architecture" % (self.name, new_attr))
            else:
                setattr(self.architecture, new_attr, new_value)

def _set_compiler(self, compiler):
    """Called by the parser to set the compiler."""
    if self.compiler:
        raise DuplicateCompilerSpecError(
            "Spec for '%s' cannot have two compilers." % self.name)
    self.compiler = compiler

def _add_dependency(self, spec, deptypes):
    """Called by the parser to add another spec as a dependency."""
    if spec.name in self._dependencies:
        raise DuplicateDependencyError(
            "Cannot depend on '%s' twice" % spec)
    dspec = DependencySpec(self, spec, deptypes)
    self._dependencies[spec.name] = dspec
    spec._dependents[self.name] = dspec

@property
def root(self):
    """Follow dependent links and find the root of this spec's DAG.

    In spack specs, there should be a single root (the package being
    installed). This will throw an assertion error if that is not
    the case.
    """
    if not self._dependents:
        return self
    depiter = iter(self._dependents.values())
    first_root = next(depiter).parent.root
    assert all(first_root is d.parent.root for d in depiter)
    return first_root

@property
def package_class(self):
    """Internal package call gets only the class object for a package.

    Use this to just get package metadata.
    """
    return spack.repo.get_pkg_class(self.fullname)

@property
def virtual(self):
    """Right now, a spec is virtual if no package exists with its name.

    TODO: revisit this -- might need to use a separate namespace and
    be more explicit about this. Possible idea: just use convention
    and make virtual deps all caps, e.g., MPI vs mpi.
    """
    return Spec.is_virtual(self.name)

@staticmethod
def is_virtual(name):
    """Test if a name is virtual without requiring a Spec."""
    return name is not None and not spack.repo.exists(name)

@property
def concrete(self):
    """A spec is concrete if it can describe only ONE build of a
    package.

    If any of the name, version, architecture, compiler, variants, or
    dependencies are ambiguous, then it is not concrete.
    """
    if self._concrete:
        return True
    self._concrete = bool(
        not self.virtual and
        self.namespace is not None and
        self.versions.concrete and
        self.variants.concrete and
        self.architecture and
        self.architecture.concrete and
        self.compiler and
        self.compiler.concrete and
        self.compiler_flags.concrete and
        self._dependencies.concrete)
    return self._concrete

def traverse_edges(self, visited=None, d=0, deptype=None,
                   deptype_query=None, dep_spec=None, **kwargs):
    """Generic traversal of the DAG represented by this spec.

    This will yield each node in the spec.

    Options:

    order [=pre|post]
        Order to traverse spec nodes. Defaults to preorder traversal.
        Options are:

        'pre':  Pre-order traversal; each node is yielded before its
                children in the dependency DAG.
        'post': Post-order traversal; each node is yielded after its
                children in the dependency DAG.

    cover [=nodes|edges|paths]
        Determines how extensively to cover the dag. Possible values:

        'nodes': Visit each node in the dag only once. Every node
                 yielded by this function will be unique.
        'edges': If a node has been visited once but is reached along
                 a new path from the root, yield it but do not
                 descend into it. This traverses each 'edge' in the
                 DAG once.
        'paths': Explore every unique path reachable from the root.
                 This descends into visited subtrees and will yield
                 nodes twice if they're reachable by multiple paths.

    depth [=False]
        Defaults to False. When True, yields not just nodes in the
        spec, but also their depth from the root in a (depth, node)
        tuple.

    key [=id]
        Allow a custom key function to track the identity of nodes
        in the traversal.

    root [=True]
        If False, this won't yield the root node, just its
        descendants.

    direction [=children|parents]
        If 'children', does a traversal of this spec's children. If
        'parents', traverses upwards in the DAG towards the root.
    """
    depth = kwargs.get('depth', False)
    key_fun = kwargs.get('key', id)
    if isinstance(key_fun, string_types):
        key_fun = attrgetter(key_fun)
    yield_root = kwargs.get('root', True)
    cover = kwargs.get('cover', 'nodes')
    direction = kwargs.get('direction', 'children')
    order = kwargs.get('order', 'pre')

    deptype = canonical_deptype(deptype)
    if deptype_query is None:
        deptype_query = ('link', 'run')

    # Make sure kwargs have legal values; raise ValueError if not.
    def validate(name, val, allowed_values):
        if val not in allowed_values:
            raise ValueError('Invalid value for %s: %s. Choices are %s'
                             % (name, val, ','.join(allowed_values)))
    validate('cover', cover, ('nodes', 'edges', 'paths'))
    validate('direction', direction, ('children', 'parents'))
    validate('order', order, ('pre', 'post'))

    if visited is None:
        visited = set()
    key = key_fun(self)

    # Node traversal does not yield visited nodes.
    if key in visited and cover == 'nodes':
        return

    def return_val(dspec):
        if not dspec:
            # make a fake dspec for the root.
            if direction == 'parents':
                dspec = DependencySpec(self, None, ())
            else:
                dspec = DependencySpec(None, self, ())
        return (d, dspec) if depth else dspec

    yield_me = yield_root or d > 0

    # Preorder traversal yields before successors
    if yield_me and order == 'pre':
        yield return_val(dep_spec)

    # Edge traversal yields but skips children of visited nodes
    if not (key in visited and cover == 'edges'):
        # This code determines direction and yields the children/parents
        if direction == 'children':
            successors = self.dependencies_dict(deptype)
            succ = lambda s: s.spec
        elif direction == 'parents':
            successors = self.dependents_dict(deptype)
            succ = lambda s: s.parent
        else:
            raise ValueError('Invalid traversal direction: %s' % direction)

        visited.add(key)
        for name, dspec in sorted(successors.items()):
            child = successors[name]
            children = succ(child).traverse_edges(
                visited, d=d + 1, deptype=deptype,
                deptype_query=deptype_query, dep_spec=dspec, **kwargs)
            for elt in children:
                yield elt

    # Postorder traversal yields after successors
    if yield_me and order == 'post':
        yield return_val(dep_spec)

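# Hypothetical usage sketch, assuming `spec` is a Spec whose DAG has been
# built; the keyword names are the options documented above. With
# depth=True each item is a (depth, node) tuple.
for d, node in spec.traverse(order='post', cover='nodes', depth=True):
    print('  ' * d + node.name)
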
@property
def short_spec(self):
    """Returns a version of the spec with the dependencies hashed
    instead of completely enumerated.
    """
    return self.format('$_$@$%@$+$=$/')

@property
def cshort_spec(self):
    """Returns an auto-colorized version of ``self.short_spec``."""
    return self.cformat('$_$@$%@$+$=$/')

def dag_hash(self, length=None):
    """Return a hash of the entire spec DAG, including connectivity."""
    if self._hash:
        return self._hash[:length]
    else:
        yaml_text = syaml.dump(
            self.to_node_dict(), default_flow_style=True, width=maxint)
        sha = hashlib.sha1(yaml_text.encode('utf-8'))
        b32_hash = base64.b32encode(sha.digest()).lower()

        if sys.version_info[0] >= 3:
            # In Python 3, base32 digests are bytes
            b32_hash = b32_hash.decode('utf-8')

        if self.concrete:
            self._hash = b32_hash
        return b32_hash[:length]

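# Minimal standalone sketch of the hashing scheme above: sha1 over a
# canonical text rendering, then lowercase base32. The string below is a
# stand-in for the real syaml dump of to_node_dict().
import base64
import hashlib

yaml_text = 'example-node-dict'
sha = hashlib.sha1(yaml_text.encode('utf-8'))
b32_hash = base64.b32encode(sha.digest()).lower().decode('utf-8')
print(b32_hash[:7])  # a 7-char prefix, as used by the '$/' format code
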
def dag_hash_bit_prefix(self, bits):
    """Get the first <bits> bits of the DAG hash as an integer type."""
    return base32_prefix_bits(self.dag_hash(), bits)

@staticmethod
def read_yaml_dep_specs(dependency_dict):
    """Read the DependencySpec portion of a YAML-formatted Spec.

    This needs to be backward-compatible with older spack spec
    formats so that reindex will work on old specs/databases.
    """
    for dep_name, elt in dependency_dict.items():
        if isinstance(elt, string_types):
            # original format: elt is just the dependency hash.
            dag_hash, deptypes = elt, ['build', 'link']
        elif isinstance(elt, tuple):
            # original deptypes format: (hash, deptypes) tuples
            dag_hash, deptypes = elt
        elif isinstance(elt, dict):
            # new format: elements of dependency spec are keyed.
            dag_hash, deptypes = elt['hash'], elt['type']
        else:
            raise SpecError("Couldn't parse dependency types in spec.")
        yield dep_name, dag_hash, list(deptypes)

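# The three dependency encodings accepted above, shown as plain Python
# data (hashes here are made-up placeholders):
deps_old = {'mpich': 'aaaaaa'}                            # name -> hash
deps_tuple = {'mpich': ('aaaaaa', ['build', 'link'])}     # name -> (hash, types)
deps_dict = {'mpich': {'hash': 'aaaaaa',                  # name -> keyed dict
                       'type': ['build', 'link']}}
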
@staticmethod
def from_dict(data):
    """Construct a spec from YAML.

    Parameters:
        data -- a nested dict/list data structure read from YAML or
        JSON.
    """
    nodes = data['spec']

    # Read nodes out of the list. The root spec is the first element;
    # dependencies are the following elements.
    dep_list = [Spec.from_node_dict(node) for node in nodes]
    if not dep_list:
        raise SpecError('YAML spec contains no nodes.')
    deps = dict((spec.name, spec) for spec in dep_list)
    spec = dep_list[0]

    for node in nodes:
        # get dependency dict from the node.
        name = next(iter(node))
        if 'dependencies' not in node[name]:
            continue
        yaml_deps = node[name]['dependencies']
        for dname, dhash, dtypes in Spec.read_yaml_dep_specs(yaml_deps):
            deps[name]._dependencies[dname] = DependencySpec(
                deps[name], deps[dname], dtypes)

    return spec

@staticmethod
def from_yaml(stream):
    """Construct a spec from YAML.

    Parameters:
        stream -- string or file object to read from.
    """
    try:
        data = syaml.load(stream)
        return Spec.from_dict(data)
    except MarkedYAMLError as e:
        raise syaml.SpackYAMLError('error parsing YAML spec:', str(e))

@staticmethod
def from_json(stream):
    """Construct a spec from JSON.

    Parameters:
        stream -- string or file object to read from.
    """
    try:
        data = sjson.load(stream)
        return Spec.from_dict(data)
    except Exception as e:
        raise sjson.SpackJSONError('error parsing JSON spec:', str(e))

def _concretize_helper(self, presets=None, visited=None):
    """Recursive helper function for concretize().

    This concretizes everything bottom-up. As things are concretized,
    they're added to the presets, and ancestors will prefer the
    settings of their children.
    """
    if presets is None:
        presets = {}
    if visited is None:
        visited = set()

    if self.name in visited:
        return False

    changed = False

    # Concretize deps first -- this is a bottom-up process.
    for name in sorted(self._dependencies.keys()):
        changed |= self._dependencies[name].spec._concretize_helper(
            presets, visited)

    if self.name in presets:
        changed |= self.constrain(presets[self.name])
    else:
        if not self.virtual:
            changed |= any(
                (spack.concretizer.concretize_architecture(self),
                 spack.concretizer.concretize_compiler(self),
                 spack.concretizer.concretize_compiler_flags(self),
                 spack.concretizer.concretize_version(self),
                 spack.concretizer.concretize_variants(self)))
        presets[self.name] = self

    visited.add(self.name)
    return changed

def _replace_with(self, concrete):
    """Replace this virtual spec with a concrete spec."""
    assert self.virtual
    for name, dep_spec in self._dependents.items():
        dependent = dep_spec.parent
        deptypes = dep_spec.deptypes

        # remove self from all dependents.
        if self.name in dependent._dependencies:
            del dependent._dependencies[self.name]

        # add the replacement, unless it is already a dep of dependent.
        if concrete.name not in dependent._dependencies:
            dependent._add_dependency(concrete, deptypes)

def _expand_virtual_packages(self):
    """Find virtual packages in this spec, replace them with providers,
    and normalize again to include the provider's (potentially virtual)
    dependencies. Repeat until there are no virtual deps.

    Precondition: spec is normalized.

    .. todo::

       If a provider depends on something that conflicts with other
       dependencies in the spec being expanded, this can produce a
       conflicting spec. For example, if mpich depends on hwloc@:1.3
       but something in the spec needs hwloc1.4:, then we should
       choose an MPI other than mpich. Cases like this are infrequent,
       but we should implement this before it becomes a problem.
    """
    self_index = ProviderIndex(self.traverse(), restrict=True)
    changed = False
    done = False

    while not done:
        done = True
        for spec in list(self.traverse()):
            replacement = None
            if spec.external:
                continue
            if spec.virtual:
                replacement = self._find_provider(spec, self_index)
                if replacement:
                    spec._replace_with(replacement)
                    done = False
                    break

            if not replacement:
                # Get a list of possible replacements in order of
                # preference.
                candidates = spack.concretizer.choose_virtual_or_external(
                    spec)

                # Try the replacements in order, skipping any that
                # cause satisfiability problems.
                for replacement in candidates:
                    if replacement is spec:
                        break

                    # Replace spec with the candidate and normalize
                    copy = self.copy()
                    copy[spec.name]._dup(replacement, deps=False)

                    try:
                        copy.normalize(force=True)
                        break
                    except SpecError:
                        # On error, try the next replacement.
                        continue

            # If replacement is external then trim the dependencies
            if replacement.external:
                if spec._dependencies:
                    changed = True
                    spec._dependencies = DependencyMap(spec)
                replacement._dependencies = DependencyMap(replacement)
                replacement.architecture = self.architecture

            def feq(cfield, sfield):
                return (not cfield) or (cfield == sfield)

            if (replacement is spec or
                (feq(replacement.name, spec.name) and
                 feq(replacement.versions, spec.versions) and
                 feq(replacement.compiler, spec.compiler) and
                 feq(replacement.architecture, spec.architecture) and
                 feq(replacement._dependencies, spec._dependencies) and
                 feq(replacement.variants, spec.variants) and
                 feq(replacement.external_path, spec.external_path) and
                 feq(replacement.external_module,
                     spec.external_module))):
                continue

            # Refine this spec to the candidate. This uses
            # _replace_with AND _dup so that it can work in place.
            if spec.virtual:
                spec._replace_with(replacement)
                changed = True
            if spec._dup(replacement, deps=False, cleardeps=False):
                changed = True

            spec._dependencies.owner = spec
            self_index.update(spec)
            done = False
            break

    return changed

def concretize(self):
    """A spec is concrete if it describes one build of a package
    uniquely. This will ensure that this spec is concrete.

    If this spec could describe more than one version, variant, or
    build of a package, this will add constraints to make it concrete.

    Some rigorous validation and checks are also performed on the
    spec. Concretizing ensures that it is self-consistent and that
    it's consistent with requirements of its packages. See flatten()
    and normalize() for more details on this.
    """
    if not self.name:
        raise SpecError('Attempting to concretize anonymous spec')

    if self._concrete:
        return

    changed = True
    force = False

    while changed:
        changes = (self.normalize(force),
                   self._expand_virtual_packages(),
                   self._concretize_helper())
        changed = any(changes)
        force = True

    # After concretizing, assign namespaces to anything left.
    for s in self.traverse(deptype_query=alldeps):
        if s.namespace is None:
            s.namespace = spack.repo.repo_for_pkg(s.name).namespace

    for s in self.traverse():
        if s.external_module:
            compiler = spack.compilers.compiler_for_spec(
                s.compiler, s.architecture)
            for mod in compiler.modules:
                load_module(mod)
            s.external_path = get_path_from_module(s.external_module)

    # Mark everything in the spec as concrete, as well.
    self._mark_concrete()

    # Now that the spec is concrete, check for declared conflicts.
    matches = []
    for x in self.traverse():
        for conflict_spec, when_list in x.package.conflicts.items():
            if x.satisfies(conflict_spec):
                for when_spec in when_list:
                    if x.satisfies(when_spec):
                        matches.append((x, conflict_spec, when_spec))
    if matches:
        raise ConflictsInSpecError(self, matches)

def _mark_concrete(self, value=True):
    """Mark this spec and its dependencies as concrete.

    Only for internal use -- client code should use "concretize"
    unless there is a need to force a spec to be concrete.
    """
    for s in self.traverse(deptype_query=alldeps):
        s._normal = value
        s._concrete = value

def concretized(self):
    """This is a non-destructive version of concretize().

    First clones, then returns a concrete version of this package
    without modifying this package.
    """
    clone = self.copy()
    clone.concretize()
    return clone

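# Hypothetical usage sketch, assuming Spack is importable and an 'hdf5'
# package exists in the repository:
import spack.spec

abstract = spack.spec.Spec('hdf5+mpi')
concrete = abstract.concretized()  # `abstract` itself is left unmodified
assert concrete.concrete and not abstract.concrete
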
def flat_dependencies(self, **kwargs):
    """Return a DependencyMap containing all of this spec's
    dependencies with their constraints merged.

    If copy is True, returns merged copies of its dependencies
    without modifying the spec it's called on.

    If copy is False, clears this spec's dependencies and returns
    them.
    """
    copy = kwargs.get('copy', True)
    deptype_query = kwargs.get('deptype_query')

    flat_deps = {}
    try:
        deptree = self.traverse(root=False, deptype_query=deptype_query)
        for spec in deptree:
            if spec.name not in flat_deps:
                if copy:
                    spec = spec.copy(deps=False)
                flat_deps[spec.name] = spec
            else:
                flat_deps[spec.name].constrain(spec)

        if not copy:
            for spec in flat_deps.values():
                spec._dependencies.clear()
                spec._dependents.clear()
            self._dependencies.clear()

        return flat_deps

    except UnsatisfiableSpecError as e:
        raise InconsistentSpecError('Invalid Spec DAG: %s' % e.message)

def index(self, deptype=None):
    """Return a DependencyMap that points to all the dependencies in
    this spec.
    """
    dm = DependencyMap(None)
    for spec in self.traverse(deptype=deptype):
        dm[spec.name] = spec
    return dm

def _evaluate_dependency_conditions(self, name):
    """Evaluate all the conditions on a dependency with this name.

    If the package depends on <name> in this configuration, return
    the dependency. If no conditions are True (and we don't depend
    on it), return None.
    """
    pkg = spack.repo.get(self.fullname)
    conditions = pkg.dependencies[name]

    substitute_abstract_variants(self)
    # evaluate when specs to figure out constraints on the dependency.
    dep = None
    for when_spec, dep_spec in conditions.items():
        sat = self.satisfies(when_spec, strict=True)
        if sat:
            if dep is None:
                dep = Spec(name)
            try:
                dep.constrain(dep_spec)
            except UnsatisfiableSpecError as e:
                e.message = ('Conflicting conditional dependencies on '
                             'package %s for spec %s' % (self.name, self))
                raise e
    return dep

def _find_provider(self, vdep, provider_index):
    """Find provider for a virtual spec in the provider index.

    Raise an exception if there is a conflicting virtual dependency
    already in this spec.
    """
    assert vdep.virtual
    providers = provider_index.providers_for(vdep)

    # If there is a provider for the vpkg, use it instead of the
    # virtual package.
    if providers:
        # Remove duplicate providers that can concretize to the same
        # result.
        for provider in providers:
            for spec in providers:
                if spec is not provider and provider.satisfies(spec):
                    providers.remove(spec)
        # Can't have multiple providers for the same thing in one spec.
        if len(providers) > 1:
            raise MultipleProviderError(vdep, providers)
        return providers[0]
    else:
        # The user might have required something insufficient for
        # this vdep -- so we'll get a conflict.
        required = provider_index.providers_for(vdep.name)
        if len(required) > 1:
            raise MultipleProviderError(vdep, required)
        elif required:
            raise UnsatisfiableProviderSpecError(required[0], vdep)

def _merge_dependency(self, dep, deptypes, visited, spec_deps,
                      provider_index):
    """Merge the dependency into this spec.

    Caller should assume that this routine owns the dep parameter
    (i.e. it needs to be a copy of any internal structures like
    dependencies on Package class objects).

    This is the core of normalize(). There are some basic steps:

    * If dep is virtual, evaluate whether it corresponds to an
      existing concrete dependency, and merge if so.
    * If it's real and it provides some virtual dep, see if it
      provides what some virtual dependency wants and merge if so.
    * Finally, if none of the above, merge dependency and its
      constraints into this spec.

    This method returns True if the spec was changed, False
    otherwise.
    """
    changed = False

    # If it's a virtual dependency, try to find an existing provider
    # in the spec, and merge that.
    if dep.virtual:
        visited.add(dep.name)
        provider = self._find_provider(dep, provider_index)
        if provider:
            dep = provider
    else:
        index = ProviderIndex([dep], restrict=True)
        items = list(spec_deps.items())
        for name, vspec in items:
            if index.providers_for(vspec):
                vspec._replace_with(dep)
                del spec_deps[vspec.name]
                changed = True
            else:
                required = index.providers_for(vspec.name)
                if required:
                    raise UnsatisfiableProviderSpecError(required[0], dep)
        provider_index.update(dep)

    # If the spec isn't already in the set of dependencies, add it.
    if dep.name not in spec_deps:
        spec_deps[dep.name] = dep
        changed = True
    else:
        # merge package/vdep information into spec
        try:
            changed |= spec_deps[dep.name].constrain(dep)
        except UnsatisfiableSpecError as e:
            e.message = "Invalid spec: '%s'. "
            e.message += "Package %s requires %s %s, but spec asked for %s"
            e.message %= (spec_deps[dep.name], dep.name,
                          e.constraint_type, e.required, e.provided)
            raise e

    # Add merged spec to my deps and recurse
    dependency = spec_deps[dep.name]
    if dep.name not in self._dependencies:
        self._add_dependency(dependency, deptypes)

    changed |= dependency._normalize_helper(
        visited, spec_deps, provider_index)
    return changed

def _normalize_helper(self, visited, spec_deps, provider_index):
    """Recursive helper function for _normalize."""
    if self.name in visited:
        return False
    visited.add(self.name)

    # If we descend into a virtual or external spec, there's nothing
    # more to normalize.
    if self.virtual or self.external:
        return False

    # Combine constraints from package deps with constraints from
    # the spec, until nothing changes.
    any_change = False
    changed = True

    pkg = spack.repo.get(self.fullname)
    while changed:
        changed = False
        for dep_name in pkg.dependencies:
            # Do we depend on dep_name? If so pkg_dep is not None.
            pkg_dep = self._evaluate_dependency_conditions(dep_name)
            deptypes = pkg.dependency_types[dep_name]
            # If pkg_dep is a dependency, merge it.
            if pkg_dep:
                changed |= self._merge_dependency(
                    pkg_dep, deptypes, visited, spec_deps,
                    provider_index)
        any_change |= changed

    return any_change

def normalize(self, force=False):
    """When specs are parsed, any dependencies specified are hanging
    off the root, and ONLY the ones that were explicitly provided
    are there. Normalization turns a partial flat spec into a DAG,
    where:

    1. Known dependencies of the root package are in the DAG.
    2. Each node's dependencies dict only contains its known direct
       deps.
    3. There is only ONE unique spec for each package in the DAG.

       * This includes virtual packages. If there is a non-virtual
         package that provides a virtual package that is in the
         spec, then we replace the virtual package with the
         non-virtual one.

    TODO: normalize should probably implement some form of cycle
    detection, to ensure that the spec is actually a DAG.
    """
    if not self.name:
        raise SpecError('Attempting to normalize anonymous spec')

    if self._normal and not force:
        return False

    # avoid any assumptions about concreteness when forced
    if force:
        self._mark_concrete(False)

    # Ensure first that all packages & compilers in the DAG exist.
    self.validate_or_raise()
    # Get all the dependencies into one DependencyMap
    spec_deps = self.flat_dependencies(copy=False, deptype_query=alldeps)

    # Initialize index of virtual dependency providers
    provider_index = ProviderIndex(
        [s for s in spec_deps.values()], restrict=True)

    # traverse the package DAG and fill out dependencies according
    # to package files & their 'when' specs
    visited = set()
    any_change = self._normalize_helper(visited, spec_deps, provider_index)

    # If there are deps specified but not visited, they're not
    # actually deps of this package. Raise an error.
    extra = set(spec_deps.keys()).difference(visited)
    if extra:
        raise InvalidDependencyError(
            self.name + ' does not depend on ' + comma_or(extra))

    # Mark the spec as normal once done.
    self._normal = True
    return any_change

def normalized(self):
    """Return a normalized copy of this spec without modifying this
    spec.
    """
    clone = self.copy()
    clone.normalize()
    return clone

def validate_or_raise(self):
    """Checks that names and values in this spec are real.

    If they're not, it will raise an appropriate exception.
    """
    for spec in self.traverse():
        # raise an error if the spec's package isn't real.
        if not spec.virtual and spec.name:
            spack.repo.get(spec.fullname)

        # validate the compiler in addition to the package name.
        if spec.compiler:
            if not compilers.supported(spec.compiler):
                raise UnsupportedCompilerError(spec.compiler.name)

        # Ensure correctness of variants (if the spec is not virtual)
        if not spec.virtual:
            pkg_cls = spec.package_class
            pkg_variants = pkg_cls.variants
            not_existing = set(spec.variants) - set(pkg_variants)
            if not_existing:
                raise UnknownVariantError(spec.name, not_existing)
            substitute_abstract_variants(spec)

def constrain(self, other, deps=True):
    """Merge the constraints of other with self.

    Returns True if the spec changed as a result, False if not.
    """
    # If we are trying to constrain a concrete spec, either the
    # spec already satisfies the constraint or it is an error.
    if self.concrete:
        if self.satisfies(other):
            return False
        else:
            raise UnsatisfiableSpecError(
                self, other, 'constrain a concrete spec')

    other = self._autospec(other)

    if not (self.name == other.name or
            (not self.name) or (not other.name)):
        raise UnsatisfiableSpecNameError(self.name, other.name)

    if (other.namespace is not None and
            self.namespace is not None and
            other.namespace != self.namespace):
        raise UnsatisfiableSpecNameError(self.fullname, other.fullname)

    if not self.versions.overlaps(other.versions):
        raise UnsatisfiableVersionSpecError(self.versions, other.versions)

    for v in [x for x in other.variants if x in self.variants]:
        if not self.variants[v].compatible(other.variants[v]):
            raise UnsatisfiableVariantSpecError(
                self.variants[v], other.variants[v])

    sarch, oarch = self.architecture, other.architecture
    if sarch is not None and oarch is not None:
        if sarch.platform is not None and oarch.platform is not None:
            if sarch.platform != oarch.platform:
                raise UnsatisfiableArchitectureSpecError(sarch, oarch)
        if (sarch.platform_os is not None and
                oarch.platform_os is not None):
            if sarch.platform_os != oarch.platform_os:
                raise UnsatisfiableArchitectureSpecError(sarch, oarch)
        if sarch.target is not None and oarch.target is not None:
            if sarch.target != oarch.target:
                raise UnsatisfiableArchitectureSpecError(sarch, oarch)

    changed = False
    if self.compiler is not None and other.compiler is not None:
        changed |= self.compiler.constrain(other.compiler)
    elif self.compiler is None:
        changed |= (self.compiler != other.compiler)
        self.compiler = other.compiler

    changed |= self.versions.intersect(other.versions)
    changed |= self.variants.constrain(other.variants)
    changed |= self.compiler_flags.constrain(other.compiler_flags)

    old = str(self.architecture)
    sarch, oarch = self.architecture, other.architecture
    if sarch is None or other.architecture is None:
        self.architecture = sarch or oarch
    else:
        if sarch.platform is None or oarch.platform is None:
            self.architecture.platform = sarch.platform or oarch.platform
        if sarch.platform_os is None or oarch.platform_os is None:
            sarch.platform_os = sarch.platform_os or oarch.platform_os
        if sarch.target is None or oarch.target is None:
            sarch.target = sarch.target or oarch.target
    changed |= (str(self.architecture) != old)

    if deps:
        changed |= self._constrain_dependencies(other)

    return changed

def _constrain_dependencies(self, other):
    """Apply constraints of other spec's dependencies to this spec."""
    other = self._autospec(other)

    if not self._dependencies or not other._dependencies:
        return False

    if not other.satisfies_dependencies(self):
        raise UnsatisfiableDependencySpecError(other, self)

    # Handle common first-order constraints directly
    changed = False
    for name in self.common_dependencies(other):
        changed |= self[name].constrain(other[name], deps=False)
        if name in self._dependencies:
            changed |= self._dependencies[name].update_deptypes(
                other._dependencies[name].deptypes)

    # Update with additional constraints from other spec
    for name in other.dep_difference(self):
        dep_spec_copy = other.get_dependency(name)
        dep_copy = dep_spec_copy.spec
        deptypes = dep_spec_copy.deptypes
        self._add_dependency(dep_copy.copy(), deptypes)
        changed = True

    return changed

def common_dependencies(self, other):
    """Return names of dependencies that self and other have in
    common.
    """
    common = set(s.name for s in self.traverse(root=False))
    common.intersection_update(
        s.name for s in other.traverse(root=False))
    return common

def constrained(self, other, deps=True):
    """Return a constrained copy without modifying this spec."""
    clone = self.copy(deps=deps)
    clone.constrain(other, deps)
    return clone

def dep_difference(self, other):
    """Returns dependencies in self that are not in other."""
    mine = set(s.name for s in self.traverse(root=False))
    mine.difference_update(
        s.name for s in other.traverse(root=False))
    return mine

def _autospec(self, spec_like):
    """Used to convert arguments to specs.

    If spec_like is a spec, returns it. If it's a string, tries to
    parse it as a spec. If that fails, tries to parse a local spec
    from it (i.e. name is assumed to be self's name).
    """
    if isinstance(spec_like, spack.spec.Spec):
        return spec_like

    try:
        spec = spack.spec.Spec(spec_like)
        if not spec.name:
            raise SpecError(
                'anonymous package -- this will always be handled')
        return spec
    except SpecError:
        return parse_anonymous_spec(spec_like, self.name)

def satisfies(self, other, deps=True, strict=False, strict_deps=False):
    """Determine if this spec satisfies all constraints of another.

    There are two senses for satisfies:

    * `loose` (default): the absence of a constraint in self implies
      that it *could* be satisfied by other, so we only check that
      there are no conflicts with other for constraints that this
      spec actually has.
    * `strict`: strict means that we *must* meet all the constraints
      specified on other.
    """
    other = self._autospec(other)

    # The only way to satisfy a concrete spec is to match its hash
    # exactly.
    if other.concrete:
        return self.concrete and self.dag_hash() == other.dag_hash()

    # A concrete provider can satisfy a virtual dependency.
    if not self.virtual and other.virtual:
        try:
            pkg = spack.repo.get(self.fullname)
        except spack.repository.UnknownEntityError:
            # If we can't get package info on this spec, don't treat
            # it as a provider of this vdep.
            return False

        if pkg.provides(other.name):
            for provided, when_specs in pkg.provided.items():
                if any(self.satisfies(when_spec, deps=False,
                                      strict=strict)
                       for when_spec in when_specs):
                    if provided.satisfies(other):
                        return True
        return False

    # Otherwise, first check whether the name matches.
    if self.name != other.name and self.name and other.name:
        return False

    # namespaces either match, or other doesn't require one.
    if (other.namespace is not None and
            self.namespace is not None and
            self.namespace != other.namespace):
        return False

    if self.versions and other.versions:
        if not self.versions.satisfies(other.versions, strict=strict):
            return False
    elif strict and (self.versions or other.versions):
        return False

    # None indicates no constraints when not strict.
    if self.compiler and other.compiler:
        if not self.compiler.satisfies(other.compiler, strict=strict):
            return False
    elif strict and (other.compiler and not self.compiler):
        return False

    var_strict = strict
    if (not self.name) or (not other.name):
        var_strict = True
    if not self.variants.satisfies(other.variants, strict=var_strict):
        return False

    # If not strict, None means unconstrained.
    if self.architecture and other.architecture:
        if not self.architecture.satisfies(other.architecture, strict):
            return False
    elif strict and (other.architecture and not self.architecture):
        return False

    if not self.compiler_flags.satisfies(
            other.compiler_flags, strict=strict):
        return False

    # If we need to descend into dependencies, do it; otherwise
    # we're done.
    if deps:
        deps_strict = strict
        if self._concrete and not other.name:
            # We're dealing with existing specs
            deps_strict = True
        return self.satisfies_dependencies(other, strict=deps_strict)
    else:
        return True

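# Hypothetical sketch of loose vs. strict satisfaction, assuming a 'foo'
# package exists: loosely, a spec lacking a variant constraint could still
# be satisfied; strictly, every constraint on `other` must be met.
import spack.spec

a = spack.spec.Spec('foo@1.0')
b = spack.spec.Spec('foo@1.0+debug')
print(a.satisfies(b))               # True: no conflict in the loose sense
print(a.satisfies(b, strict=True))  # False: 'a' lacks the +debug constraint
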
def satisfies_dependencies(self, other, strict=False):
    """This checks constraints on common dependencies against each
    other.
    """
    other = self._autospec(other)

    if strict:
        if other._dependencies and not self._dependencies:
            return False

        selfdeps = self.traverse(root=False)
        otherdeps = other.traverse(root=False)
        if not all(any(d.satisfies(dep) for d in selfdeps)
                   for dep in otherdeps):
            return False

    elif not self._dependencies or not other._dependencies:
        # if either spec doesn't restrict dependencies then both are
        # compatible.
        return True

    # Handle first-order constraints directly
    for name in self.common_dependencies(other):
        if not self[name].satisfies(other[name], deps=False):
            return False

    # For virtual dependencies, we need to dig a little deeper.
    self_index = ProviderIndex(self.traverse(), restrict=True)
    other_index = ProviderIndex(other.traverse(), restrict=True)

    # This handles cases where there are already providers for both
    # vpkgs
    if not self_index.satisfies(other_index):
        return False

    # These two loops handle cases where there is an overly
    # restrictive vpkg in one spec for a provider in the other.
    for spec in self.virtual_dependencies():
        if (spec.name in other_index and
                not other_index.providers_for(spec)):
            return False

    for spec in other.virtual_dependencies():
        if (spec.name in self_index and
                not self_index.providers_for(spec)):
            return False

    return True

def virtual_dependencies(self):
    """Return list of any virtual deps in this spec."""
    return [spec for spec in self.traverse() if spec.virtual]

def _dup(self, other, deps=True, cleardeps=True):
    """Copy the spec other into self. This is an overwriting copy.

    It does not copy any dependents (parents), but by default copies
    dependencies.

    To duplicate an entire DAG, call _dup() on the root of the DAG.

    Options:
        dependencies[=True]
            Whether deps should be copied too. Set to False to copy
            a spec but not its dependencies.
    """
    # We don't count dependencies as changes here
    changed = True
    if hasattr(self, 'name'):
        changed = (self.name != other.name and
                   self.versions != other.versions and
                   self.architecture != other.architecture and
                   self.compiler != other.compiler and
                   self.variants != other.variants and
                   self._normal != other._normal and
                   self.concrete != other.concrete and
                   self.external_path != other.external_path and
                   self.external_module != other.external_module and
                   self.compiler_flags != other.compiler_flags)

    # Local node attributes get copied first.
    self.name = other.name
    self.versions = other.versions.copy()
    self.architecture = (other.architecture.copy()
                         if other.architecture else None)
    self.compiler = other.compiler.copy() if other.compiler else None
    if cleardeps:
        self._dependents = DependencyMap(self)
        self._dependencies = DependencyMap(self)
    self.compiler_flags = other.compiler_flags.copy()
    self.variants = other.variants.copy()
    self.variants.spec = self
    self.external_path = other.external_path
    self.external_module = other.external_module
    self.namespace = other.namespace

    # If we copy dependencies, preserve DAG structure in the new spec
    if deps:
        deptypes = alldeps
        if isinstance(deps, (tuple, list)):
            deptypes = deps
        self._dup_deps(other, deptypes)

    # Cached fields stay valid only if the whole DAG was copied.
    if deps is True or deps == alldeps:
        self._hash = other._hash
        self._cmp_key_cache = other._cmp_key_cache
        self._normal = other._normal
        self._concrete = other._concrete
    else:
        self._hash = None
        self._cmp_key_cache = None
        self._normal = False
        self._concrete = False

    return changed

def copy(self, deps=True):
    """Return a copy of this spec.

    By default, returns a deep copy. To control how dependencies are
    copied, supply:

        deps=True:  deep copy
        deps=False: shallow copy (no dependencies)
        deps=('link', 'build'):
            only build and link dependencies. Similar for other
            deptypes.
    """
    clone = Spec.__new__(Spec)
    clone._dup(self, deps=deps)
    return clone

def __getitem__(self, name):
    """Get a dependency from the spec by its name.

    This call implicitly sets a query state in the package being
    retrieved. The behavior of packages may be influenced by
    additional query parameters that are passed after a colon symbol.

    Note that if a virtual package is queried a copy of the Spec is
    returned while for non-virtual a reference is returned.
    """
    query_parameters = name.split(':')
    if len(query_parameters) > 2:
        msg = "key has more than one ':' symbol."
        msg += " At most one is admitted."
        raise KeyError(msg)

    name, query_parameters = query_parameters[0], query_parameters[1:]
    if query_parameters:
        # We have extra query parameters, which are comma-separated
        # values
        csv = query_parameters.pop().strip()
        query_parameters = re.split(r'\s*,\s*', csv)

    try:
        value = next(
            itertools.chain(
                # Regular specs
                (x for x in self.traverse() if x.name == name),
                # Providers of a queried virtual name
                (x for x in self.traverse()
                 if (not x.virtual) and x.package.provides(name))
            )
        )
    except StopIteration:
        raise KeyError("No spec with name %s in %s" % (name, self))

    if self._concrete:
        return SpecBuildInterface(value, name, query_parameters)

    return value

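# Standalone sketch of the query-string parsing above: at most one ':' is
# allowed, and anything after it is a comma-separated parameter list.
import re

def _parse_query(key):
    parts = key.split(':')
    if len(parts) > 2:
        raise KeyError("key has more than one ':' symbol. "
                       "At most one is admitted.")
    name, rest = parts[0], parts[1:]
    params = re.split(r'\s*,\s*', rest.pop().strip()) if rest else []
    return name, params

assert _parse_query('mpi') == ('mpi', [])
assert _parse_query('mpi:cxx, fortran') == ('mpi', ['cxx', 'fortran'])
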
def __contains__(self, spec):
    """True if this spec satisfies the provided spec, or if any
    dependency does.

    If the spec has no name, then we parse this one first.
    """
    spec = self._autospec(spec)
    for s in self.traverse():
        if s.satisfies(spec, strict=True):
            return True
    return False

def sorted_deps(self):
    """Return a list of all dependencies sorted by name."""
    deps = self.flat_dependencies()
    return tuple(deps[name] for name in sorted(deps))

def _eq_dag(self, other, vs, vo, deptypes):
    """Recursive helper for eq_dag and ne_dag.

    Does the actual DAG traversal.
    """
    vs.add(id(self))
    vo.add(id(other))

    if self.ne_node(other):
        return False

    if len(self._dependencies) != len(other._dependencies):
        return False

    ssorted = [self._dependencies[name]
               for name in sorted(self._dependencies)]
    osorted = [other._dependencies[name]
               for name in sorted(other._dependencies)]

    for s_dspec, o_dspec in zip(ssorted, osorted):
        if deptypes and s_dspec.deptypes != o_dspec.deptypes:
            return False

        s, o = s_dspec.spec, o_dspec.spec
        visited_s = id(s) in vs
        visited_o = id(o) in vo

        # Check for duplicate or non-equal dependencies
        if visited_s != visited_o:
            return False

        # Skip visited nodes
        if visited_s or visited_o:
            continue

        # Recursive check for equality
        if not s._eq_dag(o, vs, vo, deptypes):
            return False

    return True

def eq_dag(self, other, deptypes=True):
    """True if the full dependency DAGs of specs are equal."""
    return self._eq_dag(other, set(), set(), deptypes)

def ne_dag(self, other, deptypes=True):
    """True if the full dependency DAGs of specs are not equal."""
    # Delegate to the recursive helper; eq_dag itself only takes
    # (other, deptypes).
    return not self._eq_dag(other, set(), set(), deptypes)

def _cmp_node(self):
    """Comparison key for just *this node* and not its deps."""
    return (self.name,
            self.namespace,
            self.versions,
            self.variants,
            self.architecture,
            self.compiler,
            self.compiler_flags)

def eq_node(self, other):
    """Equality with another spec, not including dependencies."""
    return self._cmp_node() == other._cmp_node()

def ne_node(self, other):
    """Inequality with another spec, not including dependencies."""
    return self._cmp_node() != other._cmp_node()

def _cmp_key(self):
    """This returns a key for the spec *including* DAG structure.

    The key is the concatenation of:

    1. A tuple describing this node in the DAG.
    2. The hash of each of this node's dependencies' cmp_keys.
    """
    if self._cmp_key_cache:
        return self._cmp_key_cache

    dep_tuple = tuple(
        (d.spec.name, hash(d.spec), tuple(sorted(d.deptypes)))
        for name, d in sorted(self._dependencies.items()))

    key = (self._cmp_node(), dep_tuple)
    if self._concrete:
        self._cmp_key_cache = key
    return key

def format(self, format_string='$_$@$%@+$+$=', **kwargs):
    """Prints out particular pieces of a spec, depending on what is
    in the format string.

    The format strings you can provide are::

        $_   Package name
        $.   Full package name (with namespace)
        $@   Version with '@' prefix
        $%   Compiler with '%' prefix
        $%@  Compiler with '%' prefix & compiler version with '@'
             prefix
        $%+  Compiler with '%' prefix & compiler flags prefixed by
             name
        $%@+ Compiler, compiler version, and compiler flags with same
             prefixes as above
        $+   Options
        $=   Architecture prefixed by 'arch='
        $/   7-char prefix of DAG hash with '-' prefix

    You can also use full-string versions, which elide the prefixes::

        ${PACKAGE}       Package name
        ${VERSION}       Version
        ${COMPILER}      Full compiler string
        ${COMPILERNAME}  Compiler name
        ${COMPILERVER}   Compiler version
        ${COMPILERFLAGS} Compiler flags
        ${OPTIONS}       Options
        ${ARCHITECTURE}  Architecture
        ${SHA1}          Dependencies 8-char sha1 prefix
        ${HASH:len}      DAG hash with optional length specifier

        ${SPACK_ROOT}    The spack root directory
        ${SPACK_INSTALL} The default spack install directory,
                         ${SPACK_PREFIX}/opt
        ${PREFIX}        The package prefix

    Note these are case-insensitive: for example you can specify
    either ``${PACKAGE}`` or ``${package}``.

    Optionally you can provide a width, e.g. ``$20_`` for a 20-wide
    name. Like printf, you can provide '-' for left justification,
    e.g. ``$-20_`` for a left-justified name.

    Anything else is copied verbatim into the output stream.

    *Example:* ``$_$@$+`` translates to the name, version, and
    options of the package, but no dependencies, arch, or compiler.

    TODO: allow, e.g., ``$6#`` to customize short hash length
    TODO: allow, e.g., ``$//`` for full hash.
    """
    color = kwargs.get('color', False)
    length = len(format_string)
    out = StringIO()
    named = escape = compiler = False
    named_str = fmt = ''

    def write(s, c):
        f = color_formats[c] + cescape(s) + '@.'
        cwrite(f, stream=out, color=color)

    iterator = enumerate(format_string)
    for i, c in iterator:
        if escape:
            fmt = '%'
            if c == '-':
                fmt += c
                i, c = next(iterator)

            while c in '0123456789':
                fmt += c
                i, c = next(iterator)
            fmt += 's'

            if c == '_':
                name = self.name if self.name else ''
                out.write(fmt % name)
            elif c == '.':
                out.write(fmt % self.fullname)
            elif c == '@':
                if self.versions and self.versions != _any_version:
                    write(fmt % (c + str(self.versions)), c)
            elif c == '%':
                if self.compiler:
                    write(fmt % (c + str(self.compiler.name)), c)
                compiler = True
            elif c == '+':
                if self.variants:
                    write(fmt % str(self.variants), c)
            elif c == '=':
                if self.architecture and str(self.architecture):
                    a_str = ' arch' + c + str(self.architecture) + ' '
                    write(fmt % a_str, c)
            elif c == '/':
                out.write('/' + fmt % self.dag_hash(7))
            elif c == '$':
                if fmt != '%s':
                    raise ValueError("Can't use format width with $$.")
                out.write('$')
            elif c == '{':
                named = True
                named_str = ''
            escape = False

        elif compiler:
            if c == '@':
                if (self.compiler and self.compiler.versions and
                        self.compiler.versions != _any_version):
                    write(c + str(self.compiler.versions), '%')
            elif c == '+':
                if self.compiler_flags:
                    write(fmt % str(self.compiler_flags), '%')
                compiler = False
            elif c == '$':
                escape = True
                compiler = False
            else:
                out.write(c)
                compiler = False

        elif named:
            if not c == '}':
                if i == length - 1:
                    raise ValueError("Error: unterminated ${ in format:"
                                     "'%s'" % format_string)
                named_str += c
                continue
            named_str = named_str.upper()

            if named_str == 'PACKAGE':
                name = self.name if self.name else ''
                write(fmt % self.name, '@')
            if named_str == 'VERSION':
                if self.versions and self.versions != _any_version:
                    write(fmt % str(self.versions), '@')
            elif named_str == 'COMPILER':
                if self.compiler:
                    write(fmt % self.compiler, '%')
            elif named_str == 'COMPILERNAME':
                if self.compiler:
                    write(fmt % self.compiler.name, '%')
            elif named_str in ['COMPILERVER', 'COMPILERVERSION']:
                if self.compiler:
                    write(fmt % self.compiler.versions, '%')
            elif named_str == 'COMPILERFLAGS':
                if self.compiler:
                    write(fmt % str(self.compiler_flags), '%')
            elif named_str == 'OPTIONS':
                if self.variants:
                    write(fmt % str(self.variants), '+')
            elif named_str == 'ARCHITECTURE':
                if self.architecture and str(self.architecture):
                    write(fmt % str(self.architecture), '=')
            elif named_str == 'SHA1':
                if self.dependencies:
                    out.write(fmt % str(self.dag_hash(7)))
            elif named_str == 'SPACK_ROOT':
                out.write(fmt % spack.prefix)
            elif named_str == 'SPACK_INSTALL':
                out.write(fmt % spack.store.root)
            elif named_str == 'PREFIX':
                out.write(fmt % self.prefix)
            elif named_str.startswith('HASH'):
                if named_str.startswith('HASH:'):
                    _, hashlen = named_str.split(':')
                    hashlen = int(hashlen)
                else:
                    hashlen = None
                out.write(fmt % self.dag_hash(hashlen))

            named = False

        elif c == '$':
            escape = True
            if i == length - 1:
                raise ValueError("Error: unterminated $ in format: '%s'"
                                 % format_string)
        else:
            out.write(c)

    result = out.getvalue()
    return result

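# Hypothetical usage sketch, assuming Spack is importable and an 'hdf5'
# package spec can be parsed; '$_$@$+' expands to name, version, variants:
import spack.spec

spec = spack.spec.Spec('hdf5@1.10.1+mpi')
print(spec.format('$_$@$+'))                 # -> hdf5@1.10.1+mpi
print(spec.format('${PACKAGE}-${VERSION}'))  # -> hdf5-1.10.1
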
def cformat(self, *args, **kwargs):
    """Same as format, but color defaults to auto instead of False."""
    kwargs = kwargs.copy()
    kwargs.setdefault('color', None)
    return self.format(*args, **kwargs)

def _install_status(self):
    """Helper for tree to print DB install status."""
    if not self.concrete:
        return None
    try:
        record = spack.store.db.get_record(self)
        return record.installed
    except KeyError:
        return None

def _installed_explicitly(self):
    """Helper for tree to print DB install status."""
    if not self.concrete:
        return None
    try:
        record = spack.store.db.get_record(self)
        return record.explicit
    except KeyError:
        return None

def tree(self, **kwargs):
    """Prints out this spec and its dependencies, tree-formatted
    with indentation.
    """
    color = kwargs.pop('color', get_color_when())
    depth = kwargs.pop('depth', False)
    hashes = kwargs.pop('hashes', False)
    hlen = kwargs.pop('hashlen', None)
    install_status = kwargs.pop('install_status', False)
    cover = kwargs.pop('cover', 'nodes')
    indent = kwargs.pop('indent', 0)
    fmt = kwargs.pop('format', '$_$@$%@+$+$=')
    prefix = kwargs.pop('prefix', None)
    show_types = kwargs.pop('show_types', False)
    deptypes = kwargs.pop('deptypes', ('build', 'link'))
    check_kwargs(kwargs, self.tree)

    out = ''
    for d, dep_spec in self.traverse_edges(
            order='pre', cover=cover, depth=True, deptypes=deptypes):
        node = dep_spec.spec

        if prefix is not None:
            out += prefix(node)
        out += ' ' * indent

        if depth:
            out += '%-4d' % d

        if install_status:
            status = node._install_status()
            if status is None:
                out += '     '
            elif status:
                out += colorize('@g{[+]}  ', color=color)
            else:
                out += colorize('@r{[-]}  ', color=color)

        if hashes:
            out += colorize('@K{%s}  ', color=color) % node.dag_hash(hlen)

        if show_types:
            out += '['
            if dep_spec.deptypes:
                for t in alldeps:
                    out += ''.join(t[0] if t in dep_spec.deptypes else ' ')
            else:
                out += ' ' * len(alldeps)
            out += ']  '

        out += ' ' * d
        if d > 0:
            out += '^'
        out += node.format(fmt, color=color) + '\n'

    return out

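# Hypothetical usage sketch, assuming `spec` is a concretized Spec; the
# keyword names match the kwargs popped above:
print(spec.tree(hashes=True, hashlen=7, show_types=True))
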
def spec(self, name):
    """Parse a spec out of the input.

    If a spec is supplied, initialize and return it instead of
    creating a new one.
    """
    if name:
        spec_namespace, dot, spec_name = name.rpartition('.')
        if not spec_namespace:
            spec_namespace = None
        self.check_identifier(spec_name)
    else:
        spec_namespace = None
        spec_name = None

    # This will init the spec without calling __init__
    spec = Spec.__new__(Spec)
    spec.name = spec_name
    spec.versions = VersionList()
    spec.variants = VariantMap(spec)
    spec.architecture = None
    spec.compiler = None
    spec.external_path = None
    spec.external_module = None
    spec.compiler_flags = FlagMap(spec)
    spec._dependents = DependencyMap(spec)
    spec._dependencies = DependencyMap(spec)
    spec.namespace = spec_namespace
    spec._hash = None
    spec._cmp_key_cache = None
    spec._normal = False
    spec._concrete = False

    # record this so that we know whether the version was specified
    added_version = False

    while self.next:
        if self.accept(AT):
            vlist = self.version_list()
            for version in vlist:
                spec._add_version(version)
            added_version = True
        elif self.accept(ON):
            name = self.variant()
            spec.variants[name] = BoolValuedVariant(name, True)
        elif self.accept(OFF):
            name = self.variant()
            spec.variants[name] = BoolValuedVariant(name, False)
        elif self.accept(PCT):
            spec._set_compiler(self.compiler())
        elif self.accept(ID):
            self.previous = self.token
            if self.accept(EQ):
                # We're adding a key-value pair to the spec
                self.expect(VAL)
                spec._add_flag(self.previous.value, self.token.value)
                self.previous = None
            else:
                # We've found the start of a new spec. Go back to do
                # that and don't consume anything else.
                self.push_tokens([self.token])
                self.previous = None
                break
        elif self.accept(HASH):
            # Get the spec by hash and confirm it matches what we
            # already have
            hash_spec = self.spec_by_hash()
            if hash_spec.satisfies(spec):
                spec = hash_spec
                break
            else:
                raise InvalidHashError(spec, hash_spec.dag_hash())
        else:
            break

    # If there was no version in the spec, consider it an open range
    if not added_version and not spec._hash:
        spec.versions = VersionList(':')

    return spec

def check_identifier(self, id=None):
    """The only identifiers that can contain '.' are versions, but
    version ids are context-sensitive so we have to check on a
    case-by-case basis. Call this if we detect a version id where it
    shouldn't be.
    """
    if not id:
        id = self.token.value
    if '.' in id:
        self.last_token_error(
            "{0}: Identifier cannot contain '.'".format(id))

def __init__(self, vpkg, providers):
    """Takes the name of the vpkg."""
    super(MultipleProviderError, self).__init__(
        "Multiple providers found for '%s': %s"
        % (vpkg, [str(s) for s in providers]))
    self.vpkg = vpkg
    self.providers = providers

@staticmethod
def directive(dicts=None):
    """Decorator for Spack directives.

    Spack directives allow you to modify a package while it is being
    defined, e.g. to add version or dependency information.
    Directives are one of the key pieces of Spack's package
    "language", which is embedded in python.

    Here's an example directive:

        @directive(dicts='versions')
        version(pkg, ...):
            ...

    This directive allows you to write:

        class Foo(Package):
            version(...)

    The ``@directive`` decorator handles a couple things for you:

    1. Adds the class scope (pkg) as an initial parameter when
       called, like a class method would. This allows you to modify
       a package from within a directive, while the package is still
       being defined.

    2. It automatically adds a dictionary called "versions" to the
       package so that you can refer to pkg.versions.

    The ``(dicts='versions')`` part ensures that ALL packages in
    Spack will have a ``versions`` attribute after they're
    constructed, and that if no directive actually modified it, it
    will just be an empty dict.

    This is just a modular way to add storage attributes to the
    Package class, and it's how Spack gets information from the
    packages to the core.
    """
    global __all__

    if isinstance(dicts, string_types):
        dicts = (dicts,)
    if not isinstance(dicts, collections.Sequence):
        message = 'dicts arg must be list, tuple, or string. Found {0}'
        raise TypeError(message.format(type(dicts)))

    # Add the dictionary names if not already there
    DirectiveMetaMixin._directive_names |= set(dicts)

    # This decorator just returns the directive functions
    def _decorator(decorated_function):
        __all__.append(decorated_function.__name__)

        @functools.wraps(decorated_function)
        def _wrapper(*args, **kwargs):
            # A directive returns either something that is callable
            # on a package or a sequence of them
            values = decorated_function(*args, **kwargs)

            # ...so if it is not a sequence, make it so
            if not isinstance(values, collections.Sequence):
                values = (values,)

            DirectiveMetaMixin._directives_to_be_executed.extend(values)
        return _wrapper

    return _decorator

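# Sketch following the docstring's example ('version' here is a stand-in,
# not Spack's real implementation): per the wrapper above, a directive
# returns a callable that is collected and later executed against the
# package class being defined.
@directive(dicts='versions')
def version(ver, **kwargs):
    def _execute(pkg):
        pkg.versions[ver] = kwargs
    return _execute

class Foo(Package):
    version('1.0', sha256='...')
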
def clear(self):
    """Empty cached config information."""
    self.sections = {}

def add_target(self, name, target):
    """Used by the platform specific subclass to list available
    targets.

    Raises an error if the platform specifies a name that is reserved
    by spack as an alias.
    """
    if name in Platform.reserved_targets:
        raise ValueError(
            '%s is a spack reserved alias '
            'and cannot be the name of a target' % name)
    self.targets[name] = target

def target(self, name):
    """This is a getter method for the target dictionary that handles
    defaulting based on the values provided by default, front-end,
    and back-end. This can be overwritten by a subclass for which we
    want to provide further aliasing options.
    """
    if name == 'default_target':
        name = self.default
    elif name == 'frontend' or name == 'fe':
        name = self.front_end
    elif name == 'backend' or name == 'be':
        name = self.back_end
    return self.targets.get(name, None)

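# Standalone sketch of the aliasing above: 'default_target',
# 'frontend'/'fe' and 'backend'/'be' resolve to concrete target names
# before the dictionary lookup (target names here are made up).
def _resolve_alias(name, default, front_end, back_end):
    if name == 'default_target':
        return default
    if name in ('frontend', 'fe'):
        return front_end
    if name in ('backend', 'be'):
        return back_end
    return name

assert _resolve_alias('fe', 'haswell', 'haswell', 'knl') == 'haswell'
assert _resolve_alias('be', 'haswell', 'haswell', 'knl') == 'knl'
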
def add_operating_system(self, name, os_class):
    """Add the operating_system class object into the
    platform.operating_sys dictionary.
    """
    if name in Platform.reserved_oss:
        raise ValueError(
            '%s is a spack reserved alias '
            'and cannot be the name of an OS' % name)
    self.operating_sys[name] = os_class

@classmethod
def setup_platform_environment(self, pkg, env):
    """Subclass can override this method if it requires any
    platform-specific build environment modifications.
    """
    pass

@classmethod
def detect(self):
    """Subclass is responsible for implementing this method.

    Returns True if the Platform class detects that it is the
    current platform and False if it's not.
    """
    raise NotImplementedError()

def find_compilers(self, *paths):
    """Return a list of compilers found in the supplied paths.

    This invokes the find() method for each Compiler class, and
    appends the compilers detected to a list.
    """
    if not paths:
        paths = get_path('PATH')

    # Make sure path elements exist, and include /bin directories
    # under prefixes.
    filtered_path = []
    for p in paths:
        # Eliminate symlinked directories
        p = os.path.realpath(p)
        if not os.path.isdir(p):
            continue
        filtered_path.append(p)

        # Check for a bin directory, add it if it exists
        bin = join_path(p, 'bin')
        if os.path.isdir(bin):
            filtered_path.append(os.path.realpath(bin))

    # Once the paths are cleaned up, search for each type of
    # compiler in parallel.
    import spack.compilers
    types = spack.compilers.all_compiler_types()
    compiler_lists = parmap(
        lambda cmp_cls: self.find_compiler(cmp_cls, *filtered_path),
        types)

    # Flatten the per-class lists into a single list of compilers.
    clist = [comp for cl in compiler_lists for comp in cl]
    return clist

def find_compiler(self, cmp_cls, *path):
    """Try to find the given type of compiler in the user's
    environment.

    For each set of compilers found, this returns compiler objects
    with the cc, cxx, f77, fc paths and the version filled in.

    This will search for compilers with the names in cc_names,
    cxx_names, etc. and it will group them if they have common
    prefixes, suffixes, and versions. e.g., gcc-mp-4.7 would be
    grouped with g++-mp-4.7 and gfortran-mp-4.7.
    """
    dicts = parmap(
        lambda t: cmp_cls._find_matches_in_path(*t),
        [(cmp_cls.cc_names, cmp_cls.cc_version) + tuple(path),
         (cmp_cls.cxx_names, cmp_cls.cxx_version) + tuple(path),
         (cmp_cls.f77_names, cmp_cls.f77_version) + tuple(path),
         (cmp_cls.fc_names, cmp_cls.fc_version) + tuple(path)])

    all_keys = set()
    for d in dicts:
        all_keys.update(d)

    compilers = {}
    for k in all_keys:
        ver, pre, suf = k

        # Skip compilers with an unknown version.
        if ver == 'unknown':
            continue

        paths = tuple(pn[k] if k in pn else None for pn in dicts)
        spec = spack.spec.CompilerSpec(cmp_cls.name, ver)

        if ver in compilers:
            prev = compilers[ver]

            # prefer the one with more compiler paths filled in.
            prev_paths = [prev.cc, prev.cxx, prev.f77, prev.fc]
            newcount = len([p for p in paths if p is not None])
            prevcount = len([p for p in prev_paths if p is not None])

            # Don't add if it's not an improvement over prev compiler
            if newcount <= prevcount:
                continue

        compilers[ver] = cmp_cls(spec, self, py_platform.machine(), paths)

    return list(compilers.values())

def __init__(self, root, db_dir=None):
    """Create a Database for Spack installations under ``root``.

    A Database is a cache of Specs data from ``$prefix/spec.yaml``
    files in Spack installation directories.

    By default, Database files (data and lock files) are stored
    under ``root/.spack-db``, which is created if it does not exist.
    This is the ``db_dir``.

    The Database will attempt to read an ``index.json`` file in
    ``db_dir``. If it does not find one, it will fall back to read
    an ``index.yaml`` if one is present. If that does not exist, it
    will create a database when needed by scanning the entire
    Database root for ``spec.yaml`` files according to Spack's
    ``DirectoryLayout``.

    Caller may optionally provide a custom ``db_dir`` parameter
    where data will be stored. This is intended to be used for
    testing the Database class.
    """
    self.root = root

    if db_dir is None:
        # If the db_dir is not provided, default to within the db root.
        self._db_dir = join_path(self.root, _db_dirname)
    else:
        # Allow customizing the database directory location for testing
        self._db_dir = db_dir

    # Set up layout of database files within the db dir
    self._old_yaml_index_path = join_path(self._db_dir, 'index.yaml')
    self._index_path = join_path(self._db_dir, 'index.json')
    self._lock_path = join_path(self._db_dir, 'lock')

    # This is for other classes to use to lock prefix directories.
    self.prefix_lock_path = join_path(self._db_dir, 'prefix_lock')

    # Create needed directories and files
    if not os.path.exists(self._db_dir):
        mkdirp(self._db_dir)

    # initialize rest of state.
    self.lock = Lock(self._lock_path)
    self._data = {}

    # whether there was an error at the start of a read transaction
    self._error = None

def write_transaction(self, timeout=_db_lock_timeout):
    """Get a write lock context manager for use in a `with` block."""
    return WriteTransaction(self.lock, self._read, self._write, timeout)

def read_transaction(self, timeout=_db_lock_timeout):
    """Get a read lock context manager for use in a `with` block."""
    return ReadTransaction(self.lock, self._read, timeout=timeout)

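# Hypothetical usage sketch, assuming `db` is a Database instance and
# `some_spec` is a concrete spec (get_record appears earlier in this
# section):
with db.read_transaction():
    record = db.get_record(some_spec)  # reads happen under the read lock
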
def prefix_lock(self, spec):
    """Get a lock on a particular spec's installation directory.

    NOTE: The installation directory **does not** need to exist.

    Prefix lock is a byte range lock on the nth byte of a file.

    The lock file is ``spack.store.db.prefix_lock`` -- the DB tells
    us what to call it and it lives alongside the install DB.

    n is the sys.maxsize-bit prefix of the DAG hash. This makes the
    likelihood of collision very low AND it gives us readers-writer
    lock semantics with just a single lockfile, so no cleanup
    required.
    """
    prefix = spec.prefix
    if prefix not in self._prefix_locks:
        self._prefix_locks[prefix] = Lock(
            self.prefix_lock_path,
            spec.dag_hash_bit_prefix(bit_length(sys.maxsize)), 1)

    return self._prefix_locks[prefix]

def _write_to_file(self, stream):
    """Write out the database to a JSON file.

    This function does not do any locking or transactions.
    """
    # map from per-spec hash code to installation record.
    installs = dict((k, v.to_dict()) for k, v in self._data.items())
    database = {
        'database': {
            'installs': installs,
            'version': str(_db_version)
        }
    }

    try:
        sjson.dump(database, stream)
    except YAMLError as e:
        raise syaml.SpackYAMLError(
            'error writing YAML database:', str(e))

def _read_spec_from_dict(self, hash_key, installs):
    """Recursively construct a spec from a hash in a YAML database.

    Does not do any locking.
    """
    spec_dict = installs[hash_key]['spec']

    # Install records don't include the hash with the spec, so add
    # it in here to make sure the spec is valid.
    for name in spec_dict:
        spec_dict[name]['hash'] = hash_key

    spec = spack.spec.Spec.from_node_dict(spec_dict)
    return spec

def _read_from_file(self, stream, format='json'):
    """Fill database from file, do not maintain old data.

    Translate the spec portions from node-dict form to spec form.

    Does not do any locking.
    """
    if format.lower() == 'json':
        load = sjson.load
    elif format.lower() == 'yaml':
        load = syaml.load
    else:
        raise ValueError('Invalid database format: %s' % format)

    try:
        if isinstance(stream, string_types):
            with open(stream, 'r') as f:
                fdata = load(f)
        else:
            fdata = load(stream)
    except MarkedYAMLError as e:
        raise syaml.SpackYAMLError(
            'error parsing YAML database:', str(e))
    except Exception as e:
        raise CorruptDatabaseError('error parsing database:', str(e))

    if fdata is None:
        return

    def check(cond, msg):
        if not cond:
            raise CorruptDatabaseError(
                'Spack database is corrupt: %s' % msg, self._index_path)

    check('database' in fdata, "No 'database' attribute in YAML.")

    # High-level file checks
    db = fdata['database']
    check('installs' in db, "No 'installs' in YAML DB.")
    check('version' in db, "No 'version' in YAML DB.")

    installs = db['installs']

    version = Version(db['version'])
    if version > _db_version:
        raise InvalidDatabaseVersionError(_db_version, version)
    elif version < _db_version:
        self.reindex(spack.store.layout)
        installs = dict((k, v.to_dict()) for k, v in self._data.items())

    def invalid_record(hash_key, error):
        msg = ('Invalid record in Spack database: '
               'hash: %s, cause: %s: %s')
        msg %= (hash_key, type(error).__name__, str(error))
        raise CorruptDatabaseError(msg, self._index_path)

    # Pass 1: construct specs from the install records.
    data = {}
    for hash_key, rec in installs.items():
        try:
            spec = self._read_spec_from_dict(hash_key, installs)
            data[hash_key] = InstallRecord.from_dict(spec, rec)
        except Exception as e:
            invalid_record(hash_key, e)

    # Pass 2: assign dependencies once all specs are constructed.
    for hash_key in data:
        try:
            self._assign_dependencies(hash_key, installs, data)
        except Exception as e:
            invalid_record(hash_key, e)

    # Pass 3: specs from the database are always concrete.
    for hash_key, rec in data.items():
        rec.spec._mark_concrete()

    self._data = data

'Build database index from scratch based on a directory layout. Locks the DB if it isn\'t locked already.'
def reindex(self, directory_layout):
def _read_suppress_error():
    try:
        if os.path.isfile(self._index_path):
            self._read_from_file(self._index_path)
    except CorruptDatabaseError as e:
        self._error = e
        self._data = {}

transaction = WriteTransaction(
    self.lock, _read_suppress_error, self._write, _db_lock_timeout)

with transaction:
    if self._error:
        tty.warn('Spack database was corrupt. Will rebuild. Error was:',
                 str(self._error))
        self._error = None

    old_data = self._data
    try:
        self._data = {}
        processed_specs = set()

        # Pass 1: reconstruct records from spec.yaml files in the layout.
        for spec in directory_layout.all_specs():
            tty.debug('RECONSTRUCTING FROM SPEC.YAML: {0}'.format(spec))

            # Try to recover explicit value from old DB, but
            # default it to True if DB was corrupt.
            explicit = True
            if old_data is not None:
                old_info = old_data.get(spec.dag_hash())
                if old_info is not None:
                    explicit = old_info.explicit

            self._add(spec, directory_layout, explicit=explicit)
            processed_specs.add(spec)

        # Pass 2: carry over any old records not already reconstructed.
        for key, entry in old_data.items():
            if entry.spec in processed_specs:
                msg = 'SKIPPING RECONSTRUCTION FROM OLD DB: {0}'
                msg += ' [already reconstructed from spec.yaml]'
                tty.debug(msg.format(entry.spec))
                continue

            tty.debug('RECONSTRUCTING FROM OLD DB: {0}'.format(entry.spec))
            try:
                layout = spack.store.layout
                if entry.spec.external:
                    layout = None
                    install_check = True
                else:
                    install_check = layout.check_installed(entry.spec)

                if install_check:
                    kwargs = {
                        'spec': entry.spec,
                        'directory_layout': layout,
                        'explicit': entry.explicit
                    }
                    self._add(**kwargs)
                    processed_specs.add(entry.spec)
            except Exception as e:
                # Something went wrong; skip restoring this record.
                tty.debug(str(e))

        self._check_ref_counts()
    except:
        # If anything explodes, restore old data, skip write.
        self._data = old_data
        raise
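As a hedged usage note: rebuilding the index from an existing directory layout is a single call, using the store and layout objects referenced throughout this file.
# Rebuild the install database from the on-disk directory layout.
spack.store.db.reindex(spack.store.layout)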
'Ensure consistency of reference counts in the DB. Raise an AssertionError if something is amiss. Does no locking.'
def _check_ref_counts(self):
# Pass 1: count how many tracked specs depend on each record.
counts = {}
for key, rec in self._data.items():
    counts.setdefault(key, 0)
    for dep in rec.spec.dependencies(_tracked_deps):
        dep_key = dep.dag_hash()
        counts.setdefault(dep_key, 0)
        counts[dep_key] += 1

# Pass 2: verify each stored ref_count against the recomputed count.
for rec in self._data.values():
    key = rec.spec.dag_hash()
    expected = counts[key]
    found = rec.ref_count
    if not expected == found:
        raise AssertionError(
            'Invalid ref_count: %s: %d (expected %d), in DB %s' %
            (key, found, expected, self._index_path))
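The invariant checked above: each record's ref_count equals the number of tracked specs that depend on it. A tiny self-contained illustration of the counting pass, over a hypothetical two-node DAG:
# Hypothetical DAG: A depends on B, so B's expected ref_count is 1.
deps = {'A': ['B'], 'B': []}  # hash -> hashes of dependencies
counts = {}
for key, dep_keys in deps.items():
    counts.setdefault(key, 0)
    for dep_key in dep_keys:
        counts.setdefault(dep_key, 0)
        counts[dep_key] += 1
assert counts == {'A': 0, 'B': 1}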
'Write the in-memory database index to its file path. This is a helper function called by the WriteTransaction context manager. If there is an exception while the write lock is active, nothing will be written to the database file, but the in-memory database *may* be left in an inconsistent state. It will be consistent after the start of the next transaction, when it reads from disk again. This routine does no locking.'
def _write(self, type, value, traceback):
if type is not None:
    # An exception interrupted the transaction; don't write.
    return

temp_file = self._index_path + (
    '.%s.%s.temp' % (socket.getfqdn(), os.getpid()))

# Write a temporary file, then move it into place atomically.
try:
    with open(temp_file, 'w') as f:
        self._write_to_file(f)
    os.rename(temp_file, self._index_path)
except:
    # Clean up temp file if something goes wrong.
    if os.path.exists(temp_file):
        os.remove(temp_file)
    raise
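The write-temp-then-rename pattern above is what keeps readers safe: ``os.rename()`` is atomic on POSIX when source and destination live on the same filesystem, so a reader sees either the old index or the new one, never a partial file. A minimal standalone sketch of the same pattern (the function name is hypothetical):
import os
import socket

def atomic_write(path, data):
    # Unique temp name avoids clashes between hosts and processes.
    temp = path + '.%s.%s.temp' % (socket.getfqdn(), os.getpid())
    try:
        with open(temp, 'w') as f:
            f.write(data)
        os.rename(temp, path)  # readers never observe a partial file
    except:
        if os.path.exists(temp):
            os.remove(temp)
        raise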
'Re-read Database from the data in the set location. This does no locking, with one exception: it will automatically migrate an index.yaml to an index.json if possible. This requires taking a write lock.'
def _read(self):
if os.path.isfile(self._index_path):
    # A JSON index already exists; read it directly.
    self._read_from_file(self._index_path, format='json')
elif os.path.isfile(self._old_yaml_index_path):
    if os.access(self._db_dir, os.R_OK | os.W_OK):
        # If we can write, read the old YAML index and migrate it to JSON.
        self._read_from_file(self._old_yaml_index_path, format='yaml')
        with WriteTransaction(self.lock, timeout=_db_lock_timeout):
            self._write(None, None, None)
    else:
        # Can't write, so just read the old YAML index.
        self._read_from_file(self._old_yaml_index_path, format='yaml')
else:
    # No index at all; create one by scanning the install tree.
    with WriteTransaction(self.lock, timeout=_db_lock_timeout):
        self._write(None, None, None)
    self.reindex(spack.store.layout)
'Add an install record for this spec to the database. Assumes spec is installed in ``layout.path_for_spec(spec)``. Also ensures dependencies are present and updated in the DB as either installed or missing.'
def _add(self, spec, directory_layout=None, explicit=False):
if not spec.concrete:
    raise NonConcreteSpecAddError('Specs added to DB must be concrete.')

# Make sure all tracked dependencies are in the DB first.
for dep in spec.dependencies(_tracked_deps):
    dkey = dep.dag_hash()
    if dkey not in self._data:
        self._add(dep, directory_layout, explicit=False)

key = spec.dag_hash()
if key not in self._data:
    installed = bool(spec.external)
    path = None
    if not spec.external and directory_layout:
        path = directory_layout.path_for_spec(spec)
        try:
            directory_layout.check_installed(spec)
            installed = True
        except DirectoryLayoutError as e:
            tty.warn('Dependency missing due to corrupt install directory:',
                     path, str(e))

    # Create a new install record with no deps initially.
    new_spec = spec.copy(deps=False)
    self._data[key] = InstallRecord(
        new_spec, path, installed, ref_count=0, explicit=explicit)

    # Connect dependencies from the DB to the new copy, bumping
    # each dependency's ref_count.
    for name, dep in iteritems(spec.dependencies_dict(_tracked_deps)):
        dkey = dep.spec.dag_hash()
        new_spec._add_dependency(self._data[dkey].spec, dep.deptypes)
        self._data[dkey].ref_count += 1

    # Mark concrete once everything is built.
    new_spec._mark_concrete()
    new_spec._hash = key
else:
    # If it is already there, mark it as installed.
    self._data[key].installed = True
    self._data[key].explicit = explicit
'Add spec at path to database, locking and reading DB to sync. ``add()`` will lock and read from the DB on disk.'
@_autospec
def add(self, spec, directory_layout, explicit=False):
with self.write_transaction(): self._add(spec, directory_layout, explicit=explicit)
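A hedged usage sketch: record a freshly installed spec. The spec must be concrete; the package name and the use of ``concretized()`` are illustrative.
spec = spack.spec.Spec('mpileaks@2.3').concretized()  # illustrative
spack.store.db.add(spec, spack.store.layout, explicit=True)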
'Get the exact spec OR get a single spec that matches.'
def _get_matching_spec_key(self, spec, **kwargs):
key = spec.dag_hash()
if key not in self._data:
    # Fall back to a query if the exact hash isn't in the DB.
    match = self.query_one(spec, **kwargs)
    if match:
        return match.dag_hash()
    raise KeyError('No such spec in database! %s' % spec)
return key
'Non-locking version of remove(); does real work.'
def _remove(self, spec):
key = self._get_matching_spec_key(spec)
rec = self._data[key]

if rec.ref_count > 0:
    # Other specs still depend on this one, so just mark it uninstalled.
    rec.installed = False
    return rec.spec

# No remaining dependents: remove the record and update ref counts.
del self._data[key]
for dep in rec.spec.dependencies(_tracked_deps):
    self._decrement_ref_count(dep)

return rec.spec
'Removes a spec from the database. To be called on uninstall. Reads the database, then: 1. Marks the spec as not installed. 2. Removes the spec if it has no more dependents. 3. If removed, recursively updates dependencies\' ref counts and removes them if they are no longer needed.'
@_autospec
def remove(self, spec):
with self.write_transaction(): return self._remove(spec)
'Return installed specs related to this one.'
@_autospec
def installed_relatives(self, spec, direction='children', transitive=True):
if direction not in ('parents', 'children'):
    raise ValueError('Invalid direction: %s' % direction)

relatives = set()
for spec in self.query(spec):
    if transitive:
        to_add = spec.traverse(direction=direction, root=False)
    elif direction == 'parents':
        to_add = spec.dependents()
    else:  # direction == 'children'
        to_add = spec.dependencies()

    for relative in to_add:
        hash_key = relative.dag_hash()
        if hash_key not in self._data:
            reltype = 'Dependent' if direction == 'parents' else 'Dependency'
            tty.warn('Inconsistent state! %s %s of %s not in DB' %
                     (reltype, hash_key, spec.dag_hash()))
            continue
        if not self._data[hash_key].installed:
            continue
        relatives.add(relative)
return relatives
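Hedged usage: a common reason to ask for relatives is checking what would break before an uninstall. The package name is illustrative; the ``@_autospec`` decorator lets a plain string stand in for a Spec.
dependents = spack.store.db.installed_relatives(
    'mpich', direction='parents', transitive=True)
for s in sorted(dependents):
    print(s.name)  # everything that transitively depends on mpich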
'Yield the packages of all installed extensions activated for the given spec.'
@_autospec
def installed_extensions_for(self, extendee_spec):
for spec in self.query():
    try:
        spack.store.layout.check_activated(extendee_spec, spec)
        yield spec.package
    except spack.directory_layout.NoSuchExtensionError:
        continue
'Run a query on the database. ``query_spec`` Queries iterate through specs in the database and return those that satisfy the supplied ``query_spec``. If query_spec is `any`, this will match all specs in the database. If it is a spec, we\'ll evaluate ``spec.satisfies(query_spec)``. The query can be constrained by two additional attributes: ``known`` Possible values: True, False, any Specs that are "known" are those for which Spack can locate a ``package.py`` file -- i.e., Spack "knows" how to install them. Specs that are unknown may represent packages that existed in a previous version of Spack, but have since either changed their name or been removed. ``installed`` Possible values: True, False, any Specs for which a prefix exists are "installed". A spec that is NOT installed will be in the database if some other spec depends on it but its installation has gone away since Spack installed it. TODO: Specs are a lot like queries. Should there be a wildcard spec object, and should specs have attributes like installed and known that can be queried? Or are these really special cases that only belong here?'
def query(self, query_spec=any, known=any, installed=True, explicit=any):
with self.read_transaction():
    # Just look up concrete specs with hashes; no fancy search.
    if isinstance(query_spec, spack.spec.Spec) and query_spec._concrete:
        hash_key = query_spec.dag_hash()
        if hash_key in self._data:
            return [self._data[hash_key].spec]
        else:
            return []

    # Abstract specs require testing against every record.
    results = []
    for key, rec in self._data.items():
        if installed is not any and rec.installed != installed:
            continue
        if explicit is not any and rec.explicit != explicit:
            continue
        if known is not any and spack.repo.exists(rec.spec.name) != known:
            continue
        if query_spec is any or rec.spec.satisfies(query_spec):
            results.append(rec.spec)

    return sorted(results)
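Some hedged examples of the query interface (the spec strings are illustrative):
db = spack.store.db
db.query()                # all installed specs
db.query('mpileaks@2.3')  # specs satisfying an abstract constraint
db.query(installed=any)   # include records whose prefix is gone
db.query(explicit=True)   # only explicitly requested installs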
'Query for exactly one spec that matches the query spec. Raises an assertion error if more than one spec matches the query. Returns None if no installed package matches.'
def query_one(self, query_spec, known=any, installed=True):
concrete_specs = self.query(query_spec, known, installed)
assert len(concrete_specs) <= 1
return concrete_specs[0] if concrete_specs else None
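Hedged usage: expect at most one match and handle the miss (the spec string is illustrative):
match = db.query_one('mpileaks@2.3%gcc')
if match is None:
    print('not installed')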
'Create a new ProviderIndex. Optional arguments: specs List (or sequence) of specs. If provided, will call `update` on this ProviderIndex with each spec in the list. restrict "restricts" values to the verbatim input specs; do not pre-apply package\'s constraints. TODO: rename this. It is intended to keep things as broad as possible without overly restricting results, so it is not the best name.'
def __init__(self, specs=None, restrict=False):
if specs is None:
    specs = []

self.restrict = restrict
self.providers = {}

for spec in specs:
    if not isinstance(spec, spack.spec.Spec):
        spec = spack.spec.Spec(spec)
    if spec.virtual:
        continue
    self.update(spec)
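A hedged construction sketch; the package names are illustrative, and resolving them requires a working Spack repo, since each spec is checked for virtualness before being indexed.
index = ProviderIndex(specs=['openmpi', 'mpich'])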
'Gives specs of all packages that provide the virtual packages matching the supplied specs.'
def providers_for(self, *vpkg_specs):
providers = set()
for vspec in vpkg_specs:
    # Allow string names to be passed as input, as well as specs.
    if type(vspec) == str:
        vspec = spack.spec.Spec(vspec)

    # Add all the providers that satisfy the vpkg spec.
    if vspec.name in self.providers:
        for p_spec, spec_set in self.providers[vspec.name].items():
            if p_spec.satisfies(vspec, deps=False):
                providers.update(spec_set)

# Return providers in order. Defensively copy.
return sorted(s.copy() for s in providers)
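Hedged usage, continuing the construction sketch above: ask which indexed packages provide a virtual spec; a plain string is promoted to a Spec internally.
for provider in index.providers_for('mpi@2'):
    print(provider.name)  # e.g. mpich, openmpi (illustrative)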
'Whether a particular vpkg name is in the index.'
def __contains__(self, name):
return (name in self.providers)