Merge branch 'develop' of https://github.com/LLNL/spack into features/module_file_explicit_load

Conflicts:
	lib/spack/spack/test/modules.py
This commit is contained in:
alalazo 2016-06-18 13:06:37 +02:00
commit 51cf847647
118 changed files with 5335 additions and 1241 deletions

View File

@ -8,6 +8,9 @@
# - E221: multiple spaces before operator
# - E241: multiple spaces after ,
#
# Let people use terse Python features:
# - E731 : lambda expressions
#
# Spack allows wildcard imports:
# - F403: disable wildcard import
#
@ -16,5 +19,5 @@
# - F999: name may be undefined, or defined from star imports.
#
[flake8]
ignore = E221,E241,F403,F821,F999
ignore = E221,E241,E731,F403,F821,F999,F405
max-line-length = 79

View File

@ -138,6 +138,9 @@ def main():
import spack.util.debug as debug
debug.register_interrupt_handler()
from spack.yaml_version_check import check_yaml_versions
check_yaml_versions()
spack.spack_working_dir = working_dir
if args.mock:
from spack.repository import RepoPath

View File

@ -102,8 +102,8 @@ that the packages is installed:
==> adept-utils is already installed in /home/gamblin2/spack/opt/chaos_5_x86_64_ib/gcc@4.4.7/adept-utils@1.0-5adef8da.
==> Trying to fetch from https://github.com/hpc/mpileaks/releases/download/v1.0/mpileaks-1.0.tar.gz
######################################################################## 100.0%
==> Staging archive: /home/gamblin2/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7=chaos_5_x86_64_ib-59f6ad23/mpileaks-1.0.tar.gz
==> Created stage in /home/gamblin2/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7=chaos_5_x86_64_ib-59f6ad23.
==> Staging archive: /home/gamblin2/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7 arch=chaos_5_x86_64_ib-59f6ad23/mpileaks-1.0.tar.gz
==> Created stage in /home/gamblin2/spack/var/spack/stage/mpileaks@1.0%gcc@4.4.7 arch=chaos_5_x86_64_ib-59f6ad23.
==> No patches needed for mpileaks.
==> Building mpileaks.
@ -132,10 +132,10 @@ sites, as installing a version that one user needs will not disrupt
existing installations for other users.
In addition to different versions, Spack can customize the compiler,
compile-time options (variants), and platform (for cross compiles) of
an installation. Spack is unique in that it can also configure the
*dependencies* a package is built with. For example, two
configurations of the same version of a package, one built with boost
compile-time options (variants), compiler flags, and platform (for
cross compiles) of an installation. Spack is unique in that it can
also configure the *dependencies* a package is built with. For example,
two configurations of the same version of a package, one built with boost
1.39.0, and the other version built with version 1.43.0, can coexist.
This can all be done on the command line using the *spec* syntax.
@ -334,9 +334,15 @@ of libelf would look like this:
-- chaos_5_x86_64_ib / gcc@4.4.7 --------------------------------
libdwarf@20130729-d9b90962
We can also search for packages that have a certain attribute. For example,
``spack find -l libdwarf +debug`` will show only installations of libdwarf
with the 'debug' compile-time option enabled, while ``spack find -l +debug``
will find every installed package with a 'debug' compile-time option enabled.
The full spec syntax is discussed in detail in :ref:`sec-specs`.
Compiler configuration
-----------------------------------
@ -463,6 +469,26 @@ For compilers, like ``clang``, that do not support Fortran, put
Once you save the file, the configured compilers will show up in the
list displayed by ``spack compilers``.
You can also add compiler flags to manually configured compilers. The
valid flags are ``cflags``, ``cxxflags``, ``fflags``, ``cppflags``,
``ldflags``, and ``ldlibs``. For example,::
...
chaos_5_x86_64_ib:
...
intel@15.0.0:
cc: /usr/local/bin/icc-15.0.024-beta
cxx: /usr/local/bin/icpc-15.0.024-beta
f77: /usr/local/bin/ifort-15.0.024-beta
fc: /usr/local/bin/ifort-15.0.024-beta
cppflags: -O3 -fPIC
...
These flags will be treated by spack as if they were entered from
the command line each time this compiler is used. The compiler wrappers
then inject those flags into the compiler command. Compiler flags
entered from the command line will be discussed in more detail in the
following section.
.. _sec-specs:
@ -480,7 +506,7 @@ the full syntax of specs.
Here is an example of a much longer spec than we've seen thus far::
mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt =bgqos_0 ^callpath @1.1 %gcc@4.7.2
mpileaks @1.2:1.4 %gcc@4.7.5 +debug -qt arch=bgq_os ^callpath @1.1 %gcc@4.7.2
If provided to ``spack install``, this will install the ``mpileaks``
library at some version between ``1.2`` and ``1.4`` (inclusive),
@ -498,8 +524,12 @@ More formally, a spec consists of the following pieces:
* ``%`` Optional compiler specifier, with an optional compiler version
(``gcc`` or ``gcc@4.7.3``)
* ``+`` or ``-`` or ``~`` Optional variant specifiers (``+debug``,
``-qt``, or ``~qt``)
* ``=`` Optional architecture specifier (``bgqos_0``)
``-qt``, or ``~qt``) for boolean variants
* ``name=<value>`` Optional variant specifiers that are not restricted to
boolean variants
* ``name=<value>`` Optional compiler flag specifiers. Valid flag names are
``cflags``, ``cxxflags``, ``fflags``, ``cppflags``, ``ldflags``, and ``ldlibs``.
* ``arch=<value>`` Optional architecture specifier (``arch=bgq_os``)
* ``^`` Dependency specs (``^callpath@1.1``)
There are two things to notice here. The first is that specs are
@ -579,7 +609,7 @@ compilers, variants, and architectures just like any other spec.
Specifiers are associated with the nearest package name to their left.
For example, above, ``@1.1`` and ``%gcc@4.7.2`` associates with the
``callpath`` package, while ``@1.2:1.4``, ``%gcc@4.7.5``, ``+debug``,
``-qt``, and ``=bgqos_0`` all associate with the ``mpileaks`` package.
``-qt``, and ``arch=bgq_os`` all associate with the ``mpileaks`` package.
In the diagram above, ``mpileaks`` depends on ``mpich`` with an
unspecified version, but packages can depend on other packages with
@ -635,22 +665,25 @@ based on site policies.
Variants
~~~~~~~~~~~~~~~~~~~~~~~
.. Note::
Variants are not yet supported, but will be in the next Spack
release (0.9), due in Q2 2015.
Variants are named options associated with a particular package, and
they can be turned on or off. For example, above, supplying
``+debug`` causes ``mpileaks`` to be built with debug flags. The
names of particular variants available for a package depend on what
was provided by the package author. ``spack info <package>`` will
Variants are named options associated with a particular package. They are
optional, as each package must provide default values for each variant it
makes available. Variants can be specified using
a flexible parameter syntax ``name=<value>``. For example,
``spack install libelf debug=True`` will install libelf build with debug
flags. The names of particular variants available for a package depend on
what was provided by the package author. ``spack info <package>`` will
provide information on what build variants are available.
Depending on the package a variant may be on or off by default. For
``mpileaks`` here, ``debug`` is off by default, and we turned it on
with ``+debug``. If a package is on by default you can turn it off by
either adding ``-name`` or ``~name`` to the spec.
For compatibility with earlier versions, variants which happen to be
boolean in nature can be specified by a syntax that represents turning
options on and off. For example, in the previous spec we could have
supplied ``libelf +debug`` with the same effect of enabling the debug
compile time option for the libelf package.
Depending on the package a variant may have any default value. For
``libelf`` here, ``debug`` is ``False`` by default, and we turned it on
with ``debug=True`` or ``+debug``. If a package is ``True`` by default
you can turn it off by either adding ``-name`` or ``~name`` to the spec.
There are two syntaxes here because, depending on context, ``~`` and
``-`` may mean different things. In most shells, the following will
@ -662,7 +695,7 @@ result in the shell performing home directory substitution:
mpileaks~debug # use this instead
If there is a user called ``debug``, the ``~`` will be incorrectly
expanded. In this situation, you would want to write ``mpileaks
expanded. In this situation, you would want to write ``libelf
-debug``. However, ``-`` can be ambiguous when included after a
package name without spaces:
@ -677,12 +710,35 @@ package, not a request for ``mpileaks`` built without ``debug``
options. In this scenario, you should write ``mpileaks~debug`` to
avoid ambiguity.
When spack normalizes specs, it prints them out with no spaces and
uses only ``~`` for disabled variants. We allow ``-`` and spaces on
the command line is provided for convenience and legibility.
When spack normalizes specs, it prints them out with no spaces, showing
boolean variants using the backwards-compatibility syntax and using only
``~`` for disabled boolean variants. ``-`` and spaces are allowed on the
command line for convenience and legibility.
Architecture specifier
Compiler Flags
~~~~~~~~~~~~~~~~~~~~~~~
Compiler flags are specified using the same syntax as non-boolean variants,
but fulfill a different purpose. While the function of a variant is set by
the package, compiler flags are used by the compiler wrappers to inject
flags into the compile line of the build. Additionally, compiler flags are
inherited by dependencies. ``spack install libdwarf cppflags=\"-g\"`` will
install both libdwarf and libelf with the ``-g`` flag injected into their
compile line.
Notice that the value of the compiler flags must be escape quoted on the
command line. From within python files, the same spec would be specified
``libdwarf cppflags="-g"``. This is necessary because of how the shell
handles the quote symbols.
The six compiler flags are injected in the order of implicit make commands
in gnu autotools. If all flags are set, the order is
``$cppflags $cflags|$cxxflags $ldflags command $ldlibs`` for C and C++ and
``$fflags $cppflags $ldflags command $ldlibs`` for fortran.
Architecture specifiers
~~~~~~~~~~~~~~~~~~~~~~~
.. Note::
@ -690,12 +746,9 @@ Architecture specifier
Architecture specifiers are part of specs but are not yet
functional. They will be in Spack version 1.0, due in Q3 2015.
The architecture specifier starts with a ``=`` and also comes after
some package name within a spec. It allows a user to specify a
particular architecture for the package to be built. This is mostly
used for architectures that need cross-compilation, and in most cases,
users will not need to specify the architecture when they install a
package.
The architecture specifier looks identical to a variant specifier for a
non-boolean variant. The architecture can be specified only using the
reserved name ``arch`` (``arch=bgq_os``).
.. _sec-virtual-dependencies:
@ -773,6 +826,23 @@ any MPI implementation will do. If another package depends on
error. Likewise, if you try to plug in some package that doesn't
provide MPI, Spack will raise an error.
Specifying Specs by Hash
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Complicated specs can become cumbersome to enter on the command line,
especially when many of the qualifications are necessary to
distinguish between similar installs, for example when using the
``uninstall`` command. To avoid this, when referencing an existing spec,
Spack allows you to reference specs by their hash. We previously
discussed the spec hash that Spack computes. In place of a spec in any
command, substitute ``/<hash>`` where ``<hash>`` is any amount from
the beginning of a spec hash. If the given spec hash is sufficient
to be unique, Spack will replace the reference with the spec to which
it refers. Otherwise, it will prompt for a more qualified hash.
Note that this will not work to reinstall a dependency uninstalled by
``spack uninstall -f``.
.. _spack-providers:
``spack providers``
@ -1002,8 +1072,8 @@ than one installed package matches it), then Spack will warn you:
$ spack load libelf
==> Error: Multiple matches for spec libelf. Choose one:
libelf@0.8.13%gcc@4.4.7=chaos_5_x86_64_ib
libelf@0.8.13%intel@15.0.0=chaos_5_x86_64_ib
libelf@0.8.13%gcc@4.4.7 arch=chaos_5_x86_64_ib
libelf@0.8.13%intel@15.0.0 arch=chaos_5_x86_64_ib
You can either type the ``spack load`` command again with a fully
qualified argument, or you can add just enough extra constraints to
@ -1251,6 +1321,120 @@ regenerate all module and dotkit files from scratch:
.. _extensions:
Filesystem Views
-------------------------------
.. Maybe this is not the right location for this documentation.
The Spack installation area allows for many package installation trees
to coexist and gives the user choices as to what versions and variants
of packages to use. To use them, the user must rely on a way to
aggregate a subset of those packages. The section on Environment
Modules gives one good way to do that which relies on setting various
environment variables. An alternative way to aggregate is through
**filesystem views**.
A filesystem view is a single directory tree which is the union of the
directory hierarchies of the individual package installation trees
that have been included. The files of the view's installed packages
are brought into the view by symbolic or hard links back to their
location in the original Spack installation area. As the view is
formed, any clashes due to a file having the exact same path in its
package installation tree are handled in a first-come-first-served
basis and a warning is printed. Packages and their dependencies can
be both added and removed. During removal, empty directories will be
purged. These operations can be limited to pertain to just the
packages listed by the user or to exclude specific dependencies and
they allow for software installed outside of Spack to coexist inside
the filesystem view tree.
By its nature, a filesystem view represents a particular choice of one
set of packages among all the versions and variants that are available
in the Spack installation area. It is thus equivalent to the
directory hierarchy that might exist under ``/usr/local``. While this
limits a view to including only one version/variant of any package, it
provides the benefits of having a simpler and traditional layout which
may be used without any particular knowledge that its packages were
built by Spack.
Views can be used for a variety of purposes including:
- A central installation in a traditional layout, eg ``/usr/local`` maintained over time by the sysadmin.
- A self-contained installation area which may form the basis of a top-level atomic versioning scheme, eg ``/opt/pro`` vs ``/opt/dev``.
- Providing an atomic and monolithic binary distribution, eg for delivery as a single tarball.
- Producing ephemeral testing or developing environments.
Using Filesystem Views
~~~~~~~~~~~~~~~~~~~~~~
A filesystem view is created and packages are linked in by the ``spack
view`` command's ``symlink`` and ``hardlink`` sub-commands. The
``spack view remove`` command can be used to unlink some or all of the
filesystem view.
The following example creates a filesystem view based
on an installed ``cmake`` package and then removes from the view the
files in the ``cmake`` package while retaining its dependencies.
.. code-block:: sh
$ spack view -v symlink myview cmake@3.5.2
==> Linking package: "ncurses"
==> Linking package: "zlib"
==> Linking package: "openssl"
==> Linking package: "cmake"
$ ls myview/
bin doc etc include lib share
$ ls myview/bin/
captoinfo clear cpack ctest infotocap openssl tabs toe tset
ccmake cmake c_rehash infocmp ncurses6-config reset tic tput
$ spack view -v -d false rm myview cmake@3.5.2
==> Removing package: "cmake"
$ ls myview/bin/
captoinfo c_rehash infotocap openssl tabs toe tset
clear infocmp ncurses6-config reset tic tput
Limitations of Filesystem Views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section describes some limitations that should be considered in
using filesystems views.
Filesystem views are merely organizational. The binary executable
programs, shared libraries and other build products found in a view
are mere links into the "real" Spack installation area. If a view is
built with symbolic links it requires the Spack-installed package to
be kept in place. Building a view with hardlinks removes this
requirement but any internal paths (eg, rpath or ``#!`` interpreter
specifications) will still require the Spack-installed package files
to be in place.
.. FIXME: reference the relocation work of Hegner and Gartung.
As described above, when a view is built only a single instance of a
file may exist in the unified filesystem tree. If more than one
package provides a file at the same path (relative to its own root)
then it is the first package added to the view that "wins". A warning
is printed and it is up to the user to determine if the conflict
matters.
It is up to the user to assure a consistent view is produced. In
particular if the user excludes packages, limits the following of
dependencies or removes packages the view may become inconsistent. In
particular, if two packages require the same sub-tree of dependencies,
removing one package (recursively) will remove its dependencies and
leave the other package broken.
Extensions & Python support
------------------------------------
@ -1282,7 +1466,7 @@ You can find extensions for your Python installation like this:
.. code-block:: sh
$ spack extensions python
==> python@2.7.8%gcc@4.4.7=chaos_5_x86_64_ib-703c7a96
==> python@2.7.8%gcc@4.4.7 arch=chaos_5_x86_64_ib-703c7a96
==> 36 extensions:
geos py-ipython py-pexpect py-pyside py-sip
py-basemap py-libxml2 py-pil py-pytz py-six
@ -1372,9 +1556,9 @@ installation:
.. code-block:: sh
$ spack activate py-numpy
==> Activated extension py-setuptools@11.3.1%gcc@4.4.7=chaos_5_x86_64_ib-3c74eb69 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-nose@1.3.4%gcc@4.4.7=chaos_5_x86_64_ib-5f70f816 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-numpy@1.9.1%gcc@4.4.7=chaos_5_x86_64_ib-66733244 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-setuptools@11.3.1%gcc@4.4.7 arch=chaos_5_x86_64_ib-3c74eb69 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-nose@1.3.4%gcc@4.4.7 arch=chaos_5_x86_64_ib-5f70f816 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-numpy@1.9.1%gcc@4.4.7 arch=chaos_5_x86_64_ib-66733244 for python@2.7.8%gcc@4.4.7.
Several things have happened here. The user requested that
``py-numpy`` be activated in the ``python`` installation it was built
@ -1389,7 +1573,7 @@ packages listed as activated:
.. code-block:: sh
$ spack extensions python
==> python@2.7.8%gcc@4.4.7=chaos_5_x86_64_ib-703c7a96
==> python@2.7.8%gcc@4.4.7 arch=chaos_5_x86_64_ib-703c7a96
==> 36 extensions:
geos py-ipython py-pexpect py-pyside py-sip
py-basemap py-libxml2 py-pil py-pytz py-six
@ -1437,7 +1621,7 @@ dependencies, you can use ``spack activate -f``:
.. code-block:: sh
$ spack activate -f py-numpy
==> Activated extension py-numpy@1.9.1%gcc@4.4.7=chaos_5_x86_64_ib-66733244 for python@2.7.8%gcc@4.4.7.
==> Activated extension py-numpy@1.9.1%gcc@4.4.7 arch=chaos_5_x86_64_ib-66733244 for python@2.7.8%gcc@4.4.7.
.. _spack-deactivate:

View File

@ -70,9 +70,9 @@ directory. Here's an example of an external configuration:
packages:
openmpi:
paths:
openmpi@1.4.3%gcc@4.4.7=chaos_5_x86_64_ib: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7=chaos_5_x86_64_ib+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1=chaos_5_x86_64_ib: /opt/openmpi-1.6.5-intel
openmpi@1.4.3%gcc@4.4.7 arch=chaos_5_x86_64_ib: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=chaos_5_x86_64_ib+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=chaos_5_x86_64_ib: /opt/openmpi-1.6.5-intel
This example lists three installations of OpenMPI, one built with gcc,
one built with gcc and debug information, and another built with Intel.
@ -108,9 +108,9 @@ be:
packages:
openmpi:
paths:
openmpi@1.4.3%gcc@4.4.7=chaos_5_x86_64_ib: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7=chaos_5_x86_64_ib+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1=chaos_5_x86_64_ib: /opt/openmpi-1.6.5-intel
openmpi@1.4.3%gcc@4.4.7 arch=chaos_5_x86_64_ib: /opt/openmpi-1.4.3
openmpi@1.4.3%gcc@4.4.7 arch=chaos_5_x86_64_ib+debug: /opt/openmpi-1.4.3-debug
openmpi@1.6.5%intel@10.1 arch=chaos_5_x86_64_ib: /opt/openmpi-1.6.5-intel
buildable: False
The addition of the ``buildable`` flag tells Spack that it should never build

View File

@ -31,14 +31,21 @@ platform, all on the command line.
# Specify a compiler (and its version), with %
$ spack install mpileaks@1.1.2 %gcc@4.7.3
# Add special compile-time options with +
# Add special compile-time options by name
$ spack install mpileaks@1.1.2 %gcc@4.7.3 debug=True
# Add special boolean compile-time options with +
$ spack install mpileaks@1.1.2 %gcc@4.7.3 +debug
# Cross-compile for a different architecture with =
$ spack install mpileaks@1.1.2 =bgqos_0
# Add compiler flags using the conventional names
$ spack install mpileaks@1.1.2 %gcc@4.7.3 cppflags=\"-O3 -floop-block\"
# Cross-compile for a different architecture with arch=
$ spack install mpileaks@1.1.2 arch=bgqos_0
Users can specify as many or few options as they care about. Spack
will fill in the unspecified values with sensible defaults.
will fill in the unspecified values with sensible defaults. The two listed
syntaxes for variants are identical when the value is boolean.
Customize dependencies

View File

@ -1221,11 +1221,13 @@ just as easily provide a version range:
depends_on("libelf@0.8.2:0.8.4:")
Or a requirement for a particular variant:
Or a requirement for a particular variant or compiler flags:
.. code-block:: python
depends_on("libelf@0.8+debug")
depends_on('libelf debug=True')
depends_on('libelf cppflags="-fPIC"')
Both users *and* package authors can use the same spec syntax to refer
to different package configurations. Users use the spec syntax on the
@ -1623,21 +1625,21 @@ the user runs ``spack install`` and the time the ``install()`` method
is called. The concretized version of the spec above might look like
this::
mpileaks@2.3%gcc@4.7.3=linux-ppc64
^callpath@1.0%gcc@4.7.3+debug=linux-ppc64
^dyninst@8.1.2%gcc@4.7.3=linux-ppc64
^libdwarf@20130729%gcc@4.7.3=linux-ppc64
^libelf@0.8.11%gcc@4.7.3=linux-ppc64
^mpich@3.0.4%gcc@4.7.3=linux-ppc64
mpileaks@2.3%gcc@4.7.3 arch=linux-ppc64
^callpath@1.0%gcc@4.7.3+debug arch=linux-ppc64
^dyninst@8.1.2%gcc@4.7.3 arch=linux-ppc64
^libdwarf@20130729%gcc@4.7.3 arch=linux-ppc64
^libelf@0.8.11%gcc@4.7.3 arch=linux-ppc64
^mpich@3.0.4%gcc@4.7.3 arch=linux-ppc64
.. graphviz::
digraph {
"mpileaks@2.3\n%gcc@4.7.3\n=linux-ppc64" -> "mpich@3.0.4\n%gcc@4.7.3\n=linux-ppc64"
"mpileaks@2.3\n%gcc@4.7.3\n=linux-ppc64" -> "callpath@1.0\n%gcc@4.7.3+debug\n=linux-ppc64" -> "mpich@3.0.4\n%gcc@4.7.3\n=linux-ppc64"
"callpath@1.0\n%gcc@4.7.3+debug\n=linux-ppc64" -> "dyninst@8.1.2\n%gcc@4.7.3\n=linux-ppc64"
"dyninst@8.1.2\n%gcc@4.7.3\n=linux-ppc64" -> "libdwarf@20130729\n%gcc@4.7.3\n=linux-ppc64" -> "libelf@0.8.11\n%gcc@4.7.3\n=linux-ppc64"
"dyninst@8.1.2\n%gcc@4.7.3\n=linux-ppc64" -> "libelf@0.8.11\n%gcc@4.7.3\n=linux-ppc64"
"mpileaks@2.3\n%gcc@4.7.3\n arch=linux-ppc64" -> "mpich@3.0.4\n%gcc@4.7.3\n arch=linux-ppc64"
"mpileaks@2.3\n%gcc@4.7.3\n arch=linux-ppc64" -> "callpath@1.0\n%gcc@4.7.3+debug\n arch=linux-ppc64" -> "mpich@3.0.4\n%gcc@4.7.3\n arch=linux-ppc64"
"callpath@1.0\n%gcc@4.7.3+debug\n arch=linux-ppc64" -> "dyninst@8.1.2\n%gcc@4.7.3\n arch=linux-ppc64"
"dyninst@8.1.2\n%gcc@4.7.3\n arch=linux-ppc64" -> "libdwarf@20130729\n%gcc@4.7.3\n arch=linux-ppc64" -> "libelf@0.8.11\n%gcc@4.7.3\n arch=linux-ppc64"
"dyninst@8.1.2\n%gcc@4.7.3\n arch=linux-ppc64" -> "libelf@0.8.11\n%gcc@4.7.3\n arch=linux-ppc64"
}
Here, all versions, compilers, and platforms are filled in, and there
@ -1666,9 +1668,9 @@ running ``spack spec``. For example:
^libdwarf
^libelf
dyninst@8.0.1%gcc@4.7.3=linux-ppc64
^libdwarf@20130729%gcc@4.7.3=linux-ppc64
^libelf@0.8.13%gcc@4.7.3=linux-ppc64
dyninst@8.0.1%gcc@4.7.3 arch=linux-ppc64
^libdwarf@20130729%gcc@4.7.3 arch=linux-ppc64
^libelf@0.8.13%gcc@4.7.3 arch=linux-ppc64
This is useful when you want to know exactly what Spack will do when
you ask for a particular spec.
@ -1908,6 +1910,12 @@ the command line.
``$rpath_flag`` can be overridden on a compiler-specific basis in
``lib/spack/spack/compilers/$compiler.py``.
The compiler wrappers also pass the compiler flags specified by the user from
the command line (``cflags``, ``cxxflags``, ``fflags``, ``cppflags``, ``ldflags``,
and/or ``ldlibs``). They do not override the canonical autotools flags with the
same names (but in ALL-CAPS) that may be passed into the build by particularly
challenging package scripts.
Compiler flags
~~~~~~~~~~~~~~
In rare circumstances such as compiling and running small unit tests, a package
@ -2154,12 +2162,12 @@ example:
def install(self, prefix):
# Do default install
@when('=chaos_5_x86_64_ib')
@when('arch=chaos_5_x86_64_ib')
def install(self, prefix):
# This will be executed instead of the default install if
# the package's sys_type() is chaos_5_x86_64_ib.
@when('=bgqos_0")
@when('arch=bgqos_0')
def install(self, prefix):
# This will be executed if the package's sys_type is bgqos_0
@ -2749,11 +2757,11 @@ build it:
$ spack stage libelf
==> Trying to fetch from http://www.mr511.de/software/libelf-0.8.13.tar.gz
######################################################################## 100.0%
==> Staging archive: /Users/gamblin2/src/spack/var/spack/stage/libelf@0.8.13%gcc@4.8.3=linux-ppc64/libelf-0.8.13.tar.gz
==> Created stage in /Users/gamblin2/src/spack/var/spack/stage/libelf@0.8.13%gcc@4.8.3=linux-ppc64.
==> Staging archive: /Users/gamblin2/src/spack/var/spack/stage/libelf@0.8.13%gcc@4.8.3 arch=linux-ppc64/libelf-0.8.13.tar.gz
==> Created stage in /Users/gamblin2/src/spack/var/spack/stage/libelf@0.8.13%gcc@4.8.3 arch=linux-ppc64.
$ spack cd libelf
$ pwd
/Users/gamblin2/src/spack/var/spack/stage/libelf@0.8.13%gcc@4.8.3=linux-ppc64/libelf-0.8.13
/Users/gamblin2/src/spack/var/spack/stage/libelf@0.8.13%gcc@4.8.3 arch=linux-ppc64/libelf-0.8.13
``spack cd`` here changed the current working directory to the
directory containing the expanded ``libelf`` source code. There are a

44
lib/spack/env/cc vendored
View File

@ -174,6 +174,28 @@ if [[ -z $command ]]; then
die "ERROR: Compiler '$SPACK_COMPILER_SPEC' does not support compiling $language programs."
fi
#
# Filter '.' and Spack environment directories out of PATH so that
# this script doesn't just call itself
#
IFS=':' read -ra env_path <<< "$PATH"
IFS=':' read -ra spack_env_dirs <<< "$SPACK_ENV_PATH"
spack_env_dirs+=("" ".")
PATH=""
for dir in "${env_path[@]}"; do
addpath=true
for env_dir in "${spack_env_dirs[@]}"; do
if [[ $dir == $env_dir ]]; then
addpath=false
break
fi
done
if $addpath; then
PATH="${PATH:+$PATH:}$dir"
fi
done
export PATH
if [[ $mode == vcheck ]]; then
exec ${command} "$@"
fi
@ -286,28 +308,6 @@ unset LD_LIBRARY_PATH
unset LD_RUN_PATH
unset DYLD_LIBRARY_PATH
#
# Filter '.' and Spack environment directories out of PATH so that
# this script doesn't just call itself
#
IFS=':' read -ra env_path <<< "$PATH"
IFS=':' read -ra spack_env_dirs <<< "$SPACK_ENV_PATH"
spack_env_dirs+=("" ".")
PATH=""
for dir in "${env_path[@]}"; do
addpath=true
for env_dir in "${spack_env_dirs[@]}"; do
if [[ $dir == $env_dir ]]; then
addpath=false
break
fi
done
if $addpath; then
PATH="${PATH:+$PATH:}$dir"
fi
done
export PATH
full_command=("$command" "${args[@]}")
# In test command mode, write out full command for Spack tests.

View File

@ -64,12 +64,14 @@ def info(message, *args, **kwargs):
format = kwargs.get('format', '*b')
stream = kwargs.get('stream', sys.stdout)
wrap = kwargs.get('wrap', False)
break_long_words = kwargs.get('break_long_words', False)
cprint("@%s{==>} %s" % (format, cescape(str(message))), stream=stream)
for arg in args:
if wrap:
lines = textwrap.wrap(
str(arg), initial_indent=indent, subsequent_indent=indent)
str(arg), initial_indent=indent, subsequent_indent=indent,
break_long_words=break_long_words)
for line in lines:
stream.write(line + '\n')
else:

View File

@ -198,8 +198,13 @@ def colify(elts, **options):
for col in xrange(cols):
elt = col * rows + row
width = config.widths[col] + cextra(elts[elt])
if col < cols - 1:
fmt = '%%-%ds' % width
output.write(fmt % elts[elt])
else:
# Don't pad the rightmost column (spaces can wrap on
# small terminals if one line is overlong)
output.write(elts[elt])
output.write("\n")
row += 1

View File

@ -39,7 +39,9 @@
lib_path = join_path(spack_root, "lib", "spack")
build_env_path = join_path(lib_path, "env")
module_path = join_path(lib_path, "spack")
platform_path = join_path(module_path, 'platforms')
compilers_path = join_path(module_path, "compilers")
operating_system_path = join_path(module_path, 'operating_systems')
test_path = join_path(module_path, "test")
hooks_path = join_path(module_path, "hooks")
var_path = join_path(spack_root, "var", "spack")
@ -105,7 +107,7 @@
# Version information
from spack.version import Version
spack_version = Version("0.9")
spack_version = Version("0.9.1")
#
# Executables used by Spack

View File

@ -35,8 +35,9 @@ class ABI(object):
The current implementation is rather rough and could be improved."""
def architecture_compatible(self, parent, child):
"""Returns true iff the parent and child specs have ABI compatible architectures."""
return not parent.architecture or not child.architecture or parent.architecture == child.architecture
"""Returns true iff the parent and child specs have ABI compatible targets."""
return not parent.architecture or not child.architecture \
or parent.architecture == child.architecture
@memoized

View File

@ -22,68 +22,498 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import re
import platform
"""
This module contains all the elements that are required to create an
architecture object. These include, the target processor, the operating system,
and the architecture platform (i.e. cray, darwin, linux, bgq, etc) classes.
from llnl.util.lang import memoized
On a multiple architecture machine, the architecture spec field can be set to
build a package against any target and operating system that is present on the
platform. On Cray platforms or any other architecture that has different front
and back end environments, the operating system will determine the method of
compiler
detection.
There are two different types of compiler detection:
1. Through the $PATH env variable (front-end detection)
2. Through the tcl module system. (back-end detection)
Depending on which operating system is specified, the compiler will be detected
using one of those methods.
For platforms such as linux and darwin, the operating system is autodetected
and the target is set to be x86_64.
The command line syntax for specifying an architecture is as follows:
target=<Target name> os=<OperatingSystem name>
If the user wishes to use the defaults, either target or os can be left out of
the command line and Spack will concretize using the default. These defaults
are set in the 'platforms/' directory which contains the different subclasses
for platforms. If the machine has multiple architectures, the user can
also enter front-end, or fe or back-end or be. These settings will concretize
to their respective front-end and back-end targets and operating systems.
Additional platforms can be added by creating a subclass of Platform
and adding it inside the platform directory.
Platforms are an abstract class that are extended by subclasses. If the user
wants to add a new type of platform (such as cray_xe), they can create a
subclass and set all the class attributes such as priority, front_target,
back_target, front_os, back_os. Platforms also contain a priority class
attribute. A lower number signifies higher priority. These numbers are
arbitrarily set and can be changed though often there isn't much need unless a
new platform is added and the user wants that to be detected first.
Targets are created inside the platform subclasses. Most architecture
(like linux, and darwin) will have only one target (x86_64) but in the case of
Cray machines, there is both a frontend and backend processor. The user can
specify which targets are present on front-end and back-end architecture
Depending on the platform, operating systems are either auto-detected or are
set. The user can set the front-end and back-end operating setting by the class
attributes front_os and back_os. The operating system as described earlier,
will be responsible for compiler detection.
"""
import os
import imp
import inspect
from llnl.util.lang import memoized, list_modules, key_ordering
from llnl.util.filesystem import join_path
import llnl.util.tty as tty
import spack
import spack.compilers
from spack.util.naming import mod_to_class
from spack.util.environment import get_path
from spack.util.multiproc import parmap
import spack.error as serr
class InvalidSysTypeError(serr.SpackError):
def __init__(self, sys_type):
super(InvalidSysTypeError,
self).__init__("Invalid sys_type value for Spack: " + sys_type)
super(InvalidSysTypeError, self).__init__(
"Invalid sys_type value for Spack: " + sys_type)
class NoSysTypeError(serr.SpackError):
def __init__(self):
super(NoSysTypeError,
self).__init__("Could not determine sys_type for this machine.")
super(NoSysTypeError, self).__init__(
"Could not determine sys_type for this machine.")
def get_sys_type_from_spack_globals():
    """Return the SYS_TYPE from spack globals, or None if it isn't set."""
    # NOTE(review): if spack.sys_type exists but is NOT callable, this
    # function falls off the end and implicitly returns None -- confirm
    # that a plain (non-callable) sys_type value is never expected here.
    if not hasattr(spack, "sys_type"):
        return None
    elif hasattr(spack.sys_type, "__call__"):
        # sys_type may be a callable that produces the value lazily.
        return spack.sys_type()
@key_ordering
class Target(object):
    """A processor family available on the host machine.

    A platform may expose several targets (for example, separate
    front-end and back-end processors on Cray systems).  Each target
    carries its name and, optionally, the name of the environment
    module that must be loaded to build for it (e.g.
    ``craype-ivybridge``).
    """

    def __init__(self, name, module_name=None):
        # `name` identifies the processor, e.g. "x86_64" or "ivybridge";
        # `module_name` is the matching craype module, when one exists.
        self.name = name
        self.module_name = module_name

    def _cmp_key(self):
        # Equality/ordering (supplied by @key_ordering) considers both
        # the processor name and the associated module name.
        return (self.name, self.module_name)

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)
@key_ordering
class Platform(object):
    """Abstract base class for a machine platform (linux, darwin, cray...).

    Concrete subclasses register the targets and operating systems
    available on the machine and implement ``detect()`` so the correct
    platform can be selected automatically.
    """
    # Subclass sets number. Controls detection order: a LOWER number
    # means HIGHER priority when probing platforms.
    priority = None
    front_end = None
    back_end = None
    default = None  # The default back end target. On cray ivybridge
    front_os = None
    back_os = None
    default_os = None

    def __init__(self, name):
        # Registered targets and operating systems, keyed by name.
        self.targets = {}
        self.operating_sys = {}
        self.name = name

    def add_target(self, name, target):
        """Used by the platform specific subclass to list available targets.

        Raises a ValueError if the platform specifies a name that is
        reserved by spack as an alias.
        """
        if name in ['frontend', 'fe', 'backend', 'be', 'default_target']:
            raise ValueError(
                "%s is a spack reserved alias "
                "and cannot be the name of a target" % name)
        self.targets[name] = target

    def target(self, name):
        """Getter method for the target dictionary that handles
        defaulting based on the values provided by default, front-end,
        and back-end.  Can be overridden by a subclass that wants to
        provide further aliasing options.
        """
        # Resolve reserved aliases to the concrete target name first.
        if name == 'default_target':
            name = self.default
        elif name == 'frontend' or name == 'fe':
            name = self.front_end
        elif name == 'backend' or name == 'be':
            name = self.back_end

        # Unknown names yield None rather than raising.
        return self.targets.get(name, None)

    def add_operating_system(self, name, os_class):
        """Add the operating_system class object into the
        platform.operating_sys dictionary.  Reserved alias names are
        rejected, mirroring add_target().
        """
        if name in ['frontend', 'fe', 'backend', 'be', 'default_os']:
            raise ValueError(
                "%s is a spack reserved alias "
                "and cannot be the name of an OS" % name)
        self.operating_sys[name] = os_class

    def operating_system(self, name):
        # Resolve reserved aliases to the concrete OS name, then look up.
        if name == 'default_os':
            name = self.default_os
        if name == 'frontend' or name == "fe":
            name = self.front_os
        if name == 'backend' or name == 'be':
            name = self.back_os

        return self.operating_sys.get(name, None)

    @classmethod
    def detect(self):
        """Subclass is responsible for implementing this method.

        Returns True if the Platform class detects that it is the
        current platform, and False if it's not.
        """
        raise NotImplementedError()

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return self.name

    def _cmp_key(self):
        # Keys for @key_ordering: fold in all registered targets and
        # operating systems so two platforms compare equal only when
        # they are fully identical.
        t_keys = ''.join(str(t._cmp_key()) for t in
                         sorted(self.targets.values()))
        o_keys = ''.join(str(o._cmp_key()) for o in
                         sorted(self.operating_sys.values()))
        return (self.name,
                self.default,
                self.front_end,
                self.back_end,
                self.default_os,
                self.front_os,
                self.back_os,
                t_keys,
                o_keys)
@key_ordering
class OperatingSystem(object):
    """Operating System will be like a class similar to platform extended
    by subclasses for the specifics. Operating System will contain the
    compiler finding logic. Instead of calling two separate methods to
    find compilers we call find_compilers method for each operating system.
    """

    def __init__(self, name, version):
        self.name = name
        self.version = version

    def __str__(self):
        # e.g. "SuSE" + "11" -> "SuSE11": name and version concatenated.
        return self.name + self.version

    def __repr__(self):
        return self.__str__()

    def _cmp_key(self):
        return (self.name, self.version)

    def find_compilers(self, *paths):
        """
        Return a list of compilers found in the supplied paths.
        This invokes the find() method for each Compiler class,
        and appends the compilers detected to a list.
        """
        if not paths:
            paths = get_path('PATH')
        # Make sure path elements exist, and include /bin directories
        # under prefixes.
        filtered_path = []
        for p in paths:
            # Eliminate symlinks and just take the real directories.
            p = os.path.realpath(p)
            if not os.path.isdir(p):
                continue
            filtered_path.append(p)

            # Check for a bin directory, add it if it exists
            # (NOTE: 'bin' shadows the builtin of the same name here).
            bin = join_path(p, 'bin')
            if os.path.isdir(bin):
                filtered_path.append(os.path.realpath(bin))

        # Once the paths are cleaned up, do a search for each type of
        # compiler. We can spawn a bunch of parallel searches to reduce
        # the overhead of spelunking all these directories.
        types = spack.compilers.all_compiler_types()
        compiler_lists = parmap(lambda cmp_cls:
                                self.find_compiler(cmp_cls, *filtered_path),
                                types)

        # ensure all the version calls we made are cached in the parent
        # process, as well. This speeds up Spack a lot.
        clist = reduce(lambda x, y: x+y, compiler_lists)
        return clist

    def find_compiler(self, cmp_cls, *path):
        """Try to find the given type of compiler in the user's
        environment. For each set of compilers found, this returns
        compiler objects with the cc, cxx, f77, fc paths and the
        version filled in.

        This will search for compilers with the names in cc_names,
        cxx_names, etc. and it will group them if they have common
        prefixes, suffixes, and versions. e.g., gcc-mp-4.7 would
        be grouped with g++-mp-4.7 and gfortran-mp-4.7.
        """
        # Run the four language searches in parallel; each returns a
        # dict keyed by (version, prefix, suffix).
        dicts = parmap(
            lambda t: cmp_cls._find_matches_in_path(*t),
            [(cmp_cls.cc_names, cmp_cls.cc_version) + tuple(path),
             (cmp_cls.cxx_names, cmp_cls.cxx_version) + tuple(path),
             (cmp_cls.f77_names, cmp_cls.f77_version) + tuple(path),
             (cmp_cls.fc_names, cmp_cls.fc_version) + tuple(path)])

        all_keys = set()
        for d in dicts:
            all_keys.update(d)

        compilers = {}
        for k in all_keys:
            ver, pre, suf = k

            # Skip compilers with unknown version.
            if ver == 'unknown':
                continue

            paths = tuple(pn[k] if k in pn else None for pn in dicts)
            spec = spack.spec.CompilerSpec(cmp_cls.name, ver)

            if ver in compilers:
                prev = compilers[ver]

                # prefer the one with more compilers.
                prev_paths = [prev.cc, prev.cxx, prev.f77, prev.fc]
                newcount = len([p for p in paths if p is not None])
                prevcount = len([p for p in prev_paths if p is not None])

                # Don't add if it's not an improvement over prev compiler.
                if newcount <= prevcount:
                    continue

            compilers[ver] = cmp_cls(spec, self, paths)

        return list(compilers.values())

    def to_dict(self):
        # Serializable form used when writing specs to YAML.
        d = {}
        d['name'] = self.name
        d['version'] = self.version
        return d
@key_ordering
class Arch(object):
"Architecture is now a class to help with setting attributes"
def __init__(self, platform=None, platform_os=None, target=None):
self.platform = platform
if platform and platform_os:
platform_os = self.platform.operating_system(platform_os)
self.platform_os = platform_os
if platform and target:
target = self.platform.target(target)
self.target = target
# Hooks for parser to use when platform is set after target or os
self.target_string = None
self.os_string = None
@property
def concrete(self):
return all((self.platform is not None,
isinstance(self.platform, Platform),
self.platform_os is not None,
isinstance(self.platform_os, OperatingSystem),
self.target is not None, isinstance(self.target, Target)))
def __str__(self):
if self.platform or self.platform_os or self.target:
if self.platform.name == 'darwin':
os_name = self.platform_os.name if self.platform_os else "None"
else:
return spack.sys_type
os_name = str(self.platform_os)
return (str(self.platform) + "-" +
os_name + "-" + str(self.target))
else:
return ''
def get_sys_type_from_environment():
"""Return $SYS_TYPE or None if it's not defined."""
return os.environ.get('SYS_TYPE')
def __contains__(self, string):
return string in str(self)
def get_sys_type_from_platform():
"""Return the architecture from Python's platform module."""
sys_type = platform.system() + '-' + platform.machine()
sys_type = re.sub(r'[^\w-]', '_', sys_type)
return sys_type.lower()
def _cmp_key(self):
if isinstance(self.platform, Platform):
platform = self.platform.name
else:
platform = self.platform
if isinstance(self.platform_os, OperatingSystem):
platform_os = self.platform_os.name
else:
platform_os = self.platform_os
if isinstance(self.target, Target):
target = self.target.name
else:
target = self.target
return (platform, platform_os, target)
def to_dict(self):
d = {}
d['platform'] = str(self.platform) if self.platform else None
d['platform_os'] = str(self.platform_os) if self.platform_os else None
d['target'] = str(self.target) if self.target else None
return d
def _target_from_dict(target_name, platform=None):
    """Look up a Target instance by name on the given platform.

    If no platform is given, the current machine's platform (from
    sys_type()) is used.
    """
    if not platform:
        platform = sys_type()
    return platform.target(target_name)
def _operating_system_from_dict(os_name, platform=None):
    """Use the platform's operating_system() method to grab one of the
    constructed operating systems that are valid on the platform.
    """
    if not platform:
        platform = sys_type()
    if isinstance(os_name, dict):
        # Newer specs store the OS as {'name': ..., 'version': ...};
        # the platform keys its OSes by name and version concatenated.
        name = os_name['name']
        version = os_name['version']
        return platform.operating_system(name+version)
    else:
        # Older specs store the OS as a plain string.
        return platform.operating_system(os_name)
def _platform_from_dict(platform_name):
    """Instantiate the Platform subclass matching ``platform_name``.

    Matching ignores case and underscores.  Returns None when no
    registered platform class matches.
    """
    wanted = platform_name.replace("_", "").lower()
    for platform_cls in all_platforms():
        if platform_cls.__name__.lower() == wanted:
            return platform_cls()
def arch_from_dict(d):
    """Use the _platform_from_dict, _operating_system_from_dict and
    _target_from_dict helpers to recreate the arch tuple from the
    dictionary read from a yaml file.

    Returns None for a None input; accepts either the legacy string
    form or the newer dict form of the architecture.
    """
    arch = Arch()

    if isinstance(d, basestring):  # py2-only builtin: legacy string spec
        # We have an old spec using a string for the architecture
        arch.platform = Platform('spack_compatibility')
        arch.platform_os = OperatingSystem('unknown', '')
        arch.target = Target(d)

        arch.os_string = None
        arch.target_string = None
    else:
        if d is None:
            return None
        platform_name = d['platform']
        os_name = d['platform_os']
        target_name = d['target']

        # Each component may independently be absent (None/empty).
        if platform_name:
            arch.platform = _platform_from_dict(platform_name)
        else:
            arch.platform = None
        if target_name:
            # Target lookup needs the platform resolved just above.
            arch.target = _target_from_dict(target_name, arch.platform)
        else:
            arch.target = None
        if os_name:
            arch.platform_os = _operating_system_from_dict(os_name,
                                                           arch.platform)
        else:
            arch.platform_os = None

        arch.os_string = None
        arch.target_string = None

    return arch
@memoized
def all_platforms():
    """Return all Platform subclasses defined under spack.platform_path.

    Each module in the platforms directory must define a class whose
    name is the CamelCase form of the module name; dies with an error
    otherwise.  Memoized, since the set of platform modules cannot
    change during a run.
    """
    classes = []
    mod_path = spack.platform_path
    parent_module = "spack.platforms"

    for name in list_modules(mod_path):
        mod_name = '%s.%s' % (parent_module, name)
        # Module "cray_xc" is expected to define class "CrayXc", etc.
        class_name = mod_to_class(name)
        mod = __import__(mod_name, fromlist=[class_name])
        if not hasattr(mod, class_name):
            tty.die('No class %s defined in %s' % (class_name, mod_name))
        cls = getattr(mod, class_name)
        if not inspect.isclass(cls):
            tty.die('%s.%s is not a class' % (mod_name, class_name))

        classes.append(cls)

    return classes
@memoized
def sys_type():
    """Return the Platform object for the current machine.

    Gathers all available Platform subclasses, sorts them by their
    ``priority`` class attribute (a LOWER number means HIGHER
    priority), and returns an instance of the first one whose
    ``detect()`` classmethod reports a match for the running machine.

    Fix: the previous body interleaved the obsolete string-based
    SYS_TYPE lookup (get_sys_type_from_* methods) with the new
    platform-detection loop; the early ``return sys_type`` /
    ``return "unknown_arch"`` paths made the platform loop
    unreachable.  Only the platform-based detection, which the rest
    of this module (Arch, arch_from_dict, ...) relies on, is kept.
    """
    platform_list = all_platforms()
    platform_list.sort(key=lambda a: a.priority)

    for platform_cls in platform_list:
        if platform_cls.detect():
            return platform_cls()

View File

@ -113,9 +113,66 @@ def __call__(self, *args, **kwargs):
return super(MakeExecutable, self).__call__(*args, **kwargs)
def load_module(mod):
    """Load module `mod`, first unloading any modules that conflict with it.

    Uses the 'python' output mode of `modulecmd` (TCL modules as used on
    Cray and lmod systems): the Python statements it prints are exec'd
    in this process so the environment changes take effect here.
    Depends on the modulecmd implementation of modules used in cray
    and lmod.
    """
    # Create an executable of the module command that will output
    # python code for us to exec below.
    modulecmd = which('modulecmd')
    modulecmd.add_default_arg('python')

    # Read the module and remove any conflicting modules.
    # We do this without checking that they are already installed
    # for ease of programming because unloading a module that is not
    # loaded does nothing.
    text = modulecmd('show', mod, output=str, error=str).split()
    for i, word in enumerate(text):
        if word == 'conflict':
            # NOTE(review): the token following 'conflict' is assumed to
            # be the conflicting module's name; exec'ing tool output
            # assumes modulecmd is trusted.
            exec(compile(modulecmd('unload', text[i+1], output=str, error=str), '<string>', 'exec'))

    # Load the module now that there are no conflicts
    load = modulecmd('load', mod, output=str, error=str)
    exec(compile(load, '<string>', 'exec'))
def get_path_from_module(mod):
    """Inspect a TCL module for entries that indicate the absolute path
    at which the library supported by said module can be found.

    Tries, in order: a <MOD>_DIR environment setting, a -rpath linker
    flag, a -L linker flag, and finally an LD_LIBRARY_PATH setting.
    Returns None when no path can be deduced.
    """
    # Create a modulecmd executable (python output mode).
    modulecmd = which('modulecmd')
    modulecmd.add_default_arg('python')

    # Read the module's definition, one line per entry.
    text = modulecmd('show', mod, output=str, error=str).split('\n')

    # If it lists its package directory, return that.
    for line in text:
        if line.find(mod.upper()+'_DIR') >= 0:
            words = line.split()
            # NOTE(review): assumes the line has the shape
            # "setenv FOO_DIR /path" -- confirm for all module files.
            return words[2]

    # If it lists a -rpath instruction, use that.
    for line in text:
        rpath = line.find('-rpath/')
        if rpath >= 0:
            # Skip the 6 chars of '-rpath' so the leading '/' is kept;
            # truncate at the first '/lib' to recover the prefix.
            # NOTE(review): breaks if the prefix itself contains '/lib'.
            return line[rpath+6:line.find('/lib')]

    # If it lists a -L instruction, use that.
    for line in text:
        L = line.find('-L/')
        if L >= 0:
            # Skip '-L' (2 chars), keep from the '/' up to '/lib'.
            return line[L+2:line.find('/lib')]

    # If it sets the LD_LIBRARY_PATH or CRAY_LD_LIBRARY_PATH, use that.
    for line in text:
        if line.find('LD_LIBRARY_PATH') >= 0:
            words = line.split()
            path = words[2]
            return path[:path.find('/lib')]

    # Unable to find module path.
    return None
def set_compiler_environment_variables(pkg, env):
assert pkg.spec.concrete
assert(pkg.spec.concrete)
compiler = pkg.compiler
flags = pkg.spec.compiler_flags
@ -154,6 +211,10 @@ def set_compiler_environment_variables(pkg, env):
env.set('SPACK_' + flag.upper(), ' '.join(f for f in flags[flag]))
env.set('SPACK_COMPILER_SPEC', str(pkg.spec.compiler))
for mod in compiler.modules:
load_module(mod)
return env
@ -212,13 +273,15 @@ def set_build_environment_variables(pkg, env):
env.set(SPACK_DEBUG_LOG_DIR, spack.spack_working_dir)
# Add any pkgconfig directories to PKG_CONFIG_PATH
pkg_config_dirs = []
for p in dep_prefixes:
for maybe in ('lib', 'lib64', 'share'):
pcdir = join_path(p, maybe, 'pkgconfig')
for pre in dep_prefixes:
for directory in ('lib', 'lib64', 'share'):
pcdir = join_path(pre, directory, 'pkgconfig')
if os.path.isdir(pcdir):
pkg_config_dirs.append(pcdir)
env.set_path('PKG_CONFIG_PATH', pkg_config_dirs)
#pkg_config_dirs.append(pcdir)
env.prepend_path('PKG_CONFIG_PATH',pcdir)
if pkg.spec.architecture.target.module_name:
load_module(pkg.spec.architecture.target.module_name)
return env
@ -301,6 +364,10 @@ def get_rpaths(pkg):
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in pkg.spec.dependencies.values()
if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(get_path_from_module(pkg.compiler.modules[1]))
return rpaths
@ -317,6 +384,13 @@ def parent_class_modules(cls):
return result
def load_external_modules(pkg):
""" traverse the spec list and find any specs that have external modules.
"""
for dep in list(pkg.spec.traverse()):
if dep.external_module:
load_module(dep.external_module)
def setup_package(pkg):
"""Execute all environment setup routines."""
spack_env = EnvironmentModifications()
@ -340,7 +414,7 @@ def setup_package(pkg):
set_compiler_environment_variables(pkg, spack_env)
set_build_environment_variables(pkg, spack_env)
load_external_modules(pkg)
# traverse in postorder so package can use vars from its dependencies
spec = pkg.spec
for dspec in pkg.spec.traverse(order='post', root=False):

View File

@ -28,8 +28,4 @@
description = "Print the architecture for this machine"
def arch(parser, args):
configured_sys_type = architecture.get_sys_type_from_spack_globals()
if not configured_sys_type:
configured_sys_type = "autodetect"
print "Configured sys_type: %s" % configured_sys_type
print "Autodetected default sys_type: %s" % architecture.sys_type()
print architecture.sys_type()

View File

@ -69,8 +69,8 @@ def setup_parser(subparser):
help="Configuration scope to read from.")
def compiler_find(args):
"""Search either $PATH or a list of paths for compilers and add them
def compiler_add(args):
"""Search either $PATH or a list of paths OR MODULES for compilers and add them
to Spack's configuration."""
paths = args.add_paths
if not paths:
@ -121,6 +121,8 @@ def compiler_info(args):
print "\tcxx = %s" % c.cxx
print "\tf77 = %s" % c.f77
print "\tfc = %s" % c.fc
print "\tmodules = %s" % c.modules
print "\toperating system = %s" % c.operating_system
def compiler_list(args):
@ -135,8 +137,7 @@ def compiler_list(args):
def compiler(parser, args):
action = { 'add' : compiler_find,
'find' : compiler_find,
action = { 'add' : compiler_add,
'remove' : compiler_remove,
'rm' : compiler_remove,
'info' : compiler_info,

View File

@ -31,6 +31,7 @@
from llnl.util.lang import *
from llnl.util.tty.colify import *
from llnl.util.tty.color import *
from llnl.util.lang import *
description = "Find installed spack packages"
@ -85,6 +86,11 @@ def setup_parser(subparser):
action='store_true',
dest='missing',
help='Show missing dependencies as well as installed specs.')
subparser.add_argument(
'-v', '--variants',
action='store_true',
dest='variants',
help='Show variants in output (can be long)')
subparser.add_argument('-M', '--only-missing',
action='store_true',
dest='only_missing',
@ -106,6 +112,8 @@ def display_specs(specs, **kwargs):
mode = kwargs.get('mode', 'short')
hashes = kwargs.get('long', False)
namespace = kwargs.get('namespace', False)
flags = kwargs.get('show_flags', False)
variants = kwargs.get('variants', False)
hlen = 7
if kwargs.get('very_long', False):
@ -113,10 +121,9 @@ def display_specs(specs, **kwargs):
hlen = None
nfmt = '.' if namespace else '_'
format_string = '$%s$@$+' % nfmt
flags = kwargs.get('show_flags', False)
if flags:
format_string = '$%s$@$%%+$+' % nfmt
ffmt = '$%+' if flags else ''
vfmt = '$+' if variants else ''
format_string = '$%s$@%s%s' % (nfmt, ffmt, vfmt)
# Make a dict with specs keyed by architecture and compiler.
index = index_by(specs, ('architecture', 'compiler'))
@ -162,7 +169,7 @@ def fmt(s):
string = ""
if hashes:
string += gray_hash(s, hlen) + ' '
string += s.format('$-%s$@$+' % nfmt, color=True)
string += s.format('$-%s$@%s' % (nfmt, vfmt), color=True)
return string
@ -180,6 +187,29 @@ def fmt(s):
"deps, short)." % mode) # NOQA: ignore=E501
def query_arguments(args):
    """Translate `spack find` command-line flags into database query kwargs.

    Returns a dict with 'installed', 'known' and 'explicit' keys, where
    the builtin `any` acts as a "don't care" wildcard value.  Exits with
    an error if both -E and -e were given.
    """
    # -E (explicit) and -e (implicit) are mutually exclusive.
    if args.explicit and args.implicit:
        tty.error('You can\'t pass -E and -e options simultaneously.')
        raise SystemExit(1)

    installed = True
    if args.only_missing:
        installed = False
    elif args.missing:
        installed = any

    known = False if args.unknown else any

    explicit = any
    if args.explicit:
        explicit = True
    if args.implicit:
        explicit = False

    return {'installed': installed, 'known': known, 'explicit': explicit}
def find(parser, args):
# Filter out specs that don't exist.
query_specs = spack.cmd.parse_specs(args.query_specs)
@ -194,22 +224,7 @@ def find(parser, args):
if not query_specs:
return
# Set up query arguments.
installed, known = True, any
if args.only_missing:
installed = False
elif args.missing:
installed = any
if args.unknown:
known = False
explicit = any
if args.explicit:
explicit = False
if args.implicit:
explicit = True
q_args = {'installed': installed, 'known': known, "explicit": explicit}
q_args = query_arguments(args)
# Get all the specs the user asked for
if not query_specs:
@ -228,4 +243,6 @@ def find(parser, args):
mode=args.mode,
long=args.long,
very_long=args.very_long,
show_flags=args.show_flags)
show_flags=args.show_flags,
namespace=args.namespace,
variants=args.variants)

View File

@ -39,6 +39,13 @@
b) use spack uninstall -a to uninstall ALL matching specs.
"""
# Shared keyword arguments for display_specs whenever a spec matches
# ambiguously: show hashes, compiler flags and variants so the user can
# tell the candidates apart.
display_args = {
    'long': True,
    'show_flags': True,
    'variants': True,
}
def ask_for_confirmation(message):
while True:
@ -92,7 +99,7 @@ def concretize_specs(specs, allow_multiple_matches=False, force=False):
if not allow_multiple_matches and len(matching) > 1:
tty.error("%s matches multiple packages:" % spec)
print()
display_specs(matching, long=True, show_flags=True)
display_specs(matching, **display_args)
print()
has_errors = True
@ -172,7 +179,7 @@ def uninstall(parser, args):
tty.error("Will not uninstall %s" % spec.format("$_$@$%@$#", color=True))
print('')
print("The following packages depend on it:")
display_specs(lst, long=True)
display_specs(lst, **display_args)
print('')
has_error = True
elif args.dependents:
@ -186,7 +193,7 @@ def uninstall(parser, args):
if not args.yes_to_all:
tty.msg("The following packages will be uninstalled : ")
print('')
display_specs(uninstall_list, long=True, show_flags=True)
display_specs(uninstall_list, **display_args)
print('')
ask_for_confirmation('Do you want to proceed ? ')

295
lib/spack/spack/cmd/view.py Normal file
View File

@ -0,0 +1,295 @@
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
'''Produce a "view" of a Spack DAG.
A "view" is file hierarchy representing the union of a number of
Spack-installed package file hierarchies. The union is formed from:
- specs resolved from the package names given by the user (the seeds)
- all dependencies of the seeds unless the user specifies `--no-dependencies`
- less any specs with names matching the regular expressions given by
`--exclude`
The `view` can be built and torn down via a number of methods (the "actions"):
- symlink :: a file system view which is a directory hierarchy that is
the union of the hierarchies of the installed packages in the DAG
where installed files are referenced via symlinks.
- hardlink :: like the symlink view but hardlinks are used.
- statlink :: a view producing a status report of a symlink or
hardlink view.
The file system view concept is inspired by Nix, implemented by
brett.viren@gmail.com ca 2016.
'''
# Implementation notes:
#
# This is implemented as a visitor pattern on the set of package specs.
#
# The command line ACTION maps to a visitor_*() function which takes
# the set of package specs and any args which may be specific to the
# ACTION.
#
# To add a new view:
# 1. add a new cmd line args sub parser ACTION
# 2. add any action-specific options/arguments, most likely a list of specs.
# 3. add a visitor_MYACTION() function
# 4. add any visitor_MYALIAS assignments to match any command line aliases
import os
import re
import spack
import spack.cmd
import llnl.util.tty as tty
description = "Produce a single-rooted directory view of a spec."
def setup_parser(sp):
    """Build the argument parser for `spack view`.

    One sub-command per ACTION; every action takes the same trailing
    `path` and `specs` arguments.
    """
    setup_parser.parser = sp  # stashed for later access by the command

    sp.add_argument(
        '-v', '--verbose', action='store_true', default=False,
        help="Display verbose output.")
    sp.add_argument(
        '-e', '--exclude', action='append', default=[],
        help="Exclude packages with names matching the given regex pattern.")
    sp.add_argument(
        '-d', '--dependencies', choices=['true', 'false', 'yes', 'no'],
        default='true',
        help="Follow dependencies.")

    ssp = sp.add_subparsers(metavar='ACTION', dest='action')

    specs_opts = dict(metavar='spec', nargs='+',
                      help="Seed specs of the packages to view.")

    # The action parameterizes the command but in keeping with Spack
    # patterns we make it a subcommand.
    # NOTE(review): the `aliases` keyword of add_parser requires an
    # argparse with sub-command alias support -- confirm the target
    # Python version provides it.
    file_system_view_actions = [
        ssp.add_parser(
            'symlink', aliases=['add', 'soft'],
            help='Add package files to a filesystem view via symbolic links.'),
        ssp.add_parser(
            'hardlink', aliases=['hard'],
            help='Add packages files to a filesystem via via hard links.'),
        ssp.add_parser(
            'remove', aliases=['rm'],
            help='Remove packages from a filesystem view.'),
        ssp.add_parser(
            'statlink', aliases=['status', 'check'],
            help='Check status of packages in a filesystem view.')
    ]

    # All these options and arguments are common to every action.
    for act in file_system_view_actions:
        act.add_argument('path', nargs=1,
                         help="Path to file system view directory.")
        act.add_argument('specs', **specs_opts)
    return
def assuredir(path):
    """Ensure `path` exists as a directory.

    Attempts the creation and tolerates an already-existing directory
    instead of pre-checking with os.path.exists(), so concurrent
    callers racing to create the same directory do not crash (TOCTOU).
    Any other creation failure (permissions, a file in the way) is
    re-raised.
    """
    try:
        os.makedirs(path)
    except OSError:
        # Only swallow the error when the directory is in fact there.
        if not os.path.isdir(path):
            raise
def relative_to(prefix, path):
    """Return the tail of `path` relative to `prefix`.

    `path` must start with `prefix`; one leading path separator on the
    remainder, if present, is stripped.
    """
    assert path.startswith(prefix)
    remainder = path[len(prefix):]
    if remainder.startswith('/'):
        remainder = remainder[1:]
    return remainder
def transform_path(spec, path, prefix=None):
    """Map `path` (a file under spec.prefix) to its location in a view.

    Absolute paths are first made relative to ``spec.prefix``.  Entries
    under the ``.spack`` metadata directory are namespaced by package
    name (``.spack/<name>/...``) so several packages can coexist in one
    view.  When `prefix` is given, the result is joined onto it.
    """
    if os.path.isabs(path):
        path = relative_to(spec.prefix, path)
    parts = path.split(os.path.sep)
    if parts[0] == '.spack':
        path = os.path.join('.spack', spec.name, *parts[1:])
    if prefix:
        path = os.path.join(prefix, path)
    return path
def purge_empty_directories(path):
    """Remove all empty directories reachable under `path`, deepest first.

    `path` itself is never removed; directories that still contain
    files (or cannot be removed) are silently left in place.
    """
    for parent, children, _files in os.walk(path, topdown=False):
        for child in children:
            candidate = os.path.join(parent, child)
            try:
                os.rmdir(candidate)
            except OSError:
                # Not empty or not removable -- leave it alone.
                pass
def filter_exclude(specs, exclude):
    """Return `specs` minus those whose names match any regex in `exclude`.

    Patterns are matched with re.match, i.e. anchored at the start of
    the spec name only.

    The original inner helper was also named `exclude`, shadowing the
    parameter of the same name; it is renamed here, and pattern
    compilation is hoisted out of the per-spec test.
    """
    patterns = [re.compile(e) for e in exclude]

    def _is_excluded(spec):
        return any(p.match(spec.name) for p in patterns)

    return [s for s in specs if not _is_excluded(s)]
def flatten(seeds, descend=True):
    """Normalize and flatten the seed specs into a set.

    When `descend` is true, each seed is expanded to its full
    (normalized) dependency hierarchy; otherwise the seeds themselves
    are collected as-is.
    """
    result = set()
    for seed in seeds:
        if descend:
            result.update(seed.normalized().traverse())
        else:
            result.add(seed)
    return result
def check_one(spec, path, verbose=False):
    """Report whether `spec` is present in the view rooted at `path`.

    A package is considered "in the view" iff its .spack metadata
    directory was linked/copied into the view (see transform_path).
    """
    dotspack = os.path.join(path, '.spack', spec.name)
    # NOTE(review): the inner os.path.join() call is redundant -- it
    # simply returns `dotspack` unchanged.
    if os.path.exists(os.path.join(dotspack)):
        tty.info('Package in view: "%s"' % spec.name)
        return
    tty.info('Package not in view: "%s"' % spec.name)
    return
def remove_one(spec, path, verbose=False):
    'Remove any files found in `spec` from `path` and purge empty directories.'
    if not os.path.exists(path):
        return  # done, short circuit

    # The package's metadata directory marks whether it was ever linked
    # into this view.
    dotspack = transform_path(spec, '.spack', path)
    if not os.path.exists(dotspack):
        if verbose:
            tty.info('Skipping nonexistent package: "%s"' % spec.name)
        return

    if verbose:
        tty.info('Removing package: "%s"' % spec.name)
    # Walk the installed prefix and delete the view's counterpart of
    # every file; empty directories are left for the caller to purge
    # (see purge_empty_directories).
    for dirpath, dirnames, filenames in os.walk(spec.prefix):
        if not filenames:
            continue
        targdir = transform_path(spec, dirpath, path)
        for fname in filenames:
            dst = os.path.join(targdir, fname)
            if not os.path.exists(dst):
                # Already gone (or never linked) -- nothing to do.
                continue
            os.unlink(dst)
def link_one(spec, path, link=os.symlink, verbose=False):
    """Link all files in `spec` into directory `path`.

    `link` is the linking primitive to use (os.symlink by default,
    os.link for hardlink views).
    """
    # The metadata directory doubles as the "already installed" marker.
    dotspack = transform_path(spec, '.spack', path)
    if os.path.exists(dotspack):
        tty.warn('Skipping existing package: "%s"' % spec.name)
        return

    if verbose:
        tty.info('Linking package: "%s"' % spec.name)
    for dirpath, dirnames, filenames in os.walk(spec.prefix):
        if not filenames:
            continue  # avoid explicitly making empty dirs

        targdir = transform_path(spec, dirpath, path)
        assuredir(targdir)

        for fname in filenames:
            src = os.path.join(dirpath, fname)
            dst = os.path.join(targdir, fname)
            if os.path.exists(dst):
                # Collisions inside .spack are expected (metadata files
                # shared across packages) and not worth warning about.
                if '.spack' in dst.split(os.path.sep):
                    continue  # silence these
                tty.warn("Skipping existing file: %s" % dst)
                continue
            link(src, dst)
def visitor_symlink(specs, args):
    """Symlink all files found in `specs` into the view at args.path[0]."""
    path = args.path[0]
    assuredir(path)
    for spec in specs:
        link_one(spec, path, verbose=args.verbose)

# Command-line aliases for the symlink action.
visitor_add = visitor_symlink
visitor_soft = visitor_symlink
def visitor_hardlink(specs, args):
    """Hardlink all files found in `specs` into the view at args.path[0]."""
    path = args.path[0]
    assuredir(path)
    for spec in specs:
        # os.link makes hard links instead of the default symlinks.
        link_one(spec, path, os.link, verbose=args.verbose)

# Command-line alias for the hardlink action.
visitor_hard = visitor_hardlink
def visitor_remove(specs, args):
    """Remove all files/directories found in `specs` from args.path[0]."""
    path = args.path[0]
    for spec in specs:
        remove_one(spec, path, verbose=args.verbose)
    # Sweep up directories left empty by the removals above.
    purge_empty_directories(path)

# Command-line alias for the remove action.
visitor_rm = visitor_remove
def visitor_statlink(specs, args):
    """Report the status of the view in args.path[0] relative to `specs`."""
    path = args.path[0]
    for spec in specs:
        check_one(spec, path, verbose=args.verbose)

# Command-line aliases for the status action.
visitor_status = visitor_statlink
visitor_check = visitor_statlink
def view(parser, args):
    """Produce a view of a set of packages.

    Resolves the seed specs, optionally expands them to their full
    dependency DAGs, filters out excluded names, then dispatches to
    the matching visitor_<ACTION>() function.
    """
    # Process common args
    seeds = [spack.cmd.disambiguate_spec(s) for s in args.specs]
    specs = flatten(seeds, args.dependencies.lower() in ['yes', 'true'])
    specs = filter_exclude(specs, args.exclude)

    # Execute the visitation.
    try:
        visitor = globals()['visitor_' + args.action]
    except KeyError:
        # Bug fix: previously execution fell through after reporting the
        # error and crashed with a NameError on `visitor`; stop instead.
        tty.error('Unknown action: "%s"' % args.action)
        return
    visitor(specs, args)

View File

@ -33,6 +33,7 @@
import spack.error
import spack.spec
import spack.architecture
from spack.util.multiproc import parmap
from spack.util.executable import *
from spack.util.environment import get_path
@ -107,19 +108,32 @@ def f77_rpath_arg(self):
@property
def fc_rpath_arg(self):
return '-Wl,-rpath,'
# Cray PrgEnv name that can be used to load this compiler
PrgEnv = None
# Name of module used to switch versions of this compiler
PrgEnv_compiler = None
def __init__(self, cspec, cc, cxx, f77, fc, **kwargs):
def __init__(self, cspec, operating_system,
paths, modules=[], alias=None, **kwargs):
def check(exe):
if exe is None:
return None
_verify_executables(exe)
return exe
self.cc = check(cc)
self.cxx = check(cxx)
self.f77 = check(f77)
self.fc = check(fc)
self.cc = check(paths[0])
self.cxx = check(paths[1])
if len(paths) > 2:
self.f77 = check(paths[2])
if len(paths) == 3:
self.fc = self.f77
else:
self.fc = check(paths[3])
#self.cc = check(cc)
#self.cxx = check(cxx)
#self.f77 = check(f77)
#self.fc = check(fc)
# Unfortunately have to make sure these params are accepted
# in the same order they are returned by sorted(flags)
@ -130,8 +144,10 @@ def check(exe):
if value is not None:
self.flags[flag] = value.split()
self.operating_system = operating_system
self.spec = cspec
self.modules = modules
self.alias = alias
@property
def version(self):
@ -258,57 +274,6 @@ def check(key):
successful.reverse()
return dict(((v, p, s), path) for v, p, s, path in successful)
@classmethod
def find(cls, *path):
"""Try to find this type of compiler in the user's
environment. For each set of compilers found, this returns
compiler objects with the cc, cxx, f77, fc paths and the
version filled in.
This will search for compilers with the names in cc_names,
cxx_names, etc. and it will group them if they have common
prefixes, suffixes, and versions. e.g., gcc-mp-4.7 would
be grouped with g++-mp-4.7 and gfortran-mp-4.7.
"""
dicts = parmap(
lambda t: cls._find_matches_in_path(*t),
[(cls.cc_names, cls.cc_version) + tuple(path),
(cls.cxx_names, cls.cxx_version) + tuple(path),
(cls.f77_names, cls.f77_version) + tuple(path),
(cls.fc_names, cls.fc_version) + tuple(path)])
all_keys = set()
for d in dicts:
all_keys.update(d)
compilers = {}
for k in all_keys:
ver, pre, suf = k
# Skip compilers with unknown version.
if ver == 'unknown':
continue
paths = tuple(pn[k] if k in pn else None for pn in dicts)
spec = spack.spec.CompilerSpec(cls.name, ver)
if ver in compilers:
prev = compilers[ver]
# prefer the one with more compilers.
prev_paths = [prev.cc, prev.cxx, prev.f77, prev.fc]
newcount = len([p for p in paths if p is not None])
prevcount = len([p for p in prev_paths if p is not None])
# Don't add if it's not an improvement over prev compiler.
if newcount <= prevcount:
continue
compilers[ver] = cls(spec, *paths)
return list(compilers.values())
def __repr__(self):
"""Return a string representation of the compiler toolchain."""
return self.__str__()
@ -317,7 +282,7 @@ def __repr__(self):
def __str__(self):
"""Return a string representation of the compiler toolchain."""
return "%s(%s)" % (
self.name, '\n '.join((str(s) for s in (self.cc, self.cxx, self.f77, self.fc))))
self.name, '\n '.join((str(s) for s in (self.cc, self.cxx, self.f77, self.fc, self.modules, str(self.operating_system)))))
class CompilerAccessError(spack.error.SpackError):

View File

@ -28,6 +28,11 @@
import imp
import os
import platform
import copy
import hashlib
import base64
import yaml
import sys
from llnl.util.lang import memoized, list_modules
from llnl.util.filesystem import join_path
@ -45,7 +50,8 @@
from spack.util.environment import get_path
_imported_compilers_module = 'spack.compilers'
_required_instance_vars = ['cc', 'cxx', 'f77', 'fc']
_path_instance_vars = ['cc', 'cxx', 'f77', 'fc']
_other_instance_vars = ['modules', 'operating_system']
# TODO: customize order in config file
if platform.system() == 'Darwin':
@ -64,107 +70,103 @@ def converter(cspec_like, *args, **kwargs):
def _to_dict(compiler):
"""Return a dict version of compiler suitable to insert in YAML."""
d = {}
d['spec'] = str(compiler.spec)
d['paths'] = dict( (attr, getattr(compiler, attr, None)) for attr in _path_instance_vars )
d['operating_system'] = str(compiler.operating_system)
d['modules'] = compiler.modules if compiler.modules else []
if compiler.alias:
d['alias'] = compiler.alias
return {
str(compiler.spec) : dict(
(attr, getattr(compiler, attr, None))
for attr in _required_instance_vars)
'compiler': d
}
def get_compiler_config(arch=None, scope=None):
def get_compiler_config(scope=None):
"""Return the compiler configuration for the specified architecture.
"""
# Check whether we're on a front-end (native) architecture.
my_arch = spack.architecture.sys_type()
if arch is None:
arch = my_arch
def init_compiler_config():
"""Compiler search used when Spack has no compilers."""
config[arch] = {}
compilers = find_compilers(*get_path('PATH'))
compilers = find_compilers()
compilers_dict = []
for compiler in compilers:
config[arch].update(_to_dict(compiler))
spack.config.update_config('compilers', config, scope=scope)
compilers_dict.append(_to_dict(compiler))
spack.config.update_config('compilers', compilers_dict, scope=scope)
config = spack.config.get_config('compilers', scope=scope)
# Update the configuration if there are currently no compilers
# configured. Avoid updating automatically if there ARE site
# compilers configured but no user ones.
if arch == my_arch and arch not in config:
# if (isinstance(arch, basestring) or arch == my_arch) and arch not in config:
if not config:
if scope is None:
# We know no compilers were configured in any scope.
init_compiler_config()
config = spack.config.get_config('compilers', scope=scope)
elif scope == 'user':
# Check the site config and update the user config if
# nothing is configured at the site level.
site_config = spack.config.get_config('compilers', scope='site')
if not site_config:
init_compiler_config()
config = spack.config.get_config('compilers', scope=scope)
return config[arch] if arch in config else {}
return config
def add_compilers_to_config(compilers, arch=None, scope=None):
def add_compilers_to_config(compilers, scope=None):
"""Add compilers to the config for the specified architecture.
Arguments:
- compilers: a list of Compiler objects.
- arch: arch to add compilers for.
- scope: configuration scope to modify.
"""
if arch is None:
arch = spack.architecture.sys_type()
compiler_config = get_compiler_config(arch, scope)
compiler_config = get_compiler_config(scope)
for compiler in compilers:
compiler_config[str(compiler.spec)] = dict(
(c, getattr(compiler, c, "None"))
for c in _required_instance_vars)
compiler_config.append(_to_dict(compiler))
update = { arch : compiler_config }
spack.config.update_config('compilers', update, scope)
spack.config.update_config('compilers', compiler_config, scope)
@_auto_compiler_spec
def remove_compiler_from_config(compiler_spec, arch=None, scope=None):
def remove_compiler_from_config(compiler_spec, scope=None):
"""Remove compilers from the config, by spec.
Arguments:
- compiler_specs: a list of CompilerSpec objects.
- arch: arch to add compilers for.
- scope: configuration scope to modify.
"""
if arch is None:
arch = spack.architecture.sys_type()
compiler_config = get_compiler_config(scope)
matches = [(a,c) for (a,c) in compiler_config.items() if c['spec'] == compiler_spec]
if len(matches) == 1:
del compiler_config[matches[0][0]]
else:
CompilerSpecInsufficientlySpecificError(compiler_spec)
compiler_config = get_compiler_config(arch, scope)
del compiler_config[str(compiler_spec)]
update = { arch : compiler_config }
spack.config.update_config('compilers', compiler_config, scope)
spack.config.update_config('compilers', update, scope)
_cache_config_file = {}
def all_compilers_config(arch=None, scope=None):
def all_compilers_config(scope=None):
"""Return a set of specs for all the compiler versions currently
available to build with. These are instances of CompilerSpec.
"""
# Get compilers for this architecture.
arch_config = get_compiler_config(arch, scope)
global _cache_config_file #Create a cache of the config file so we don't load all the time.
# Merge 'all' compilers with arch-specific ones.
# Arch-specific compilers have higher precedence.
merged_config = get_compiler_config('all', scope=scope)
merged_config = spack.config._merge_yaml(merged_config, arch_config)
if not _cache_config_file:
_cache_config_file = get_compiler_config(scope)
return _cache_config_file
return merged_config
else:
return _cache_config_file
def all_compilers(arch=None, scope=None):
def all_compilers(scope=None):
# Return compiler specs from the merged config.
return [spack.spec.CompilerSpec(s)
for s in all_compilers_config(arch, scope)]
return [spack.spec.CompilerSpec(s['compiler']['spec'])
for s in all_compilers_config(scope)]
def default_compiler():
@ -179,37 +181,19 @@ def default_compiler():
return sorted(versions)[-1]
def find_compilers(*path):
def find_compilers():
"""Return a list of compilers found in the suppied paths.
This invokes the find() method for each Compiler class,
and appends the compilers detected to a list.
This invokes the find_compilers() method for each operating
system associated with the host platform, and appends
the compilers detected to a list.
"""
# Make sure path elements exist, and include /bin directories
# under prefixes.
filtered_path = []
for p in path:
# Eliminate symlinks and just take the real directories.
p = os.path.realpath(p)
if not os.path.isdir(p):
continue
filtered_path.append(p)
# Check for a bin directory, add it if it exists
bin = join_path(p, 'bin')
if os.path.isdir(bin):
filtered_path.append(os.path.realpath(bin))
# Once the paths are cleaned up, do a search for each type of
# compiler. We can spawn a bunch of parallel searches to reduce
# the overhead of spelunking all these directories.
types = all_compiler_types()
compiler_lists = parmap(lambda cls: cls.find(*filtered_path), types)
# ensure all the version calls we made are cached in the parent
# process, as well. This speeds up Spack a lot.
clist = reduce(lambda x,y: x+y, compiler_lists)
return clist
# Find compilers for each operating system class
oss = all_os_classes()
compiler_lists = []
for os in oss:
compiler_lists.extend(os.find_compilers())
return compiler_lists
def supported_compilers():
"""Return a set of names of compilers supported by Spack.
@ -227,51 +211,83 @@ def supported(compiler_spec):
@_auto_compiler_spec
def find(compiler_spec, arch=None, scope=None):
def find(compiler_spec, scope=None):
"""Return specs of available compilers that match the supplied
compiler spec. Return an list if nothing found."""
return [c for c in all_compilers(arch, scope) if c.satisfies(compiler_spec)]
return [c for c in all_compilers(scope) if c.satisfies(compiler_spec)]
@_auto_compiler_spec
def compilers_for_spec(compiler_spec, arch=None, scope=None):
def compilers_for_spec(compiler_spec, scope=None, **kwargs):
"""This gets all compilers that satisfy the supplied CompilerSpec.
Returns an empty list if none are found.
"""
config = all_compilers_config(arch, scope)
platform = kwargs.get("platform", None)
config = all_compilers_config(scope)
def get_compiler(cspec):
items = config[str(cspec)]
def get_compilers(cspec):
compilers = []
if not all(n in items for n in _required_instance_vars):
for items in config:
if items['compiler']['spec'] != str(cspec):
continue
items = items['compiler']
if not ('paths' in items and all(n in items['paths'] for n in _path_instance_vars)):
raise InvalidCompilerConfigurationError(cspec)
cls = class_for_compiler_name(cspec.name)
compiler_paths = []
for c in _required_instance_vars:
compiler_path = items[c]
for c in _path_instance_vars:
compiler_path = items['paths'][c]
if compiler_path != "None":
compiler_paths.append(compiler_path)
else:
compiler_paths.append(None)
mods = items.get('modules')
if mods == 'None':
mods = []
if 'operating_system' in items:
operating_system = spack.architecture._operating_system_from_dict(items['operating_system'], platform)
else:
operating_system = None
alias = items['alias'] if 'alias' in items else None
flags = {}
for f in spack.spec.FlagMap.valid_compiler_flags():
if f in items:
flags[f] = items[f]
return cls(cspec, *compiler_paths, **flags)
matches = find(compiler_spec, arch, scope)
return [get_compiler(cspec) for cspec in matches]
compilers.append(cls(cspec, operating_system, compiler_paths, mods, alias, **flags))
return compilers
matches = set(find(compiler_spec, scope))
compilers = []
for cspec in matches:
compilers.extend(get_compilers(cspec))
return compilers
# return [get_compilers(cspec) for cspec in matches]
@_auto_compiler_spec
def compiler_for_spec(compiler_spec):
def compiler_for_spec(compiler_spec, arch):
"""Get the compiler that satisfies compiler_spec. compiler_spec must
be concrete."""
operating_system = arch.platform_os
assert(compiler_spec.concrete)
compilers = compilers_for_spec(compiler_spec)
assert(len(compilers) == 1)
compilers = [c for c in compilers_for_spec(compiler_spec, platform=arch.platform)
if c.operating_system == operating_system]
if len(compilers) < 1:
raise NoCompilerForSpecError(compiler_spec, operating_system)
if len(compilers) > 1:
raise CompilerSpecInsufficientlySpecificError(compiler_spec)
return compilers[0]
@ -289,6 +305,19 @@ def class_for_compiler_name(compiler_name):
return cls
def all_os_classes():
    """
    Return the list of classes for all operating systems available on
    this platform.
    """
    host_platform = spack.architecture.sys_type()
    # Each platform keeps its OS classes in the operating_sys mapping.
    return list(host_platform.operating_sys.values())
def all_compiler_types():
    """Return the compiler class for every supported compiler name."""
    return list(map(class_for_compiler_name, supported_compilers()))
@ -298,9 +327,19 @@ def __init__(self, compiler_spec):
super(InvalidCompilerConfigurationError, self).__init__(
"Invalid configuration for [compiler \"%s\"]: " % compiler_spec,
"Compiler configuration must contain entries for all compilers: %s"
% _required_instance_vars)
% _path_instance_vars)
class NoCompilersError(spack.error.SpackError):
    """Raised when Spack cannot find any compilers at all."""

    def __init__(self):
        message = "Spack could not find any compilers!"
        super(NoCompilersError, self).__init__(message)
class NoCompilerForSpecError(spack.error.SpackError):
    """Raised when no configured compiler satisfies a spec on a given OS."""
    # NOTE(review): the second parameter is named 'target' but the message
    # (and the caller, compiler_for_spec) treat it as an operating system —
    # consider renaming it for clarity.
    def __init__(self, compiler_spec, target):
        super(NoCompilerForSpecError, self).__init__("No compilers for operating system %s satisfy spec %s" % (
            target, compiler_spec))
class CompilerSpecInsufficientlySpecificError(spack.error.SpackError):
    """Raised when a compiler spec matches more than one configured compiler."""

    def __init__(self, compiler_spec):
        # Bug fix: the original passed compiler_spec as a second positional
        # argument (consumed as long_msg) instead of interpolating it into
        # the "%s" placeholder, so the message never showed the spec.
        super(CompilerSpecInsufficientlySpecificError, self).__init__(
            "Multiple compilers satisfy spec %s" % compiler_spec)

View File

@ -73,7 +73,7 @@ def cxx11_flag(self):
return "-std=c++11"
@classmethod
def default_version(self, comp):
def default_version(cls, comp):
"""The '--version' option works for clang compilers.
On most platforms, output looks like this::

View File

@ -0,0 +1,58 @@
##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import llnl.util.tty as tty
#from spack.build_environment import load_module
from spack.compiler import *
#from spack.version import ver
class Craype(Compiler):
    """Compiler class for the Cray Programming Environment wrappers.

    On Cray systems the ``cc``/``CC``/``ftn`` driver wrappers front the
    actual vendor compilers selected via PrgEnv modules.
    """
    # Possible names of the C compiler wrapper
    cc_names = ['cc']

    # Possible names of the C++ compiler wrapper
    cxx_names = ['CC']

    # Possible names of the Fortran 77 compiler wrapper
    f77_names = ['ftn']

    # Possible names of the Fortran 90 compiler wrapper (same wrapper as f77)
    fc_names = ['ftn']

    # NOTE(review): this suffix pattern (and its MacPorts comment) appears to
    # be copy-pasted from the GCC compiler module; confirm whether Cray
    # wrappers ever carry '-mp-X.Y' suffixes.
    suffixes = [r'-mp-\d\.\d']

    # PrgEnv module that provides this compiler, and the module used to
    # switch its versions (see Compiler.PrgEnv / PrgEnv_compiler).
    PrgEnv = 'PrgEnv-cray'
    PrgEnv_compiler = 'craype'

    # Relative paths (under Spack's compiler-wrapper dir) used for each
    # language when building with this compiler.
    link_paths = { 'cc' : 'cc',
                   'cxx' : 'c++',
                   'f77' : 'f77',
                   'fc' : 'fc'}

    @classmethod
    def default_version(cls, comp):
        """Extract the version by matching 'Version X.Y...' in comp's output."""
        return get_compiler_version(comp, r'([Vv]ersion).*(\d+(\.\d+)+)')

View File

@ -49,6 +49,9 @@ class Gcc(Compiler):
'f77' : 'gcc/gfortran',
'fc' : 'gcc/gfortran' }
PrgEnv = 'PrgEnv-gnu'
PrgEnv_compiler = 'gcc'
@property
def openmp_flag(self):
return "-fopenmp"
@ -74,9 +77,9 @@ def fc_version(cls, fc):
return get_compiler_version(
fc, '-dumpversion',
# older gfortran versions don't have simple dumpversion output.
r'(?:GNU Fortran \(GCC\))?(\d+\.\d+(?:\.\d+)?)')
r'(?:GNU Fortran \(GCC\))?(\d+\.\d+(?:\.\d+)?)', module)
@classmethod
def f77_version(cls, f77):
return cls.fc_version(f77)
return cls.fc_version(f77, module)

View File

@ -45,6 +45,9 @@ class Intel(Compiler):
'f77' : 'intel/ifort',
'fc' : 'intel/ifort' }
PrgEnv = 'PrgEnv-intel'
PrgEnv_compiler = 'intel'
@property
def openmp_flag(self):
if self.version < ver('16.0'):

View File

@ -44,6 +44,12 @@ class Pgi(Compiler):
'f77' : 'pgi/pgfortran',
'fc' : 'pgi/pgfortran' }
PrgEnv = 'PrgEnv-pgi'
PrgEnv_compiler = 'pgi'
@property
def openmp_flag(self):
return "-mp"
@ -52,7 +58,6 @@ def openmp_flag(self):
def cxx11_flag(self):
return "-std=c++11"
@classmethod
def default_version(cls, comp):
"""The '-V' option works for all the PGI compilers.

View File

@ -56,8 +56,9 @@ def cxx11_flag(self):
else:
return "-qlanglvl=extended0x"
@classmethod
def default_version(self, comp):
def default_version(cls, comp):
"""The '-qversion' is the standard option fo XL compilers.
Output looks like this::
@ -83,6 +84,7 @@ def default_version(self, comp):
return get_compiler_version(
comp, '-qversion',r'([0-9]?[0-9]\.[0-9])')
@classmethod
def fc_version(cls, fc):
"""The fortran and C/C++ versions of the XL compiler are always two units apart.

View File

@ -84,7 +84,8 @@ def _valid_virtuals_and_externals(self, spec):
raise NoBuildError(spec)
def cmp_externals(a, b):
if a.name != b.name:
if a.name != b.name and (not a.external or a.external_module and
not b.external and b.external_module):
# We're choosing between different providers, so
# maintain order from provider sort
return candidates.index(a) - candidates.index(b)
@ -187,32 +188,65 @@ def prefer_key(v):
return True # Things changed
def concretize_architecture(self, spec):
"""If the spec already had an architecture, return. Otherwise if
the root of the DAG has an architecture, then use that.
Otherwise take the system's default architecture.
Intuition: Architectures won't be set a lot, and generally you
want the host system's architecture. When architectures are
mised in a spec, it is likely because the tool requries a
cross-compiled component, e.g. for tools that run on BlueGene
or Cray machines. These constraints will likely come directly
from packages, so require the user to be explicit if they want
to mess with the architecture, and revert to the default when
they're not explicit.
"""
if spec.architecture is not None:
def _concretize_operating_system(self, spec):
platform = spec.architecture.platform
if spec.architecture.platform_os is not None and isinstance(
spec.architecture.platform_os,spack.architecture.OperatingSystem):
return False
if spec.root.architecture:
spec.architecture = spec.root.architecture
if spec.root.architecture and spec.root.architecture.platform_os:
if isinstance(spec.root.architecture.platform_os,spack.architecture.OperatingSystem):
spec.architecture.platform_os = spec.root.architecture.platform_os
else:
spec.architecture = spack.architecture.sys_type()
assert(spec.architecture is not None)
spec.architecture.platform_os = spec.architecture.platform.operating_system('default_os')
return True #changed
def _concretize_target(self, spec):
platform = spec.architecture.platform
if spec.architecture.target is not None and isinstance(
spec.architecture.target, spack.architecture.Target):
return False
if spec.root.architecture and spec.root.architecture.target:
if isinstance(spec.root.architecture.target,spack.architecture.Target):
spec.architecture.target = spec.root.architecture.target
else:
spec.architecture.target = spec.architecture.platform.target('default_target')
return True #changed
def _concretize_platform(self, spec):
if spec.architecture.platform is not None and isinstance(
spec.architecture.platform, spack.architecture.Platform):
return False
if spec.root.architecture and spec.root.architecture.platform:
if isinstance(spec.root.architecture.platform,spack.architecture.Platform):
spec.architecture.platform = spec.root.architecture.platform
else:
spec.architecture.platform = spack.architecture.sys_type()
return True #changed?
def concretize_architecture(self, spec):
"""If the spec is empty provide the defaults of the platform. If the
architecture is not a basestring, then check if either the platform,
target or operating system are concretized. If any of the fields are
changed then return True. If everything is concretized (i.e the
architecture attribute is a namedtuple of classes) then return False.
If the target is a string type, then convert the string into a
concretized architecture. If it has no architecture and the root of the
DAG has an architecture, then use the root otherwise use the defaults
on the platform.
"""
if spec.architecture is None:
# Set the architecture to all defaults
spec.architecture = spack.architecture.Arch()
return True
# Concretize the operating_system and target based of the spec
ret = any((self._concretize_platform(spec),
self._concretize_operating_system(spec),
self._concretize_target(spec)))
return ret
def concretize_variants(self, spec):
"""If the spec already has variants filled in, return. Otherwise, add
@ -238,6 +272,23 @@ def concretize_compiler(self, spec):
build with the compiler that will be used by libraries that
link to this one, to maximize compatibility.
"""
# Pass on concretizing the compiler if the target is not yet determined
if not spec.architecture.platform_os:
#Although this usually means changed, this means awaiting other changes
return True
# Only use a matching compiler if it is of the proper style
# Takes advantage of the proper logic already existing in compiler_for_spec
# Should think whether this can be more efficient
def _proper_compiler_style(cspec, arch):
platform = arch.platform
compilers = spack.compilers.compilers_for_spec(cspec,
platform=platform)
return filter(lambda c: c.operating_system ==
arch.platform_os, compilers)
#return compilers
all_compilers = spack.compilers.all_compilers()
if (spec.compiler and
@ -247,6 +298,7 @@ def concretize_compiler(self, spec):
#Find the another spec that has a compiler, or the root if none do
other_spec = spec if spec.compiler else find_spec(spec, lambda(x) : x.compiler)
if not other_spec:
other_spec = spec.root
other_compiler = other_spec.compiler
@ -265,7 +317,12 @@ def concretize_compiler(self, spec):
raise UnavailableCompilerVersionError(other_compiler)
# copy concrete version into other_compiler
spec.compiler = matches[0].copy()
index = 0
while not _proper_compiler_style(matches[index], spec.architecture):
index += 1
if index == len(matches) - 1:
raise NoValidVersionError(spec)
spec.compiler = matches[index].copy()
assert(spec.compiler.concrete)
return True # things changed.
@ -276,14 +333,20 @@ def concretize_compiler_flags(self, spec):
compiler is used, defaulting to no compiler flags in the spec.
Default specs set at the compiler level will still be added later.
"""
if not spec.architecture.platform_os:
#Although this usually means changed, this means awaiting other changes
return True
ret = False
for flag in spack.spec.FlagMap.valid_compiler_flags():
try:
nearest = next(p for p in spec.traverse(direction='parents')
if ((p.compiler == spec.compiler and p is not spec)
and flag in p.compiler_flags))
if ((not flag in spec.compiler_flags) or
sorted(spec.compiler_flags[flag]) != sorted(nearest.compiler_flags[flag])):
if not flag in spec.compiler_flags or \
not (sorted(spec.compiler_flags[flag]) >= sorted(nearest.compiler_flags[flag])):
if flag in spec.compiler_flags:
spec.compiler_flags[flag] = list(set(spec.compiler_flags[flag]) |
set(nearest.compiler_flags[flag]))
@ -307,7 +370,7 @@ def concretize_compiler_flags(self, spec):
# Include the compiler flag defaults from the config files
# This ensures that spack will detect conflicts that stem from a change
# in default compiler flags.
compiler = spack.compilers.compiler_for_spec(spec.compiler)
compiler = spack.compilers.compiler_for_spec(spec.compiler, spec.architecture)
for flag in compiler.flags:
if flag not in spec.compiler_flags:
spec.compiler_flags[flag] = compiler.flags[flag]

View File

@ -135,7 +135,7 @@
# Hacked yaml for configuration files preserves line numbers.
import spack.util.spack_yaml as syaml
from spack.build_environment import get_path_from_module
"""Dict from section names -> schema for that section."""
section_schemas = {
@ -146,18 +146,17 @@
'additionalProperties': False,
'patternProperties': {
'compilers:?': { # optional colon for overriding site config.
'type': 'object',
'default': {},
'additionalProperties': False,
'patternProperties': {
r'\w[\w-]*': { # architecture
'type': 'array',
'items': {
'compiler': {
'type': 'object',
'additionalProperties': False,
'patternProperties': {
r'\w[\w-]*@\w[\w-]*': { # compiler spec
'required': ['paths', 'spec', 'modules', 'operating_system'],
'properties': {
'paths': {
'type': 'object',
'additionalProperties': False,
'required': ['cc', 'cxx', 'f77', 'fc'],
'additionalProperties': False,
'properties': {
'cc': { 'anyOf': [ {'type' : 'string' },
{'type' : 'null' }]},
@ -167,8 +166,27 @@
{'type' : 'null' }]},
'fc': { 'anyOf': [ {'type' : 'string' },
{'type' : 'null' }]},
},},},},},},},},
'cflags': { 'anyOf': [ {'type' : 'string' },
{'type' : 'null' }]},
'cxxflags': { 'anyOf': [ {'type' : 'string' },
{'type' : 'null' }]},
'fflags': { 'anyOf': [ {'type' : 'string' },
{'type' : 'null' }]},
'cppflags': { 'anyOf': [ {'type' : 'string' },
{'type' : 'null' }]},
'ldflags': { 'anyOf': [ {'type' : 'string' },
{'type' : 'null' }]},
'ldlibs': { 'anyOf': [ {'type' : 'string' },
{'type' : 'null' }]}}},
'spec': { 'type': 'string'},
'operating_system': { 'type': 'string'},
'alias': { 'anyOf': [ {'type' : 'string'},
{'type' : 'null' }]},
'modules': { 'anyOf': [ {'type' : 'string'},
{'type' : 'null' },
{'type': 'array'},
]}
},},},},},},
'mirrors': {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack mirror configuration file schema',
@ -194,7 +212,6 @@
'default': [],
'items': {
'type': 'string'},},},},
'packages': {
'$schema': 'http://json-schema.org/schema#',
'title': 'Spack package configuration file schema',
@ -224,6 +241,10 @@
'type': 'boolean',
'default': True,
},
'modules': {
'type' : 'object',
'default' : {},
},
'providers': {
'type': 'object',
'default': {},
@ -565,8 +586,7 @@ def they_are(t):
# Source list is prepended (for precedence)
if they_are(list):
seen = set(source)
dest[:] = source + [x for x in dest if x not in seen]
dest[:] = source + [x for x in dest if x not in source]
return dest
# Source dict is merged into dest.
@ -669,7 +689,8 @@ def spec_externals(spec):
external_specs = []
pkg_paths = allpkgs.get(name, {}).get('paths', None)
if not pkg_paths:
pkg_modules = allpkgs.get(name, {}).get('modules', None)
if (not pkg_paths) and (not pkg_modules):
return []
for external_spec, path in pkg_paths.iteritems():
@ -680,6 +701,17 @@ def spec_externals(spec):
external_spec = spack.spec.Spec(external_spec, external=path)
if external_spec.satisfies(spec):
external_specs.append(external_spec)
for external_spec, module in pkg_modules.iteritems():
if not module:
continue
path = get_path_from_module(module)
external_spec = spack.spec.Spec(external_spec, external=path, external_module=module)
if external_spec.satisfies(spec):
external_specs.append(external_spec)
return external_specs

View File

@ -214,6 +214,7 @@ def _read_spec_from_yaml(self, hash_key, installs, parent_key=None):
# Add dependencies from other records in the install DB to
# form a full spec.
if 'dependencies' in spec_dict[spec.name]:
for dep_hash in spec_dict[spec.name]['dependencies'].values():
child = self._read_spec_from_yaml(dep_hash, installs, hash_key)
spec._add_dependency(child)
@ -289,7 +290,8 @@ def check(cond, msg):
except Exception as e:
tty.warn("Invalid database reecord:",
"file: %s" % self._index_path,
"hash: %s" % hash_key, "cause: %s" % str(e))
"hash: %s" % hash_key,
"cause: %s: %s" % (type(e).__name__, str(e)))
raise
self._data = data
@ -309,7 +311,11 @@ def reindex(self, directory_layout):
for spec in directory_layout.all_specs():
# Create a spec for each known package and add it.
path = directory_layout.path_for_spec(spec)
self._add(spec, path, directory_layout)
old_info = old_data.get(spec.dag_hash())
explicit = False
if old_info is not None:
explicit = old_info.explicit
self._add(spec, path, directory_layout, explicit=explicit)
self._check_ref_counts()

View File

@ -257,7 +257,7 @@ def variant(pkg, name, default=False, description=""):
"""Define a variant for the package. Packager can specify a default
value (on or off) as well as a text description."""
default = bool(default)
default = default
description = str(description).strip()
if not re.match(spack.spec.identifier_re, name):

View File

@ -165,7 +165,7 @@ def remove_install_directory(self, spec):
class YamlDirectoryLayout(DirectoryLayout):
"""Lays out installation directories like this::
<install root>/
<architecture>/
<target>/
<compiler>-<compiler version>/
<name>-<version>-<variants>-<hash>
@ -207,8 +207,7 @@ def relative_path_for_spec(self, spec):
spec.version,
spec.dag_hash(self.hash_len))
path = join_path(
spec.architecture,
path = join_path(spec.architecture,
"%s-%s" % (spec.compiler.name, spec.compiler.version),
dir_name)

View File

@ -1,4 +1,4 @@
##############################################################################
#
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
@ -21,7 +21,7 @@
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
"""
Fetch strategies are used to download source code into a staging area
in order to build it. They need to define the following methods:
@ -75,11 +75,13 @@ def wrapper(self, *args, **kwargs):
class FetchStrategy(object):
"""Superclass of all fetch strategies."""
enabled = False # Non-abstract subclasses should be enabled.
required_attributes = None # Attributes required in version() args.
class __metaclass__(type):
"""This metaclass registers all fetch strategies in a list."""
def __init__(cls, name, bases, dict):
@ -126,6 +128,7 @@ def matches(cls, args):
@pattern.composite(interface=FetchStrategy)
class FetchStrategyComposite(object):
"""
Composite for a FetchStrategy object. Implements the GoF composite pattern.
"""
@ -134,6 +137,7 @@ class FetchStrategyComposite(object):
class URLFetchStrategy(FetchStrategy):
"""FetchStrategy that pulls source code from a URL for an archive,
checks the archive against a checksum,and decompresses the archive.
"""
@ -235,12 +239,13 @@ def fetch(self):
# redirects properly.
content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
if content_types and 'text/html' in content_types[-1]:
tty.warn(
"The contents of " + self.archive_file + " look like HTML.",
tty.warn("The contents of ",
(self.archive_file if self.archive_file is not None
else "the archive"),
" look like HTML.",
"The checksum will likely be bad. If it is, you can use",
"'spack clean <package>' to remove the bad archive, then fix",
"your internet gateway issue and install again.")
"'spack clean <package>' to remove the bad archive, then",
"fix your internet gateway issue and install again.")
if save_file:
os.rename(partial_file, save_file)
@ -353,6 +358,7 @@ def __str__(self):
class VCSFetchStrategy(FetchStrategy):
def __init__(self, name, *rev_types, **kwargs):
super(VCSFetchStrategy, self).__init__()
self.name = name
@ -407,6 +413,7 @@ def __repr__(self):
class GoFetchStrategy(VCSFetchStrategy):
"""
Fetch strategy that employs the `go get` infrastructure
Use like this in a package:
@ -466,6 +473,7 @@ def __str__(self):
class GitFetchStrategy(VCSFetchStrategy):
"""
Fetch strategy that gets source code from a git repository.
Use like this in a package:
@ -586,6 +594,7 @@ def __str__(self):
class SvnFetchStrategy(VCSFetchStrategy):
"""Fetch strategy that gets source code from a subversion repository.
Use like this in a package:
@ -662,6 +671,7 @@ def __str__(self):
class HgFetchStrategy(VCSFetchStrategy):
"""
Fetch strategy that gets source code from a Mercurial repository.
Use like this in a package:
@ -806,11 +816,13 @@ def for_package_version(pkg, version):
class FetchError(spack.error.SpackError):
def __init__(self, msg, long_msg=None):
super(FetchError, self).__init__(msg, long_msg)
class FailedDownloadError(FetchError):
"""Raised wen a download fails."""
def __init__(self, url, msg=""):
@ -820,16 +832,19 @@ def __init__(self, url, msg=""):
class NoArchiveFileError(FetchError):
def __init__(self, msg, long_msg):
super(NoArchiveFileError, self).__init__(msg, long_msg)
class NoDigestError(FetchError):
def __init__(self, msg, long_msg=None):
super(NoDigestError, self).__init__(msg, long_msg)
class InvalidArgsError(FetchError):
def __init__(self, pkg, version):
msg = ("Could not construct a fetch strategy for package %s at "
"version %s")
@ -838,6 +853,7 @@ def __init__(self, pkg, version):
class ChecksumError(FetchError):
"""Raised when archive fails to checksum."""
def __init__(self, message, long_msg=None):
@ -845,6 +861,7 @@ def __init__(self, message, long_msg=None):
class NoStageError(FetchError):
"""Raised when fetch operations are called before set_stage()."""
def __init__(self, method):

View File

@ -26,7 +26,7 @@
import spack
import llnl.util.tty as tty
from llnl.util.filesystem import join_path
from llnl.util.filesystem import join_path, mkdirp
def pre_install(pkg):
@ -154,6 +154,9 @@ def symlink_license(pkg):
target = pkg.global_license_file
for filename in pkg.license_files:
link_name = join_path(pkg.prefix, filename)
license_dir = os.path.dirname(link_name)
if not os.path.exists(license_dir):
mkdirp(license_dir)
if os.path.exists(target):
os.symlink(target, link_name)
tty.msg("Added local symlink %s to global license file" %

View File

@ -0,0 +1,62 @@
import re
import os
from spack.architecture import OperatingSystem
from spack.util.executable import *
import spack.spec
from spack.util.multiproc import parmap
import spack.compilers
class Cnl(OperatingSystem):
    """ Compute Node Linux (CNL) is the operating system used for the Cray XC
    series super computers. It is a very stripped down version of GNU/Linux.
    Any compilers found through this operating system will be used with
    modules. If updated, user must make sure that version and name are
    updated to indicate that OS has been upgraded (or downgraded)
    """

    def __init__(self):
        name = 'CNL'
        version = '10'
        super(Cnl, self).__init__(name, version)

    def find_compilers(self, *paths):
        """Find all compilers of every known type reachable through
        Cray PrgEnv modules, optionally searching extra module ``paths``."""
        types = spack.compilers.all_compiler_types()
        compiler_lists = parmap(
            lambda cmp_cls: self.find_compiler(cmp_cls, *paths), types)

        # ensure all the version calls we made are cached in the parent
        # process, as well.  This speeds up Spack a lot.
        clist = reduce(lambda x, y: x + y, compiler_lists)
        return clist

    def find_compiler(self, cmp_cls, *paths):
        """Return compilers of type ``cmp_cls`` discovered via PrgEnv modules.

        Temporarily extends MODULEPATH with ``paths`` (if given), scans
        ``modulecmd avail`` output for versions of the compiler's PrgEnv
        module, and restores MODULEPATH before returning.
        """
        # Fix: `tty` was used below but never imported in this module,
        # which made the `tty.die` branch raise NameError instead.
        import llnl.util.tty as tty

        compilers = []
        if cmp_cls.PrgEnv:
            if not cmp_cls.PrgEnv_compiler:
                tty.die('Must supply PrgEnv_compiler with PrgEnv')

            modulecmd = which('modulecmd')
            modulecmd.add_default_arg('python')

            # Save the environment variable to restore later.  Use .get()
            # so an unset MODULEPATH does not raise KeyError.
            old_modulepath = os.environ.get('MODULEPATH', '')
            # if given any explicit paths, search them for module files too
            # NOTE(review): this *replaces* MODULEPATH rather than appending
            # to it (the leading ':' suggests appending was intended) --
            # behavior kept as-is; confirm with the module system owners.
            if paths:
                os.environ['MODULEPATH'] = ':' + ':'.join(paths)

            output = modulecmd(
                'avail', cmp_cls.PrgEnv_compiler, output=str, error=str)
            matches = re.findall(
                r'(%s)/([\d\.]+[\d])' % cmp_cls.PrgEnv_compiler, output)
            for name, version in matches:
                v = version
                comp = cmp_cls(
                    spack.spec.CompilerSpec(name + '@' + v), self,
                    ['cc', 'CC', 'ftn'], [cmp_cls.PrgEnv, name + '/' + v])

                compilers.append(comp)

            # Restore modulepath environment variable
            if paths:
                os.environ['MODULEPATH'] = old_modulepath

        return compilers

View File

@ -0,0 +1,22 @@
import re
import platform as py_platform
from spack.architecture import OperatingSystem
class LinuxDistro(OperatingSystem):
    """ This class will represent the autodetected operating system
    for a Linux System. Since there are many different flavors of
    Linux, this class will attempt to encompass them all through
    autodetection using the python module platform and the method
    platform.dist()
    """

    def __init__(self):
        """Detect the distribution name and a single version token."""
        name, release, _ = py_platform.linux_distribution(
            full_distribution_name=False)

        # Keep only the first legal identifier of the version string:
        # redhat reports a plain major version, while e.g. debian reports
        # 'wheezy/sid' (sid means unstable) -- we record just 'wheezy'.
        major = re.split(r'[^\w-]', release)[0]

        super(LinuxDistro, self).__init__(name, major)

View File

@ -0,0 +1,29 @@
import platform as py_platform
from spack.architecture import OperatingSystem
class MacOs(OperatingSystem):
    """This class represents the macOS operating system. This will be
    auto detected using the python platform.mac_ver. The macOS
    platform will be represented using the major version operating
    system name, i.e el capitan, yosemite...etc.
    """

    def __init__(self):
        """ Autodetects the mac version from a dictionary. Goes back as
        far as 10.6 snowleopard. If the user has an older mac then
        the version will just be a generic mac_os.
        """
        mac_releases = {'10.6': "snowleopard",
                        "10.7": "lion",
                        "10.8": "mountainlion",
                        "10.9": "mavericks",
                        "10.10": "yosemite",
                        "10.11": "elcapitan",
                        "10.12": "sierra"}

        # platform.mac_ver() returns e.g. '10.11.6'; we want only the
        # 'major.minor' part.  Fix: the previous code sliced off the last
        # two characters ([:-2]), which breaks for two-digit patch levels
        # ('10.10.10' -> '10.10.') and for versions without a patch level
        # ('10.11' -> '10.').  Split on dots instead.
        full_version = py_platform.mac_ver()[0]
        mac_ver = '.'.join(full_version.split('.')[:2])

        name = mac_releases.get(mac_ver, "macos")
        super(MacOs, self).__init__(name, mac_ver)

    def __str__(self):
        return self.name

View File

@ -397,14 +397,20 @@ def __init__(self, spec):
if self.is_extension:
spack.repo.get(self.extendee_spec)._check_extendable()
@property
def global_license_dir(self):
"""Returns the directory where global license files for all
packages are stored."""
spack_root = ancestor(__file__, 4)
return join_path(spack_root, 'etc', 'spack', 'licenses')
@property
def global_license_file(self):
"""Returns the path where a global license file should be stored."""
"""Returns the path where a global license file for this
particular package should be stored."""
if not self.license_files:
return
spack_root = ancestor(__file__, 4)
global_license_dir = join_path(spack_root, 'etc', 'spack', 'licenses')
return join_path(global_license_dir, self.name,
return join_path(self.global_license_dir, self.name,
os.path.basename(self.license_files[0]))
@property
@ -676,11 +682,13 @@ def prefix(self):
return self.spec.prefix
@property
#TODO: Change this to architecture
def compiler(self):
"""Get the spack.compiler.Compiler object used to build this package"""
if not self.spec.concrete:
raise ValueError("Can only get a compiler for a concrete package.")
return spack.compilers.compiler_for_spec(self.spec.compiler)
return spack.compilers.compiler_for_spec(self.spec.compiler,
self.spec.architecture)
def url_version(self, version):
"""

View File

View File

@ -0,0 +1,18 @@
import os
from spack.architecture import Platform, Target
class Bgq(Platform):
    """Platform for IBM Blue Gene/Q systems."""
    priority = 30
    front_end = 'power7'
    back_end = 'powerpc'
    default = 'powerpc'

    def __init__(self):
        super(Bgq, self).__init__('bgq')
        # Register the front-end and back-end targets, in that order.
        for target_name in (self.front_end, self.back_end):
            self.add_target(target_name, Target(target_name))

    @classmethod
    def detect(cls):
        # A /bgsys directory is only present on Blue Gene systems.
        return os.path.exists('/bgsys')

View File

@ -0,0 +1,46 @@
import os
from spack.architecture import Platform, Target
from spack.operating_systems.linux_distro import LinuxDistro
from spack.operating_systems.cnl import Cnl
class CrayXc(Platform):
    """Platform for Cray XC series machines."""
    priority = 20
    front_end = 'sandybridge'
    back_end = 'ivybridge'
    default = 'ivybridge'

    front_os = "SuSE11"
    back_os = "CNL10"
    default_os = "CNL10"

    def __init__(self):
        ''' Since cori doesn't have ivybridge as a front end it's better
            if we use CRAY_CPU_TARGET as the default. This will ensure
            that if we're on a XC-40 or XC-30 then we can detect the target
        '''
        super(CrayXc, self).__init__('cray_xc')

        # Prefer the Cray-provided target when the environment defines one.
        cray_target = os.environ.get('CRAY_CPU_TARGET')
        if cray_target is not None:
            self.default = cray_target

        # On an XC-40 the front end matches the back end (haswell).
        if self.default == 'haswell':
            self.front_end = self.default
            self.back_end = self.default

        # Could switch to use modules and fe targets for front end
        # Currently using compilers by path for front end.
        self.add_target('sandybridge', Target('sandybridge'))
        self.add_target('ivybridge', Target('ivybridge', 'craype-ivybridge'))
        self.add_target('haswell', Target('haswell', 'craype-haswell'))

        self.add_operating_system('SuSE11', LinuxDistro())
        self.add_operating_system('CNL10', Cnl())

    @classmethod
    def detect(cls):
        # Cray programming environment lives under /opt/cray/craype.
        return os.path.exists('/opt/cray/craype')

View File

@ -0,0 +1,26 @@
import subprocess
from spack.architecture import Platform, Target
from spack.operating_systems.mac_os import MacOs
class Darwin(Platform):
    """Platform for macOS (Darwin) hosts."""
    priority = 89
    front_end = 'x86_64'
    back_end = 'x86_64'
    default = 'x86_64'

    def __init__(self):
        super(Darwin, self).__init__('darwin')
        self.add_target(self.default, Target(self.default))

        mac_os = MacOs()
        os_name = str(mac_os)
        self.default_os = os_name
        self.front_os = os_name
        self.back_os = os_name
        self.add_operating_system(os_name, mac_os)

    @classmethod
    def detect(cls):
        # 'uname -a' includes the kernel name, 'Darwin' on macOS.
        proc = subprocess.Popen(['uname', '-a'], stdout=subprocess.PIPE)
        uname_output, _ = proc.communicate()
        return 'darwin' in uname_output.strip().lower()

View File

@ -0,0 +1,24 @@
import subprocess
from spack.architecture import Platform, Target
from spack.operating_systems.linux_distro import LinuxDistro
class Linux(Platform):
    """Generic platform for Linux hosts."""
    priority = 90
    front_end = 'x86_64'
    back_end = 'x86_64'
    default = 'x86_64'

    def __init__(self):
        super(Linux, self).__init__('linux')
        self.add_target(self.default, Target(self.default))

        linux_dist = LinuxDistro()
        os_name = str(linux_dist)
        self.default_os = os_name
        self.front_os = os_name
        self.back_os = os_name
        self.add_operating_system(os_name, linux_dist)

    @classmethod
    def detect(cls):
        # 'uname -a' includes the kernel name, 'Linux' on Linux hosts.
        proc = subprocess.Popen(['uname', '-a'], stdout=subprocess.PIPE)
        uname_output, _ = proc.communicate()
        return 'linux' in uname_output.strip().lower()

View File

@ -0,0 +1,28 @@
import subprocess
from spack.architecture import Platform, Target
from spack.operating_systems.linux_distro import LinuxDistro
from spack.operating_systems.cnl import Cnl
class Test(Platform):
    """Mock platform used by the test suite.

    Its huge priority makes it win auto-detection over every real
    platform, and ``detect`` always succeeds.
    """
    priority = 1000000
    front_end = 'x86_32'
    back_end = 'x86_64'
    default = 'x86_64'

    back_os = 'CNL10'
    default_os = 'CNL10'

    def __init__(self):
        super(Test, self).__init__('test')
        for target_name in (self.default, self.front_end):
            self.add_target(target_name, Target(target_name))

        self.add_operating_system(self.default_os, Cnl())
        linux_dist = LinuxDistro()
        self.front_os = linux_dist.name
        self.add_operating_system(self.front_os, linux_dist)

    @classmethod
    def detect(cls):
        # Always "detected" so the mock platform can shadow real ones.
        return True

View File

@ -1,4 +1,4 @@
#
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
@ -18,10 +18,10 @@
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""
Spack allows very fine-grained control over how packages are installed and
over how they are built and configured. To make this easy, it has its own
@ -96,8 +96,10 @@
expansion when it is the first character in an id typed on the command line.
"""
import sys
import itertools
import hashlib
import base64
import imp
from StringIO import StringIO
from operator import attrgetter
import yaml
@ -106,16 +108,22 @@
import llnl.util.tty as tty
from llnl.util.lang import *
from llnl.util.tty.color import *
from llnl.util.filesystem import join_path
import spack
import spack.architecture
import spack.parse
import spack.error
import spack.compilers as compilers
# TODO: move display_specs to some other location.
from spack.cmd.find import display_specs
from spack.version import *
from spack.util.string import *
from spack.util.prefix import Prefix
from spack.util.naming import mod_to_class
from spack.virtual import ProviderIndex
from spack.build_environment import get_path_from_module, load_module
# Valid pattern for an identifier in Spack
identifier_re = r'\w[\w-]*'
@ -165,7 +173,6 @@ def colorize_spec(spec):
"""Returns a spec colorized according to the colors specified in
color_formats."""
class insert_color:
def __init__(self):
self.last = None
@ -183,11 +190,9 @@ def __call__(self, match):
@key_ordering
class CompilerSpec(object):
"""The CompilerSpec field represents the compiler or range of compiler
versions that a package should be built with. CompilerSpecs have a
name and a version list. """
def __init__(self, *args):
nargs = len(args)
if nargs == 1:
@ -293,7 +298,6 @@ class VariantSpec(object):
on the particular package being built, and each named variant can
be enabled or disabled.
"""
def __init__(self, name, value):
self.name = name
self.value = value
@ -489,6 +493,7 @@ def __init__(self, spec_like, *dep_like, **kwargs):
# Allow a spec to be constructed with an external path.
self.external = kwargs.get('external', None)
self.external_module = kwargs.get('external_module', None)
# This allows users to construct a spec DAG with literals.
# Note that given two specs a and b, Spec(a) copies a, but
@ -520,8 +525,33 @@ def _add_flag(self, name, value):
Known flags currently include "arch"
"""
valid_flags = FlagMap.valid_compiler_flags()
if name == 'arch':
self._set_architecture(value)
if name == 'arch' or name == 'architecture':
parts = value.split('-')
if len(parts) == 3:
platform, op_sys, target = parts
else:
platform, op_sys, target = None, None, value
assert(self.architecture.platform is None)
assert(self.architecture.platform_os is None)
assert(self.architecture.target is None)
assert(self.architecture.os_string is None)
assert(self.architecture.target_string is None)
self._set_platform(platform)
self._set_os(op_sys)
self._set_target(target)
elif name == 'platform':
self._set_platform(value)
elif name == 'os' or name == 'operating_system':
if self.architecture.platform:
self._set_os(value)
else:
self.architecture.os_string = value
elif name == 'target':
if self.architecture.platform:
self._set_target(value)
else:
self.architecture.target_string = value
elif name in valid_flags:
assert(self.compiler_flags is not None)
self.compiler_flags[name] = value.split()
@ -535,12 +565,49 @@ def _set_compiler(self, compiler):
"Spec for '%s' cannot have two compilers." % self.name)
self.compiler = compiler
def _set_architecture(self, architecture):
"""Called by the parser to set the architecture."""
if self.architecture:
raise DuplicateArchitectureError(
"Spec for '%s' cannot have two architectures." % self.name)
self.architecture = architecture
def _set_platform(self, value):
"""Called by the parser to set the architecture platform"""
if isinstance(value, basestring):
mod_path = spack.platform_path
mod_string = 'spack.platformss'
names = list_modules(mod_path)
if value in names:
# Create a platform object from the name
mod_name = mod_string + value
path = join_path(mod_path, value) + '.py'
mod = imp.load_source(mod_name, path)
class_name = mod_to_class(value)
if not hasattr(mod, class_name):
tty.die('No class %s defined in %s' % (class_name, mod_name))
cls = getattr(mod, class_name)
if not inspect.isclass(cls):
tty.die('%s.%s is not a class' % (mod_name, class_name))
platform = cls()
else:
tty.die("No platform class %s defined." % value)
else:
# The value is a platform
platform = value
self.architecture.platform = platform
# Set os and target if we previously got strings for them
if self.architecture.os_string:
self._set_os(self.architecture.os_string)
self.architecture.os_string = None
if self.architecture.target_string:
self._set_target(self.architecture.target_string)
self.architecture.target_string = None
def _set_os(self, value):
    """Called by the parser to set the architecture operating system"""
    platform = self.architecture.platform
    if platform:
        self.architecture.platform_os = platform.operating_system(value)
def _set_target(self, value):
    """Called by the parser to set the architecture target"""
    platform = self.architecture.platform
    if platform:
        self.architecture.target = platform.target(value)
def _add_dependency(self, spec):
"""Called by the parser to add another spec as a dependency."""
@ -612,15 +679,15 @@ def concrete(self):
if self._concrete:
return True
self._concrete = bool(not self.virtual and
self.namespace is not None and
self.versions.concrete and
self.variants.concrete and
self.architecture and
self.compiler and
self.compiler.concrete and
self.compiler_flags.concrete and
self.dependencies.concrete)
self._concrete = bool(not self.virtual
and self.namespace is not None
and self.versions.concrete
and self.variants.concrete
and self.architecture
and self.architecture.concrete
and self.compiler and self.compiler.concrete
and self.compiler_flags.concrete
and self.dependencies.concrete)
return self._concrete
def traverse(self, visited=None, d=0, **kwargs):
@ -658,7 +725,7 @@ def traverse(self, visited=None, d=0, **kwargs):
in the traversal.
root [=True]
If false, this won't yield the root node, just its descendents.
If False, this won't yield the root node, just its descendents.
direction [=children|parents]
If 'children', does a traversal of this spec's children. If
@ -756,7 +823,7 @@ def to_node_dict(self):
'parameters' : params,
'arch' : self.architecture,
'dependencies' : dict((d, self.dependencies[d].dag_hash())
for d in sorted(self.dependencies)),
for d in sorted(self.dependencies))
}
# Older concrete specs do not have a namespace. Omit for
@ -764,6 +831,13 @@ def to_node_dict(self):
if not self.concrete or self.namespace:
d['namespace'] = self.namespace
if self.architecture:
# TODO: Fix the target.to_dict to account for the tuple
# Want it to be a dict of dicts
d['arch'] = self.architecture.to_dict()
else:
d['arch'] = None
if self.compiler:
d.update(self.compiler.to_dict())
else:
@ -789,11 +863,12 @@ def from_node_dict(node):
spec = Spec(name)
spec.namespace = node.get('namespace', None)
spec.versions = VersionList.from_dict(node)
spec.architecture = node['arch']
if 'hash' in node:
spec._hash = node['hash']
spec.architecture = spack.architecture.arch_from_dict(node['arch'])
if node['compiler'] is None:
spec.compiler = None
else:
@ -866,12 +941,10 @@ def _concretize_helper(self, presets=None, visited=None):
# Concretize deps first -- this is a bottom-up process.
for name in sorted(self.dependencies.keys()):
changed |= self.dependencies[
name]._concretize_helper(presets, visited)
changed |= self.dependencies[name]._concretize_helper(presets, visited)
if self.name in presets:
changed |= self.constrain(presets[self.name])
else:
# Concretize virtual dependencies last. Because they're added
# to presets below, their constraints will all be merged, but we'll
@ -936,11 +1009,12 @@ def _expand_virtual_packages(self):
"""
# Make an index of stuff this spec already provides
self_index = ProviderIndex(self.traverse(), restrict=True)
changed = False
done = False
while not done:
done = True
for spec in list(self.traverse()):
replacement = None
if spec.virtual:
@ -979,24 +1053,25 @@ def _expand_virtual_packages(self):
continue
# If replacement is external then trim the dependencies
if replacement.external:
if replacement.external or replacement.external_module:
if (spec.dependencies):
changed = True
spec.dependencies = DependencyMap()
replacement.dependencies = DependencyMap()
replacement.architecture = self.architecture
# TODO: could this and the stuff in _dup be cleaned up?
def feq(cfield, sfield):
return (not cfield) or (cfield == sfield)
if replacement is spec or (
feq(replacement.name, spec.name) and
if replacement is spec or (feq(replacement.name, spec.name) and
feq(replacement.versions, spec.versions) and
feq(replacement.compiler, spec.compiler) and
feq(replacement.architecture, spec.architecture) and
feq(replacement.dependencies, spec.dependencies) and
feq(replacement.variants, spec.variants) and
feq(replacement.external, spec.external)):
feq(replacement.external, spec.external) and
feq(replacement.external_module, spec.external_module)):
continue
# Refine this spec to the candidate. This uses
# replace_with AND dup so that it can work in
@ -1053,6 +1128,15 @@ def concretize(self):
if s.namespace is None:
s.namespace = spack.repo.repo_for_pkg(s.name).namespace
for s in self.traverse(root=False):
if s.external_module:
compiler = spack.compilers.compiler_for_spec(s.compiler, s.architecture)
for mod in compiler.modules:
load_module(mod)
s.external = get_path_from_module(s.external_module)
# Mark everything in the spec as concrete, as well.
self._mark_concrete()
@ -1253,7 +1337,7 @@ def _normalize_helper(self, visited, spec_deps, provider_index):
# if we descend into a virtual spec, there's nothing more
# to normalize. Concretize will finish resolving it later.
if self.virtual or self.external:
if self.virtual or self.external or self.external_module:
return False
# Combine constraints from package deps with constraints from
@ -1300,7 +1384,6 @@ def normalize(self, force=False):
# Ensure first that all packages & compilers in the DAG exist.
self.validate_names()
# Get all the dependencies into one DependencyMap
spec_deps = self.flat_dependencies(copy=False)
@ -1378,10 +1461,21 @@ def constrain(self, other, deps=True):
raise UnsatisfiableVariantSpecError(self.variants[v],
other.variants[v])
# TODO: Check out the logic here
if self.architecture is not None and other.architecture is not None:
if self.architecture != other.architecture:
if self.architecture.platform is not None and other.architecture.platform is not None:
if self.architecture.platform != other.architecture.platform:
raise UnsatisfiableArchitectureSpecError(self.architecture,
other.architecture)
if self.architecture.platform_os is not None and other.architecture.platform_os is not None:
if self.architecture.platform_os != other.architecture.platform_os:
raise UnsatisfiableArchitectureSpecError(self.architecture,
other.architecture)
if self.architecture.target is not None and other.architecture.target is not None:
if self.architecture.target != other.architecture.target:
raise UnsatisfiableArchitectureSpecError(self.architecture,
other.architecture)
changed = False
if self.compiler is not None and other.compiler is not None:
@ -1395,9 +1489,17 @@ def constrain(self, other, deps=True):
changed |= self.compiler_flags.constrain(other.compiler_flags)
old = self.architecture
old = str(self.architecture)
if self.architecture is None or other.architecture is None:
self.architecture = self.architecture or other.architecture
changed |= (self.architecture != old)
else:
if self.architecture.platform is None or other.architecture.platform is None:
self.architecture.platform = self.architecture.platform or other.architecture.platform
if self.architecture.platform_os is None or other.architecture.platform_os is None:
self.architecture.platform_os = self.architecture.platform_os or other.architecture.platform_os
if self.architecture.target is None or other.architecture.target is None:
self.architecture.target = self.architecture.target or other.architecture.target
changed |= (str(self.architecture) != old)
if deps:
changed |= self._constrain_dependencies(other)
@ -1524,9 +1626,14 @@ def satisfies(self, other, deps=True, strict=False):
# Architecture satisfaction is currently just string equality.
# If not strict, None means unconstrained.
if self.architecture and other.architecture:
if self.architecture != other.architecture:
if ((self.architecture.platform and other.architecture.platform and self.architecture.platform != other.architecture.platform) or
(self.architecture.platform_os and other.architecture.platform_os and self.architecture.platform_os != other.architecture.platform_os) or
(self.architecture.target and other.architecture.target and self.architecture.target != other.architecture.target)):
return False
elif strict and (other.architecture and not self.architecture):
elif strict and ((other.architecture and not self.architecture) or
(other.architecture.platform and not self.architecture.platform) or
(other.architecture.platform_os and not self.architecture.platform_os) or
(other.architecture.target and not self.architecture.target)):
return False
if not self.compiler_flags.satisfies(
@ -1601,20 +1708,17 @@ def _dup(self, other, **kwargs):
Options:
dependencies[=True]
Whether deps should be copied too. Set to false to copy a
Whether deps should be copied too. Set to False to copy a
spec but not its dependencies.
"""
# We don't count dependencies as changes here
changed = True
if hasattr(self, 'name'):
changed = (self.name != other.name and
self.versions != other.versions and
self.architecture != other.architecture and
self.compiler != other.compiler and
self.variants != other.variants and
self._normal != other._normal and
self.concrete != other.concrete and
self.external != other.external)
changed = (self.name != other.name and self.versions != other.versions and \
self.architecture != other.architecture and self.compiler != other.compiler and \
self.variants != other.variants and self._normal != other._normal and \
self.concrete != other.concrete and self.external != other.external and \
self.external_module != other.external_module and self.compiler_flags != other.compiler_flags)
# Local node attributes get copied first.
self.name = other.name
@ -1628,6 +1732,7 @@ def _dup(self, other, **kwargs):
self.variants = other.variants.copy()
self.variants.spec = self
self.external = other.external
self.external_module = other.external_module
self.namespace = other.namespace
self._hash = other._hash
@ -1648,6 +1753,7 @@ def _dup(self, other, **kwargs):
self._normal = other._normal
self._concrete = other._concrete
self.external = other.external
self.external_module = other.external_module
return changed
def copy(self, **kwargs):
@ -1752,6 +1858,7 @@ def _cmp_node(self):
self.compiler,
self.compiler_flags)
def eq_node(self, other):
"""Equality with another spec, not including dependencies."""
return self._cmp_node() == other._cmp_node()
@ -1862,7 +1969,7 @@ def write(s, c):
if self.variants:
write(fmt % str(self.variants), c)
elif c == '=':
if self.architecture:
if self.architecture and str(self.architecture):
write(fmt % (' arch' + c + str(self.architecture)), c)
elif c == '#':
out.write('-' + fmt % (self.dag_hash(7)))
@ -1920,8 +2027,8 @@ def write(s, c):
if self.variants:
write(fmt % str(self.variants), '+')
elif named_str == 'ARCHITECTURE':
if self.architecture:
write(fmt % str(self.architecture), '=')
if self.architecture and str(self.architecture):
write(fmt % str(self.architecture), ' arch=')
elif named_str == 'SHA1':
if self.dependencies:
out.write(fmt % str(self.dag_hash(7)))
@ -1946,6 +2053,41 @@ def write(s, c):
def dep_string(self):
return ''.join("^" + dep.format() for dep in self.sorted_deps())
def __cmp__(self, other):
#Package name sort order is not configurable, always goes alphabetical
if self.name != other.name:
return cmp(self.name, other.name)
#Package version is second in compare order
pkgname = self.name
if self.versions != other.versions:
return spack.pkgsort.version_compare(pkgname,
self.versions, other.versions)
#Compiler is third
if self.compiler != other.compiler:
return spack.pkgsort.compiler_compare(pkgname,
self.compiler, other.compiler)
#Variants
if self.variants != other.variants:
return spack.pkgsort.variant_compare(pkgname,
self.variants, other.variants)
#Target
if self.architecture != other.architecture:
return spack.pkgsort.architecture_compare(pkgname,
self.architecture, other.architecture)
#Dependency is not configurable
if self.dependencies != other.dependencies:
return -1 if self.dependencies < other.dependencies else 1
#Equal specs
return 0
def __str__(self):
return self.format() + self.dep_string()
@ -2015,15 +2157,17 @@ def __init__(self):
(r'\s+', lambda scanner, val: None)])
# Lexer is always the same for every parser.
_lexer = SpecLexer()
class SpecParser(spack.parse.Parser):
def __init__(self):
super(SpecParser, self).__init__(SpecLexer())
super(SpecParser, self).__init__(_lexer)
self.previous = None
def do_parse(self):
specs = []
try:
while self.next:
# TODO: clean this parsing up a bit
@ -2067,6 +2211,12 @@ def do_parse(self):
except spack.parse.ParseError, e:
raise SpecParseError(e)
# If the spec has an os or a target and no platform, give it the default platform
for spec in specs:
for s in spec.traverse():
if s.architecture.os_string or s.architecture.target_string:
s._set_platform(spack.architecture.sys_type())
return specs
def parse_compiler(self, text):
@ -2108,9 +2258,10 @@ def spec(self, name, check_valid_token=False):
spec.name = spec_name
spec.versions = VersionList()
spec.variants = VariantMap(spec)
spec.architecture = None
spec.architecture = spack.architecture.Arch()
spec.compiler = None
spec.external = None
spec.external_module = None
spec.compiler_flags = FlagMap(spec)
spec.dependents = DependencyMap()
spec.dependencies = DependencyMap()
@ -2186,12 +2337,6 @@ def variant(self, name=None):
self.check_identifier()
return self.token.value
def architecture(self):
# TODO: Make this work properly as a subcase of variant (includes
# adding names to grammar)
self.expect(ID)
return self.token.value
def version(self):
start = None
end = None

View File

@ -31,14 +31,15 @@
from llnl.util.tty.colify import colify
from spack.test.tally_plugin import Tally
"""Names of tests to be included in Spack's test suite"""
test_names = ['versions', 'url_parse', 'url_substitution', 'packages', 'stage',
test_names = ['architecture', 'versions', 'url_parse', 'url_substitution', 'packages', 'stage',
'spec_syntax', 'spec_semantics', 'spec_dag', 'concretize',
'multimethod', 'install', 'package_sanity', 'config',
'directory_layout', 'pattern', 'python_version', 'git_fetch',
'svn_fetch', 'hg_fetch', 'mirror', 'modules', 'url_extrapolate',
'cc', 'link_tree', 'spec_yaml', 'optional_deps',
'make_executable', 'configure_guess', 'lock', 'database',
'namespace_trie', 'yaml', 'sbang', 'environment',
'namespace_trie', 'yaml', 'sbang', 'environment', 'cmd.find',
'cmd.uninstall', 'cmd.test_install']

View File

@ -0,0 +1,112 @@
""" Test checks if the architecture class is created correctly and also that
the functions are looking for the correct architecture name
"""
import unittest
import os
import platform as py_platform
import spack
from spack.architecture import *
from spack.spec import *
from spack.platforms.cray_xc import CrayXc
from spack.platforms.linux import Linux
from spack.platforms.bgq import Bgq
from spack.platforms.darwin import Darwin
from spack.test.mock_packages_test import *
#class ArchitectureTest(unittest.TestCase):
class ArchitectureTest(MockPackagesTest):
def setUp(self):
    """Set up the mock package environment and remember the host platform."""
    super(ArchitectureTest, self).setUp()
    # sys_type() returns the Platform instance auto-detected for this host.
    self.platform = sys_type()
def tearDown(self):
    """Tear down the mock package environment."""
    super(ArchitectureTest, self).tearDown()
def test_dict_functions_for_architecture(self):
    """Round-trip an Arch through to_dict/arch_from_dict and check types."""
    arch = Arch()
    arch.platform = spack.architecture.sys_type()
    arch.platform_os = arch.platform.operating_system('default_os')
    arch.target = arch.platform.target('default_target')

    new_arch = spack.architecture.arch_from_dict(arch.to_dict())

    # The round-tripped arch must compare equal to the original...
    self.assertEqual(arch, new_arch)

    # ...and both must carry properly-typed components.
    for instance in (arch, new_arch):
        self.assertTrue(isinstance(instance, Arch))
        self.assertTrue(isinstance(instance.platform, Platform))
        self.assertTrue(isinstance(instance.platform_os, OperatingSystem))
        self.assertTrue(isinstance(instance.target, Target))
def test_sys_type(self):
output_platform_class = sys_type()
my_arch_class = None
if os.path.exists('/opt/cray/craype'):
my_platform_class = CrayXc()
elif os.path.exists('/bgsys'):
my_platform_class = Bgq()
elif 'Linux' in py_platform.system():
my_platform_class = Linux()
elif 'Darwin' in py_platform.system():
my_platform_class = Darwin()
self.assertEqual(str(output_platform_class), str(my_platform_class))
def test_user_front_end_input(self):
"""Test when user inputs just frontend that both the frontend target
and frontend operating system match
"""
frontend_os = self.platform.operating_system("frontend")
frontend_target = self.platform.target("frontend")
frontend_spec = Spec("libelf os=frontend target=frontend")
frontend_spec.concretize()
self.assertEqual(frontend_os, frontend_spec.architecture.platform_os)
self.assertEqual(frontend_target, frontend_spec.architecture.target)
def test_user_back_end_input(self):
"""Test when user inputs backend that both the backend target and
backend operating system match
"""
backend_os = self.platform.operating_system("backend")
backend_target = self.platform.target("backend")
backend_spec = Spec("libelf os=backend target=backend")
backend_spec.concretize()
self.assertEqual(backend_os, backend_spec.architecture.platform_os)
self.assertEqual(backend_target, backend_spec.architecture.target)
def test_user_defaults(self):
default_os = self.platform.operating_system("default_os")
default_target = self.platform.target("default_target")
default_spec = Spec("libelf") # default is no args
default_spec.concretize()
self.assertEqual(default_os, default_spec.architecture.platform_os)
self.assertEqual(default_target, default_spec.architecture.target)
def test_user_input_combination(self):
os_list = self.platform.operating_sys.keys()
target_list = self.platform.targets.keys()
additional = ["fe", "be", "frontend", "backend"]
os_list.extend(additional)
target_list.extend(additional)
combinations = itertools.product(os_list, target_list)
results = []
for arch in combinations:
o,t = arch
spec = Spec("libelf os=%s target=%s" % (o, t))
spec.concretize()
results.append(spec.architecture.platform_os == self.platform.operating_system(o))
results.append(spec.architecture.target == self.platform.target(t))
res = all(results)
self.assertTrue(res)

View File

@ -0,0 +1,60 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import spack.cmd.find
import unittest
class Bunch(object):
    """Simple attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
class FindTest(unittest.TestCase):
    """Tests for the argument-to-query translation in ``spack find``."""

    def test_query_arguments(self):
        query_arguments = spack.cmd.find.query_arguments

        # Default arguments: only installed specs, any known/explicit state.
        args = Bunch(only_missing=False, missing=False,
                     unknown=False, explicit=False, implicit=False)

        q_args = query_arguments(args)
        # assertIn gives clearer failure messages than assertTrue(x in d).
        self.assertIn('installed', q_args)
        self.assertIn('known', q_args)
        self.assertIn('explicit', q_args)
        self.assertEqual(q_args['installed'], True)
        self.assertEqual(q_args['known'], any)
        self.assertEqual(q_args['explicit'], any)

        # --explicit restricts the query to explicitly installed specs.
        args.explicit = True
        q_args = query_arguments(args)
        self.assertEqual(q_args['explicit'], True)

        # --implicit restricts the query to implicitly installed specs.
        args.explicit = False
        args.implicit = True
        q_args = query_arguments(args)
        self.assertEqual(q_args['explicit'], False)

        # --explicit and --implicit together are contradictory: the
        # command bails out with SystemExit.
        args.explicit = True
        self.assertRaises(SystemExit, query_arguments, args)

View File

@ -23,6 +23,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import spack
import spack.architecture
from spack.spec import Spec, CompilerSpec
from spack.version import ver
from spack.concretize import find_spec
@ -253,6 +254,19 @@ def test_external_package(self):
self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
def test_external_package_module(self):
# No tcl modules on darwin/linux machines
# TODO: improved way to check for this.
if (spack.architecture.sys_type().name == 'darwin' or
spack.architecture.sys_type().name == 'linux'):
return
spec = Spec('externalmodule')
spec.concretize()
self.assertEqual(spec['externalmodule'].external_module, 'external-module')
self.assertFalse('externalprereq' in spec)
self.assertTrue(spec['externalmodule'].compiler.satisfies('gcc'))
def test_nobuild_package(self):
got_error = False
spec = Spec('externaltool%clang')

View File

@ -32,45 +32,75 @@
from spack.test.mock_packages_test import *
# Some sample compiler config data
a_comps = {
"x86_64_E5v2_IntelIB": {
"gcc@4.7.3" : {
a_comps = [
{'compiler': {
'paths': {
"cc" : "/gcc473",
"cxx": "/g++473",
"f77": None,
"fc" : None },
"gcc@4.5.0" : {
"fc" : None
},
'modules': None,
'spec': 'gcc@4.7.3',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc" : "/gcc450",
"cxx": "/g++450",
"f77": "/gfortran",
"fc" : "/gfortran" },
"clang@3.3" : {
"f77": 'gfortran',
"fc" : 'gfortran'
},
'modules': None,
'spec': 'gcc@4.5.0',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc" : "<overwritten>",
"cxx": "<overwritten>",
"f77": "<overwritten>",
"fc" : "<overwritten>" }
}
}
"f77": '<overwritten>',
"fc" : '<overwritten>' },
'modules': None,
'spec': 'clang@3.3',
'operating_system': 'CNL10'
}}
]
b_comps = {
"x86_64_E5v3": {
"icc@10.0" : {
b_comps = [
{'compiler': {
'paths': {
"cc" : "/icc100",
"cxx": "/icc100",
"cxx": "/icp100",
"f77": None,
"fc" : None },
"icc@11.1" : {
"fc" : None
},
'modules': None,
'spec': 'icc@10.0',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc" : "/icc111",
"cxx": "/icp111",
"f77": "/ifort",
"fc" : "/ifort" },
"clang@3.3" : {
"cc" : "/clang",
"cxx": "/clang++",
"f77": None,
"fc" : None}
}
}
"f77": 'ifort',
"fc" : 'ifort'
},
'modules': None,
'spec': 'icc@11.1',
'operating_system': 'CNL10'
}},
{'compiler': {
'paths': {
"cc" : "<overwritten>",
"cxx": "<overwritten>",
"f77": '<overwritten>',
"fc" : '<overwritten>' },
'modules': None,
'spec': 'clang@3.3',
'operating_system': 'CNL10'
}}
]
# Some Sample repo data
repos_low = [ "/some/path" ]
@ -89,14 +119,27 @@ def tearDown(self):
super(ConfigTest, self).tearDown()
shutil.rmtree(self.tmp_dir, True)
def check_config(self, comps, arch, *compiler_names):
def check_config(self, comps, *compiler_names):
"""Check that named compilers in comps match Spack's config."""
config = spack.config.get_config('compilers')
compiler_list = ['cc', 'cxx', 'f77', 'fc']
for key in compiler_names:
param_list = ['modules', 'paths', 'spec', 'operating_system']
for compiler in config:
conf = compiler['compiler']
if conf['spec'] in compiler_names:
comp = None
for c in comps:
if c['compiler']['spec'] == conf['spec']:
comp = c['compiler']
break
if not comp:
self.fail('Bad config spec')
for p in param_list:
self.assertEqual(conf[p], comp[p])
for c in compiler_list:
expected = comps[arch][key][c]
actual = config[arch][key][c]
expected = comp['paths'][c]
actual = conf['paths'][c]
self.assertEqual(expected, actual)
def test_write_list_in_memory(self):
@ -111,8 +154,9 @@ def test_write_key_in_memory(self):
spack.config.update_config('compilers', b_comps, 'test_high_priority')
# Make sure the config looks how we expect.
self.check_config(a_comps, 'x86_64_E5v2_IntelIB', 'gcc@4.7.3', 'gcc@4.5.0')
self.check_config(b_comps, 'x86_64_E5v3', 'icc@10.0', 'icc@11.1', 'clang@3.3')
self.check_config(a_comps, 'gcc@4.7.3', 'gcc@4.5.0')
self.check_config(b_comps, 'icc@10.0', 'icc@11.1', 'clang@3.3')
def test_write_key_to_disk(self):
# Write b_comps "on top of" a_comps.
@ -123,8 +167,8 @@ def test_write_key_to_disk(self):
spack.config.clear_config_caches()
# Same check again, to ensure consistency.
self.check_config(a_comps, 'x86_64_E5v2_IntelIB', 'gcc@4.7.3', 'gcc@4.5.0')
self.check_config(b_comps, 'x86_64_E5v3', 'icc@10.0', 'icc@11.1', 'clang@3.3')
self.check_config(a_comps, 'gcc@4.7.3', 'gcc@4.5.0')
self.check_config(b_comps, 'icc@10.0', 'icc@11.1', 'clang@3.3')
def test_write_to_same_priority_file(self):
# Write b_comps in the same file as a_comps.
@ -135,5 +179,5 @@ def test_write_to_same_priority_file(self):
spack.config.clear_config_caches()
# Same check again, to ensure consistency.
self.check_config(a_comps, 'x86_64_E5v2_IntelIB', 'gcc@4.7.3', 'gcc@4.5.0')
self.check_config(b_comps, 'x86_64_E5v3', 'icc@10.0', 'icc@11.1', 'clang@3.3')
self.check_config(a_comps, 'gcc@4.7.3', 'gcc@4.5.0')
self.check_config(b_comps, 'icc@10.0', 'icc@11.1', 'clang@3.3')

View File

@ -24,17 +24,20 @@
##############################################################################
import unittest
import os
import copy
from spack.environment import EnvironmentModifications
class EnvironmentTest(unittest.TestCase):
def setUp(self):
os.environ.clear()
os.environ['UNSET_ME'] = 'foo'
os.environ['EMPTY_PATH_LIST'] = ''
os.environ['PATH_LIST'] = '/path/second:/path/third'
os.environ['REMOVE_PATH_LIST'] = '/a/b:/duplicate:/a/c:/remove/this:/a/d:/duplicate/:/f/g'
def tearDown(self):
pass
def test_set(self):
env = EnvironmentModifications()
env.set('A', 'dummy value')

View File

@ -34,20 +34,127 @@
from spack.repository import RepoPath
from spack.spec import Spec
platform = spack.architecture.sys_type()
linux_os_name = 'debian'
linux_os_version = '6'
if platform.name == 'linux':
linux_os = platform.operating_system("default_os")
linux_os_name = linux_os.name
linux_os_version = linux_os.version
mock_compiler_config = """\
compilers:
all:
clang@3.3:
- compiler:
spec: clang@3.3
operating_system: {0}{1}
paths:
cc: /path/to/clang
cxx: /path/to/clang++
f77: None
fc: None
gcc@4.5.0:
modules: 'None'
- compiler:
spec: gcc@4.5.0
operating_system: {0}{1}
paths:
cc: /path/to/gcc
cxx: /path/to/g++
f77: None
fc: None
modules: 'None'
- compiler:
spec: clang@3.3
operating_system: CNL10
paths:
cc: /path/to/clang
cxx: /path/to/clang++
f77: None
fc: None
modules: 'None'
- compiler:
spec: clang@3.3
operating_system: SuSE11
paths:
cc: /path/to/clang
cxx: /path/to/clang++
f77: None
fc: None
modules: 'None'
- compiler:
spec: clang@3.3
operating_system: redhat6
paths:
cc: /path/to/clang
cxx: /path/to/clang++
f77: None
fc: None
modules: 'None'
- compiler:
spec: clang@3.3
operating_system: yosemite
paths:
cc: /path/to/clang
cxx: /path/to/clang++
f77: None
fc: None
modules: 'None'
- compiler:
paths:
cc: /path/to/gcc
cxx: /path/to/g++
f77: /path/to/gfortran
fc: /path/to/gfortran
"""
operating_system: CNL10
spec: gcc@4.5.0
modules: 'None'
- compiler:
paths:
cc: /path/to/gcc
cxx: /path/to/g++
f77: /path/to/gfortran
fc: /path/to/gfortran
operating_system: SuSE11
spec: gcc@4.5.0
modules: 'None'
- compiler:
paths:
cc: /path/to/gcc
cxx: /path/to/g++
f77: /path/to/gfortran
fc: /path/to/gfortran
operating_system: redhat6
spec: gcc@4.5.0
modules: 'None'
- compiler:
paths:
cc: /path/to/gcc
cxx: /path/to/g++
f77: /path/to/gfortran
fc: /path/to/gfortran
operating_system: yosemite
spec: gcc@4.5.0
modules: 'None'
- compiler:
paths:
cc: /path/to/gcc
cxx: /path/to/g++
f77: /path/to/gfortran
fc: /path/to/gfortran
operating_system: elcapitan
spec: gcc@4.5.0
modules: 'None'
- compiler:
spec: clang@3.3
operating_system: elcapitan
paths:
cc: /path/to/clang
cxx: /path/to/clang++
f77: None
fc: None
modules: 'None'
""".format(linux_os_name, linux_os_version)
mock_packages_config = """\
packages:
@ -60,6 +167,10 @@
paths:
externalvirtual@2.0%clang@3.3: /path/to/external_virtual_clang
externalvirtual@1.0%gcc@4.5.0: /path/to/external_virtual_gcc
externalmodule:
buildable: False
modules:
externalmodule@1.0%gcc@4.5.0: external-module
"""
class MockPackagesTest(unittest.TestCase):

View File

@ -92,6 +92,9 @@ def mock_open(filename, mode):
'all': {
'filter': {'environment_blacklist': ['CMAKE_PREFIX_PATH']}
},
'platform=test target=x86_64': {
'environment': {'set': {'FOO': 'foo'},
'unset': ['BAR']}
'arch=x86-linux': {
'environment': {
'set': {'FOO': 'foo'},
@ -201,6 +204,7 @@ def tearDown(self):
def get_modulefile_content(self, spec):
spec.concretize()
print spec, '&&&&&'
generator = spack.modules.TclModule(spec)
generator.write()
content = FILE_REGISTRY[generator.file_name].split('\n')
@ -208,7 +212,7 @@ def get_modulefile_content(self, spec):
def test_simple_case(self):
spack.modules.CONFIGURATION = configuration_autoload_direct
spec = spack.spec.Spec('mpich@3.0.4 arch=x86-linux')
spec = spack.spec.Spec('mpich@3.0.4')
content = self.get_modulefile_content(spec)
self.assertTrue('module-whatis "mpich @3.0.4"' in content)
self.assertRaises(TypeError, spack.modules.dependencies,
@ -216,13 +220,13 @@ def test_simple_case(self):
def test_autoload(self):
spack.modules.CONFIGURATION = configuration_autoload_direct
spec = spack.spec.Spec('mpileaks arch=x86-linux')
spec = spack.spec.Spec('mpileaks')
content = self.get_modulefile_content(spec)
self.assertEqual(len([x for x in content if 'is-loaded' in x]), 2)
self.assertEqual(len([x for x in content if 'module load ' in x]), 2)
spack.modules.CONFIGURATION = configuration_autoload_all
spec = spack.spec.Spec('mpileaks arch=x86-linux')
spec = spack.spec.Spec('mpileaks')
content = self.get_modulefile_content(spec)
self.assertEqual(len([x for x in content if 'is-loaded' in x]), 5)
self.assertEqual(len([x for x in content if 'module load ' in x]), 5)
@ -240,8 +244,9 @@ def test_prerequisites(self):
def test_alter_environment(self):
spack.modules.CONFIGURATION = configuration_alter_environment
spec = spack.spec.Spec('mpileaks arch=x86-linux')
spec = spack.spec.Spec('mpileaks platform=test target=x86_64')
content = self.get_modulefile_content(spec)
print content
self.assertEqual(
len([x
for x in content
@ -250,8 +255,9 @@ def test_alter_environment(self):
len([x for x in content if 'setenv FOO "foo"' in x]), 1)
self.assertEqual(len([x for x in content if 'unsetenv BAR' in x]), 1)
spec = spack.spec.Spec('libdwarf arch=x64-linux')
spec = spack.spec.Spec('libdwarf %clang platform=test target=x86_32')
content = self.get_modulefile_content(spec)
print content
self.assertEqual(
len([x
for x in content
@ -266,7 +272,7 @@ def test_alter_environment(self):
def test_blacklist(self):
spack.modules.CONFIGURATION = configuration_blacklist
spec = spack.spec.Spec('mpileaks arch=x86-linux')
spec = spack.spec.Spec('mpileaks')
content = self.get_modulefile_content(spec)
print('\n'.join(content))
self.assertEqual(len([x for x in content if 'is-loaded' in x]), 1)
@ -282,7 +288,7 @@ def test_blacklist(self):
def test_conflicts(self):
spack.modules.CONFIGURATION = configuration_conflicts
spec = spack.spec.Spec('mpileaks arch=x86-linux')
spec = spack.spec.Spec('mpileaks')
content = self.get_modulefile_content(spec)
self.assertEqual(
len([x for x in content if x.startswith('conflict')]), 2)

View File

@ -92,21 +92,18 @@ def test_default_works(self):
self.assertEqual(pkg.has_a_default(), 'default')
def test_architecture_match(self):
pkg = spack.repo.get('multimethod arch=x86_64')
self.assertEqual(pkg.different_by_architecture(), 'x86_64')
def test_target_match(self):
platform = spack.architecture.sys_type()
targets = platform.targets.values()
for target in targets[:-1]:
pkg = spack.repo.get('multimethod target='+target.name)
self.assertEqual(pkg.different_by_target(), target.name)
pkg = spack.repo.get('multimethod arch=ppc64')
self.assertEqual(pkg.different_by_architecture(), 'ppc64')
pkg = spack.repo.get('multimethod arch=ppc32')
self.assertEqual(pkg.different_by_architecture(), 'ppc32')
pkg = spack.repo.get('multimethod arch=arm64')
self.assertEqual(pkg.different_by_architecture(), 'arm64')
pkg = spack.repo.get('multimethod arch=macos')
self.assertRaises(NoSuchMethodError, pkg.different_by_architecture)
pkg = spack.repo.get('multimethod target='+targets[-1].name)
if len(targets) == 1:
self.assertEqual(pkg.different_by_target(), targets[-1].name)
else:
self.assertRaises(NoSuchMethodError, pkg.different_by_target)
def test_dependency_match(self):

View File

@ -0,0 +1,55 @@
""" Test checks if the operating_system class is created correctly and that
the functions are using the correct operating_system. Also checks whether
the operating_system correctly uses the compiler_strategy
"""
import unittest
import os
import platform
from spack.platforms.cray_xc import CrayXc
from spack.platforms.linux import Linux
from spack.platforms.darwin import Darwin
from spack.operating_system.linux_distro import LinuxDistro
from spack.operating_system.mac_os import MacOs
from spack.operating_system.cnl import ComputeNodeLinux
class TestOperatingSystem(unittest.TestCase):
    """Checks that each platform hands back the right operating-system
    class for its front end / back end, and that each operating system
    advertises the expected compiler-detection strategy."""

    def setUp(self):
        cray_xc = CrayXc()
        linux = Linux()
        darwin = Darwin()
        self.cray_operating_sys = cray_xc.operating_system('front_os')
        self.cray_default_os = cray_xc.operating_system('default_os')
        self.cray_back_os = cray_xc.operating_system('back_os')
        self.darwin_operating_sys = darwin.operating_system('default_os')
        self.linux_operating_sys = linux.operating_system('default_os')

    # Note: assertEquals is a deprecated alias; use assertEqual.
    def test_cray_front_end_operating_system(self):
        self.assertIsInstance(self.cray_operating_sys, LinuxDistro)

    def test_cray_front_end_compiler_strategy(self):
        self.assertEqual(self.cray_operating_sys.compiler_strategy, "PATH")

    def test_cray_back_end_operating_system(self):
        self.assertIsInstance(self.cray_back_os, ComputeNodeLinux)

    def test_cray_back_end_compiler_strategy(self):
        self.assertEqual(self.cray_back_os.compiler_strategy, "MODULES")

    def test_linux_operating_system(self):
        self.assertIsInstance(self.linux_operating_sys, LinuxDistro)

    def test_linux_compiler_strategy(self):
        self.assertEqual(self.linux_operating_sys.compiler_strategy, "PATH")

    def test_cray_front_end_compiler_list(self):
        """ Operating systems will now be in charge of finding compilers.
            So, depending on which operating system you want to build for
            or which operating system you are on, then you could detect
            compilers in a certain way. Cray linux environment on the front
            end is just a regular linux distro whereas the Cray linux compute
            node is a stripped down version which modules are important
        """
        # Placeholder: the original ended with assertEquals(True, False),
        # an always-failing assertion.  self.fail keeps the deliberate
        # failure but explains why it fails.
        self.fail("compiler detection by operating system not implemented")

View File

@ -29,6 +29,7 @@
spack/lib/spack/spack/test/mock_packages
"""
import spack
import spack.architecture
import spack.package
from llnl.util.lang import list_modules
@ -241,8 +242,10 @@ def test_unsatisfiable_compiler_version(self):
def test_unsatisfiable_architecture(self):
self.set_pkg_dep('mpileaks', 'mpich arch=bgqos_0')
spec = Spec('mpileaks ^mpich arch=sles_10_ppc64 ^callpath ^dyninst ^libelf ^libdwarf')
platform = spack.architecture.sys_type()
self.set_pkg_dep('mpileaks', 'mpich platform=test target=be')
spec = Spec('mpileaks ^mpich platform=test target=fe ^callpath ^dyninst ^libelf ^libdwarf')
self.assertRaises(spack.spec.UnsatisfiableArchitectureSpecError, spec.normalize)

View File

@ -23,6 +23,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import unittest
import spack.architecture
from spack.spec import *
from spack.test.mock_packages_test import *
@ -107,7 +108,8 @@ def test_satisfies_namespace(self):
def test_satisfies_namespaced_dep(self):
"""Ensure spec from same or unspecified namespace satisfies namespace constraint."""
"""Ensure spec from same or unspecified namespace satisfies namespace
constraint."""
self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich')
self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi')
@ -139,11 +141,16 @@ def test_satisfies_compiler_version(self):
def test_satisfies_architecture(self):
self.check_satisfies('foo arch=chaos_5_x86_64_ib', ' arch=chaos_5_x86_64_ib')
self.check_satisfies('foo arch=bgqos_0', ' arch=bgqos_0')
self.check_unsatisfiable('foo arch=bgqos_0', ' arch=chaos_5_x86_64_ib')
self.check_unsatisfiable('foo arch=chaos_5_x86_64_ib', ' arch=bgqos_0')
platform = spack.architecture.sys_type()
self.check_satisfies(
'foo platform=test target=frontend os=frontend',
'platform=test target=frontend os=frontend')
self.check_satisfies(
'foo platform=test target=backend os=backend',
'platform=test target=backend', 'platform=test os=backend')
self.check_satisfies(
'foo platform=test target=default_target os=default_os',
'platform=test target=default_target os=default_os')
def test_satisfies_dependencies(self):
@ -158,10 +165,14 @@ def test_satisfies_dependency_versions(self):
self.check_satisfies('mpileaks^mpich@2.0', '^mpich@1:3')
self.check_unsatisfiable('mpileaks^mpich@1.2', '^mpich@2.0')
self.check_satisfies('mpileaks^mpich@2.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')
self.check_unsatisfiable('mpileaks^mpich@4.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')
self.check_unsatisfiable('mpileaks^mpich@2.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')
self.check_unsatisfiable('mpileaks^mpich@4.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')
self.check_satisfies(
'mpileaks^mpich@2.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')
self.check_unsatisfiable(
'mpileaks^mpich@4.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')
self.check_unsatisfiable(
'mpileaks^mpich@2.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')
self.check_unsatisfiable(
'mpileaks^mpich@4.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')
def test_satisfies_virtual_dependencies(self):
@ -350,10 +361,13 @@ def test_constrain_compiler_flags(self):
self.check_constrain('libelf cflags="-O3" cppflags="-Wall"', 'libelf cflags="-O3"', 'libelf cflags="-O3" cppflags="-Wall"')
def test_constrain_arch(self):
self.check_constrain('libelf arch=bgqos_0', 'libelf arch=bgqos_0', 'libelf arch=bgqos_0')
self.check_constrain('libelf arch=bgqos_0', 'libelf', 'libelf arch=bgqos_0')
def test_constrain_architecture(self):
self.check_constrain('libelf target=default_target os=default_os',
'libelf target=default_target os=default_os',
'libelf target=default_target os=default_os')
self.check_constrain('libelf target=default_target os=default_os',
'libelf',
'libelf target=default_target os=default_os')
def test_constrain_compiler(self):
self.check_constrain('libelf %gcc@4.4.7', 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7')
@ -369,9 +383,8 @@ def test_invalid_constraint(self):
self.check_invalid_constraint('libelf debug=2', 'libelf debug=1')
self.check_invalid_constraint('libelf cppflags="-O3"', 'libelf cppflags="-O2"')
self.check_invalid_constraint('libelf arch=bgqos_0', 'libelf arch=x86_54')
self.check_invalid_constraint('libelf platform=test target=be os=be',
'libelf target=fe os=fe')
def test_constrain_changed(self):
self.check_constrain_changed('libelf', '@1.0')
@ -382,7 +395,10 @@ def test_constrain_changed(self):
self.check_constrain_changed('libelf', '~debug')
self.check_constrain_changed('libelf', 'debug=2')
self.check_constrain_changed('libelf', 'cppflags="-O3"')
self.check_constrain_changed('libelf', ' arch=bgqos_0')
platform = spack.architecture.sys_type()
self.check_constrain_changed('libelf', 'target='+platform.target('default_target').name)
self.check_constrain_changed('libelf', 'os='+platform.operating_system('default_os').name)
def test_constrain_not_changed(self):
@ -395,9 +411,10 @@ def test_constrain_not_changed(self):
self.check_constrain_not_changed('libelf~debug', '~debug')
self.check_constrain_not_changed('libelf debug=2', 'debug=2')
self.check_constrain_not_changed('libelf cppflags="-O3"', 'cppflags="-O3"')
self.check_constrain_not_changed('libelf arch=bgqos_0', ' arch=bgqos_0')
self.check_constrain_not_changed('libelf^foo', 'libelf^foo')
self.check_constrain_not_changed('libelf^foo^bar', 'libelf^foo^bar')
platform = spack.architecture.sys_type()
default_target = platform.target('default_target').name
self.check_constrain_not_changed('libelf target='+default_target, 'target='+default_target)
def test_constrain_dependency_changed(self):
@ -407,8 +424,10 @@ def test_constrain_dependency_changed(self):
self.check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5')
self.check_constrain_changed('libelf^foo', 'libelf^foo+debug')
self.check_constrain_changed('libelf^foo', 'libelf^foo~debug')
self.check_constrain_changed('libelf^foo', 'libelf^foo cppflags="-O3"')
self.check_constrain_changed('libelf^foo', 'libelf^foo arch=bgqos_0')
platform = spack.architecture.sys_type()
default_target = platform.target('default_target').name
self.check_constrain_changed('libelf^foo', 'libelf^foo target='+default_target)
def test_constrain_dependency_not_changed(self):
@ -419,5 +438,7 @@ def test_constrain_dependency_not_changed(self):
self.check_constrain_not_changed('libelf^foo+debug', 'libelf^foo+debug')
self.check_constrain_not_changed('libelf^foo~debug', 'libelf^foo~debug')
self.check_constrain_not_changed('libelf^foo cppflags="-O3"', 'libelf^foo cppflags="-O3"')
self.check_constrain_not_changed('libelf^foo arch=bgqos_0', 'libelf^foo arch=bgqos_0')
platform = spack.architecture.sys_type()
default_target = platform.target('default_target').name
self.check_constrain_not_changed('libelf^foo target='+default_target, 'libelf^foo target='+default_target)

View File

@ -58,7 +58,7 @@ class SpecSyntaxTest(unittest.TestCase):
# ================================================================================
# Parse checks
# ================================================================================
def check_parse(self, expected, spec=None):
def check_parse(self, expected, spec=None, remove_arch=True):
"""Assert that the provided spec is able to be parsed.
If this is called with one argument, it assumes that the string is
canonical (i.e., no spaces and ~ instead of - for variants) and that it
@ -70,6 +70,7 @@ def check_parse(self, expected, spec=None):
if spec is None:
spec = expected
output = spack.spec.parse(spec)
parsed = (" ".join(str(spec) for spec in output))
self.assertEqual(expected, parsed)

View File

@ -43,7 +43,6 @@ def assert_ver_lt(self, a, b):
self.assertFalse(a > b)
self.assertFalse(a >= b)
def assert_ver_gt(self, a, b):
a, b = ver(a), ver(b)
self.assertTrue(a > b)
@ -53,7 +52,6 @@ def assert_ver_gt(self, a, b):
self.assertFalse(a < b)
self.assertFalse(a <= b)
def assert_ver_eq(self, a, b):
a, b = ver(a), ver(b)
self.assertFalse(a > b)
@ -63,55 +61,43 @@ def assert_ver_eq(self, a, b):
self.assertFalse(a < b)
self.assertTrue(a <= b)
def assert_in(self, needle, haystack):
self.assertTrue(ver(needle) in ver(haystack))
def assert_not_in(self, needle, haystack):
self.assertFalse(ver(needle) in ver(haystack))
def assert_canonical(self, canonical_list, version_list):
self.assertEqual(ver(canonical_list), ver(version_list))
def assert_overlaps(self, v1, v2):
self.assertTrue(ver(v1).overlaps(ver(v2)))
def assert_no_overlap(self, v1, v2):
self.assertFalse(ver(v1).overlaps(ver(v2)))
def assert_satisfies(self, v1, v2):
self.assertTrue(ver(v1).satisfies(ver(v2)))
def assert_does_not_satisfy(self, v1, v2):
self.assertFalse(ver(v1).satisfies(ver(v2)))
def check_intersection(self, expected, a, b):
self.assertEqual(ver(expected), ver(a).intersection(ver(b)))
def check_union(self, expected, a, b):
self.assertEqual(ver(expected), ver(a).union(ver(b)))
def test_two_segments(self):
self.assert_ver_eq('1.0', '1.0')
self.assert_ver_lt('1.0', '2.0')
self.assert_ver_gt('2.0', '1.0')
def test_three_segments(self):
self.assert_ver_eq('2.0.1', '2.0.1')
self.assert_ver_lt('2.0', '2.0.1')
self.assert_ver_gt('2.0.1', '2.0')
def test_alpha(self):
# TODO: not sure whether I like this. 2.0.1a is *usually*
# TODO: less than 2.0.1, but special-casing it makes version
@ -120,7 +106,6 @@ def test_alpha(self):
self.assert_ver_gt('2.0.1a', '2.0.1')
self.assert_ver_lt('2.0.1', '2.0.1a')
def test_patch(self):
self.assert_ver_eq('5.5p1', '5.5p1')
self.assert_ver_lt('5.5p1', '5.5p2')
@ -129,7 +114,6 @@ def test_patch(self):
self.assert_ver_lt('5.5p1', '5.5p10')
self.assert_ver_gt('5.5p10', '5.5p1')
def test_num_alpha_with_no_separator(self):
self.assert_ver_lt('10xyz', '10.1xyz')
self.assert_ver_gt('10.1xyz', '10xyz')
@ -137,7 +121,6 @@ def test_num_alpha_with_no_separator(self):
self.assert_ver_lt('xyz10', 'xyz10.1')
self.assert_ver_gt('xyz10.1', 'xyz10')
def test_alpha_with_dots(self):
self.assert_ver_eq('xyz.4', 'xyz.4')
self.assert_ver_lt('xyz.4', '8')
@ -145,30 +128,25 @@ def test_alpha_with_dots(self):
self.assert_ver_lt('xyz.4', '2')
self.assert_ver_gt('2', 'xyz.4')
def test_nums_and_patch(self):
self.assert_ver_lt('5.5p2', '5.6p1')
self.assert_ver_gt('5.6p1', '5.5p2')
self.assert_ver_lt('5.6p1', '6.5p1')
self.assert_ver_gt('6.5p1', '5.6p1')
def test_rc_versions(self):
self.assert_ver_gt('6.0.rc1', '6.0')
self.assert_ver_lt('6.0', '6.0.rc1')
def test_alpha_beta(self):
self.assert_ver_gt('10b2', '10a1')
self.assert_ver_lt('10a2', '10b2')
def test_double_alpha(self):
self.assert_ver_eq('1.0aa', '1.0aa')
self.assert_ver_lt('1.0a', '1.0aa')
self.assert_ver_gt('1.0aa', '1.0a')
def test_padded_numbers(self):
self.assert_ver_eq('10.0001', '10.0001')
self.assert_ver_eq('10.0001', '10.1')
@ -176,24 +154,20 @@ def test_padded_numbers(self):
self.assert_ver_lt('10.0001', '10.0039')
self.assert_ver_gt('10.0039', '10.0001')
def test_close_numbers(self):
self.assert_ver_lt('4.999.9', '5.0')
self.assert_ver_gt('5.0', '4.999.9')
def test_date_stamps(self):
self.assert_ver_eq('20101121', '20101121')
self.assert_ver_lt('20101121', '20101122')
self.assert_ver_gt('20101122', '20101121')
def test_underscores(self):
self.assert_ver_eq('2_0', '2_0')
self.assert_ver_eq('2.0', '2_0')
self.assert_ver_eq('2_0', '2.0')
def test_rpm_oddities(self):
self.assert_ver_eq('1b.fc17', '1b.fc17')
self.assert_ver_lt('1b.fc17', '1.fc17')
@ -202,7 +176,6 @@ def test_rpm_oddities(self):
self.assert_ver_gt('1g.fc17', '1.fc17')
self.assert_ver_lt('1.fc17', '1g.fc17')
# Stuff below here is not taken from RPM's tests and is
# unique to spack
def test_version_ranges(self):
@ -214,7 +187,6 @@ def test_version_ranges(self):
self.assert_ver_lt('1.2:1.4', '1.5:1.6')
self.assert_ver_gt('1.5:1.6', '1.2:1.4')
def test_contains(self):
self.assert_in('1.3', '1.2:1.4')
self.assert_in('1.2.5', '1.2:1.4')
@ -233,7 +205,6 @@ def test_contains(self):
self.assert_in('1.4.1', '1.2.7:1.4')
self.assert_not_in('1.4.1', '1.2.7:1.4.0')
def test_in_list(self):
self.assert_in('1.2', ['1.5', '1.2', '1.3'])
self.assert_in('1.2.5', ['1.5', '1.2:1.3'])
@ -245,7 +216,6 @@ def test_in_list(self):
self.assert_not_in('1.2.5:1.5', ['1.5', '1.2:1.3'])
self.assert_not_in('1.1:1.2.5', ['1.5', '1.2:1.3'])
def test_ranges_overlap(self):
self.assert_overlaps('1.2', '1.2')
self.assert_overlaps('1.2.1', '1.2.1')
@ -262,7 +232,6 @@ def test_ranges_overlap(self):
self.assert_overlaps(':', '1.6:1.9')
self.assert_overlaps('1.6:1.9', ':')
def test_overlap_with_containment(self):
self.assert_in('1.6.5', '1.6')
self.assert_in('1.6.5', ':1.6')
@ -273,7 +242,6 @@ def test_overlap_with_containment(self):
self.assert_not_in(':1.6', '1.6.5')
self.assert_in('1.6.5', ':1.6')
def test_lists_overlap(self):
self.assert_overlaps('1.2b:1.7,5', '1.6:1.9,1')
self.assert_overlaps('1,2,3,4,5', '3,4,5,6,7')
@ -287,7 +255,6 @@ def test_lists_overlap(self):
self.assert_no_overlap('1,2,3,4,5', '6,7')
self.assert_no_overlap('1,2,3,4,5', '6:7')
def test_canonicalize_list(self):
self.assert_canonical(['1.2', '1.3', '1.4'],
['1.2', '1.3', '1.3', '1.4'])
@ -316,7 +283,6 @@ def test_canonicalize_list(self):
self.assert_canonical([':'],
[':,1.3, 1.3.1,1.3.9,1.4 : 1.5 , 1.3 : 1.4'])
def test_intersection(self):
self.check_intersection('2.5',
'1.0:2.5', '2.5:3.0')
@ -330,7 +296,6 @@ def test_intersection(self):
['1.1:2.7'], ['2.5:3.0', '1.0'])
self.check_intersection(['0:1'], [':'], ['0:1'])
def test_intersect_with_containment(self):
self.check_intersection('1.6.5', '1.6.5', ':1.6')
self.check_intersection('1.6.5', ':1.6', '1.6.5')
@ -338,7 +303,6 @@ def test_intersect_with_containment(self):
self.check_intersection('1.6:1.6.5', ':1.6.5', '1.6')
self.check_intersection('1.6:1.6.5', '1.6', ':1.6.5')
def test_union_with_containment(self):
self.check_union(':1.6', '1.6.5', ':1.6')
self.check_union(':1.6', ':1.6', '1.6.5')
@ -346,8 +310,6 @@ def test_union_with_containment(self):
self.check_union(':1.6', ':1.6.5', '1.6')
self.check_union(':1.6', '1.6', ':1.6.5')
def test_union_with_containment(self):
self.check_union(':', '1.0:', ':2.0')
self.check_union('1:4', '1:3', '2:4')
@ -356,7 +318,6 @@ def test_union_with_containment(self):
# Tests successor/predecessor case.
self.check_union('1:4', '1:2', '3:4')
def test_basic_version_satisfaction(self):
self.assert_satisfies('4.7.3', '4.7.3')
@ -372,7 +333,6 @@ def test_basic_version_satisfaction(self):
self.assert_does_not_satisfy('4.8', '4.9')
self.assert_does_not_satisfy('4', '4.9')
def test_basic_version_satisfaction_in_lists(self):
self.assert_satisfies(['4.7.3'], ['4.7.3'])
@ -388,7 +348,6 @@ def test_basic_version_satisfaction_in_lists(self):
self.assert_does_not_satisfy(['4.8'], ['4.9'])
self.assert_does_not_satisfy(['4'], ['4.9'])
def test_version_range_satisfaction(self):
self.assert_satisfies('4.7b6', '4.3:4.7')
self.assert_satisfies('4.3.0', '4.3:4.7')
@ -400,7 +359,6 @@ def test_version_range_satisfaction(self):
self.assert_satisfies('4.7b6', '4.3:4.7')
self.assert_does_not_satisfy('4.8.0', '4.3:4.7')
def test_version_range_satisfaction_in_lists(self):
self.assert_satisfies(['4.7b6'], ['4.3:4.7'])
self.assert_satisfies(['4.3.0'], ['4.3:4.7'])
@ -423,3 +381,11 @@ def test_satisfaction_with_lists(self):
self.assert_satisfies('4.8.0', '4.2, 4.3:4.8')
self.assert_satisfies('4.8.2', '4.2, 4.3:4.8')
def test_formatted_strings(self):
    """All three spellings of a version format to the same dotted,
    dashed, and underscored strings."""
    for spelling in ('1.2.3', '1_2_3', '1-2-3'):
        v = Version(spelling)
        self.assertEqual(v.dotted, '1.2.3')
        self.assertEqual(v.dashed, '1-2-3')
        self.assertEqual(v.underscored, '1_2_3')

View File

@ -165,6 +165,7 @@ def streamify(arg, mode):
raise ProcessError("Command exited with status %d:" %
proc.returncode, cmd_line)
if output is str or error is str:
result = ''
if output is str:

View File

@ -43,16 +43,16 @@
intersection
concrete
"""
import os
import sys
import re
from bisect import bisect_left
from functools import wraps
from functools_backport import total_ordering
# Valid version characters
VALID_VERSION = r'[A-Za-z0-9_.-]'
def int_if_int(string):
"""Convert a string to int if possible. Otherwise, return a string."""
try:
@ -62,7 +62,8 @@ def int_if_int(string):
def coerce_versions(a, b):
"""Convert both a and b to the 'greatest' type between them, in this order:
"""
Convert both a and b to the 'greatest' type between them, in this order:
Version < VersionRange < VersionList
This is used to simplify comparison operations below so that we're always
comparing things that are of the same type.
@ -105,6 +106,7 @@ def coercing_method(a, b, *args, **kwargs):
@total_ordering
class Version(object):
"""Class to represent versions"""
def __init__(self, string):
string = str(string)
@ -124,6 +126,17 @@ def __init__(self, string):
# last element of separators is ''
self.separators = tuple(re.split(segment_regex, string)[1:-1])
@property
def dotted(self):
return '.'.join(str(x) for x in self.version)
@property
def underscored(self):
return '_'.join(str(x) for x in self.version)
@property
def dashed(self):
return '-'.join(str(x) for x in self.version)
def up_to(self, index):
"""Return a version string up to the specified component, exclusive.
@ -131,15 +144,12 @@ def up_to(self, index):
"""
return '.'.join(str(x) for x in self[:index])
def lowest(self):
return self
def highest(self):
return self
@coerced
def satisfies(self, other):
"""A Version 'satisfies' another if it is at least as specific and has a
@ -151,7 +161,6 @@ def satisfies(self, other):
nother = len(other.version)
return nother <= nself and self.version[:nother] == other.version
def wildcard(self):
"""Create a regex that will match variants of this version string."""
def a_or_n(seg):
@ -181,28 +190,22 @@ def a_or_n(seg):
wc += '(?:[a-z]|alpha|beta)?)?' * (len(segments) - 1)
return wc
def __iter__(self):
return iter(self.version)
def __getitem__(self, idx):
return tuple(self.version[idx])
def __repr__(self):
return self.string
def __str__(self):
return self.string
@property
def concrete(self):
return self
@coerced
def __lt__(self, other):
"""Version comparison is designed for consistency with the way RPM
@ -235,28 +238,23 @@ def __lt__(self, other):
# If the common prefix is equal, the one with more segments is bigger.
return len(self.version) < len(other.version)
@coerced
def __eq__(self, other):
return (other is not None and
type(other) == Version and self.version == other.version)
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self.version)
@coerced
def __contains__(self, other):
if other is None:
return False
return other.version[:len(self.version)] == self.version
def is_predecessor(self, other):
"""True if the other version is the immediate predecessor of this one.
That is, NO versions v exist such that:
@ -269,16 +267,13 @@ def is_predecessor(self, other):
ol = other.version[-1]
return type(sl) == int and type(ol) == int and (ol - sl == 1)
def is_successor(self, other):
return other.is_predecessor(self)
@coerced
def overlaps(self, other):
return self in other or other in self
@coerced
def union(self, other):
if self == other or other in self:
@ -288,7 +283,6 @@ def union(self, other):
else:
return VersionList([self, other])
@coerced
def intersection(self, other):
if self == other:
@ -299,6 +293,7 @@ def intersection(self, other):
@total_ordering
class VersionRange(object):
def __init__(self, start, end):
if isinstance(start, basestring):
start = Version(start)
@ -310,15 +305,12 @@ def __init__(self, start, end):
if start and end and end < start:
raise ValueError("Invalid Version range: %s" % self)
def lowest(self):
return self.start
def highest(self):
return self.end
@coerced
def __lt__(self, other):
"""Sort VersionRanges lexicographically so that they are ordered first
@ -331,28 +323,24 @@ def __lt__(self, other):
s, o = self, other
if s.start != o.start:
return s.start is None or (o.start is not None and s.start < o.start)
return s.start is None or (o.start is not None and s.start < o.start) # NOQA: ignore=E501
return (s.end != o.end and
o.end is None or (s.end is not None and s.end < o.end))
@coerced
def __eq__(self, other):
return (other is not None and
type(other) == VersionRange and
self.start == other.start and self.end == other.end)
def __ne__(self, other):
return not (self == other)
@property
def concrete(self):
return self.start if self.start == self.end else None
@coerced
def __contains__(self, other):
if other is None:
@ -373,10 +361,10 @@ def __contains__(self, other):
other.end in self.end)))
return in_upper
@coerced
def satisfies(self, other):
"""A VersionRange satisfies another if some version in this range
"""
A VersionRange satisfies another if some version in this range
would satisfy some version in the other range. To do this it must
either:
a) Overlap with the other range
@ -404,17 +392,15 @@ def satisfies(self, other):
# satisfy, or overlaps() would've taken care of it.
self.start and other.end and self.start.satisfies(other.end))
@coerced
def overlaps(self, other):
return ((self.start == None or other.end is None or
return ((self.start is None or other.end is None or
self.start <= other.end or
other.end in self.start or self.start in other.end) and
(other.start is None or self.end == None or
(other.start is None or self.end is None or
other.start <= self.end or
other.start in self.end or self.end in other.start))
@coerced
def union(self, other):
if not self.overlaps(other):
@ -442,13 +428,12 @@ def union(self, other):
else:
end = self.end
# TODO: See note in intersection() about < and in discrepancy.
if not other.end in self.end:
if other.end not in self.end:
if end in other.end or other.end > self.end:
end = other.end
return VersionRange(start, end)
@coerced
def intersection(self, other):
if self.overlaps(other):
@ -470,7 +455,7 @@ def intersection(self, other):
# 1.6 < 1.6.5 = True (lexicographic)
# Should 1.6 NOT be less than 1.6.5? Hm.
# Here we test (not end in other.end) first to avoid paradox.
if other.end is not None and not end in other.end:
if other.end is not None and end not in other.end:
if other.end < end or other.end in end:
end = other.end
@ -479,15 +464,12 @@ def intersection(self, other):
else:
return VersionList()
def __hash__(self):
return hash((self.start, self.end))
def __repr__(self):
return self.__str__()
def __str__(self):
out = ""
if self.start:
@ -501,6 +483,7 @@ def __str__(self):
@total_ordering
class VersionList(object):
"""Sorted, non-redundant list of Versions and VersionRanges."""
def __init__(self, vlist=None):
self.versions = []
if vlist is not None:
@ -515,7 +498,6 @@ def __init__(self, vlist=None):
for v in vlist:
self.add(ver(v))
def add(self, version):
if type(version) in (Version, VersionRange):
# This normalizes single-value version ranges.
@ -542,7 +524,6 @@ def add(self, version):
else:
raise TypeError("Can't add %s to VersionList" % type(version))
@property
def concrete(self):
if len(self) == 1:
@ -550,11 +531,9 @@ def concrete(self):
else:
return None
def copy(self):
return VersionList(self)
def lowest(self):
"""Get the lowest version in the list."""
if not self:
@ -562,7 +541,6 @@ def lowest(self):
else:
return self[0].lowest()
def highest(self):
"""Get the highest version in the list."""
if not self:
@ -570,7 +548,6 @@ def highest(self):
else:
return self[-1].highest()
@coerced
def overlaps(self, other):
if not other or not self:
@ -586,7 +563,6 @@ def overlaps(self, other):
o += 1
return False
def to_dict(self):
"""Generate human-readable dict for YAML."""
if self.concrete:
@ -594,7 +570,6 @@ def to_dict(self):
else:
return {'versions': [str(v) for v in self]}
@staticmethod
def from_dict(dictionary):
"""Parse dict from to_dict."""
@ -605,7 +580,6 @@ def from_dict(dictionary):
else:
raise ValueError("Dict must have 'version' or 'versions' in it.")
@coerced
def satisfies(self, other, strict=False):
"""A VersionList satisfies another if some version in the list
@ -633,20 +607,17 @@ def satisfies(self, other, strict=False):
o += 1
return False
@coerced
def update(self, other):
for v in other.versions:
self.add(v)
@coerced
def union(self, other):
result = self.copy()
result.update(other)
return result
@coerced
def intersection(self, other):
# TODO: make this faster. This is O(n^2).
@ -656,7 +627,6 @@ def intersection(self, other):
result.add(s.intersection(o))
return result
@coerced
def intersect(self, other):
"""Intersect this spec's list with other.
@ -683,45 +653,35 @@ def __contains__(self, other):
return True
def __getitem__(self, index):
return self.versions[index]
def __iter__(self):
return iter(self.versions)
def __reversed__(self):
return reversed(self.versions)
def __len__(self):
return len(self.versions)
@coerced
def __eq__(self, other):
return other is not None and self.versions == other.versions
def __ne__(self, other):
return not (self == other)
@coerced
def __lt__(self, other):
return other is not None and self.versions < other.versions
def __hash__(self):
return hash(tuple(self.versions))
def __str__(self):
return ",".join(str(v) for v in self.versions)
def __repr__(self):
return str(self.versions)

View File

@ -0,0 +1,55 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Yaml Version Check is a module for ensuring that config file
formats are compatible with the current version of Spack."""
import os.path
import os
import llnl.util.tty as tty
import spack.util.spack_yaml as syaml
import spack.config
def check_yaml_versions():
    """Run every YAML format-compatibility check.

    Currently this only covers compilers.yaml; new per-file checks
    should be dispatched from here.
    """
    check_compiler_yaml_version()
def check_compiler_yaml_version():
    """Retire compilers.yaml files written in the pre-'operating_system'
    format.

    Scans every configuration scope for a ``compilers.yaml`` file.  A file in
    the old layout (a mapping instead of a list, or list entries whose
    ``compiler`` record lacks an ``operating_system`` key) is renamed to
    ``_old_compilers.yaml`` so Spack can regenerate a fresh compilers config,
    and a warning is printed telling the user where the old file went.
    """
    config_scopes = spack.config.config_scopes
    for scope in config_scopes.values():
        file_name = os.path.join(scope.path, 'compilers.yaml')
        data = None
        if os.path.isfile(file_name):
            with open(file_name) as f:
                data = syaml.load(f)

        if data:
            # Use .get() so a compilers.yaml without a 'compilers' section
            # does not abort Spack startup with a KeyError.
            compilers = data.get('compilers', [])
            if len(compilers) > 0:
                # Old format: either a mapping keyed by compiler spec, or a
                # list whose entries are missing 'operating_system'.
                if (not isinstance(compilers, list) or
                        'operating_system' not in compilers[0]['compiler']):
                    new_file = os.path.join(scope.path,
                                            '_old_compilers.yaml')
                    tty.warn('%s is in an out of date compilers format. '
                             'Moved to %s. Spack will automatically '
                             'generate a new compilers config file.'
                             % (file_name, new_file))
                    os.rename(file_name, new_file)

View File

@ -0,0 +1,40 @@
compilers:
all:
clang@3.3:
cc: /path/to/clang
cxx: /path/to/clang++
f77: None
fc: None
modules: None
strategy: PATH
gcc@4.5.0:
cc: /path/to/gcc
cxx: /path/to/g++
f77: /path/to/gfortran
fc: /path/to/gfortran
modules: None
strategy: PATH
gcc@5.2.0:
cc: cc
cxx: CC
f77: ftn
fc: ftn
modules:
- PrgEnv-gnu
- gcc/5.2.0
strategy: MODULES
intel@15.0.1:
cc: cc
ccx: CC
f77: ftn
fc: ftn
modules:
- PrgEnv-intel
- intel/15.0.1
strategy: MODULES
intel@15.1.2:
cc: /path/to/icc
cxx: /path/to/ic++
f77: /path/to/ifort
fc: /path/to/ifort
strategy: PATH

View File

@ -0,0 +1,38 @@
import os
from spack import *
class Adios(Package):
    """The Adaptable IO System (ADIOS) provides a simple,
    flexible way for scientists to describe the
    data in their code that may need to be written,
    read, or processed outside of the running simulation
    """

    homepage = "http://www.olcf.ornl.gov/center-projects/adios/"
    url = "http://users.nccs.gov/~pnorbert/adios-1.9.0.tar.gz"

    version('1.9.0', 'dbf5cb10e32add2f04c9b4052b7ffa76')

    # Lots of setting up here for this package
    # module swap PrgEnv-intel PrgEnv-$COMP
    # module load cray-netcdf/4.3.3.1
    # module load cray-hdf5/1.8.14
    # module load python/2.7.10
    depends_on('hdf5')
    depends_on('mxml')

    def install(self, spec, prefix):
        # NOTE(review): netcdf support is taken from the NETCDF_DIR
        # environment variable (a Cray module), not from a Spack
        # dependency -- confirm this is intentional.
        args = [
            "--prefix=%s" % prefix,
            "--with-mxml=%s" % spec['mxml'].prefix,
            "--with-hdf5=%s" % spec['hdf5'].prefix,
            "--with-netcdf=%s" % os.environ["NETCDF_DIR"],
            "--with-infiniband=no",
            "MPICC=cc", "MPICXX=CC", "MPIFC=ftn",
            "CPPFLAGS=-DMPICH_IGNORE_CXX_SEEK",
        ]
        if spec.satisfies('%gcc'):
            # Override the Cray compiler wrappers with plain GCC names.
            args += ["CC=gcc", "CXX=g++", "FC=gfortran"]

        configure(*args)
        make()
        make("install")

View File

@ -0,0 +1,26 @@
import os
from spack import *
class Mxml(Package):
    """Mini-XML is a small XML library that you can use to read and write XML
    and XML-like data files in your application without requiring large
    non-standard libraries
    """

    homepage = "http://www.msweet.org"
    url = "http://www.msweet.org/files/project3/mxml-2.9.tar.gz"

    version('2.9', 'e21cad0f7aacd18f942aa0568a8dee19')
    version('2.8', 'd85ee6d30de053581242c4a86e79a5d2')
    version('2.7', '76f2ae49bf0f5745d5cb5d9507774dc9')
    version('2.6', '68977789ae64985dddbd1a1a1652642e')
    version('2.5', 'f706377fba630b39fa02fd63642b17e5')

    # module swap PrgEnv-intel PrgEnv-$COMP (any compiler works)
    def install(self, spec, prefix):
        # Static-only build: shared libraries are disabled and CFLAGS
        # forces static linking.
        opts = ['--prefix=%s' % prefix, "--disable-shared", 'CFLAGS=-static']
        configure(*opts)
        make()
        make("install")

View File

@ -0,0 +1,37 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Externalmodule(Package):
    """Package definition whose install step is a deliberate no-op."""

    homepage = "http://somewhere.com"
    url = "http://somewhere.com/module-1.0.tar.gz"

    version('1.0', '1234567890abcdef1234567890abcdef')

    depends_on('externalprereq')

    def install(self, spec, prefix):
        # Nothing to build or install.
        pass

View File

@ -22,8 +22,11 @@
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import imp
from llnl.util.filesystem import join_path
from spack.util.naming import mod_to_class
from spack import *
import spack.architecture
class Multimethod(Package):
"""This package is designed for use with Spack's multimethod test.
@ -101,25 +104,26 @@ def has_a_default(self):
#
# Make sure we can switch methods on different architectures
# Make sure we can switch methods on different target
#
@when('arch=x86_64')
def different_by_architecture(self):
return 'x86_64'
@when('arch=ppc64')
def different_by_architecture(self):
return 'ppc64'
@when('arch=ppc32')
def different_by_architecture(self):
return 'ppc32'
@when('arch=arm64')
def different_by_architecture(self):
return 'arm64'
# for platform_name in ['cray_xc', 'darwin', 'linux']:
# file_path = join_path(spack.platform_path, platform_name)
# platform_mod = imp.load_source('spack.platforms', file_path + '.py')
# cls = getattr(platform_mod, mod_to_class(platform_name))
# platform = cls()
platform = spack.architecture.sys_type()
targets = platform.targets.values()
if len(targets) > 1:
targets = targets[:-1]
for target in targets:
@when('target='+target.name)
def different_by_target(self):
if isinstance(self.spec.architecture.target,basestring):
return self.spec.architecture.target
else:
return self.spec.architecture.target.name
#
# Make sure we can switch methods on different dependencies
#

View File

@ -0,0 +1,50 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Bertini(Package):
    """Bertini is a general-purpose solver, written in C, that was created
    for research about polynomial continuation. It solves for the numerical
    solution of systems of polynomial equations using homotopy continuation."""

    homepage = "https://bertini.nd.edu/"
    url = "https://bertini.nd.edu/BertiniSource_v1.5.tar.gz"

    version('1.5', 'e3f6cc6e7f9a0cf1d73185e8671af707')

    variant('mpi', default=True, description='Compile in parallel')

    # Build tools
    depends_on('flex')
    depends_on('bison')
    # Arbitrary-precision arithmetic
    depends_on('gmp')
    depends_on('mpfr')
    depends_on('mpi', when='+mpi')

    def install(self, spec, prefix):
        # Plain autotools build; MPI is picked up from the environment
        # when the +mpi variant is active.
        configure('--prefix=%s' % prefix)
        make()
        make("install")

View File

@ -30,8 +30,9 @@ class Binutils(Package):
url="https://ftp.gnu.org/gnu/binutils/binutils-2.25.tar.bz2"
# 2.26 is incompatible with py-pillow build for some reason.
version('2.26', '64146a0faa3b411ba774f47d41de239f')
version('2.25', 'd9f3303f802a5b6b0bb73a335ab89d66')
version('2.25', 'd9f3303f802a5b6b0bb73a335ab89d66', preferred=True)
version('2.24', 'e0f71a7b2ddab0f8612336ac81d9636b')
version('2.23.2', '4f8fa651e35ef262edc01d60fb45702e')
version('2.20.1', '2b9dc8f2b7dbd5ec5992c6e29de0b764')

View File

@ -27,7 +27,7 @@
import sys
import os
import sys
class Boost(Package):
"""Boost provides free peer-reviewed portable C++ source
@ -109,6 +109,7 @@ class Boost(Package):
variant('multithreaded', default=True, description="Build multi-threaded versions of libraries")
variant('singlethreaded', default=True, description="Build single-threaded versions of libraries")
variant('icu_support', default=False, description="Include ICU support (for regex/locale libraries)")
variant('graph', default=False, description="Build the Boost Graph library")
depends_on('icu', when='+icu_support')
depends_on('python', when='+python')
@ -120,15 +121,18 @@ class Boost(Package):
patch('boost_11856.patch', when='@1.60.0%gcc@4.4.7')
def url_for_version(self, version):
"""Handle Boost's weird URLs, which write the version two different ways."""
"""
Handle Boost's weird URLs,
which write the version two different ways.
"""
parts = [str(p) for p in Version(version)]
dots = ".".join(parts)
underscores = "_".join(parts)
return "http://downloads.sourceforge.net/project/boost/boost/%s/boost_%s.tar.bz2" % (
dots, underscores)
return "http://downloads.sourceforge.net/project/boost" \
"/boost/%s/boost_%s.tar.bz2" % (dots, underscores)
def determine_toolset(self, spec):
if spec.satisfies("arch=darwin-x86_64"):
if spec.satisfies("platform=darwin"):
return 'darwin'
toolsets = {'g++': 'gcc',
@ -178,8 +182,7 @@ def determine_b2_options(self, spec, options):
'-s', 'BZIP2_INCLUDE=%s' % spec['bzip2'].prefix.include,
'-s', 'BZIP2_LIBPATH=%s' % spec['bzip2'].prefix.lib,
'-s', 'ZLIB_INCLUDE=%s' % spec['zlib'].prefix.include,
'-s', 'ZLIB_LIBPATH=%s' % spec['zlib'].prefix.lib,
])
'-s', 'ZLIB_LIBPATH=%s' % spec['zlib'].prefix.lib])
linkTypes = ['static']
if '+shared' in spec:
@ -191,7 +194,8 @@ def determine_b2_options(self, spec, options):
if '+singlethreaded' in spec:
threadingOpts.append('single')
if not threadingOpts:
raise RuntimeError("At least one of {singlethreaded, multithreaded} must be enabled")
raise RuntimeError("""At least one of {singlethreaded,
multithreaded} must be enabled""")
options.extend([
'toolset=%s' % self.determine_toolset(spec),
@ -202,9 +206,9 @@ def determine_b2_options(self, spec, options):
def install(self, spec, prefix):
# On Darwin, Boost expects the Darwin libtool. However, one of the
# dependencies may have pulled in Spack's GNU libtool, and these two are
# not compatible. We thus create a symlink to Darwin's libtool and add
# it at the beginning of PATH.
# dependencies may have pulled in Spack's GNU libtool, and these two
# are not compatible. We thus create a symlink to Darwin's libtool
# and add it at the beginning of PATH.
if sys.platform == 'darwin':
newdir = os.path.abspath('darwin-libtool')
mkdirp(newdir)
@ -217,7 +221,8 @@ def install(self, spec, prefix):
withLibs.append(lib)
if not withLibs:
# if no libraries are specified for compilation, then you dont have
# to configure/build anything, just copy over to the prefix directory.
# to configure/build anything, just copy over to the prefix
# directory.
src = join_path(self.stage.source_path, 'boost')
mkdirp(join_path(prefix, 'include'))
dst = join_path(prefix, 'include', 'boost')
@ -235,6 +240,9 @@ def install(self, spec, prefix):
withLibs.remove('chrono')
if not spec.satisfies('@1.43.0:'):
withLibs.remove('random')
if '+graph' in spec and '+mpi' in spec:
withLibs.remove('graph')
withLibs.append('graph_parallel')
# to make Boost find the user-config.jam
env['BOOST_BUILD_PATH'] = './'
@ -259,6 +267,7 @@ def install(self, spec, prefix):
for threadingOpt in threadingOpts:
b2('install', 'threading=%s' % threadingOpt, *b2_options)
# The shared libraries are not installed correctly on Darwin; correct this
# The shared libraries are not installed correctly
# on Darwin; correct this
if (sys.platform == 'darwin') and ('+shared' in spec):
fix_darwin_install_name(prefix.lib)

View File

@ -0,0 +1,51 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import sys
from spack import *
class CBlosc(Package):
    """Blosc, an extremely fast, multi-threaded, meta-compressor library"""

    homepage = "http://www.blosc.org"
    url = "https://github.com/Blosc/c-blosc/archive/v1.9.2.tar.gz"

    version('1.9.2', 'dd2d83069d74b36b8093f1c6b49defc5')
    version('1.9.1', '7d708d3daadfacf984a87b71b1734ce2')
    version('1.9.0', 'e4c1dc8e2c468e5cfa2bf05eeee5357a')
    version('1.8.1', 'd73d5be01359cf271e9386c90dcf5b05')
    version('1.8.0', '5b92ecb287695ba20cc33d30bf221c4f')

    depends_on("cmake")
    depends_on("snappy")
    depends_on("zlib")

    def install(self, spec, prefix):
        # In-source CMake build with Spack's standard arguments.
        cmake('.', *std_cmake_args)
        make()
        make("install")

        # On macOS, repair the install names of the shared libraries.
        if sys.platform == 'darwin':
            fix_darwin_install_name(prefix.lib)

View File

@ -34,7 +34,7 @@ class Caliper(Package):
homepage = "https://github.com/LLNL/Caliper"
url = ""
version('master', git='ssh://git@github.com:LLNL/Caliper.git')
version('master', git='https://github.com/LLNL/Caliper.git')
variant('mpi', default=False, description='Enable MPI function wrappers.')

View File

@ -0,0 +1,28 @@
from spack import *
import os
from spack.pkg.builtin.intel import IntelInstaller
class Daal(IntelInstaller):
    """Intel Data Analytics Acceleration Library.

    Note: You will have to add the download file to a
    mirror so that Spack can find it. For instructions on how to set up a
    mirror, see http://software.llnl.gov/spack/mirrors.html"""

    homepage = "https://software.intel.com/en-us/daal"

    version('2016.2.181', 'aad2aa70e5599ebfe6f85b29d8719d46',
            url="file://%s/l_daal_2016.2.181.tgz" % os.getcwd())
    version('2016.3.210', 'ad747c0dd97dace4cad03cf2266cad28',
            url="file://%s/l_daal_2016.3.210.tgz" % os.getcwd())

    def install(self, spec, prefix):
        # Run the Intel installer into <prefix>/pkg, then surface the
        # 'daal' subtree at the top of the prefix through symlinks.
        self.intel_prefix = os.path.join(prefix, "pkg")
        IntelInstaller.install(self, spec, prefix)

        daal_dir = os.path.join(self.intel_prefix, "daal")
        for entry in os.listdir(daal_dir):
            os.symlink(os.path.join(daal_dir, entry),
                       os.path.join(self.prefix, entry))

View File

@ -25,12 +25,18 @@
from spack import *
import sys
class Dealii(Package):
"""C++ software library providing well-documented tools to build finite element codes for a broad variety of PDEs."""
homepage = "https://www.dealii.org"
url = "https://github.com/dealii/dealii/releases/download/v8.4.0/dealii-8.4.0.tar.gz"
class Dealii(Package):
"""C++ software library providing well-documented tools to build finite
element codes for a broad variety of PDEs."""
homepage = "https://www.dealii.org"
url = "https://github.com/dealii/dealii/releases/download/v8.4.1/dealii-8.4.1.tar.gz"
version('8.4.1', 'efbaf16f9ad59cfccad62302f36c3c1d')
version('8.4.0', 'ac5dbf676096ff61e092ce98c80c2b00')
version('8.3.0', 'fc6cdcb16309ef4bea338a4f014de6fa')
version('8.2.1', '71c728dbec14f371297cd405776ccf08')
version('8.1.0', 'aa8fadc2ce5eb674f44f997461bf668d')
version('dev', git='https://github.com/dealii/dealii.git')
variant('mpi', default=True, description='Compile with MPI')
@ -48,10 +54,11 @@ class Dealii(Package):
# required dependencies, light version
depends_on("blas")
# Boost 1.58 is blacklisted, see https://github.com/dealii/dealii/issues/1591
# require at least 1.59
depends_on ("boost@1.59.0:", when='~mpi')
depends_on ("boost@1.59.0:+mpi", when='+mpi')
# Boost 1.58 is blacklisted, see
# https://github.com/dealii/dealii/issues/1591
# Require at least 1.59
depends_on("boost@1.59.0:+thread+system+serialization+iostreams", when='~mpi') # NOQA: ignore=E501
depends_on("boost@1.59.0:+mpi+thread+system+serialization+iostreams", when='+mpi') # NOQA: ignore=E501
depends_on("bzip2")
depends_on("cmake")
depends_on("lapack")
@ -63,10 +70,11 @@ class Dealii(Package):
# optional dependencies
depends_on("mpi", when="+mpi")
depends_on("arpack-ng+mpi", when='+arpack+mpi')
depends_on ("doxygen", when='+doc')
depends_on("doxygen+graphviz", when='+doc')
depends_on("graphviz", when='+doc')
depends_on("gsl", when='@8.5.0:+gsl')
depends_on("gsl", when='@dev+gsl')
depends_on ("hdf5+mpi~cxx", when='+hdf5+mpi') #FIXME NetCDF declares dependency with ~cxx, why?
depends_on("hdf5+mpi", when='+hdf5+mpi')
depends_on("metis@5:", when='+metis')
depends_on("netcdf+mpi", when="+netcdf+mpi")
depends_on("netcdf-cxx", when='+netcdf+mpi')
@ -96,16 +104,16 @@ def install(self, spec, prefix):
'-DDEAL_II_WITH_THREADS:BOOL=ON',
'-DBOOST_DIR=%s' % spec['boost'].prefix,
'-DBZIP2_DIR=%s' % spec['bzip2'].prefix,
# CMake's FindBlas/Lapack may pickup system's blas/lapack instead of Spack's.
# Be more specific to avoid this.
# Note that both lapack and blas are provided in -DLAPACK_XYZ variables
# CMake's FindBlas/Lapack may pick up system's blas/lapack instead
# of Spack's. Be more specific to avoid this.
# Note that both lapack and blas are provided in -DLAPACK_XYZ.
'-DLAPACK_FOUND=true',
'-DLAPACK_INCLUDE_DIRS=%s;%s' %
(spec['lapack'].prefix.include,
spec['blas'].prefix.include),
'-DLAPACK_LIBRARIES=%s;%s' %
(join_path(spec['lapack'].prefix.lib,'liblapack.%s' % dsuf), # FIXME don't hardcode names
join_path(spec['blas'].prefix.lib,'libblas.%s' % dsuf)), # FIXME don't hardcode names
(spec['lapack'].lapack_shared_lib,
spec['blas'].blas_shared_lib),
'-DMUPARSER_DIR=%s' % spec['muparser'].prefix,
'-DUMFPACK_DIR=%s' % spec['suite-sparse'].prefix,
'-DTBB_DIR=%s' % spec['tbb'].prefix,
@ -116,33 +124,34 @@ def install(self, spec, prefix):
if '+mpi' in spec:
options.extend([
'-DDEAL_II_WITH_MPI:BOOL=ON',
'-DCMAKE_C_COMPILER=%s' % join_path(self.spec['mpi'].prefix.bin, 'mpicc'), # FIXME: avoid hardcoding mpi wrappers names
'-DCMAKE_CXX_COMPILER=%s' % join_path(self.spec['mpi'].prefix.bin, 'mpic++'),
'-DCMAKE_Fortran_COMPILER=%s' % join_path(self.spec['mpi'].prefix.bin, 'mpif90'),
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
])
else:
options.extend([
'-DDEAL_II_WITH_MPI:BOOL=OFF',
])
# Optional dependencies for which librariy names are the same as CMake variables
for library in ('gsl','hdf5','p4est','petsc','slepc','trilinos','metis'):
# Optional dependencies for which library names are the same as CMake
# variables:
for library in ('gsl', 'hdf5', 'p4est', 'petsc', 'slepc', 'trilinos', 'metis'): # NOQA: ignore=E501
if library in spec:
options.extend([
'-D{library}_DIR={value}'.format(library=library.upper(), value=spec[library].prefix),
'-DDEAL_II_WITH_{library}:BOOL=ON'.format(library=library.upper())
'-D%s_DIR=%s' % (library.upper(), spec[library].prefix),
'-DDEAL_II_WITH_%s:BOOL=ON' % library.upper()
])
else:
options.extend([
'-DDEAL_II_WITH_{library}:BOOL=OFF'.format(library=library.upper())
'-DDEAL_II_WITH_%s:BOOL=OFF' % library.upper()
])
# doxygen
options.extend([
'-DDEAL_II_COMPONENT_DOCUMENTATION=%s' % ('ON' if '+doc' in spec else 'OFF'),
'-DDEAL_II_COMPONENT_DOCUMENTATION=%s' %
('ON' if '+doc' in spec else 'OFF'),
])
# arpack
if '+arpack' in spec:
options.extend([
@ -160,8 +169,10 @@ def install(self, spec, prefix):
options.extend([
'-DNETCDF_FOUND=true',
'-DNETCDF_LIBRARIES=%s;%s' %
(join_path(spec['netcdf-cxx'].prefix.lib,'libnetcdf_c++.%s' % dsuf),
join_path(spec['netcdf'].prefix.lib,'libnetcdf.%s' % dsuf)),
(join_path(spec['netcdf-cxx'].prefix.lib,
'libnetcdf_c++.%s' % dsuf),
join_path(spec['netcdf'].prefix.lib,
'libnetcdf.%s' % dsuf)),
'-DNETCDF_INCLUDE_DIRS=%s;%s' %
(spec['netcdf-cxx'].prefix.include,
spec['netcdf'].prefix.include),
@ -222,7 +233,8 @@ def install(self, spec, prefix):
print('========== Step-40 PETSc ============')
print('=====================================')
# list the number of cycles to speed up
filter_file(r'(const unsigned int n_cycles = 8;)', ('const unsigned int n_cycles = 2;'), 'step-40.cc')
filter_file(r'(const unsigned int n_cycles = 8;)',
('const unsigned int n_cycles = 2;'), 'step-40.cc')
cmake('.')
if '^petsc' in spec:
make('release')
@ -232,20 +244,33 @@ def install(self, spec, prefix):
print('========= Step-40 Trilinos ==========')
print('=====================================')
# change Linear Algebra to Trilinos
filter_file(r'(\/\/ #define FORCE_USE_OF_TRILINOS.*)', ('#define FORCE_USE_OF_TRILINOS'), 'step-40.cc')
# The below filter_file should be different for versions
# before and after 8.4.0
if spec.satisfies('@8.4.0:'):
filter_file(r'(\/\/ #define FORCE_USE_OF_TRILINOS.*)',
('#define FORCE_USE_OF_TRILINOS'), 'step-40.cc')
else:
filter_file(r'(#define USE_PETSC_LA.*)',
('// #define USE_PETSC_LA'), 'step-40.cc')
if '^trilinos+hypre' in spec:
make('release')
make('run', parallel=False)
# the rest of the tests on step 40 only works for
# dealii version 8.4.0 and after
if spec.satisfies('@8.4.0:'):
print('=====================================')
print('=== Step-40 Trilinos SuperluDist ====')
print('=====================================')
# change to direct solvers
filter_file(r'(LA::SolverCG solver\(solver_control\);)', ('TrilinosWrappers::SolverDirect::AdditionalData data(false,"Amesos_Superludist"); TrilinosWrappers::SolverDirect solver(solver_control,data);'), 'step-40.cc')
filter_file(r'(LA::MPI::PreconditionAMG preconditioner;)', (''), 'step-40.cc')
filter_file(r'(LA::MPI::PreconditionAMG::AdditionalData data;)', (''), 'step-40.cc')
filter_file(r'(preconditioner.initialize\(system_matrix, data\);)', (''), 'step-40.cc')
filter_file(r'(solver\.solve \(system_matrix, completely_distributed_solution, system_rhs,)', ('solver.solve (system_matrix, completely_distributed_solution, system_rhs);'), 'step-40.cc')
filter_file(r'(LA::SolverCG solver\(solver_control\);)', ('TrilinosWrappers::SolverDirect::AdditionalData data(false,"Amesos_Superludist"); TrilinosWrappers::SolverDirect solver(solver_control,data);'), 'step-40.cc') # NOQA: ignore=E501
filter_file(r'(LA::MPI::PreconditionAMG preconditioner;)',
(''), 'step-40.cc')
filter_file(r'(LA::MPI::PreconditionAMG::AdditionalData data;)',
(''), 'step-40.cc')
filter_file(r'(preconditioner.initialize\(system_matrix, data\);)',
(''), 'step-40.cc')
filter_file(r'(solver\.solve \(system_matrix, completely_distributed_solution, system_rhs,)', ('solver.solve (system_matrix, completely_distributed_solution, system_rhs);'), 'step-40.cc') # NOQA: ignore=E501
filter_file(r'(preconditioner\);)', (''), 'step-40.cc')
if '^trilinos+superlu-dist' in spec:
make('release')
@ -255,7 +280,8 @@ def install(self, spec, prefix):
print('====== Step-40 Trilinos MUMPS =======')
print('=====================================')
# switch to Mumps
filter_file(r'(Amesos_Superludist)', ('Amesos_Mumps'), 'step-40.cc')
filter_file(r'(Amesos_Superludist)',
('Amesos_Mumps'), 'step-40.cc')
if '^trilinos+mumps' in spec:
make('release')
make('run', parallel=False)

View File

@ -26,20 +26,28 @@
import os
class Espresso(Package):
"""
QE is an integrated suite of Open-Source computer codes for electronic-structure calculations and materials
modeling at the nanoscale. It is based on density-functional theory, plane waves, and pseudopotentials.
QE is an integrated suite of Open-Source computer codes for
electronic-structure calculations and materials modeling at
the nanoscale. It is based on density-functional theory, plane
waves, and pseudopotentials.
"""
homepage = 'http://quantum-espresso.org'
url = 'http://www.qe-forge.org/gf/download/frsrelease/204/912/espresso-5.3.0.tar.gz'
version(
'5.4.0',
'8bb78181b39bd084ae5cb7a512c1cfe7',
url='http://www.qe-forge.org/gf/download/frsrelease/211/968/espresso-5.4.0.tar.gz'
)
version('5.3.0', '6848fcfaeb118587d6be36bd10b7f2c3')
variant('mpi', default=True, description='Build Quantum-ESPRESSO with mpi support')
variant('mpi', default=True, description='Builds with mpi support')
variant('openmp', default=False, description='Enables openMP support')
variant('scalapack', default=True, description='Enables scalapack support')
variant('elpa', default=True, description='Use elpa as an eigenvalue solver')
variant('elpa', default=True, description='Uses elpa as an eigenvalue solver')
depends_on('blas')
depends_on('lapack')
@ -47,7 +55,12 @@ class Espresso(Package):
depends_on('mpi', when='+mpi')
depends_on('fftw~mpi', when='~mpi')
depends_on('fftw+mpi', when='+mpi')
depends_on('scalapack', when='+scalapack+mpi') # TODO : + mpi needed to avoid false dependencies installation
# TODO : + mpi needed to avoid false dependencies installation
depends_on('scalapack', when='+scalapack+mpi')
# Spurious problems running in parallel the Makefile
# generated by qe configure
parallel = False
def check_variants(self, spec):
error = 'you cannot ask for \'+{variant}\' when \'+mpi\' is not active'
@ -87,10 +100,9 @@ def install(self, spec, prefix):
configure(*options)
make('all')
if spec.architecture.startswith('darwin'):
if spec.satisfies('platform=darwin'):
mkdirp(prefix.bin)
for filename in glob("bin/*.x"):
install(filename, prefix.bin)
else:
make('install')

View File

@ -0,0 +1,176 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Fenics(Package):
    """FEniCS is organized as a collection of interoperable components
    that together form the FEniCS Project. These components include
    the problem-solving environment DOLFIN, the form compiler FFC, the
    finite element tabulator FIAT, the just-in-time compiler Instant,
    the code generation interface UFC, the form language UFL and a
    range of additional components.

    This package builds DOLFIN from the main tarball and installs the
    pure-Python components (UFL, FFC, FIAT, Instant) from resources
    staged into the ``depends`` directory.
    """

    homepage = "http://fenicsproject.org/"
    url = "https://bitbucket.org/fenics-project/dolfin/downloads/dolfin-1.6.0.tar.gz"
    # Template used both for the main DOLFIN tarball and for the
    # companion component tarballs (see the `releases` loop below).
    base_url = "https://bitbucket.org/fenics-project/{pkg}/downloads/{pkg}-{version}.tar.gz"  # NOQA: ignore E501

    variant('hdf5', default=True, description='Compile with HDF5')
    variant('parmetis', default=True, description='Compile with ParMETIS')
    variant('scotch', default=True, description='Compile with Scotch')
    variant('petsc', default=True, description='Compile with PETSc')
    variant('slepc', default=True, description='Compile with SLEPc')
    variant('trilinos', default=True, description='Compile with Trilinos')
    variant('suite-sparse', default=True, description='Compile with SuiteSparse solvers')
    variant('vtk', default=False, description='Compile with VTK')
    variant('qt', default=False, description='Compile with QT')
    variant('mpi', default=True, description='Enables the distributed memory support')
    variant('openmp', default=True, description='Enables the shared memory support')
    variant('shared', default=True, description='Enables the build of shared libraries')
    variant('debug', default=False, description='Builds a debug version of the libraries')

    # not part of spack list for now
    # variant('petsc4py', default=True, description='Uses PETSc4py')
    # variant('slepc4py', default=True, description='Uses SLEPc4py')
    # variant('pastix', default=True, description='Compile with Pastix')

    extends('python')

    depends_on('py-numpy')
    depends_on('py-ply')
    depends_on('py-six')
    # NOTE(review): there is no 'doc' variant declared above, so this
    # dependency condition can never trigger -- confirm whether a
    # variant('doc', ...) declaration is missing.
    depends_on('py-sphinx@1.0.1:', when='+doc')

    depends_on('eigen@3.2.0:')
    depends_on('boost')
    depends_on('mpi', when='+mpi')
    depends_on('hdf5', when='+hdf5')
    depends_on('parmetis@4.0.2:^metis+real64', when='+parmetis')
    # Scotch must not pull in METIS itself; the +mpi build uses PT-Scotch.
    depends_on('scotch~metis', when='+scotch~mpi')
    depends_on('scotch+mpi~metis', when='+scotch+mpi')
    depends_on('petsc@3.4:', when='+petsc')
    depends_on('slepc@3.4:', when='+slepc')
    depends_on('trilinos', when='+trilinos')
    depends_on('vtk', when='+vtk')
    depends_on('suite-sparse', when='+suite-sparse')
    depends_on('qt', when='+qt')

    # This are the build dependencies
    depends_on('py-setuptools')
    depends_on('cmake@2.8.12:')
    depends_on('swig@3.0.3:')

    # One entry per supported DOLFIN release; 'resources' maps each
    # companion component name to the md5 of its matching tarball.
    releases = [
        {
            'version': '1.6.0',
            'md5': '35cb4baf7ab4152a40fb7310b34d5800',
            'resources': {
                'ffc': '358faa3e9da62a1b1a717070217b793e',
                'fiat': 'f4509d05c911fd93cea8d288a78a6c6f',
                'instant': '5f2522eb032a5bebbad6597b6fe0732a',
                'ufl': 'c40c5f04eaa847377ab2323122284016',
            }
        },
        {
            'version': '1.5.0',
            'md5': '9b589a3534299a5e6d22c13c5eb30bb8',
            'resources': {
                'ffc': '343f6d30e7e77d329a400fd8e73e0b63',
                'fiat': 'da3fa4dd8177bb251e7f68ec9c7cf6c5',
                'instant': 'b744023ded27ee9df4a8d8c6698c0d58',
                'ufl': '130d7829cf5a4bd5b52bf6d0955116fd',
            }
        },
    ]

    # Register a version() for each DOLFIN release and a resource() for
    # each companion component, staged under depends/<name> so install()
    # can find them.  base_url.format(..., **release) ignores the extra
    # 'md5'/'resources' keys in the release dict.
    for release in releases:
        version(release['version'], release['md5'], url=base_url.format(pkg='dolfin', version=release['version']))
        for name, md5 in release['resources'].items():
            resource(name=name,
                     url=base_url.format(pkg=name, **release),
                     md5=md5,
                     destination='depends',
                     when='@{version}'.format(**release),
                     placement=name)

    def cmake_is_on(self, option):
        # Translate presence of a variant/dependency in the spec into the
        # ON/OFF literal expected by the CMake -D...:BOOL options.
        return 'ON' if option in self.spec else 'OFF'

    def install(self, spec, prefix):
        # First install the pure-Python components staged by resource()
        # (UFL must precede FFC, which imports it at build time).
        for package in ['ufl', 'ffc', 'fiat', 'instant']:
            with working_dir(join_path('depends', package)):
                python('setup.py', 'install', '--prefix=%s' % prefix)

        # Then configure and build DOLFIN itself with CMake; each feature
        # toggle mirrors a variant (or the presence of a dependency).
        cmake_args = [
            '-DCMAKE_BUILD_TYPE:STRING={0}'.format(
                'Debug' if '+debug' in spec else 'RelWithDebInfo'),
            '-DBUILD_SHARED_LIBS:BOOL={0}'.format(
                self.cmake_is_on('+shared')),
            '-DDOLFIN_SKIP_BUILD_TESTS:BOOL=ON',
            '-DDOLFIN_ENABLE_OPENMP:BOOL={0}'.format(
                self.cmake_is_on('+openmp')),
            # CHOLMOD and UMFPACK both come from suite-sparse.
            '-DDOLFIN_ENABLE_CHOLMOD:BOOL={0}'.format(
                self.cmake_is_on('suite-sparse')),
            '-DDOLFIN_ENABLE_HDF5:BOOL={0}'.format(
                self.cmake_is_on('hdf5')),
            '-DDOLFIN_ENABLE_MPI:BOOL={0}'.format(
                self.cmake_is_on('mpi')),
            '-DDOLFIN_ENABLE_PARMETIS:BOOL={0}'.format(
                self.cmake_is_on('parmetis')),
            # pastix/petsc4py/slepc4py are not in the spec (variants are
            # commented out above), so these resolve to OFF.
            '-DDOLFIN_ENABLE_PASTIX:BOOL={0}'.format(
                self.cmake_is_on('pastix')),
            '-DDOLFIN_ENABLE_PETSC:BOOL={0}'.format(
                self.cmake_is_on('petsc')),
            '-DDOLFIN_ENABLE_PETSC4PY:BOOL={0}'.format(
                self.cmake_is_on('py-petsc4py')),
            '-DDOLFIN_ENABLE_PYTHON:BOOL={0}'.format(
                self.cmake_is_on('python')),
            '-DDOLFIN_ENABLE_QT:BOOL={0}'.format(
                self.cmake_is_on('qt')),
            '-DDOLFIN_ENABLE_SCOTCH:BOOL={0}'.format(
                self.cmake_is_on('scotch')),
            '-DDOLFIN_ENABLE_SLEPC:BOOL={0}'.format(
                self.cmake_is_on('slepc')),
            '-DDOLFIN_ENABLE_SLEPC4PY:BOOL={0}'.format(
                self.cmake_is_on('py-slepc4py')),
            '-DDOLFIN_ENABLE_SPHINX:BOOL={0}'.format(
                self.cmake_is_on('py-sphinx')),
            '-DDOLFIN_ENABLE_TRILINOS:BOOL={0}'.format(
                self.cmake_is_on('trilinos')),
            '-DDOLFIN_ENABLE_UMFPACK:BOOL={0}'.format(
                self.cmake_is_on('suite-sparse')),
            '-DDOLFIN_ENABLE_VTK:BOOL={0}'.format(
                self.cmake_is_on('vtk')),
            '-DDOLFIN_ENABLE_ZLIB:BOOL={0}'.format(
                self.cmake_is_on('zlib')),
        ]

        cmake_args.extend(std_cmake_args)

        with working_dir('build', create=True):
            cmake('..', *cmake_args)
            make()
            make('install')

View File

@ -0,0 +1,206 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import shutil
import sys
from spack import *
def _install_shlib(name, src, dst):
    """Install a shared library from directory *src* to directory *dst*.

    Copies the real (fully versioned) library file and recreates the
    conventional symlink chain next to it:

    * Darwin: copy ``<name>.0.dylib`` and link ``<name>.dylib`` to it.
    * ELF platforms: copy ``<name>.so.0.0.0`` and create the
      ``<name>.so.0`` (soname) and ``<name>.so`` (linker name) links.

    Symlink targets are relative, so the installed tree is relocatable.
    """
    if sys.platform == "darwin":
        shlib0 = name + ".0.dylib"
        shlib = name + ".dylib"
        shutil.copyfile(join_path(src, shlib0), join_path(dst, shlib0))
        os.symlink(shlib0, join_path(dst, shlib))
    else:
        shlib000 = name + ".so.0.0.0"
        shlib0 = name + ".so.0"
        # Fixed: the unversioned linker name must use the ELF ".so"
        # suffix; the previous code reused the Darwin ".dylib" name here,
        # so "<name>.so" was never created on Linux.
        shlib = name + ".so"
        shutil.copyfile(join_path(src, shlib000), join_path(dst, shlib000))
        os.symlink(shlib000, join_path(dst, shlib0))
        os.symlink(shlib0, join_path(dst, shlib))
class Hdf5Blosc(Package):
    """Blosc filter for HDF5.

    Builds both the Blosc HDF5 *filter* shared library and (for HDF5
    >= 1.8.11) the dynamically loadable *plugin*, driving libtool by
    hand instead of the project's CMake recipe.
    """
    homepage = "https://github.com/Blosc/hdf5-blosc"
    url = "https://github.com/Blosc/hdf5-blosc/archive/master.zip"

    version('master', '02c04acbf4bec66ec8a35bf157d1c9de')

    depends_on("c-blosc")
    depends_on("hdf5")
    depends_on("libtool")

    # The manual libtool build below is not safe to run in parallel.
    parallel = False

    def install(self, spec, prefix):
        # The included cmake recipe doesn't work for Darwin, so the build
        # is driven manually with libtool instead of:
        # cmake(".", *std_cmake_args)
        # make()
        # make("install")
        # if sys.platform == "darwin":
        #     fix_darwin_install_name(prefix.lib)

        libtool = Executable(join_path(spec["libtool"].prefix.bin, "libtool"))
        # NOTE(review): `cc` and `shlibext` are computed here but the
        # libtool invocations below hard-code "cc", and _install_shlib
        # derives the extension itself -- confirm whether the mpicc case
        # was ever wired up.
        if "+mpi" in spec["hdf5"]:
            cc = "mpicc"
        else:
            cc = "cc"
        shlibext = "so" if sys.platform!="darwin" else "dylib"
        mkdirp(prefix.include)
        mkdirp(prefix.lib)

        # Build and install filter
        with working_dir("src"):
            # Compile the filter source to a libtool object, then link it
            # against c-blosc and hdf5 into a shared library.
            libtool("--mode=compile", "--tag=CC",
                    "cc", "-g", "-O",
                    "-c", "blosc_filter.c")
            libtool("--mode=link", "--tag=CC",
                    "cc", "-g", "-O",
                    "-rpath", prefix.lib,
                    "-o", "libblosc_filter.la",
                    "blosc_filter.lo",
                    "-L%s" % spec["c-blosc"].prefix.lib, "-lblosc",
                    "-L%s" % spec["hdf5"].prefix.lib, "-lhdf5")
            # libtool puts the built libraries under .libs/.
            _install_shlib("libblosc_filter", ".libs", prefix.lib)

            # Build and install plugin
            # The plugin requires at least HDF5 1.8.11:
            if spec["hdf5"].satisfies("@1.8.11:"):
                libtool("--mode=compile", "--tag=CC",
                        "cc", "-g", "-O",
                        "-c", "blosc_plugin.c")
                # The plugin links against the filter library installed
                # just above (hence -L prefix.lib).
                libtool("--mode=link", "--tag=CC",
                        "cc", "-g", "-O",
                        "-rpath", prefix.lib,
                        "-o", "libblosc_plugin.la",
                        "blosc_plugin.lo",
                        "-L%s" % prefix.lib, "-lblosc_filter",
                        "-L%s" % spec["c-blosc"].prefix.lib, "-lblosc",
                        "-L%s" % spec["hdf5"].prefix.lib, "-lhdf5")
                _install_shlib("libblosc_plugin", ".libs", prefix.lib)

        self.check_install(spec)

    def check_install(self, spec):
        """Build and run a small HDF5 program to verify that the installed
        Blosc plugin can be found and used (via H5Pall_filters_avail)."""
        print "Checking HDF5-Blosc plugin..."
        checkdir = "spack-check"
        with working_dir(checkdir, create=True):
            # Minimal C program: create a chunked dataset with the Blosc
            # filter (ID 32001) attached and write to it; asserts fail if
            # the filter is unavailable.
            source = r"""\
#include <hdf5.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define FILTER_BLOSC 32001 /* Blosc filter ID registered with the HDF group */
int main(int argc, char **argv) {
  herr_t herr;
  hid_t file = H5Fcreate("file.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
  assert(file >= 0);
  hsize_t dims[3] = {10, 10, 10};
  hid_t space = H5Screate_simple(3, dims, NULL);
  assert(space >= 0);
  hid_t create_proplist = H5Pcreate(H5P_DATASET_CREATE);
  assert(create_proplist >= 0);
  herr = H5Pset_chunk(create_proplist, 3, dims);
  assert(herr >= 0);
  herr = H5Pset_filter(create_proplist, FILTER_BLOSC, H5Z_FLAG_OPTIONAL, 0,
                       NULL);
  assert(herr >= 0);
  htri_t all_filters_avail = H5Pall_filters_avail(create_proplist);
  assert(all_filters_avail > 0);
  hid_t dataset = H5Dcreate(file, "dataset", H5T_NATIVE_DOUBLE, space,
                            H5P_DEFAULT, create_proplist, H5P_DEFAULT);
  assert(dataset >= 0);
  double data[10][10][10];
  for (int k=0; k<10; ++k) {
    for (int j=0; j<10; ++j) {
      for (int i=0; i<10; ++i) {
        data[k][j][i] = 1.0 / (1.0 + i + j + k);
      }
    }
  }
  herr = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, space, space, H5P_DEFAULT,
                  &data[0][0][0]);
  assert(herr >= 0);
  herr = H5Pclose(create_proplist);
  assert(herr >= 0);
  herr = H5Dclose(dataset);
  assert(herr >= 0);
  herr = H5Sclose(space);
  assert(herr >= 0);
  herr = H5Fclose(file);
  assert(herr >= 0);
  printf("Done.\n");
  return 0;
}
"""
            expected = """\
Done.
"""
            with open("check.c", "w") as f:
                f.write(source)
            if "+mpi" in spec["hdf5"]:
                cc = which("mpicc")
            else:
                cc = which("cc")
            # TODO: Automate these path and library settings
            cc("-c", "-I%s" % spec["hdf5"].prefix.include, "check.c")
            cc("-o", "check", "check.o",
               "-L%s" % spec["hdf5"].prefix.lib, "-lhdf5")
            # Any failure to run the check binary is treated as a mismatch
            # (empty output) and reported below.
            try:
                check = Executable("./check")
                output = check(return_output=True)
            except:
                output = ""
            success = output == expected
            if not success:
                print "Produced output does not match expected output."
                print "Expected output:"
                print "-"*80
                print expected
                print "-"*80
                print "Produced output:"
                print "-"*80
                print output
                print "-"*80
                print "Environment:"
                env = which("env")
                env()
                raise RuntimeError("HDF5 Blosc plugin check failed")
        shutil.rmtree(checkdir)

    def setup_environment(self, spack_env, run_env):
        # HDF5 discovers dynamically loadable plugins via HDF5_PLUGIN_PATH.
        spack_env.append_path("HDF5_PLUGIN_PATH", self.spec.prefix.lib)
        run_env.append_path("HDF5_PLUGIN_PATH", self.spec.prefix.lib)

    def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
        # Dependents also need the plugin path at build and run time.
        spack_env.append_path("HDF5_PLUGIN_PATH", self.spec.prefix.lib)
        run_env.append_path("HDF5_PLUGIN_PATH", self.spec.prefix.lib)
run_env.append_path("HDF5_PLUGIN_PATH", self.spec.prefix.lib)

View File

@ -38,6 +38,7 @@ class Hdf5(Package):
list_url = "http://www.hdfgroup.org/ftp/HDF5/releases"
list_depth = 3
version('1.10.0-patch1', '9180ff0ef8dc2ef3f61bd37a7404f295')
version('1.10.0', 'bdc935337ee8282579cd6bc4270ad199')
version('1.8.16', 'b8ed9a36ae142317f88b0c7ef4b9c618', preferred=True)
version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')

View File

@ -0,0 +1,119 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
import platform
class Hpl(Package):
    """HPL is a software package that solves a (random) dense linear system
    in double precision (64 bits) arithmetic on distributed-memory computers.
    It can thus be regarded as a portable as well as freely available
    implementation of the High Performance Computing Linpack Benchmark."""

    homepage = "http://www.netlib.org/benchmark/hpl/"
    url = "http://www.netlib.org/benchmark/hpl/hpl-2.2.tar.gz"

    version('2.2', '0eb19e787c3dc8f4058db22c9e0c5320')

    variant('openmp', default=False, description='Enable OpenMP support')

    depends_on('mpi@1.1:')
    depends_on('blas')

    # HPL's hand-written makefiles are not parallel-safe.
    parallel = False

    def configure(self, spec, arch):
        """Write the ``Make.<arch>`` include file that HPL's build
        system expects, filling in Spack-provided MPI/BLAS paths."""
        # List of configuration options
        # Order is important
        config = []

        # OpenMP support: OMP_DEFS is referenced by LINKFLAGS below; when
        # the variant is off the make variable is simply left undefined
        # (expands to empty in the makefile).
        if '+openmp' in spec:
            config.append(
                'OMP_DEFS = {0}'.format(self.compiler.openmp_flag)
            )

        config.extend([
            # Shell
            'SHELL = /bin/sh',
            'CD = cd',
            'CP = cp',
            'LN_S = ln -fs',
            'MKDIR = mkdir -p',
            'RM = /bin/rm -f',
            'TOUCH = touch',
            # Platform identifier
            'ARCH = {0}'.format(arch),
            # HPL Directory Structure / HPL library
            'TOPdir = {0}'.format(os.getcwd()),
            'INCdir = $(TOPdir)/include',
            'BINdir = $(TOPdir)/bin/$(ARCH)',
            'LIBdir = $(TOPdir)/lib/$(ARCH)',
            'HPLlib = $(LIBdir)/libhpl.a',
            # Message Passing library (MPI)
            'MPinc = -I{0}'.format(spec['mpi'].prefix.include),
            'MPlib = -L{0}'.format(spec['mpi'].prefix.lib),
            # Linear Algebra library (BLAS or VSIPL); LAinc gets its -I
            # prefix inside HPL_INCLUDES below.
            'LAinc = {0}'.format(spec['blas'].prefix.include),
            'LAlib = {0}'.format(spec['blas'].blas_shared_lib),
            # F77 / C interface
            'F2CDEFS = -DAdd_ -DF77_INTEGER=int -DStringSunStyle',
            # HPL includes / libraries / specifics
            'HPL_INCLUDES = -I$(INCdir) -I$(INCdir)/$(ARCH) ' +
            '-I$(LAinc) -I$(MPinc)',
            'HPL_LIBS = $(HPLlib) $(LAlib) $(MPlib)',
            'HPL_OPTS = -DHPL_DETAILED_TIMING -DHPL_PROGRESS_REPORT',
            'HPL_DEFS = $(F2CDEFS) $(HPL_OPTS) $(HPL_INCLUDES)',
            # Compilers / linkers - Optimization flags
            'CC = {0}'.format(spec['mpi'].mpicc),
            'CCNOOPT = $(HPL_DEFS)',
            'CCFLAGS = $(HPL_DEFS) -O3',
            'LINKER = $(CC)',
            'LINKFLAGS = $(CCFLAGS) $(OMP_DEFS)',
            'ARCHIVER = ar',
            'ARFLAGS = r',
            'RANLIB = echo'
        ])

        # Write configuration options to include file
        with open('Make.{0}'.format(arch), 'w') as makefile:
            for var in config:
                makefile.write('{0}\n'.format(var))

    def install(self, spec, prefix):
        # Arch used for file naming purposes only
        arch = '{0}-{1}'.format(platform.system(), platform.processor())

        # Generate Makefile include
        self.configure(spec, arch)

        make('arch={0}'.format(arch))

        # Manual installation: HPL has no `make install` target; copy the
        # per-arch build products into the prefix.
        install_tree(join_path('bin', arch), prefix.bin)
        install_tree(join_path('lib', arch), prefix.lib)
        install_tree(join_path('include', arch), prefix.include)
        install_tree('man', prefix.man)

View File

@ -0,0 +1,144 @@
from spack import *
import os
import re
from spack.pkg.builtin.intel import IntelInstaller, filter_pick, \
get_all_components
class IntelParallelStudio(IntelInstaller):
    """Intel Parallel Studio.

    The edition (composer/professional/cluster) is encoded in the version
    string and gates which component groups and tools get installed.

    Note: You will have to add the download file to a
    mirror so that Spack can find it. For instructions on how to set up a
    mirror, see http://software.llnl.gov/spack/mirrors.html"""

    homepage = "https://software.intel.com/en-us/intel-parallel-studio-xe"

    # TODO: can also try the online installer (will download files on demand)
    version('composer.2016.2', '1133fb831312eb519f7da897fec223fa',
            url="file://%s/parallel_studio_xe_2016_composer_edition_update2.tgz"  # NOQA: ignore=E501
            % os.getcwd())
    version('professional.2016.2', '70be832f2d34c9bf596a5e99d5f2d832',
            url="file://%s/parallel_studio_xe_2016_update2.tgz" % os.getcwd())  # NOQA: ignore=E501
    version('cluster.2016.2', '70be832f2d34c9bf596a5e99d5f2d832',
            url="file://%s/parallel_studio_xe_2016_update2.tgz" % os.getcwd())  # NOQA: ignore=E501
    version('composer.2016.3', '3208eeabee951fc27579177b593cefe9',
            url="file://%s/parallel_studio_xe_2016_composer_edition_update3.tgz"  # NOQA: ignore=E501
            % os.getcwd())
    version('professional.2016.3', 'eda19bb0d0d19709197ede58f13443f3',
            url="file://%s/parallel_studio_xe_2016_update3.tgz" % os.getcwd())  # NOQA: ignore=E501
    version('cluster.2016.3', 'eda19bb0d0d19709197ede58f13443f3',
            url="file://%s/parallel_studio_xe_2016_update3.tgz" % os.getcwd())  # NOQA: ignore=E501

    variant('rpath', default=True, description="Add rpath to .cfg files")
    variant('newdtags', default=False,
            description="Allow use of --enable-new-dtags in MPI wrappers")
    variant('all', default=False,
            description="Install all files with the requested edition")
    variant('mpi', default=True,
            description="Install the Intel MPI library and ITAC tool")
    variant('mkl', default=True, description="Install the Intel MKL library")
    variant('daal',
            default=True, description="Install the Intel DAAL libraries")
    variant('ipp', default=True, description="Install the Intel IPP libraries")
    variant('tools', default=True, description="""Install the Intel Advisor,\
 VTune Amplifier, and Inspector tools""")

    provides('mpi', when='@cluster:+mpi')
    provides('mkl', when='+mkl')
    provides('daal', when='+daal')
    provides('ipp', when='+ipp')

    def install(self, spec, prefix):
        base_components = "ALL"  # when in doubt, install everything
        mpi_components = ""
        mkl_components = ""
        daal_components = ""
        ipp_components = ""

        if spec.satisfies('+all'):
            base_components = "ALL"
        else:
            # Partition the installer's component list (read from the
            # unpacked media config) into per-product groups by regex.
            all_components = get_all_components()
            regex = '(comp|openmp|intel-tbb|icc|ifort|psxe|icsxe-pset)'
            base_components = \
                filter_pick(all_components, re.compile(regex).search)
            regex = '(icsxe|imb|mpi|itac|intel-tc|clck)'
            mpi_components = \
                filter_pick(all_components, re.compile(regex).search)
            mkl_components = \
                filter_pick(all_components, re.compile('(mkl)').search)
            daal_components = \
                filter_pick(all_components, re.compile('(daal)').search)
            ipp_components = \
                filter_pick(all_components, re.compile('(ipp)').search)
            regex = '(gdb|vtune|inspector|advisor)'
            tool_components = \
                filter_pick(all_components, re.compile(regex).search)

        # Assemble the requested component set; MPI and the analysis
        # tools are only shipped with the cluster/professional editions
        # (the edition is the leading part of the version string).
        components = base_components
        if not spec.satisfies('+all'):
            if spec.satisfies('+mpi') and 'cluster' in str(spec.version):
                components += mpi_components
            if spec.satisfies('+mkl'):
                components += mkl_components
            if spec.satisfies('+daal'):
                components += daal_components
            if spec.satisfies('+ipp'):
                components += ipp_components
            if spec.satisfies('+tools') and (spec.satisfies('@cluster') or
                                             spec.satisfies('@professional')):
                components += tool_components

        # Hand the selection to the base-class silent installer.
        self.intel_components = ';'.join(components)
        IntelInstaller.install(self, spec, prefix)

        # Resolve the real (symlink-free) bin/lib directories inside the
        # versioned install tree, via well-known files that live there.
        absbindir = os.path.dirname(os.path.realpath(os.path.join(
            self.prefix.bin, "icc")))
        abslibdir = os.path.dirname(os.path.realpath(os.path.join
                                    (self.prefix.lib, "intel64", "libimf.a")))

        # Symlink the global license file everywhere the tools look for it.
        os.symlink(self.global_license_file, os.path.join(absbindir,
                                                          "license.lic"))
        if spec.satisfies('+tools') and (spec.satisfies('@cluster') or
                                         spec.satisfies('@professional')):
            os.mkdir(os.path.join(self.prefix, "inspector_xe/licenses"))
            os.symlink(self.global_license_file, os.path.join(
                self.prefix, "inspector_xe/licenses", "license.lic"))
            os.mkdir(os.path.join(self.prefix, "advisor_xe/licenses"))
            os.symlink(self.global_license_file, os.path.join(
                self.prefix, "advisor_xe/licenses", "license.lic"))
            os.mkdir(os.path.join(self.prefix, "vtune_amplifier_xe/licenses"))
            os.symlink(self.global_license_file, os.path.join(
                self.prefix, "vtune_amplifier_xe/licenses", "license.lic"))

        if (spec.satisfies('+all') or spec.satisfies('+mpi')) and \
                spec.satisfies('@cluster'):
            os.symlink(self.global_license_file, os.path.join(
                self.prefix, "itac_latest", "license.lic"))

        # Unless +newdtags, strip --enable-new-dtags from every MPI
        # compiler wrapper so RPATH (not RUNPATH) is emitted.
        # NOTE(review): each wrapper name is listed twice -- harmless for
        # the membership test, but likely unintentional.
        if spec.satisfies('~newdtags'):
            wrappers = ["mpif77", "mpif77", "mpif90", "mpif90",
                        "mpigcc", "mpigcc", "mpigxx", "mpigxx",
                        "mpiicc", "mpiicc", "mpiicpc", "mpiicpc",
                        "mpiifort", "mpiifort"]
            wrapper_paths = []
            for root, dirs, files in os.walk(spec.prefix):
                for name in files:
                    if name in wrappers:
                        wrapper_paths.append(os.path.join(spec.prefix,
                                                          root, name))
            for wrapper in wrapper_paths:
                filter_file(r'-Xlinker --enable-new-dtags', r' ',
                            wrapper)

        # Write per-compiler .cfg files so icc/icpc/ifort always rpath
        # their own runtime library directory.
        if spec.satisfies('+rpath'):
            for compiler_command in ["icc", "icpc", "ifort"]:
                cfgfilename = os.path.join(absbindir, "%s.cfg" %
                                           compiler_command)
                with open(cfgfilename, "w") as f:
                    f.write('-Xlinker -rpath -Xlinker %s\n' % abslibdir)

        os.symlink(os.path.join(self.prefix.man, "common", "man1"),
                   os.path.join(self.prefix.man, "man1"))

View File

@ -0,0 +1,125 @@
from spack import *
import os
import re
def filter_pick(input_list, regex_filter):
    """Return the elements of *input_list* for which *regex_filter*
    produces a truthy result (e.g. a regex match object)."""
    matched = []
    for item in input_list:
        if regex_filter(item):
            matched.append(item)
    return matched
def unfilter_pick(input_list, regex_filter):
    """Return the elements of *input_list* for which *regex_filter*
    produces no match -- the complement of ``filter_pick``."""
    return [item for item in input_list if not regex_filter(item)]
def get_all_components():
    """Return the component abbreviations declared in the Intel
    installer's ``pset/mediaconfig.xml`` in the current directory:
    the text between each ``<Abbr>`` and ``</Abbr>`` pair, at most
    one per line, in file order."""
    components = []
    with open("pset/mediaconfig.xml", "r") as manifest:
        for line in manifest:
            if '<Abbr>' in line:
                start = line.find('<Abbr>') + len('<Abbr>')
                end = line.find('</Abbr>')
                components.append(line[start:end])
    return components
class IntelInstaller(Package):
    """Base package containing common methods for installing Intel software"""

    homepage = "https://software.intel.com/en-us"
    # Which installer components to select; subclasses narrow this down.
    intel_components = "ALL"
    # Spack licensing hooks: a license file is required and symlinked/copied
    # into place before install runs.
    license_required = True
    license_comment = '#'
    license_files = ['Licenses/license.lic']
    license_vars = ['INTEL_LICENSE_FILE']
    license_url = \
        'https://software.intel.com/en-us/articles/intel-license-manager-faq'

    @property
    def global_license_file(self):
        """Returns the path where a global license file should be stored.

        Returns None when the package declares no license files.
        """
        if not self.license_files:
            return
        return join_path(self.global_license_dir, "intel",
                         os.path.basename(self.license_files[0]))

    def install(self, spec, prefix):
        """Drive the Intel silent installer: write a silent.cfg answer file
        and run the tarball's install.sh with it."""
        # Remove the installation DB, otherwise it will try to install into
        # location of other Intel builds
        if os.path.exists(os.path.join(os.environ["HOME"], "intel",
                          "intel_sdp_products.db")):
            os.remove(os.path.join(os.environ["HOME"], "intel",
                      "intel_sdp_products.db"))

        # Subclasses may point the installer at a subdirectory of the prefix;
        # default to installing straight into the Spack prefix.
        if not hasattr(self, "intel_prefix"):
            self.intel_prefix = self.prefix

        # Answer file for the non-interactive ("silent") installer. The
        # content lines must stay flush-left: this is a literal config file.
        silent_config_filename = 'silent.cfg'
        with open(silent_config_filename, 'w') as f:
            f.write("""
ACCEPT_EULA=accept
PSET_MODE=install
CONTINUE_WITH_INSTALLDIR_OVERWRITE=yes
PSET_INSTALL_DIR=%s
ACTIVATION_LICENSE_FILE=%s
ACTIVATION_TYPE=license_file
PHONEHOME_SEND_USAGE_DATA=no
CONTINUE_WITH_OPTIONAL_ERROR=yes
COMPONENTS=%s
""" % (self.intel_prefix, self.global_license_file, self.intel_components))

        # install.sh ships at the top of the extracted Intel tarball.
        install_script = which("install.sh")
        install_script('--silent', silent_config_filename)
class Intel(IntelInstaller):
    """Intel Compilers.

    Note: You will have to add the download file to a
    mirror so that Spack can find it. For instructions on how to set up a
    mirror, see http://software.llnl.gov/spack/mirrors.html"""

    homepage = "https://software.intel.com/en-us/intel-parallel-studio-xe"

    # TODO: can also try the online installer (will download files on demand)
    version('16.0.2', '1133fb831312eb519f7da897fec223fa',
            url="file://%s/parallel_studio_xe_2016_composer_edition_update2.tgz"  # NOQA: ignore=E501
            % os.getcwd())
    version('16.0.3', '3208eeabee951fc27579177b593cefe9',
            url="file://%s/parallel_studio_xe_2016_composer_edition_update3.tgz"  # NOQA: ignore=E501
            % os.getcwd())

    variant('rpath', default=True, description="Add rpath to .cfg files")

    def install(self, spec, prefix):
        """Install only the compiler-related components, then wire up the
        license and (optionally) rpath .cfg files for icc/icpc/ifort."""
        components = []
        all_components = get_all_components()
        # Select just the compiler toolchain pieces out of the full
        # Parallel Studio media (compilers, OpenMP, TBB, pset metadata).
        regex = '(comp|openmp|intel-tbb|icc|ifort|psxe|icsxe-pset)'
        components = filter_pick(all_components, re.compile(regex).search)

        self.intel_components = ';'.join(components)
        IntelInstaller.install(self, spec, prefix)

        # Resolve the real (versioned) bin/lib directories through the
        # symlinks the installer creates under the prefix.
        absbindir = os.path.split(os.path.realpath(os.path.join(
            self.prefix.bin, "icc")))[0]
        abslibdir = os.path.split(os.path.realpath(os.path.join(
            self.prefix.lib, "intel64", "libimf.a")))[0]

        # symlink or copy?
        os.symlink(self.global_license_file, os.path.join(absbindir,
                   "license.lic"))
        if spec.satisfies('+rpath'):
            # A <compiler>.cfg next to the binary is read by the Intel
            # drivers; use it to bake the runtime library path in.
            for compiler_command in ["icc", "icpc", "ifort"]:
                cfgfilename = os.path.join(absbindir, "%s.cfg" %
                                           compiler_command)
                with open(cfgfilename, "w") as f:
                    f.write('-Xlinker -rpath -Xlinker %s\n' % abslibdir)

        # Expose the man pages at the conventional man1 location.
        os.symlink(os.path.join(self.prefix.man, "common", "man1"),
                   os.path.join(self.prefix.man, "man1"))

View File

@ -0,0 +1,26 @@
from spack import *
import os
from spack.pkg.builtin.intel import IntelInstaller
class Ipp(IntelInstaller):
    """Intel Integrated Performance Primitives.

    Note: You will have to add the download file to a
    mirror so that Spack can find it. For instructions on how to set up a
    mirror, see http://software.llnl.gov/spack/mirrors.html"""

    homepage = "https://software.intel.com/en-us/intel-ipp"

    version('9.0.3.210', '0e1520dd3de7f811a6ef6ebc7aa429a3',
            url="file://%s/l_ipp_9.0.3.210.tgz" % os.getcwd())

    def install(self, spec, prefix):
        """Install into <prefix>/pkg, then symlink the IPP tree's contents
        to the top of the prefix so the usual layout paths resolve."""
        self.intel_prefix = os.path.join(prefix, "pkg")
        IntelInstaller.install(self, spec, prefix)

        ipp_dir = os.path.join(self.intel_prefix, "ipp")
        for f in os.listdir(ipp_dir):
            os.symlink(os.path.join(ipp_dir, f), os.path.join(self.prefix, f))

View File

@ -24,29 +24,19 @@
##############################################################################
from spack import *
class Launchmon(Package):
"""Software infrastructure that enables HPC run-time tools to
co-locate tool daemons with a parallel job."""
homepage = "http://sourceforge.net/projects/launchmon"
url = "http://downloads.sourceforge.net/project/launchmon/launchmon/1.0.1%20release/launchmon-1.0.1.tar.gz"
homepage = "https://github.com/LLNL/LaunchMON"
url = "https://github.com/LLNL/LaunchMON/releases/download/v1.0.2/launchmon-v1.0.2.tar.gz" # NOQA: ignore=E501
version('1.0.1', '2f12465803409fd07f91174a4389eb2b')
version('1.0.1-2', git='https://github.com/llnl/launchmon.git', commit='ff7e22424b8f375318951eb1c9282fcbbfa8aadf')
version('1.0.2', '8d6ba77a0ec2eff2fde2c5cc8fa7ff7a')
depends_on('autoconf')
depends_on('automake')
depends_on('libtool')
def patch(self):
# This patch makes libgcrypt compile correctly with newer gcc versions.
mf = FileFilter('tools/libgcrypt/tests/Makefile.in')
mf.filter(r'(basic_LDADD\s*=\s*.*)', r'\1 -lgpg-error')
mf.filter(r'(tsexp_LDADD\s*=\s*.*)', r'\1 -lgpg-error')
mf.filter(r'(keygen_LDADD\s*=\s*.*)', r'\1 -lgpg-error')
mf.filter(r'(benchmark_LDADD\s*=\s*.*)', r'\1 -lgpg-error')
def install(self, spec, prefix):
configure(
"--prefix=" + prefix,

View File

@ -23,11 +23,11 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
# Only build certain parts of dwarf because the other ones break.
dwarf_dirs = ['libdwarf', 'dwarfdump2']
class Libdwarf(Package):
"""The DWARF Debugging Information Format is of interest to
programmers working on compilers and debuggers (and any one
@ -41,18 +41,17 @@ class Libdwarf(Package):
MIPS/IRIX C compiler."""
homepage = "http://www.prevanders.net/dwarf.html"
url = "http://www.prevanders.net/libdwarf-20130729.tar.gz"
url = "http://www.prevanders.net/libdwarf-20160507.tar.gz"
list_url = homepage
version('20160507', 'ae32d6f9ece5daf05e2d4b14822ea811')
version('20130729', '4cc5e48693f7b93b7aa0261e63c0e21d')
version('20130207', '64b42692e947d5180e162e46c689dfbf')
version('20130126', 'ded74a5e90edb5a12aac3c29d260c5db')
depends_on("libelf")
parallel = False
def install(self, spec, prefix):
# dwarf build does not set arguments for ar properly
make.add_default_arg('ARFLAGS=rcs')
@ -69,7 +68,11 @@ def install(self, spec, prefix):
install('libdwarf.h', prefix.include)
install('dwarf.h', prefix.include)
with working_dir('dwarfdump2'):
if spec.satisfies('@20130126:20130729'):
dwarfdump_dir = 'dwarfdump2'
else:
dwarfdump_dir = 'dwarfdump'
with working_dir(dwarfdump_dir):
configure("--prefix=" + prefix)
# This makefile has strings of copy commands that

View File

@ -37,7 +37,7 @@ class Libpciaccess(Package):
def install(self, spec, prefix):
# libpciaccess does not support OS X
if spec.satisfies('arch=darwin-x86_64'):
if spec.satisfies('platform=darwin'):
# create a dummy directory
mkdir(prefix.lib)
return

View File

@ -0,0 +1,39 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class LibpthreadStubs(Package):
    """The libpthread-stubs package provides weak aliases for pthread
       functions not provided in libc or otherwise available by
       default. """
    homepage = "http://xcb.freedesktop.org/"
    url = "http://xcb.freedesktop.org/dist/libpthread-stubs-0.1.tar.bz2"

    version('0.3', 'e8fa31b42e13f87e8f5a7a2b731db7ee')

    def install(self, spec, prefix):
        """Standard autotools configure/make/make-install build."""
        configure('--prefix=%s' % prefix)
        make()
        make("install")

View File

@ -0,0 +1,44 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libxau(Package):
    """The libXau package contains a library implementing the X11
    Authorization Protocol. This is useful for restricting client
    access to the display."""

    homepage = "http://xcb.freedesktop.org/"
    url = "http://ftp.x.org/pub/individual/lib/libXau-1.0.8.tar.bz2"

    version('1.0.8', '685f8abbffa6d145c0f930f00703b21b')

    depends_on('xproto')

    def install(self, spec, prefix):
        """Standard autotools configure/make/make-install build; libXau
        needs no extra configure arguments."""
        # Removed the package-template "FIXME" boilerplate comments: the
        # default configure line and build steps are correct for libXau.
        configure('--prefix=%s' % prefix)
        make()
        make("install")

View File

@ -35,18 +35,19 @@ class Libxcb(Package):
version('1.11', '1698dd837d7e6e94d029dbe8b3a82deb')
version('1.11.1', '118623c15a96b08622603a71d8789bf3')
depends_on("python")
depends_on("xcb-proto")
depends_on("pkg-config")
# depends_on('pthread') # Ubuntu: apt-get install libpthread-stubs0-dev
# depends_on('xau') # Ubuntu: apt-get install libxau-dev
depends_on("libpthread-stubs")
depends_on('libxau')
def patch(self):
filter_file('typedef struct xcb_auth_info_t {', 'typedef struct {', 'src/xcb.h')
def install(self, spec, prefix):
env['PKG_CONFIG_PATH'] = env['PKG_CONFIG_PATH'] + ':/usr/lib64/pkgconfig'
configure("--prefix=%s" % prefix)
make()

View File

@ -23,7 +23,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os, shutil
import os, glob
class Llvm(Package):
@ -46,7 +46,9 @@ class Llvm(Package):
variant('libcxx', default=True, description="Build the LLVM C++ standard library")
variant('compiler-rt', default=True, description="Build the LLVM compiler runtime, including sanitizers")
variant('gold', default=True, description="Add support for LTO with the gold linker plugin")
variant('shared_libs', default=False, description="Build all components as shared libraries, faster, less memory to build, less stable")
variant('link_dylib', default=False, description="Build and link the libLLVM shared library rather than static")
variant('all_targets', default=True, description="Build all supported targets, default targets <current arch>,NVPTX,AMDGPU,CppBackend")
# Build dependency
depends_on('cmake @2.8.12.2:')
@ -257,6 +259,28 @@ def install(self, spec, prefix):
if '+compiler-rt' not in spec:
cmake_args.append('-DLLVM_EXTERNAL_COMPILER_RT_BUILD:Bool=OFF')
if '+shared_libs' in spec:
cmake_args.append('-DBUILD_SHARED_LIBS:Bool=ON')
if '+link_dylib' in spec:
cmake_args.append('-DLLVM_LINK_LLVM_DYLIB:Bool=ON')
if '+all_targets' not in spec: # all is default on cmake
targets = ['CppBackend', 'NVPTX', 'AMDGPU']
if 'x86' in spec.architecture.target.lower():
targets.append('X86')
elif 'arm' in spec.architecture.target.lower():
targets.append('ARM')
elif 'aarch64' in spec.architecture.target.lower():
targets.append('AArch64')
elif 'sparc' in spec.architecture.target.lower():
targets.append('sparc')
elif ('ppc' in spec.architecture.target.lower() or
'power' in spec.architecture.target.lower()):
targets.append('PowerPC')
cmake_args.append('-DLLVM_TARGETS_TO_BUILD:Bool=' + ';'.join(targets))
if '+clang' not in spec:
if '+clang_extra' in spec:
raise SpackException('The clang_extra variant requires the clang variant to be selected')
@ -267,7 +291,5 @@ def install(self, spec, prefix):
cmake(*cmake_args)
make()
make("install")
query_path = os.path.join('bin', 'clang-query')
# Manually install clang-query, because llvm doesn't...
if os.path.exists(query_path):
shutil.copy(query_path, os.path.join(prefix, 'bin'))
cp = which('cp')
cp('-a', 'bin/', prefix)

View File

@ -23,7 +23,7 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Lmod(Package):
"""
@ -34,17 +34,25 @@ class Lmod(Package):
variable. Modulefiles for Library packages provide environment variables
that specify where the library and header files can be found.
"""
homepage = "https://www.tacc.utexas.edu/research-development/tacc-projects/lmod"
url = "http://sourceforge.net/projects/lmod/files/Lmod-6.0.1.tar.bz2/download"
homepage = 'https://www.tacc.utexas.edu/research-development/tacc-projects/lmod' # NOQA: ignore=E501
url = 'https://github.com/TACC/Lmod/archive/6.4.1.tar.gz'
version('6.4.1', '7978ba777c8aa41a4d8c05fec5f780f4')
version('6.3.7', '0fa4d5a24c41cae03776f781aa2dedc1')
version('6.0.1', '91abf52fe5033bd419ffe2842ebe7af9')
depends_on("lua@5.2:")
depends_on('lua@5.2:')
depends_on('lua-luaposix')
depends_on('lua-luafilesystem')
parallel = False
def setup_environment(self, spack_env, run_env):
stage_lua_path = join_path(
self.stage.path, 'Lmod-{version}', 'src', '?.lua')
spack_env.append_path('LUA_PATH', stage_lua_path.format(
version=self.version), separator=';')
def install(self, spec, prefix):
# Add our lua to PATH
os.environ['PATH'] = spec['lua'].prefix.bin + ';' + os.environ['PATH']
configure('--prefix=%s' % prefix)
make()
make("install")
make('install')

View File

@ -0,0 +1,51 @@
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class LuaLuafilesystem(Package):
    """
    LuaFileSystem is a Lua library developed to complement the set of
    functions related to file systems offered by the standard Lua distribution.

    LuaFileSystem offers a portable way to access the underlying directory
    structure and file attributes.

    LuaFileSystem is free software and uses the same license as Lua 5.1
    """
    homepage = 'http://keplerproject.github.io/luafilesystem'
    url = 'https://github.com/keplerproject/luafilesystem/archive/v_1_6_3.tar.gz'

    # Upstream tags use underscores (v_1_6_3), hence the underscored version.
    version('1_6_3', 'd0552c7e5a082f5bb2865af63fb9dc95')

    extends('lua')

    def install(self, spec, prefix):
        """Install via luarocks, pointing it at the rockspec that ships in
        the unpacked source tree.

        The source directory uses the underscored version (v_1_6_3) while
        the rockspec filename uses the dotted one (1.6.3), so both version
        renderings are interpolated into the path template.
        """
        rockspec_fmt = join_path(self.stage.path,
                                 'luafilesystem-v_{version.underscored}',
                                 'rockspecs',
                                 'luafilesystem-{version.dotted}-1.rockspec')
        luarocks('--tree=' + prefix, 'install',
                 rockspec_fmt.format(version=self.spec.version))

View File

@ -57,7 +57,7 @@ class Lua(Package):
placement='luarocks')
def install(self, spec, prefix):
if spec.satisfies("arch=darwin-i686") or spec.satisfies("arch=darwin-x86_64"):
if spec.satisfies("platform=darwin"):
target = 'macosx'
else:
target = 'linux'
@ -105,6 +105,9 @@ def setup_dependent_environment(self, spack_env, run_env, extension_spec):
spack_env.set('LUA_PATH', ';'.join(lua_patterns), separator=';')
spack_env.set('LUA_CPATH', ';'.join(lua_cpatterns), separator=';')
# Add LUA to PATH for dependent packages
spack_env.prepend_path('PATH', self.prefix.bin)
# For run time environment set only the path for extension_spec and
# prepend it to LUAPATH
if extension_spec.package.extends(self.spec):
@ -153,5 +156,5 @@ def setup_dependent_package(self, module, ext_spec):
"""
# Lua extension builds can have lua and luarocks executable functions
module.lua = Executable(join_path(self.spec.prefix.bin, 'lua'))
module.luarocks = Executable(join_path(self.spec.prefix.bin,
'luarocks'))
module.luarocks = Executable(
join_path(self.spec.prefix.bin, 'luarocks'))

View File

@ -24,55 +24,61 @@
##############################################################################
from spack import *
import glob, sys, os
import glob
import sys
import os
class Metis(Package):
"""
METIS is a set of serial programs for partitioning graphs, partitioning finite element meshes, and producing fill
reducing orderings for sparse matrices. The algorithms implemented in METIS are based on the multilevel
recursive-bisection, multilevel k-way, and multi-constraint partitioning schemes.
"""
"""METIS is a set of serial programs for partitioning graphs, partitioning
finite element meshes, and producing fill reducing orderings for sparse
matrices. The algorithms implemented in METIS are based on the
multilevel recursive-bisection, multilevel k-way, and multi-constraint
partitioning schemes."""
homepage = 'http://glaros.dtc.umn.edu/gkhome/metis/metis/overview'
url = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz"
homepage = "http://glaros.dtc.umn.edu/gkhome/metis/metis/overview"
base_url = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis"
version('5.1.0', '5465e67079419a69e0116de24fce58fe',
url='http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz')
version('4.0.3', '5efa35de80703c1b2c4d0de080fafbcf4e0d363a21149a1ad2f96e0144841a55',
url='http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/OLD/metis-4.0.3.tar.gz')
version('5.1.0', '5465e67079419a69e0116de24fce58fe')
version('5.0.2', 'acb521a4e8c2e6dd559a7f9abd0468c5')
version('4.0.3', 'd3848b454532ef18dc83e4fb160d1e10')
variant('shared', default=True, description='Enables the build of shared libraries')
variant('debug', default=False, description='Builds the library in debug mode')
variant('gdb', default=False, description='Enables gdb support')
variant('idx64', default=False, description='Use int64_t as default index type')
variant('double', default=False, description='Use double precision floating point types')
variant('real64', default=False, description='Use double precision floating point types')
depends_on('cmake@2.8:', when='@5:') # build-time dependency
depends_on('gdb', when='+gdb')
patch('install_gklib_defs_rename.patch', when='@5:')
def url_for_version(self, version):
    """Build the download URL for a given METIS version.

    Releases older than 4.0.3 live in the ``OLD/`` subdirectory of the
    upstream download area; newer ones sit directly under ``base_url``.
    """
    verdir = 'OLD/' if version < Version('4.0.3') else ''
    return '%s/%smetis-%s.tar.gz' % (Metis.base_url, verdir, version)
@when('@4:4.0.3')
@when('@:4')
def install(self, spec, prefix):
if '+gdb' in spec:
raise InstallError('gdb support not implemented in METIS 4!')
if '+idx64' in spec:
raise InstallError('idx64 option not implemented in METIS 4!')
if '+double' in spec:
raise InstallError('double option not implemented for METIS 4!')
# Process library spec and options
unsupp_vars = [v for v in ('+gdb', '+idx64', '+real64') if v in spec]
if unsupp_vars:
msg = 'Given variants %s are unsupported by METIS 4!' % unsupp_vars
raise InstallError(msg)
options = ['COPTIONS=-fPIC']
if '+debug' in spec:
options.append('OPTFLAGS=-g -O0')
make(*options)
# Compile and install library files
ccompile = Executable(self.compiler.cc)
mkdir(prefix.bin)
for x in ('pmetis', 'kmetis', 'oemetis', 'onmetis', 'partnmesh',
'partdmesh', 'mesh2nodal', 'mesh2dual', 'graphchk'):
install(x, prefix.bin)
binfiles = ('pmetis', 'kmetis', 'oemetis', 'onmetis', 'partnmesh',
'partdmesh', 'mesh2nodal', 'mesh2dual', 'graphchk')
for binfile in binfiles:
install(binfile, prefix.bin)
mkdir(prefix.lib)
install('libmetis.a', prefix.lib)
@ -82,93 +88,107 @@ def install(self, spec, prefix):
install(h, prefix.include)
mkdir(prefix.share)
for f in (join_path(*p)
for p in (('Programs', 'io.c'),
('Test','mtest.c'),
('Graphs','4elt.graph'),
('Graphs', 'metis.mesh'),
('Graphs', 'test.mgraph'))):
install(f, prefix.share)
sharefiles = (('Graphs', '4elt.graph'), ('Graphs', 'metis.mesh'),
('Graphs', 'test.mgraph'))
for sharefile in tuple(join_path(*sf) for sf in sharefiles):
install(sharefile, prefix.share)
if '+shared' in spec:
shared_flags = ['-fPIC', '-shared']
if sys.platform == 'darwin':
lib_dsuffix = 'dylib'
load_flag = '-Wl,-all_load'
no_load_flag = ''
shared_suffix = 'dylib'
shared_flags.extend(['-Wl,-all_load', 'libmetis.a'])
else:
lib_dsuffix = 'so'
load_flag = '-Wl,-whole-archive'
no_load_flag = '-Wl,-no-whole-archive'
shared_suffix = 'so'
shared_flags.extend(['-Wl,-whole-archive', 'libmetis.a',
'-Wl,-no-whole-archive'])
os.system(spack_cc + ' -fPIC -shared ' + load_flag +
' libmetis.a ' + no_load_flag + ' -o libmetis.' +
lib_dsuffix)
install('libmetis.' + lib_dsuffix, prefix.lib)
shared_out = '%s/libmetis.%s' % (prefix.lib, shared_suffix)
shared_flags.extend(['-o', shared_out])
ccompile(*shared_flags)
# Set up and run tests on installation
symlink(join_path(prefix.share, 'io.c'), 'io.c')
symlink(join_path(prefix.share, 'mtest.c'), 'mtest.c')
os.system(spack_cc + ' -I%s' % prefix.include + ' -c io.c')
os.system(spack_cc + ' -I%s' % prefix.include +
' -L%s' % prefix.lib + ' -lmetis mtest.c io.o -o mtest')
_4eltgraph = join_path(prefix.share, '4elt.graph')
test_mgraph = join_path(prefix.share, 'test.mgraph')
metis_mesh = join_path(prefix.share, 'metis.mesh')
kmetis = join_path(prefix.bin, 'kmetis')
os.system('./mtest ' + _4eltgraph)
os.system(kmetis + ' ' + _4eltgraph + ' 40')
os.system(join_path(prefix.bin, 'onmetis') + ' ' + _4eltgraph)
os.system(join_path(prefix.bin, 'pmetis') + ' ' + test_mgraph + ' 2')
os.system(kmetis + ' ' + test_mgraph + ' 2')
os.system(kmetis + ' ' + test_mgraph + ' 5')
os.system(join_path(prefix.bin, 'partnmesh') + metis_mesh + ' 10')
os.system(join_path(prefix.bin, 'partdmesh') + metis_mesh + ' 10')
os.system(join_path(prefix.bin, 'mesh2dual') + metis_mesh)
ccompile('-I%s' % prefix.include, '-L%s' % prefix.lib,
'-Wl,-rpath=%s' % (prefix.lib if '+shared' in spec else ''),
join_path('Programs', 'io.o'), join_path('Test', 'mtest.c'),
'-o', '%s/mtest' % prefix.bin, '-lmetis', '-lm')
test_bin = lambda testname: join_path(prefix.bin, testname)
test_graph = lambda graphname: join_path(prefix.share, graphname)
graph = test_graph('4elt.graph')
os.system('%s %s' % (test_bin('mtest'), graph))
os.system('%s %s 40' % (test_bin('kmetis'), graph))
os.system('%s %s' % (test_bin('onmetis'), graph))
graph = test_graph('test.mgraph')
os.system('%s %s 2' % (test_bin('pmetis'), graph))
os.system('%s %s 2' % (test_bin('kmetis'), graph))
os.system('%s %s 5' % (test_bin('kmetis'), graph))
graph = test_graph('metis.mesh')
os.system('%s %s 10' % (test_bin('partnmesh'), graph))
os.system('%s %s 10' % (test_bin('partdmesh'), graph))
os.system('%s %s' % (test_bin('mesh2dual'), graph))
# FIXME: The following code should replace the testing code in the
# block above since it causes installs to fail when one or more of the
# Metis tests fail, but it currently doesn't work because the 'mtest',
# 'onmetis', and 'partnmesh' tests return error codes that trigger
# false positives for failure.
"""
Executable(test_bin('mtest'))(test_graph('4elt.graph'))
Executable(test_bin('kmetis'))(test_graph('4elt.graph'), '40')
Executable(test_bin('onmetis'))(test_graph('4elt.graph'))
Executable(test_bin('pmetis'))(test_graph('test.mgraph'), '2')
Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '2')
Executable(test_bin('kmetis'))(test_graph('test.mgraph'), '5')
Executable(test_bin('partnmesh'))(test_graph('metis.mesh'), '10')
Executable(test_bin('partdmesh'))(test_graph('metis.mesh'), '10')
Executable(test_bin('mesh2dual'))(test_graph('metis.mesh'))
"""
@when('@5:')
def install(self, spec, prefix):
options = []
options.extend(std_cmake_args)
build_directory = join_path(self.stage.path, 'spack-build')
source_directory = self.stage.source_path
options.append('-DGKLIB_PATH:PATH={metis_source}/GKlib'.format(metis_source=source_directory))
options.append('-DGKLIB_PATH:PATH=%s/GKlib' % source_directory)
options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s/lib' % prefix)
if '+shared' in spec:
options.append('-DSHARED:BOOL=ON')
if '+debug' in spec:
options.extend(['-DDEBUG:BOOL=ON',
'-DCMAKE_BUILD_TYPE:STRING=Debug'])
if '+gdb' in spec:
options.append('-DGDB:BOOL=ON')
metis_header = join_path(source_directory, 'include', 'metis.h')
if '+idx64' in spec:
filter_file('IDXTYPEWIDTH 32', 'IDXTYPEWIDTH 64', metis_header)
if '+double' in spec:
if '+real64' in spec:
filter_file('REALTYPEWIDTH 32', 'REALTYPEWIDTH 64', metis_header)
# Make clang 7.3 happy.
# Prevents "ld: section __DATA/__thread_bss extends beyond end of file"
# See upstream LLVM issue https://llvm.org/bugs/show_bug.cgi?id=27059
# Adopted from https://github.com/Homebrew/homebrew-science/blob/master/metis.rb
# and https://github.com/Homebrew/homebrew-science/blob/master/metis.rb
if spec.satisfies('%clang@7.3.0'):
filter_file('#define MAX_JBUFS 128', '#define MAX_JBUFS 24', join_path(source_directory, 'GKlib', 'error.c'))
filter_file('#define MAX_JBUFS 128', '#define MAX_JBUFS 24',
join_path(source_directory, 'GKlib', 'error.c'))
with working_dir(build_directory, create=True):
cmake(source_directory, *options)
make()
make("install")
make('install')
# now run some tests:
for f in ["4elt", "copter2", "mdual"]:
for f in ['4elt', 'copter2', 'mdual']:
graph = join_path(source_directory, 'graphs', '%s.graph' % f)
Executable(join_path(prefix.bin, 'graphchk'))(graph)
Executable(join_path(prefix.bin, 'gpmetis'))(graph, '2')
@ -182,6 +202,6 @@ def install(self, spec, prefix):
# install GKlib headers, which will be needed for ParMETIS
GKlib_dist = join_path(prefix.include, 'GKlib')
mkdirp(GKlib_dist)
fs = glob.glob(join_path(source_directory,'GKlib',"*.h"))
for f in fs:
install(f, GKlib_dist)
hfiles = glob.glob(join_path(source_directory, 'GKlib', '*.h'))
for hfile in hfiles:
install(hfile, GKlib_dist)

View File

@ -0,0 +1,28 @@
from spack import *
import os
from spack.pkg.builtin.intel import IntelInstaller
class Mkl(IntelInstaller):
    """Intel Math Kernel Library.

    Note: You will have to add the download file to a
    mirror so that Spack can find it. For instructions on how to set up a
    mirror, see http://software.llnl.gov/spack/mirrors.html"""

    homepage = "https://software.intel.com/en-us/intel-mkl"

    version('11.3.2.181', '536dbd82896d6facc16de8f961d17d65',
            url="file://%s/l_mkl_11.3.2.181.tgz" % os.getcwd())
    version('11.3.3.210', 'f72546df27f5ebb0941b5d21fd804e34',
            url="file://%s/l_mkl_11.3.3.210.tgz" % os.getcwd())

    def install(self, spec, prefix):
        """Install into <prefix>/pkg, then symlink the MKL tree's contents
        to the top of the prefix so the usual layout paths resolve."""
        self.intel_prefix = os.path.join(prefix, "pkg")
        IntelInstaller.install(self, spec, prefix)

        mkl_dir = os.path.join(self.intel_prefix, "mkl")
        for f in os.listdir(mkl_dir):
            os.symlink(os.path.join(mkl_dir, f), os.path.join(self.prefix, f))

View File

@ -25,6 +25,7 @@
from spack import *
import os
class Mvapich2(Package):
"""MVAPICH2 is an MPI implementation for Infiniband networks."""
homepage = "http://mvapich.cse.ohio-state.edu/"
@ -43,8 +44,9 @@ class Mvapich2(Package):
variant('debug', default=False, description='Enables debug information and error messages at run-time')
##########
# TODO : Process managers should be grouped into the same variant, as soon as variant capabilities will be extended
# See https://groups.google.com/forum/#!topic/spack/F8-f8B4_0so
# TODO : Process managers should be grouped into the same variant,
# as soon as variant capabilities will be extended See
# https://groups.google.com/forum/#!topic/spack/F8-f8B4_0so
SLURM = 'slurm'
HYDRA = 'hydra'
GFORKER = 'gforker'
@ -57,7 +59,8 @@ class Mvapich2(Package):
##########
##########
# TODO : Network types should be grouped into the same variant, as soon as variant capabilities will be extended
# TODO : Network types should be grouped into the same variant, as
# soon as variant capabilities will be extended
PSM = 'psm'
SOCK = 'sock'
NEMESISIBTCP = 'nemesisibtcp'
@ -84,8 +87,8 @@ def url_for_version(self, version):
@staticmethod
def enabled(x):
"""
Given a variant name returns the string that means the variant is enabled
"""Given a variant name returns the string that means the variant is
enabled
:param x: variant name
:return:
@ -93,8 +96,8 @@ def enabled(x):
return '+' + x
def set_build_type(self, spec, configure_args):
"""
Appends to configure_args the flags that depends only on the build type (i.e. release or debug)
"""Appends to configure_args the flags that depends only on the build
type (i.e. release or debug)
:param spec: spec
:param configure_args: list of current configure arguments
@ -104,7 +107,8 @@ def set_build_type(self, spec, configure_args):
"--disable-fast",
"--enable-error-checking=runtime",
"--enable-error-messages=all",
"--enable-g=dbg", "--enable-debuginfo" # Permits debugging with TotalView
# Permits debugging with TotalView
"--enable-g=dbg", "--enable-debuginfo"
]
else:
build_type_options = ["--enable-fast=all"]
@ -112,25 +116,41 @@ def set_build_type(self, spec, configure_args):
configure_args.extend(build_type_options)
def set_process_manager(self, spec, configure_args):
"""
Appends to configure_args the flags that will enable the appropriate process managers
"""Appends to configure_args the flags that will enable the
appropriate process managers
:param spec: spec
:param configure_args: list of current configure arguments
"""
# Check that slurm variant is not activated together with other pm variants
has_slurm_incompatible_variants = any(self.enabled(x) in spec for x in Mvapich2.SLURM_INCOMPATIBLE_PMS)
if self.enabled(Mvapich2.SLURM) in spec and has_slurm_incompatible_variants:
raise RuntimeError(" %s : 'slurm' cannot be activated together with other process managers" % self.name)
# Check that slurm variant is not activated together with
# other pm variants
has_slurm_incompatible_variants = \
any(self.enabled(x) in spec
for x in Mvapich2.SLURM_INCOMPATIBLE_PMS)
if self.enabled(Mvapich2.SLURM) in spec and \
has_slurm_incompatible_variants:
raise RuntimeError(" %s : 'slurm' cannot be activated \
together with other process managers" % self.name)
process_manager_options = []
# See: http://slurm.schedmd.com/mpi_guide.html#mvapich2
if self.enabled(Mvapich2.SLURM) in spec:
if self.version > Version('2.0'):
process_manager_options = [
"--with-pmi=pmi2",
"--with-pm=slurm"
]
else:
process_manager_options = [
"--with-pmi=slurm",
"--with-pm=no"
]
elif has_slurm_incompatible_variants:
pms = []
# The variant name is equal to the process manager name in the configuration options
# The variant name is equal to the process manager name in
# the configuration options
for x in Mvapich2.SLURM_INCOMPATIBLE_PMS:
if self.enabled(x) in spec:
pms.append(x)
@ -146,7 +166,9 @@ def set_network_type(self, spec, configure_args):
if self.enabled(x) in spec:
count += 1
if count > 1:
raise RuntimeError('network variants are mutually exclusive (only one can be selected at a time)')
raise RuntimeError('network variants are mutually exclusive \
(only one can be selected at a time)')
network_options = []
# From here on I can suppose that only one variant has been selected
if self.enabled(Mvapich2.PSM) in spec:
@ -164,6 +186,11 @@ def set_network_type(self, spec, configure_args):
configure_args.extend(network_options)
def setup_environment(self, spack_env, run_env):
if self.enabled(Mvapich2.SLURM) in self.spec and \
self.version > Version('2.0'):
run_env.set('SLURM_MPI_TYPE', 'pmi2')
def setup_dependent_environment(self, spack_env, run_env, extension_spec):
spack_env.set('MPICH_CC', spack_cc)
spack_env.set('MPICH_CXX', spack_cxx)
@ -178,7 +205,8 @@ def setup_dependent_package(self, module, dep_spec):
self.spec.mpif77 = join_path(self.prefix.bin, 'mpif77')
def install(self, spec, prefix):
# we'll set different configure flags depending on our environment
# we'll set different configure flags depending on our
# environment
configure_args = [
"--prefix=%s" % prefix,
"--enable-shared",
@ -208,7 +236,6 @@ def install(self, spec, prefix):
self.filter_compilers()
def filter_compilers(self):
"""Run after install to make the MPI compilers use the
compilers that Spack built the package with.
@ -228,8 +255,17 @@ def filter_compilers(self):
spack_f77 = os.environ['F77']
spack_fc = os.environ['FC']
kwargs = { 'ignore_absent' : True, 'backup' : False, 'string' : True }
filter_file('CC="%s"' % spack_cc , 'CC="%s"' % self.compiler.cc, mpicc, **kwargs)
filter_file('CXX="%s"'% spack_cxx, 'CXX="%s"' % self.compiler.cxx, mpicxx, **kwargs)
filter_file('F77="%s"'% spack_f77, 'F77="%s"' % self.compiler.f77, mpif77, **kwargs)
filter_file('FC="%s"' % spack_fc , 'FC="%s"' % self.compiler.fc, mpif90, **kwargs)
kwargs = {
'ignore_absent': True,
'backup': False,
'string': True
}
filter_file('CC="%s"' % spack_cc,
'CC="%s"' % self.compiler.cc, mpicc, **kwargs)
filter_file('CXX="%s"' % spack_cxx,
'CXX="%s"' % self.compiler.cxx, mpicxx, **kwargs)
filter_file('F77="%s"' % spack_f77,
'F77="%s"' % self.compiler.f77, mpif77, **kwargs)
filter_file('FC="%s"' % spack_fc,
'FC="%s"' % self.compiler.fc, mpif90, **kwargs)

View File

@ -27,6 +27,26 @@
from spack import *
def _verbs_dir():
"""
Try to find the directory where the OpenFabrics verbs package is
installed. Return None if not found.
"""
try:
# Try to locate Verbs by looking for a utility in the path
ibv_devices = which("ibv_devices")
# Run it (silently) to ensure it works
ibv_devices(output=str, error=str)
# Get path to executable
path = ibv_devices.exe[0]
# Remove executable name and "bin" directory
path = os.path.dirname(path)
path = os.path.dirname(path)
return path
except:
return None
class Openmpi(Package):
"""Open MPI is a project combining technologies and resources from
several other projects (FT-MPI, LA-MPI, LAM/MPI, and PACX-MPI)
@ -54,7 +74,7 @@ class Openmpi(Package):
variant('psm', default=False, description='Build support for the PSM library.')
variant('psm2', default=False, description='Build support for the Intel PSM2 library.')
variant('pmi', default=False, description='Build support for PMI-based launchers')
variant('verbs', default=False, description='Build support for OpenFabrics verbs.')
variant('verbs', default=_verbs_dir() is not None, description='Build support for OpenFabrics verbs.')
variant('mxm', default=False, description='Build Mellanox Messaging support')
variant('thread_multiple', default=False, description='Enable MPI_THREAD_MULTIPLE support')
@ -113,7 +133,6 @@ def install(self, spec, prefix):
# Fabrics
'--with-psm' if '+psm' in spec else '--without-psm',
'--with-psm2' if '+psm2' in spec else '--without-psm2',
('--with-%s' % self.verbs) if '+verbs' in spec else ('--without-%s' % self.verbs),
'--with-mxm' if '+mxm' in spec else '--without-mxm',
# Other options
'--enable-mpi-thread-multiple' if '+thread_multiple' in spec else '--disable-mpi-thread-multiple',
@ -121,6 +140,14 @@ def install(self, spec, prefix):
'--with-sqlite3' if '+sqlite3' in spec else '--without-sqlite3',
'--enable-vt' if '+vt' in spec else '--disable-vt'
])
if '+verbs' in spec:
path = _verbs_dir()
if path is not None and path not in ('/usr', '/usr/local'):
config_args.append('--with-%s=%s' % (self.verbs, path))
else:
config_args.append('--with-%s' % self.verbs)
else:
config_args.append('--without-%s' % self.verbs)
# TODO: use variants for this, e.g. +lanl, +llnl, etc.
# use this for LANL builds, but for LLNL builds, we need:

View File

@ -100,7 +100,7 @@ def install(self, spec, prefix):
# in the environment, then this will override what is set in the
# Makefile, leading to build errors.
env.pop('APPS', None)
if spec.satisfies("arch=darwin-x86_64") or spec.satisfies("arch=ppc64"):
if spec.satisfies("target=x86_64") or spec.satisfies("target=ppc64"):
# This needs to be done for all 64-bit architectures (except Linux,
# where it happens automatically?)
env['KERNEL_BITS'] = '64'

View File

@ -26,15 +26,17 @@
from spack import *
import sys
class Parmetis(Package):
"""
ParMETIS is an MPI-based parallel library that implements a variety of algorithms for partitioning unstructured
graphs, meshes, and for computing fill-reducing orderings of sparse matrices.
"""
"""ParMETIS is an MPI-based parallel library that implements a variety of
algorithms for partitioning unstructured graphs, meshes, and for
computing fill-reducing orderings of sparse matrices."""
homepage = 'http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview'
url = 'http://glaros.dtc.umn.edu/gkhome/fetch/sw/parmetis/parmetis-4.0.3.tar.gz'
base_url = 'http://glaros.dtc.umn.edu/gkhome/fetch/sw/parmetis'
version('4.0.3', 'f69c479586bf6bb7aff6a9bc0c739628')
version('4.0.2', '0912a953da5bb9b5e5e10542298ffdce')
variant('shared', default=True, description='Enables the build of shared libraries')
variant('debug', default=False, description='Builds the library in debug mode')
@ -42,17 +44,18 @@ class Parmetis(Package):
depends_on('cmake@2.8:') # build dependency
depends_on('mpi')
patch('enable_external_metis.patch')
depends_on('metis@5:')
patch('enable_external_metis.patch')
# bug fixes from PETSc developers
# https://bitbucket.org/petsc/pkg-parmetis/commits/1c1a9fd0f408dc4d42c57f5c3ee6ace411eb222b/raw/
# https://bitbucket.org/petsc/pkg-parmetis/commits/1c1a9fd0f408dc4d42c57f5c3ee6ace411eb222b/raw/ # NOQA: ignore=E501
patch('pkg-parmetis-1c1a9fd0f408dc4d42c57f5c3ee6ace411eb222b.patch')
# https://bitbucket.org/petsc/pkg-parmetis/commits/82409d68aa1d6cbc70740d0f35024aae17f7d5cb/raw/
# https://bitbucket.org/petsc/pkg-parmetis/commits/82409d68aa1d6cbc70740d0f35024aae17f7d5cb/raw/ # NOQA: ignore=E501
patch('pkg-parmetis-82409d68aa1d6cbc70740d0f35024aae17f7d5cb.patch')
depends_on('gdb', when='+gdb')
def url_for_version(self, version):
verdir = 'OLD/' if version < Version('3.2.0') else ''
return '%s/%sparmetis-%s.tar.gz' % (Parmetis.base_url, verdir, version)
def install(self, spec, prefix):
options = []
@ -60,30 +63,27 @@ def install(self, spec, prefix):
build_directory = join_path(self.stage.path, 'spack-build')
source_directory = self.stage.source_path
metis_source = join_path(source_directory, 'metis')
# FIXME : Once a contract is defined, MPI compilers should be retrieved indirectly via spec['mpi'] in case
# FIXME : they use a non-standard name
options.extend(['-DGKLIB_PATH:PATH={metis_source}/GKlib'.format(metis_source=spec['metis'].prefix.include),
'-DMETIS_PATH:PATH={metis_source}'.format(metis_source=spec['metis'].prefix),
'-DCMAKE_C_COMPILER:STRING=mpicc',
'-DCMAKE_CXX_COMPILER:STRING=mpicxx'])
options.extend([
'-DGKLIB_PATH:PATH=%s/GKlib' % spec['metis'].prefix.include,
'-DMETIS_PATH:PATH=%s' % spec['metis'].prefix,
'-DCMAKE_C_COMPILER:STRING=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER:STRING=%s' % spec['mpi'].mpicxx
])
if '+shared' in spec:
options.append('-DSHARED:BOOL=ON')
if '+debug' in spec:
options.extend(['-DDEBUG:BOOL=ON',
'-DCMAKE_BUILD_TYPE:STRING=Debug'])
if '+gdb' in spec:
options.append('-DGDB:BOOL=ON')
with working_dir(build_directory, create=True):
cmake(source_directory, *options)
make()
make("install")
make('install')
# The shared library is not installed correctly on Darwin; correct this
# The shared library is not installed correctly on Darwin; fix this
if (sys.platform == 'darwin') and ('+shared' in spec):
fix_darwin_install_name(prefix.lib)

View File

@ -0,0 +1,12 @@
diff -up pcre-8.38/pcrecpp.cc.intel pcre-8.38/pcrecpp.cc
--- pcre-8.38/pcrecpp.cc.intel 2014-09-15 07:48:59.000000000 -0600
+++ pcre-8.38/pcrecpp.cc 2016-06-08 16:16:56.702721214 -0600
@@ -66,7 +66,7 @@ Arg RE::no_arg((void*)NULL);
// inclusive test if we ever needed it. (Note that not only the
// __attribute__ syntax, but also __USER_LABEL_PREFIX__, are
// gnu-specific.)
-#if defined(__GNUC__) && __GNUC__ >= 3 && defined(__ELF__)
+#if defined(__GNUC__) && __GNUC__ >= 3 && defined(__ELF__) && !defined(__INTEL_COMPILER)
# define ULP_AS_STRING(x) ULP_AS_STRING_INTERNAL(x)
# define ULP_AS_STRING_INTERNAL(x) #x
# define USER_LABEL_PREFIX_STR ULP_AS_STRING(__USER_LABEL_PREFIX__)

View File

@ -24,6 +24,7 @@
##############################################################################
from spack import *
class Pcre(Package):
"""The PCRE package contains Perl Compatible Regular Expression
libraries. These are useful for implementing regular expression
@ -34,6 +35,8 @@ class Pcre(Package):
version('8.36', 'b767bc9af0c20bc9c1fe403b0d41ad97')
version('8.38', '00aabbfe56d5a48b270f999b508c5ad2')
patch("intel.patch")
def install(self, spec, prefix):
configure("--prefix=%s" % prefix)
make()

Some files were not shown because too many files have changed in this diff Show More