Using Helix¶
Login¶
A single gateway redirects connections to one of the login nodes in a load-balanced way.
| Hostname | Node type |
|---|---|
|  | login to one of the two Helix login nodes |
Host key fingerprints:
| Algorithm | Fingerprint (SHA256) |
|---|---|
| RSA |  |
| ECDSA |  |
| ED25519 |  |
Your username for the cluster will be your ICP ID with an st_ prefix.
For example, if your ID is ac123456, then your Helix username will be st_ac123456.
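To log in, connect with SSH using this username and the gateway hostname from the table above (written here as a placeholder):
# replace <helix-gateway> with the gateway hostname from the table above
ssh st_ac123456@<helix-gateway>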
More details can be found in Helix/Login.
Building dependencies¶
Boost¶
# last update: July 2025
module load compiler/gnu/14.1 mpi/openmpi/4.1
mkdir boost-build
cd boost-build
BOOST_VERSION=1.88.0
BOOST_DOMAIN="https://archives.boost.io"
BOOST_ROOT="${HOME}/bin/boost_mpi_${BOOST_VERSION//./_}"
mkdir -p "${BOOST_ROOT}"
curl -sL "${BOOST_DOMAIN}/release/${BOOST_VERSION}/source/boost_${BOOST_VERSION//./_}.tar.bz2" | tar xj
cd "boost_${BOOST_VERSION//./_}"
echo 'using mpi ;' > tools/build/src/user-config.jam
./bootstrap.sh --with-libraries=filesystem,system,mpi,serialization,test
./b2 -j 4 install --prefix="${BOOST_ROOT}"
cd "${HOME}"
rm -rf boost-build
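As a quick sanity check (assuming the prefix layout used above), you can verify that the MPI-enabled Boost libraries were installed:
# ESPResSo later needs the MPI and serialization libraries from this build
ls "${BOOST_ROOT}/lib" | grep -E 'boost_(mpi|serialization)'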
hwloc¶
# last update: April 2026
module load compiler/gnu/14.1 mpi/openmpi/4.1
mkdir hwloc-build
cd hwloc-build
HWLOC_VERSION=2.13.0
HWLOC_ROOT="${HOME}/bin/hwloc_${HWLOC_VERSION//./_}"
git clone -b v2.13 https://github.com/open-mpi/hwloc
cd hwloc
autoreconf -i
./configure --prefix="${HWLOC_ROOT}"
make -j 4
make install
make clean
cd ../..
rm -rf hwloc-build
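A minimal check that the installation succeeded (assuming the prefix above) is to query one of the installed hwloc utilities:
# hwloc-info is installed into the prefix alongside the library
"${HWLOC_ROOT}/bin/hwloc-info" --version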
FFTW¶
# last update: July 2025
module load compiler/gnu/14.1 mpi/openmpi/4.1
mkdir fftw-build
cd fftw-build
FFTW3_VERSION=3.3.10
FFTW3_ROOT="${HOME}/bin/fftw_${FFTW3_VERSION//./_}"
curl -sL "https://www.fftw.org/fftw-${FFTW3_VERSION}.tar.gz" | tar xz
cd "fftw-${FFTW3_VERSION}"
for floating_point in "" "--enable-float"; do
./configure --enable-shared --enable-mpi --enable-threads --enable-openmp \
--disable-fortran --enable-avx --prefix="${FFTW3_ROOT}" ${floating_point}
make -j 4
make install
make clean
done
cd ../..
rm -rf fftw-build
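The loop builds both the double-precision and the single-precision (--enable-float) variants, so a quick check is that both sets of libraries ended up in the prefix:
# expect libfftw3 (double precision) and libfftw3f (single precision) plus their MPI/OpenMP variants
ls "${FFTW3_ROOT}/lib" | grep libfftw3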
CUDA¶
# last update: September 2025
module load compiler/gnu/14.1 devel/cuda/12.9
export CLUSTER_CUDA_ROOT="${HOME}/bin/cuda_${CUDA_VERSION//./_}"
mkdir -p "${CLUSTER_CUDA_ROOT}/lib"
ln -s "${CUDA_HOME}/targets/x86_64-linux/lib/stubs/libcuda.so" "${CLUSTER_CUDA_ROOT}/lib/libcuda.so"
ln -s "${CUDA_HOME}/targets/x86_64-linux/lib/stubs/libcuda.so" "${CLUSTER_CUDA_ROOT}/lib/libcuda.so.1"
Building software¶
ESPResSo¶
Release 4.2:
# last update: December 2025
module load compiler/gnu/14.1 \
mpi/openmpi/4.1 \
devel/python/3.13.1 \
devel/cmake/3.31.6
CLUSTER_FFTW3_VERSION=3.3.10
CLUSTER_BOOST_VERSION=1.88.0
export BOOST_ROOT="${HOME}/bin/boost_mpi_${CLUSTER_BOOST_VERSION//./_}"
export FFTW3_ROOT="${HOME}/bin/fftw_${CLUSTER_FFTW3_VERSION//./_}"
export CLUSTER_CUDA_ROOT="${HOME}/bin/cuda_${CUDA_VERSION//./_}"
export LD_LIBRARY_PATH="${BOOST_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="${FFTW3_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="${CLUSTER_CUDA_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
python3 -m venv "${HOME}/venv"
source "${HOME}/venv/bin/activate"
git clone --recursive --branch 4.2 --origin upstream \
https://github.com/espressomd/espresso.git espresso-4.2
cd espresso-4.2
python -m pip install 'cython>=3.0.0,<3.0.10'
python -m pip install -c "requirements.txt" setuptools numpy scipy vtk
mkdir build
cd build
cp ../maintainer/configs/maxset.hpp myconfig.hpp
sed -i "/ADDITIONAL_CHECKS/d" myconfig.hpp
cmake .. -D CMAKE_BUILD_TYPE=Release -D WITH_CUDA=OFF -D WITH_CCACHE=OFF -D WITH_SCAFACOS=OFF -D WITH_HDF5=OFF
make -j 4
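As a minimal smoke test (assuming the standard ESPResSo build layout, where the pypresso wrapper forwards its arguments to the Python interpreter), import the module from the build directory; this also confirms that the Boost and FFTW libraries on LD_LIBRARY_PATH resolve:
# should exit silently; a missing shared library would raise an ImportError
./pypresso -c "import espressomd"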
Release 5.0:
# last update: April 2026
module load compiler/gnu/14.1 \
mpi/openmpi/4.1 \
devel/cuda/12.9 \
devel/python/3.13.1 \
lib/hdf5/1.14.6-gnu-14.1-openmpi-4.1 \
devel/cmake/3.31.6
CLUSTER_FFTW3_VERSION=3.3.10
CLUSTER_HWLOC_VERSION=2.13.0
CLUSTER_BOOST_VERSION=1.88.0
export CLUSTER_BOOST_ROOT="${HOME}/bin/boost_mpi_${CLUSTER_BOOST_VERSION//./_}"
export CLUSTER_FFTW3_ROOT="${HOME}/bin/fftw_${CLUSTER_FFTW3_VERSION//./_}"
export CLUSTER_HWLOC_ROOT="${HOME}/bin/hwloc_${CLUSTER_HWLOC_VERSION//./_}"
export CLUSTER_CUDA_ROOT="${HOME}/bin/cuda_${CUDA_VERSION//./_}"
export LD_LIBRARY_PATH="${CLUSTER_BOOST_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="${CLUSTER_FFTW3_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="${CLUSTER_HWLOC_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="${CLUSTER_CUDA_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
python -m venv "${HOME}/venv"
source "${HOME}/venv/bin/activate"
git clone --branch python --origin upstream \
https://github.com/espressomd/espresso.git espresso-5.0
cd espresso-5.0
python -m pip install -c "requirements.txt" cython setuptools numpy scipy vtk h5py
mkdir build
cd build
rm -rf *
cp ../maintainer/configs/maxset.hpp myconfig.hpp
sed -i "/ADDITIONAL_CHECKS/d" myconfig.hpp
FFTW3_ROOT=${CLUSTER_FFTW3_ROOT} cmake .. \
-D CUDAToolkit_ROOT="${CUDA_HOME}" \
-D FFTW_ROOT="${CLUSTER_FFTW3_ROOT}" \
-D Boost_DIR="${CLUSTER_BOOST_ROOT}/lib/cmake/Boost-1.88.0" \
-D CMAKE_BUILD_TYPE=Release -D ESPRESSO_BUILD_WITH_CUDA=ON -D CMAKE_CUDA_ARCHITECTURES="86;89" \
-D ESPRESSO_BUILD_WITH_CCACHE=OFF -D ESPRESSO_BUILD_WITH_WALBERLA=ON \
-D ESPRESSO_BUILD_WITH_SCAFACOS=OFF -D ESPRESSO_BUILD_WITH_HDF5=ON \
-D ESPRESSO_BUILD_WITH_SHARED_MEMORY_PARALLELISM=ON -D Kokkos_HWLOC_DIR=${CLUSTER_HWLOC_ROOT}
make -j 10
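Because this build links against the libcuda stubs created earlier, the module should import even on a login node without a GPU driver; actual GPU runs still require a GPU node. The same minimal import check as for release 4.2 applies:
# should exit without a missing shared library error
./pypresso -c "import espressomd"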
Submitting jobs¶
To show which nodes are idle:
sinfo_t_idle
Batch command:
sbatch job.sh
Job script:
#!/bin/bash
#SBATCH --partition=cpu-single
#SBATCH --job-name=test
#SBATCH --ntasks=4
#SBATCH --ntasks-per-core=1
#SBATCH --time=00:10:00
#SBATCH --output %j.stdout
#SBATCH --error %j.stderr
# last update: April 2026
module load compiler/gnu/14.1 \
mpi/openmpi/4.1 \
devel/cuda/12.9 \
devel/python/3.13.1 \
lib/hdf5/1.14.6-gnu-14.1-openmpi-4.1
CLUSTER_FFTW3_VERSION=3.3.10
CLUSTER_HWLOC_VERSION=2.13.0
CLUSTER_BOOST_VERSION=1.88.0
export CLUSTER_BOOST_ROOT="${HOME}/bin/boost_mpi_${CLUSTER_BOOST_VERSION//./_}"
export CLUSTER_FFTW3_ROOT="${HOME}/bin/fftw_${CLUSTER_FFTW3_VERSION//./_}"
export CLUSTER_HWLOC_ROOT="${HOME}/bin/hwloc_${CLUSTER_HWLOC_VERSION//./_}"
export CLUSTER_CUDA_ROOT="${HOME}/bin/cuda_${CUDA_VERSION//./_}"
export LD_LIBRARY_PATH="${CLUSTER_BOOST_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="${CLUSTER_FFTW3_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="${CLUSTER_HWLOC_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="${CLUSTER_CUDA_ROOT}/lib${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
source "${HOME}/venv/bin/activate"
# using srun (keep either this launcher or the mpiexec call below, not both)
srun --mpi=pmi2 ./pypresso ../maintainer/benchmarks/lb.py
# using OpenMPI directly
mpiexec --bind-to core --map-by core --report-bindings ./pypresso ../maintainer/benchmarks/lb.py
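Once submitted, a job can be monitored and, if necessary, cancelled with the standard Slurm commands:
squeue -u "${USER}"   # list your pending and running jobs
scancel <jobid>       # cancel a job by its numeric job ID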
The desired partition needs to be specified via the #SBATCH --partition directive; without it, your job will not be allocated any resources.
Helix has the following partitions available:
| Partition | Default Configuration | Limit |
|---|---|---|
|  | ntasks=1, time=00:10:00, mem-per-cpu=2gb | nodes=2, time=00:30:00 |
|  | ntasks=1, time=00:30:00, mem-per-cpu=2gb | nodes=1, time=120:00:00 |
|  | ntasks=1, time=00:30:00, mem-per-cpu=2gb | nodes=1, time=120:00:00 |
|  | nodes=2, time=00:30:00 | nodes=32, time=48:00:00 |
|  | nodes=2, time=00:30:00 | nodes=8, time=48:00:00 |
Source:
scontrol show partition
Refer to Helix/Slurm for more details on submitting job scripts on Helix.