diff --git a/Basic/MPI/Hello/README.txt b/Basic/MPI/Hello/README.txt
index 830ee55..3890a7a 100644
--- a/Basic/MPI/Hello/README.txt
+++ b/Basic/MPI/Hello/README.txt
@@ -1,55 +1,86 @@
 To compile the hello_*.* source files:

+  source /ssoft/spack/bin/slmodules.sh
+
+Then, either:
+
   module load intel intelmpi

   mpiicc -o hello_c hello.c
   mpiicpc -o hello_cxx hello.cxx
   mpiifort -o hello_f90 hello.f90

+or:
+
+  module load gcc mvapich2
+
+  mpicc -o hello_c hello.c
+  mpicxx -o hello_cxx hello.cxx
+  mpifort -o hello_f90 hello.f90
+
+or:
+
+  module load gcc openmpi
+
+  mpicc -o hello_c hello.c
+  mpicxx -o hello_cxx hello.cxx
+  mpifort -o hello_f90 hello.f90
+
+according to the desired compiler and MPI variant
+
 --

 To run them on the front-end:

   ./hello_c

 and so on with hello_cxx and hello_f90

 --

 To run them in an interactive SLURM session:

 > salloc -n 4
 salloc: Granted job allocation 43196

+> source /ssoft/spack/bin/slmodules.sh
+
 > module load intel intelmpi
+# or
+# module load gcc mvapich2
+# module load gcc openmpi
+
 > srun ./hello_c
 Hello, my rank is 2 among 4 tasks on machine c07
 Hello, my rank is 0 among 4 tasks on machine c07
 Hello, my rank is 3 among 4 tasks on machine c07
 Hello, my rank is 1 among 4 tasks on machine c07
 --- BARRIER! ---
 > exit
 exit
 salloc: Relinquishing job allocation 43196
 salloc: Job allocation 43196 has been revoked.
 >

 --

 To run them in batch mode:

 > sbatch hello.run
 Submitted batch job 43198

 The result should be something similar to

 > cat slurm-43198.out
-Currently Loaded Modulefiles:
-  1) intel/14.0.1   2) intelmpi/4.1.3
+Currently Loaded Modules:
+  1) intel/16.0.3   2) intelmpi/5.1.3
+
+*** C ***
 Hello, my rank is 2 among 4 tasks on machine c07
 Hello, my rank is 3 among 4 tasks on machine c07
 Hello, my rank is 0 among 4 tasks on machine c07
 Hello, my rank is 1 among 4 tasks on machine c07
+etc.
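Note: the hello.c / hello.cxx / hello.f90 sources themselves are not part of this
patch. As a rough, untested sketch only (the output format is taken from the
README transcript above; everything else is an assumption), the C version could
look like this:

    /* hello.c (sketch) -- print rank, task count and host name */
    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char *argv[])
    {
        int rank, size, namelen;
        char name[MPI_MAX_PROCESSOR_NAME];

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Get_processor_name(name, &namelen);

        printf("Hello, my rank is %d among %d tasks on machine %s\n",
               rank, size, name);

        /* the "--- BARRIER! ---" line in the transcript suggests a final barrier */
        MPI_Barrier(MPI_COMM_WORLD);
        if (rank == 0)
            printf("--- BARRIER! ---\n");

        MPI_Finalize();
        return 0;
    }
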
diff --git a/Basic/MPI/Hello/hello.run b/Basic/MPI/Hello/hello.run
index 74a6bfe..71a4ba2 100644
--- a/Basic/MPI/Hello/hello.run
+++ b/Basic/MPI/Hello/hello.run
@@ -1,30 +1,35 @@
 #!/bin/bash -l
 #SBATCH --nodes 2
 #SBATCH --ntasks-per-node 4
 ## cpus-per-task > 1 needed for multithreaded applications
 #SBATCH --cpus-per-task 1
 ## maximum memory needed in MB
 #SBATCH --mem 4096
 ## maximum walltime needed
 #SBATCH --time 00:05:00

 echo
 echo STARTING AT `date`
 echo

+source /ssoft/spack/bin/slmodules.sh
+
 module purge
+# load the compiler and MPI variant used to generate the executable
 module load intel intelmpi
+# module load gcc mvapich2
+# module load gcc openmpi
 module list
 echo

 echo "*** C ***"
 srun ./hello_c

 echo "*** FORTRAN ***"
 srun ./hello_f90

 echo "*** C++ ***"
 srun ./hello_cxx
diff --git a/Basic/MPI/Minimalist/README.txt b/Basic/MPI/Minimalist/README.txt
new file mode 100644
index 0000000..bc746b4
--- /dev/null
+++ b/Basic/MPI/Minimalist/README.txt
@@ -0,0 +1,12 @@
+To compile the hello_mpi.c source file:
+
+  source /ssoft/spack/bin/slmodules.sh
+
+  module load intel intelmpi
+
+  mpiicc hello_mpi.c -o hello_mpi
+
+
+To run the executable file in batch mode:
+
+  sbatch hello.run
diff --git a/Basic/MPI/Minimalist/hello.run b/Basic/MPI/Minimalist/hello.run
index 42c009e..32bd752 100644
--- a/Basic/MPI/Minimalist/hello.run
+++ b/Basic/MPI/Minimalist/hello.run
@@ -1,21 +1,24 @@
 #!/bin/bash -l
 #SBATCH --nodes 1
 #SBATCH --ntasks-per-node 4
 ## cpus-per-task > 1 needed for multithreaded applications
 #SBATCH --cpus-per-task 1
 ## maximum memory needed in MB
 #SBATCH --mem 4096
 ## maximum walltime needed
 #SBATCH --time 00:05:00

+source /ssoft/spack/bin/slmodules.sh
+
 module purge
+# load the compiler and MPI variant used to generate the executable
 module load intel intelmpi

 # compiled with mpiicc hello_mpi.c -o hello_mpi

 srun ./hello_mpi
diff --git a/Basic/MPI/Ring/README.txt b/Basic/MPI/Ring/README.txt
index 0f8718a..5bc0001 100644
--- a/Basic/MPI/Ring/README.txt
+++ b/Basic/MPI/Ring/README.txt
@@ -1,87 +1,118 @@
 To compile the ring.* source files:

-module load intel intelmpi
+  source /ssoft/spack/bin/slmodules.sh
+
+Then, either:
+
+  module load intel intelmpi

   mpiicc -o ring_c ring.c
   mpiicpc -o ring_cxx ring.cxx
   mpiifort -o ring_f90 ring.f90

+or:
+
+  module load gcc mvapich2
+
+  mpicc -o ring_c ring.c
+  mpicxx -o ring_cxx ring.cxx
+  mpifort -o ring_f90 ring.f90
+
+or:
+
+  module load gcc openmpi
+
+  mpicc -o ring_c ring.c
+  mpicxx -o ring_cxx ring.cxx
+  mpifort -o ring_f90 ring.f90
+
+according to the desired compiler and MPI variant
+
 --

 To run them on the front-end:

   ./ring_c

 and so on with ring_cxx and ring_f90

 --

 To run them in an interactive SLURM session:

 > salloc -n 4
 salloc: Granted job allocation 43196

+> source /ssoft/spack/bin/slmodules.sh
+
 > module load intel intelmpi
+# or
+# module load gcc mvapich2
+# module load gcc openmpi
+
 > srun ./ring_c
 Hello, my rank is 2 among 4 tasks on machine c03, with my_var = 20 initially
 Hello, my rank is 1 among 4 tasks on machine c03, with my_var = 10 initially
 Hello, my rank is 3 among 4 tasks on machine c03, with my_var = 30 initially
 Hello, my rank is 0 among 4 tasks on machine c03, with my_var = 0 initially
 c03: rank 1 sends to 2 and receives from 0, my_var = 10
 c03: rank 2 sends to 3 and receives from 1, my_var = 20
 --- BARRIER! ---
 c03: rank 0 sends to 1 and receives from 3, my_var = 0
 c03: rank 3 sends to 0 and receives from 2, my_var = 30
 Hello, my rank is 1 among 4 tasks on machine c03, with my_var = 0 finally
 --- BARRIER! ---
 Hello, my rank is 0 among 4 tasks on machine c03, with my_var = 30 finally
 Hello, my rank is 3 among 4 tasks on machine c03, with my_var = 20 finally
 Hello, my rank is 2 among 4 tasks on machine c03, with my_var = 10 finally
 >

 --

 To run them in batch mode:

 Set the EXECUTABLE to be run in ring.run, and then:

 > sbatch ring.run
-Submitted batch job 43314
+Submitted batch job 1028026

-result on slurm out file should look like this:
+The result in slurm-1028026.out should look like this:

-STARTING AT Sun Jun 14 18:54:33 CEST 2015
+
+STARTING AT ven. juil. 1 13:09:27 CEST 2016

-Currently Loaded Modulefiles:
-  1) intel/15.0.2.164   2) intelmpi/5.0.1
+Currently Loaded Modules:
+  1) intel/16.0.3   2) intelmpi/5.1.3

 --> EXECUTABLE = ./ring_c

 --> ./ring_c depends on the following dynamic libraries:
-    linux-vdso.so.1 =>  (0x00007fffb69ff000)
-    libmpifort.so.12 => /ssoft/intelmpi/5.0.1/RH6/all/x86_E5v2/impi/5.0.1.035/lib64/libmpifort.so.12 (0x00007f52fb72b000)
-    libmpi.so.12 => /ssoft/intelmpi/5.0.1/RH6/all/x86_E5v2/impi/5.0.1.035/lib64/libmpi.so.12 (0x00007f52faffa000)
-    libdl.so.2 => /lib64/libdl.so.2 (0x0000003d3ae00000)
-    librt.so.1 => /lib64/librt.so.1 (0x0000003538800000)
-    libpthread.so.0 => /lib64/libpthread.so.0 (0x0000003deaa00000)
-    libm.so.6 => /lib64/libm.so.6 (0x0000003d3be00000)
-    libgcc_s.so.1 => /lib64/libgcc_s.so.1 (0x000000337e400000)
-    libc.so.6 => /lib64/libc.so.6 (0x0000003d3b200000)
-    /lib64/ld-linux-x86-64.so.2 (0x0000003d3aa00000)
-
-Hello, my rank is 0 among 4 tasks on machine b093, with my_var = 0 initially
-Hello, my rank is 1 among 4 tasks on machine b093, with my_var = 10 initially
-Hello, my rank is 2 among 4 tasks on machine b093, with my_var = 20 initially
-Hello, my rank is 3 among 4 tasks on machine b093, with my_var = 30 initially
+    linux-vdso.so.1 =>  (0x00007fff07bff000)
+    libmpifort.so.12 => /ssoft/spack/external/intel/2016/compilers_and_libraries_2016.3.210/linux/mpi/intel64/lib/libmpifort.so.12 (0x00007f8313a24000)
+    libmpi.so.12 => /ssoft/spack/external/intel/2016/compilers_and_libraries_2016.3.210/linux/mpi/intel64/lib/release_mt/libmpi.so.12 (0x00007f8313255000)
+    libdl.so.2 => /lib64/libdl.so.2 (0x0000003be3e00000)
+    librt.so.1 => /lib64/librt.so.1 (0x0000003be4200000)
+    libpthread.so.0 => /lib64/libpthread.so.0 (0x0000003be3a00000)
+    libm.so.6 => /lib64/libm.so.6 (0x0000003968600000)
+    libgcc_s.so.1 => /ssoft/spack/lafnetscha/opt/spack/x86_E5v1_IntelIB/gcc-4.4.7/gcc-4.9.3-2gys3tu6lq35ge6woeixfo4tfz5nnvzn/lib64/libgcc_s.so.1 (0x00007f831302f000)
+    libc.so.6 => /lib64/libc.so.6 (0x0000003be3600000)
+    /lib64/ld-linux-x86-64.so.2 (0x0000003be3200000)
+
+Hello, my rank is 0 among 4 tasks on machine b181, with my_var = 0 initially
+Hello, my rank is 1 among 4 tasks on machine b181, with my_var = 10 initially
+Hello, my rank is 2 among 4 tasks on machine b181, with my_var = 20 initially
+Hello, my rank is 3 among 4 tasks on machine b181, with my_var = 30 initially
 --- BARRIER! ---
-b093: rank 0 sends to 1 and receives from 3, my_var = 0
-b093: rank 1 sends to 2 and receives from 0, my_var = 10
-b093: rank 2 sends to 3 and receives from 1, my_var = 20
-b093: rank 3 sends to 0 and receives from 2, my_var = 30
+b181: rank 1 sends to 2 and receives from 0, my_var = 10
+b181: rank 2 sends to 3 and receives from 1, my_var = 20
+b181: rank 3 sends to 0 and receives from 2, my_var = 30
+b181: rank 0 sends to 1 and receives from 3, my_var = 0
 --- BARRIER! ---
-Hello, my rank is 0 among 4 tasks on machine b093, with my_var = 30 finally
-Hello, my rank is 1 among 4 tasks on machine b093, with my_var = 0 finally
-Hello, my rank is 2 among 4 tasks on machine b093, with my_var = 10 finally
-Hello, my rank is 3 among 4 tasks on machine b093, with my_var = 20 finally
+Hello, my rank is 0 among 4 tasks on machine b181, with my_var = 30 finally
+Hello, my rank is 1 among 4 tasks on machine b181, with my_var = 0 finally
+Hello, my rank is 2 among 4 tasks on machine b181, with my_var = 10 finally
+Hello, my rank is 3 among 4 tasks on machine b181, with my_var = 20 finally

-FINISHED at Sun Jun 14 18:54:35 CEST 2015
+FINISHED at ven. juil. 1 13:09:29 CEST 2016
diff --git a/Basic/MPI/Ring/ring.run b/Basic/MPI/Ring/ring.run
index c37d2a1..6e96bb7 100644
--- a/Basic/MPI/Ring/ring.run
+++ b/Basic/MPI/Ring/ring.run
@@ -1,39 +1,44 @@
 #!/bin/bash -l
 #SBATCH --nodes 1
 #SBATCH --ntasks-per-node 4
 ## --cpus-per-task > 1 needed for multithreaded applications
 #SBATCH --cpus-per-task 1
 ## maximum memory needed
 #SBATCH --mem 4096
 ## maximum walltime needed
 #SBATCH --time 00:05:00

 echo
 echo STARTING AT `date`
 echo

+source /ssoft/spack/bin/slmodules.sh
+
 module purge
+# load the compiler and MPI variant used to generate the executable
 module load intel intelmpi
+# module load gcc mvapich2
+# module load gcc openmpi
 module list
 echo

 EXECUTABLE="./ring_c"
 #EXECUTABLE="./ring_cxx"
 #EXECUTABLE="./ring_f90"

 echo "--> EXECUTABLE = ${EXECUTABLE}"
 echo
 echo "--> ${EXECUTABLE} depends on the following dynamic libraries:"
 ldd ${EXECUTABLE}
 echo

 srun ${EXECUTABLE}

 echo
 echo FINISHED at `date`
 echo
diff --git a/Basic/Pi_integral/pi.run b/Basic/Pi_integral/pi.run
index 8c77664..537f27e 100644
--- a/Basic/Pi_integral/pi.run
+++ b/Basic/Pi_integral/pi.run
@@ -1,11 +1,18 @@
 #!/bin/bash -l
 #SBATCH --nodes 1
 #SBATCH --ntasks 1
 #SBATCH --cpus-per-task 16

-# compile with gcc -lpthread -lm pi.c -o pi
+# compile with:
+# source /ssoft/spack/bin/slmodules.sh
+# module load gcc
+# gcc -lpthread -lm pi.c -o pi
+
+source /ssoft/spack/bin/slmodules.sh
+
+module load gcc

 srun ./pi 1024 16
diff --git a/Basic/Pi_mc/pi_mc.run b/Basic/Pi_mc/pi_mc.run
index 5910ed0..284f649 100644
--- a/Basic/Pi_mc/pi_mc.run
+++ b/Basic/Pi_mc/pi_mc.run
@@ -1,10 +1,17 @@
 #!/bin/bash -l
 #SBATCH --nodes 1
 #SBATCH --ntasks 1
 #SBATCH --cpus-per-task 1

-# compile with gcc pi_mc.c -o pi_mc
+# compile with:
+# source /ssoft/spack/bin/slmodules.sh
+# module load gcc
+# gcc pi_mc.c -o pi_mc
+
+source /ssoft/spack/bin/slmodules.sh
+
+module load gcc

 srun ./pi_mc
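Note: ring.c is not included in this patch either. Going only by the README
transcript above (initial my_var = 10 * rank, each task sends my_var to the next
rank and receives from the previous one), a sketch of that pattern could look as
follows -- this is an assumption about the implementation, not the actual source;
the real ring.c may use separate MPI_Send / MPI_Recv calls instead of MPI_Sendrecv:

    /* ring.c (sketch) -- pass my_var one step around a ring of tasks */
    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char *argv[])
    {
        int rank, size, namelen, next, prev, my_var, recv_var;
        char name[MPI_MAX_PROCESSOR_NAME];

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        MPI_Get_processor_name(name, &namelen);

        my_var = 10 * rank;
        printf("Hello, my rank is %d among %d tasks on machine %s, with my_var = %d initially\n",
               rank, size, name, my_var);

        next = (rank + 1) % size;
        prev = (rank + size - 1) % size;
        printf("%s: rank %d sends to %d and receives from %d, my_var = %d\n",
               name, rank, next, prev, my_var);

        /* combined send/receive avoids deadlock on the ring */
        MPI_Sendrecv(&my_var, 1, MPI_INT, next, 0,
                     &recv_var, 1, MPI_INT, prev, 0,
                     MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        my_var = recv_var;

        MPI_Barrier(MPI_COMM_WORLD);
        if (rank == 0)
            printf("--- BARRIER! ---\n");

        printf("Hello, my rank is %d among %d tasks on machine %s, with my_var = %d finally\n",
               rank, size, name, my_var);

        MPI_Finalize();
        return 0;
    }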
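For Basic/Pi_integral, the run line "srun ./pi 1024 16" together with the
pthread/math compile flags suggests a multithreaded integration taking a step
count and a thread count as arguments. The following is only a hypothetical
sketch of such a program (the argument meanings, the midpoint rule, the
MAX_THREADS cap and all names are assumptions; pi.c itself is not shown in this
patch):

    /* pi.c (sketch) -- midpoint-rule integration of 4/(1+x^2) on [0,1],
       run as: ./pi <n_steps> <n_threads> */
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_THREADS 64

    static long n_steps;                 /* number of intervals          */
    static int n_threads;                /* number of worker threads     */
    static double step;                  /* interval width, 1 / n_steps  */
    static double sum = 0.0;             /* shared accumulator           */
    static pthread_mutex_t sum_lock = PTHREAD_MUTEX_INITIALIZER;

    static void *worker(void *arg)
    {
        int id = (int)(long)arg;
        double local = 0.0, x;
        long i;

        /* each thread handles a strided subset of the intervals */
        for (i = id; i < n_steps; i += n_threads) {
            x = (i + 0.5) * step;
            local += 4.0 / (1.0 + x * x);
        }
        pthread_mutex_lock(&sum_lock);
        sum += local;
        pthread_mutex_unlock(&sum_lock);
        return NULL;
    }

    int main(int argc, char *argv[])
    {
        pthread_t threads[MAX_THREADS];
        long t;

        if (argc != 3) {
            fprintf(stderr, "usage: %s n_steps n_threads\n", argv[0]);
            return 1;
        }
        n_steps = atol(argv[1]);
        n_threads = atoi(argv[2]);
        if (n_threads < 1 || n_threads > MAX_THREADS) {
            fprintf(stderr, "n_threads must be between 1 and %d\n", MAX_THREADS);
            return 1;
        }
        step = 1.0 / (double)n_steps;

        for (t = 0; t < n_threads; t++)
            pthread_create(&threads[t], NULL, worker, (void *)t);
        for (t = 0; t < n_threads; t++)
            pthread_join(threads[t], NULL);

        printf("pi is approximately %.12f\n", step * sum);
        return 0;
    }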
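Likewise for Basic/Pi_mc, a serial Monte Carlo estimate of pi compiled with plain
gcc might look roughly like this (sample count, seed and output format are
assumptions; the real pi_mc.c is not shown):

    /* pi_mc.c (sketch) -- serial Monte Carlo estimate of pi */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        long n = 10000000;   /* number of random samples (arbitrary choice) */
        long hits = 0;
        long i;
        double x, y;

        srand(12345);
        for (i = 0; i < n; i++) {
            /* draw a point in the unit square; count hits inside the quarter circle */
            x = (double)rand() / RAND_MAX;
            y = (double)rand() / RAND_MAX;
            if (x * x + y * y <= 1.0)
                hits++;
        }
        printf("pi is approximately %f\n", 4.0 * (double)hits / (double)n);
        return 0;
    }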