diff --git a/Jenkinsfile b/Jenkinsfile
index 8921bb666..e0fd2cbaf 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -1,185 +1,170 @@
 pipeline {
   parameters {string(defaultValue: '', description: 'api-token', name: 'API_TOKEN')
               string(defaultValue: '', description: 'buildable phid', name: 'BUILD_TARGET_PHID')
               string(defaultValue: '', description: 'Commit id', name: 'COMMIT_ID')
               string(defaultValue: '', description: 'Diff id', name: 'DIFF_ID')
+	      string(defaultValue: 'PHID-PROJ-5eqyu6ooyjktagbhf473', description: 'ID of the project', name: 'PROJECT_ID')
   }
 
   options {
     disableConcurrentBuilds()
   }
 
   environment {
     PHABRICATOR_HOST = 'https://c4science.ch/api/'
     PYTHONPATH = sh returnStdout: true, script: 'echo ${WORKSPACE}/test/ci/script/'
     BLA_VENDOR = 'OpenBLAS'
     OMPI_MCA_plm = 'isolated'
     OMPI_MCA_btl = 'tcp,self'
   }
   
   agent {
     dockerfile {
       filename 'Dockerfile'
       dir 'test/ci'
       additionalBuildArgs '--tag akantu-environment'
     }
   }
   
-  stages {
+  stages {    
     stage('Lint') {
       steps {
 	sh """
-           arc lint --output json --rev ${GIT_PREVIOUS_COMMIT}^1 | jq . -srM | tee lint.json
+           arc lint --output json --rev HEAD^ | jq . -srM | tee lint.json
            ./test/ci/scripts/hbm send-arc-lint -f lint.json
            """
       }
     }
+    
     stage('Configure') {
       steps {
         sh """#!/bin/bash
            set -o pipefail
            mkdir -p build
            cd build
            cmake -DAKANTU_COHESIVE_ELEMENT:BOOL=TRUE \
                  -DAKANTU_IMPLICIT:BOOL=TRUE \
                  -DAKANTU_PARALLEL:BOOL=TRUE \
                  -DAKANTU_PYTHON_INTERFACE:BOOL=TRUE \
                  -DAKANTU_TESTS:BOOL=TRUE .. | tee configure.txt
            """
       }
       post {
 	failure {
 	  uploadArtifact('configure.txt', 'Configure')
 	  deleteDir()
 	}
       }
     }
+    
     stage('Compile') {
       steps {
 	sh '''#!/bin/bash
            set -o pipefail
            make -C build/src | tee compilation.txt
            '''
       }
       post {
 	failure {
 	  uploadArtifact('compilation.txt', 'Compilation')
 	}
       }
     }
 
     stage ('Warnings gcc') {
       steps {
         warnings(consoleParsers: [[parserName: 'GNU Make + GNU C Compiler (gcc)']])
       }
     }
 
     stage('Compile python') {
       steps {
         sh '''#!/bin/bash
            set -o pipefail
+
            make -C build/python | tee compilation_python.txt
            '''
       }
       post {
 	failure {
 	  uploadArtifact('compilation_python.txt', 'Compilation_Python')
 	}
       }
     }
 
     stage('Compile tests') {
       steps {
         sh '''#!/bin/bash
            set -o pipefail
+
            make -C build/test | tee compilation_test.txt
            '''
       }
       post {
 	failure {
 	  uploadArtifact('compilation_test.txt', 'Compilation_Tests')
 	}
       }
     }
 
     stage('Tests') {
       steps {
         sh '''
           #rm -rf build/gtest_reports
           cd build/
           #source ./akantu_environement.sh
         
           ctest -T test --no-compress-output || true
+          tag=$(head -n 1 < Testing/TAG)
+          if [ -e Testing/${tag}/Test.xml ]; then
+            cp Testing/${tag}/Test.xml ../CTestResults.xml
+          fi
         '''
       }
-      post {
-	always {
-	  script {
-	    def TAG = sh returnStdout: true, script: 'head -n 1 < build/Testing/TAG'
-	    def TAG_ = TAG.trim()
-
-	    if (fileExists("build/Testing/${TAG}/Test.xml")) {
-	      sh "cp build/Testing/${TAG}/Test.xml CTestResults.xml"
-	    }
-	  }
-	}
+      post {
+        failure {
+          zip zipFile: 'build.zip',  dir: 'build/', archive: true
+        }
       }
     }
   }
 
   post {
     always {
       createArtifact("./CTestResults.xml")
-      
+
       step([$class: 'XUnitBuilder',
 	    thresholds: [
           [$class: 'SkippedThreshold', failureThreshold: '0'],
           [$class: 'FailedThreshold', failureThreshold: '0']],
 	    tools: [
 	  [$class: 'CTestType', pattern: 'CTestResults.xml', skipNoTestFiles: true]
 	]])
-
-      // step([$class: 'XUnitBuilder',
-      //       thresholds: [
-      //     [$class: 'SkippedThreshold', failureThreshold: '100'],
-      //     [$class: 'FailedThreshold', failureThreshold: '0']],
-      //       tools: [
-      // 	  [$class: 'GoogleTestType', pattern: 'build/gtest_reports/**', skipNoTestFiles: true]
-      // 	]])
     }
 
     success {
       passed()
     }
 
     failure {
-      // emailext(
-      //   body: '''${SCRIPT, template="groovy-html.template"}''',
-      // 	mimeType: 'text/html',
-      //   subject: "[Jenkins] ${currentBuild.fullDisplayName} Failed",
-      // 	recipientProviders: [[$class: 'CulpritsRecipientProvider']],
-      // 	to: 'akantu-admins@akantu.ch',
-      // 	replyTo: 'akantu-admins@akantu.ch',
-      // 	attachLog: true,
-      //   compressLog: false)
       failed()
     }
   }
 }
 
 def failed() {
   sh "./test/ci/scripts/hbm failed"
 }
 
 def passed() {
   sh "./test/ci/scripts/hbm passed"
 }
 
-def createArtifact(artifact) {
+def createArtifact(filename) {
   sh "./test/ci/scripts/hbm send-uri -k 'Jenkins URI' -u ${BUILD_URL} -l 'View Jenkins result'"
-  sh "./test/ci/scripts/hbm send-ctest-results -f ${artifact}"
+  sh "./test/ci/scripts/hbm send-ctest-results -f ${filename}"
 }
 
 def uploadArtifact(artifact, name) {
-  sh "./test/ci/scripts/hbm upload-file -f ${artifact} -n \"${name}\" -v PHID-PROJ-5eqyu6ooyjktagbhf473"
+  sh "./test/ci/scripts/hbm upload-file -f ${artifact} -n \"${name}\" -v ${PROJECT_ID}"
 }
diff --git a/python/py_model.cc b/python/py_model.cc
index 69aaeb72b..cd952ef43 100644
--- a/python/py_model.cc
+++ b/python/py_model.cc
@@ -1,75 +1,76 @@
 /* -------------------------------------------------------------------------- */
 #include "py_aka_array.hh"
 /* -------------------------------------------------------------------------- */
 #include <model.hh>
 #include <non_linear_solver.hh>
 #include <sparse_matrix_aij.hh>
 /* -------------------------------------------------------------------------- */
 #include <pybind11/operators.h>
 #include <pybind11/pybind11.h>
 #include <pybind11/stl.h>
 /* -------------------------------------------------------------------------- */
 namespace py = pybind11;
 /* -------------------------------------------------------------------------- */
 
 namespace akantu {
 
 /* -------------------------------------------------------------------------- */
 
 void register_model(py::module & mod) {
   py::class_<SparseMatrix>(mod, "SparseMatrix")
       .def("getMatrixType", &SparseMatrix::getMatrixType)
       .def("size", &SparseMatrix::size);
 
   py::class_<SparseMatrixAIJ, SparseMatrix>(mod, "SparseMatrixAIJ")
       .def("getIRN", &SparseMatrixAIJ::getIRN)
       .def("getJCN", &SparseMatrixAIJ::getJCN)
       .def("getA", &SparseMatrixAIJ::getA);
 
   py::class_<DOFManager>(mod, "DOFManager")
       .def("getMatrix",
            [](DOFManager & self, const std::string & name) {
              return dynamic_cast<akantu::SparseMatrixAIJ &>(
                  self.getMatrix(name));
            },
            py::return_value_policy::reference);
 
   py::class_<NonLinearSolver>(mod, "NonLinearSolver")
       .def(
           "set",
           [](NonLinearSolver & self, const std::string & id, const Real & val) {
             if (id == "max_iterations")
               self.set(id, int(val));
             else
               self.set(id, val);
           })
       .def("set",
            [](NonLinearSolver & self, const std::string & id,
               const SolveConvergenceCriteria & val) { self.set(id, val); });
 
   py::class_<ModelSolver, Parsable>(mod, "ModelSolver",
                                     py::multiple_inheritance())
       .def("getNonLinearSolver",
            (NonLinearSolver & (ModelSolver::*)(const ID &)) &
                ModelSolver::getNonLinearSolver,
            py::arg("solver_id") = "", py::return_value_policy::reference)
-      .def("solveStep", [](ModelSolver & self, const ID & solver_id = "") {
+      .def("solveStep", [](ModelSolver & self) { self.solveStep(); })
+      .def("solveStep", [](ModelSolver & self, const ID & solver_id) {
         self.solveStep(solver_id);
       });
 
   py::class_<Model, ModelSolver>(mod, "Model", py::multiple_inheritance())
       .def("setBaseName", &Model::setBaseName)
       .def("getFEEngine", &Model::getFEEngine, py::arg("name") = "",
            py::return_value_policy::reference)
       .def("addDumpFieldVector", &Model::addDumpFieldVector)
       .def("addDumpField", &Model::addDumpField)
       .def("setBaseNameToDumper", &Model::setBaseNameToDumper)
       .def("addDumpFieldVectorToDumper", &Model::addDumpFieldVectorToDumper)
       .def("addDumpFieldToDumper", &Model::addDumpFieldToDumper)
       .def("dump", &Model::dump)
       .def("initNewSolver", &Model::initNewSolver)
       .def("getDOFManager", &Model::getDOFManager,
            py::return_value_policy::reference);
 }
 
 } // namespace akantu
diff --git a/src/synchronizer/communicator_mpi_inline_impl.cc b/src/synchronizer/communicator_mpi_inline_impl.cc
index 9ebc835e5..2692952a7 100644
--- a/src/synchronizer/communicator_mpi_inline_impl.cc
+++ b/src/synchronizer/communicator_mpi_inline_impl.cc
@@ -1,492 +1,498 @@
 /**
  * @file   communicator_mpi_inline_impl.cc
  *
  * @author Nicolas Richart <nicolas.richart@epfl.ch>
  *
  * @date creation: Tue Nov 07 2017
  * @date last modification: Mon Dec 18 2017
  *
  * @brief  StaticCommunicatorMPI implementation
  *
  * @section LICENSE
  *
  * Copyright (©) 2016-2018 EPFL (Ecole Polytechnique Fédérale de Lausanne)
  * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
  *
  * Akantu is free  software: you can redistribute it and/or  modify it under the
  * terms  of the  GNU Lesser  General Public  License as published by  the Free
  * Software Foundation, either version 3 of the License, or (at your option) any
  * later version.
  *
  * Akantu is  distributed in the  hope that it  will be useful, but  WITHOUT ANY
  * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
  * A PARTICULAR PURPOSE. See  the GNU  Lesser General  Public License  for more
  * details.
  *
  * You should  have received  a copy  of the GNU  Lesser General  Public License
  * along with Akantu. If not, see <http://www.gnu.org/licenses/>.
  *
  */
 
 /* -------------------------------------------------------------------------- */
 #include "aka_iterators.hh"
 #include "communicator.hh"
 #include "mpi_communicator_data.hh"
 /* -------------------------------------------------------------------------- */
 #include <memory>
 #include <type_traits>
 #include <unordered_map>
 #include <vector>
 /* -------------------------------------------------------------------------- */
 #include <mpi.h>
 /* -------------------------------------------------------------------------- */
 
 #if (defined(__GNUC__) || defined(__GNUG__))
 #if AKA_GCC_VERSION < 60000
 namespace std {
 template <> struct hash<akantu::SynchronizerOperation> {
   using argument_type = akantu::SynchronizerOperation;
   size_t operator()(const argument_type & e) const noexcept {
     auto ue = underlying_type_t<argument_type>(e);
     return uh(ue);
   }
 
 private:
   const hash<underlying_type_t<argument_type>> uh{};
 };
 } // namespace std
 #endif
 #endif
 
 namespace akantu {
 
 class CommunicationRequestMPI : public InternalCommunicationRequest {
 public:
   CommunicationRequestMPI(UInt source, UInt dest)
       : InternalCommunicationRequest(source, dest),
         request(std::make_unique<MPI_Request>()) {}
   MPI_Request & getMPIRequest() { return *request; };
 
 private:
   std::unique_ptr<MPI_Request> request;
 };
 
 namespace {
   template <typename T> inline MPI_Datatype getMPIDatatype();
   MPI_Op getMPISynchronizerOperation(SynchronizerOperation op) {
     std::unordered_map<SynchronizerOperation, MPI_Op> _operations{
         {SynchronizerOperation::_sum, MPI_SUM},
         {SynchronizerOperation::_min, MPI_MIN},
         {SynchronizerOperation::_max, MPI_MAX},
         {SynchronizerOperation::_prod, MPI_PROD},
         {SynchronizerOperation::_land, MPI_LAND},
         {SynchronizerOperation::_band, MPI_BAND},
         {SynchronizerOperation::_lor, MPI_LOR},
         {SynchronizerOperation::_bor, MPI_BOR},
         {SynchronizerOperation::_lxor, MPI_LXOR},
         {SynchronizerOperation::_bxor, MPI_BXOR},
         {SynchronizerOperation::_min_loc, MPI_MINLOC},
         {SynchronizerOperation::_max_loc, MPI_MAXLOC},
         {SynchronizerOperation::_null, MPI_OP_NULL}};
     return _operations[op];
   }
 
   template <typename T> MPI_Datatype inline getMPIDatatype() {
     return MPI_DATATYPE_NULL;
   }
 
 #define SPECIALIZE_MPI_DATATYPE(type, mpi_type)                                \
   template <> MPI_Datatype inline getMPIDatatype<type>() { return mpi_type; }
 
 #define COMMA ,
   SPECIALIZE_MPI_DATATYPE(char, MPI_CHAR)
   SPECIALIZE_MPI_DATATYPE(std::uint8_t, MPI_UINT8_T)
   SPECIALIZE_MPI_DATATYPE(float, MPI_FLOAT)
   SPECIALIZE_MPI_DATATYPE(double, MPI_DOUBLE)
   SPECIALIZE_MPI_DATATYPE(long double, MPI_LONG_DOUBLE)
   SPECIALIZE_MPI_DATATYPE(signed int, MPI_INT)
   SPECIALIZE_MPI_DATATYPE(unsigned int, MPI_UNSIGNED)
   SPECIALIZE_MPI_DATATYPE(signed long int, MPI_LONG)
   SPECIALIZE_MPI_DATATYPE(unsigned long int, MPI_UNSIGNED_LONG)
   SPECIALIZE_MPI_DATATYPE(signed long long int, MPI_LONG_LONG)
   SPECIALIZE_MPI_DATATYPE(unsigned long long int, MPI_UNSIGNED_LONG_LONG)
   SPECIALIZE_MPI_DATATYPE(SCMinMaxLoc<double COMMA int>, MPI_DOUBLE_INT)
   SPECIALIZE_MPI_DATATYPE(SCMinMaxLoc<float COMMA int>, MPI_FLOAT_INT)
   SPECIALIZE_MPI_DATATYPE(bool, MPI_CXX_BOOL)
 
   template <> MPI_Datatype inline getMPIDatatype<NodeFlag>() {
     return getMPIDatatype<std::underlying_type_t<NodeFlag>>();
   }
 
   inline int getMPISource(int src) {
     if (src == _any_source)
       return MPI_ANY_SOURCE;
     return src;
   }
 
   decltype(auto) convertRequests(std::vector<CommunicationRequest> & requests) {
     std::vector<MPI_Request> mpi_requests(requests.size());
 
     for (auto && request_pair : zip(requests, mpi_requests)) {
       auto && req = std::get<0>(request_pair);
       auto && mpi_req = std::get<1>(request_pair);
       mpi_req = aka::as_type<CommunicationRequestMPI>(req.getInternal())
                     .getMPIRequest();
     }
     return mpi_requests;
   }
 
 } // namespace
 
 // this is ugly but shorten the code a lot
 #define MPIDATA                                                                \
   (*reinterpret_cast<MPICommunicatorData *>(communicator_data.get()))
 
 /* -------------------------------------------------------------------------- */
 /* Implementation                                                             */
 /* -------------------------------------------------------------------------- */
 
 /* -------------------------------------------------------------------------- */
 Communicator::Communicator(int & /*argc*/, char **& /*argv*/,
                            const private_member & /*unused*/)
     : communicator_data(std::make_unique<MPICommunicatorData>()) {
   prank = MPIDATA.rank();
   psize = MPIDATA.size();
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::sendImpl(const T * buffer, Int size, Int receiver, Int tag,
                             const CommunicationMode & mode) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Datatype type = getMPIDatatype<T>();
 
   switch (mode) {
   case CommunicationMode::_auto:
     MPI_Send(buffer, size, type, receiver, tag, communicator);
     break;
   case CommunicationMode::_synchronous:
     MPI_Ssend(buffer, size, type, receiver, tag, communicator);
     break;
   case CommunicationMode::_ready:
     MPI_Rsend(buffer, size, type, receiver, tag, communicator);
     break;
   }
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::receiveImpl(T * buffer, Int size, Int sender,
                                Int tag) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Status status;
   MPI_Datatype type = getMPIDatatype<T>();
   MPI_Recv(buffer, size, type, getMPISource(sender), tag, communicator,
            &status);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 CommunicationRequest
 Communicator::asyncSendImpl(const T * buffer, Int size, Int receiver, Int tag,
                             const CommunicationMode & mode) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   auto * request = new CommunicationRequestMPI(prank, receiver);
   MPI_Request & req = request->getMPIRequest();
 
   MPI_Datatype type = getMPIDatatype<T>();
 
   switch (mode) {
   case CommunicationMode::_auto:
     MPI_Isend(buffer, size, type, receiver, tag, communicator, &req);
     break;
   case CommunicationMode::_synchronous:
     MPI_Issend(buffer, size, type, receiver, tag, communicator, &req);
     break;
   case CommunicationMode::_ready:
     MPI_Irsend(buffer, size, type, receiver, tag, communicator, &req);
     break;
   }
   return std::shared_ptr<InternalCommunicationRequest>(request);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 CommunicationRequest Communicator::asyncReceiveImpl(T * buffer, Int size,
                                                     Int sender, Int tag) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   auto * request = new CommunicationRequestMPI(sender, prank);
   MPI_Datatype type = getMPIDatatype<T>();
 
   MPI_Request & req = request->getMPIRequest();
   MPI_Irecv(buffer, size, type, getMPISource(sender), tag, communicator, &req);
   return std::shared_ptr<InternalCommunicationRequest>(request);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::probe(Int sender, Int tag,
                          CommunicationStatus & status) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Status mpi_status;
   MPI_Probe(getMPISource(sender), tag, communicator, &mpi_status);
 
   MPI_Datatype type = getMPIDatatype<T>();
   int count;
   MPI_Get_count(&mpi_status, type, &count);
 
   status.setSource(mpi_status.MPI_SOURCE);
   status.setTag(mpi_status.MPI_TAG);
   status.setSize(count);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 bool Communicator::asyncProbe(Int sender, Int tag,
                               CommunicationStatus & status) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Status mpi_status;
   int test;
   MPI_Iprobe(getMPISource(sender), tag, communicator, &test, &mpi_status);
 
   if (not test)
     return false;
 
   MPI_Datatype type = getMPIDatatype<T>();
   int count;
   MPI_Get_count(&mpi_status, type, &count);
 
   status.setSource(mpi_status.MPI_SOURCE);
   status.setTag(mpi_status.MPI_TAG);
   status.setSize(count);
   return true;
 }
 
 /* -------------------------------------------------------------------------- */
 bool Communicator::test(CommunicationRequest & request) const {
   MPI_Status status;
   int flag;
   auto & req_mpi =
       aka::as_type<CommunicationRequestMPI>(request.getInternal());
 
   MPI_Request & req = req_mpi.getMPIRequest();
   MPI_Test(&req, &flag, &status);
 
   return flag;
 }
 
 /* -------------------------------------------------------------------------- */
 bool Communicator::testAll(std::vector<CommunicationRequest> & requests) const {
   //int are_finished;
   //auto && mpi_requests = convertRequests(requests);
   //MPI_Testall(mpi_requests.size(), mpi_requests.data(), &are_finished,
   //            MPI_STATUSES_IGNORE);
   //return are_finished;
   for(auto & request : requests) {
     if(not test(request)) return false;
   }
   return true;
 }
 
 /* -------------------------------------------------------------------------- */
 void Communicator::wait(CommunicationRequest & request) const {
   MPI_Status status;
   auto & req_mpi =
       aka::as_type<CommunicationRequestMPI>(request.getInternal());
   MPI_Request & req = req_mpi.getMPIRequest();
   MPI_Wait(&req, &status);
 }
 
 /* -------------------------------------------------------------------------- */
 void Communicator::waitAll(std::vector<CommunicationRequest> & requests) const {
   auto && mpi_requests = convertRequests(requests);
   MPI_Waitall(mpi_requests.size(), mpi_requests.data(), MPI_STATUSES_IGNORE);
 }
 
 /* -------------------------------------------------------------------------- */
 UInt Communicator::waitAny(std::vector<CommunicationRequest> & requests) const {
   auto && mpi_requests = convertRequests(requests);
 
   int pos;
   MPI_Waitany(mpi_requests.size(), mpi_requests.data(), &pos,
               MPI_STATUSES_IGNORE);
 
   if (pos != MPI_UNDEFINED) {
     return pos;
   } else {
     return UInt(-1);
   }
 }
 
 /* -------------------------------------------------------------------------- */
 void Communicator::barrier() const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Barrier(communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 CommunicationRequest Communicator::asyncBarrier() const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   auto * request = new CommunicationRequestMPI(0, 0);
 
   MPI_Request & req = request->getMPIRequest();
   MPI_Ibarrier(communicator, &req);
 
   return std::shared_ptr<InternalCommunicationRequest>(request);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::reduceImpl(T * values, int nb_values,
                               SynchronizerOperation op, int root) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Datatype type = getMPIDatatype<T>();
 
   MPI_Reduce(MPI_IN_PLACE, values, nb_values, type,
              getMPISynchronizerOperation(op), root, communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::allReduceImpl(T * values, int nb_values,
                                  SynchronizerOperation op) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Datatype type = getMPIDatatype<T>();
 
   MPI_Allreduce(MPI_IN_PLACE, values, nb_values, type,
                 getMPISynchronizerOperation(op), communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::scanImpl(T * values, T * result, int nb_values,
                             SynchronizerOperation op) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Datatype type = getMPIDatatype<T>();
 
   if(values == result) {
     values = reinterpret_cast<T*>(MPI_IN_PLACE);
   }
   
   MPI_Scan(values, result, nb_values, type,
            getMPISynchronizerOperation(op), communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::exclusiveScanImpl(T * values, T * result, int nb_values,
                                      SynchronizerOperation op) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Datatype type = getMPIDatatype<T>();
 
+  bool in_place = false;
   if(values == result) {
+    in_place = true;
     values = reinterpret_cast<T*>(MPI_IN_PLACE);
   }
   
   MPI_Exscan(values, result, nb_values, type,
            getMPISynchronizerOperation(op), communicator);
+
+  if(prank == 0) {
+    result[0] = T();
+  }
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::allGatherImpl(T * values, int nb_values) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Datatype type = getMPIDatatype<T>();
 
   MPI_Allgather(MPI_IN_PLACE, nb_values, type, values, nb_values, type,
                 communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::allGatherVImpl(T * values, int * nb_values) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   std::vector<int> displs(psize);
   displs[0] = 0;
   for (int i = 1; i < psize; ++i) {
     displs[i] = displs[i - 1] + nb_values[i - 1];
   }
 
   MPI_Datatype type = getMPIDatatype<T>();
   MPI_Allgatherv(MPI_IN_PLACE, *nb_values, type, values, nb_values,
                  displs.data(), type, communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::gatherImpl(T * values, int nb_values, int root) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   T *send_buf = nullptr, *recv_buf = nullptr;
   if (prank == root) {
     send_buf = (T *)MPI_IN_PLACE;
     recv_buf = values;
   } else {
     send_buf = values;
   }
 
   MPI_Datatype type = getMPIDatatype<T>();
   MPI_Gather(send_buf, nb_values, type, recv_buf, nb_values, type, root,
              communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::gatherImpl(T * values, int nb_values, T * gathered,
                               int nb_gathered) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   T * send_buf = values;
   T * recv_buf = gathered;
 
   if (nb_gathered == 0)
     nb_gathered = nb_values;
 
   MPI_Datatype type = getMPIDatatype<T>();
   MPI_Gather(send_buf, nb_values, type, recv_buf, nb_gathered, type,
              this->prank, communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::gatherVImpl(T * values, int * nb_values, int root) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   int * displs = nullptr;
   if (prank == root) {
     displs = new int[psize];
     displs[0] = 0;
     for (int i = 1; i < psize; ++i) {
       displs[i] = displs[i - 1] + nb_values[i - 1];
     }
   }
 
   T *send_buf = nullptr, *recv_buf = nullptr;
   if (prank == root) {
     send_buf = (T *)MPI_IN_PLACE;
     recv_buf = values;
   } else
     send_buf = values;
 
   MPI_Datatype type = getMPIDatatype<T>();
 
   MPI_Gatherv(send_buf, *nb_values, type, recv_buf, nb_values, displs, type,
               root, communicator);
 
   if (prank == root) {
     delete[] displs;
   }
 }
 
 /* -------------------------------------------------------------------------- */
 template <typename T>
 void Communicator::broadcastImpl(T * values, int nb_values, int root) const {
   MPI_Comm communicator = MPIDATA.getMPICommunicator();
   MPI_Datatype type = getMPIDatatype<T>();
   MPI_Bcast(values, nb_values, type, root, communicator);
 }
 
 /* -------------------------------------------------------------------------- */
 int Communicator::getMaxTag() const { return MPIDATA.getMaxTag(); }
 int Communicator::getMinTag() const { return 0; }
 
 /* -------------------------------------------------------------------------- */
 
 } // namespace akantu
diff --git a/test/ci/Dockerfile b/test/ci/Dockerfile
index 55c6d9438..5cb74a1b5 100644
--- a/test/ci/Dockerfile
+++ b/test/ci/Dockerfile
@@ -1,21 +1,22 @@
 FROM debian:testing
 MAINTAINER Nicolas Richart <nicolas.richart@epfl.ch>
 
 # Make sure the package repository is up to date.
 RUN apt-get -qq update && apt-get -qq -y install \
     g++ gfortran  cmake \
     libmumps-seq-dev libscotch-dev \
     libboost-dev libopenblas-dev \
     python3 python3-dev \
     python3-numpy python3-scipy python3-mpi4py\
     python3-phabricator python3-click python3-yaml \
+    python3-pytest \
     swig3.0 gmsh curl flake8 \
     git clang-format xsltproc jq \
     php-cli php-curl php-xml \
     && rm -rf /var/lib/apt/lists/*
 
 # apt-get on one line due to https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#run
 RUN git clone https://github.com/phacility/libphutil.git /libphutil
 RUN git clone https://github.com/phacility/arcanist.git /arcanist
 
 ENV PATH="$PATH:/arcanist/bin/"
\ No newline at end of file
diff --git a/test/ci/scripts/hbm b/test/ci/scripts/hbm
index 494b55969..0ebd246a7 100755
--- a/test/ci/scripts/hbm
+++ b/test/ci/scripts/hbm
@@ -1,69 +1,69 @@
 #!/usr/bin/env python3
 import click
 import harbomaster
 
 @click.group()
 @click.option('-a', '--api-token', default=None, envvar='API_TOKEN')
 @click.option('-h', '--host', default=None, envvar='PHABRICATOR_HOST')
 @click.option('-b', '--build-target-phid', envvar='BUILD_TARGET_PHID')
 @click.pass_context
 def hbm(ctx, api_token, host, build_target_phid):
     ctx.obj['API_TOKEN'] = api_token
     ctx.obj['HOST'] = host
     ctx.obj['BUILD_TARGET_PHID'] = build_target_phid
     
 @hbm.command()
 @click.option('-f', '--filename')
 @click.pass_context
 def send_ctest_results(ctx, filename):
     try:
         _hbm = harbomaster.Harbormaster(ctx=ctx.obj)
         with harbomaster.CTestResults(filename) as tests:
             _hbm.send_unit_tests(tests)
-    except e:
+    except Exception:
         pass
 
 @hbm.command()
 @click.option('-f', '--filename')
 @click.pass_context
 def send_arc_lint(ctx, filename):
     try:
         _hbm = harbomaster.Harbormaster(ctx=ctx.obj)
         with harbomaster.ARCLintJson(filename) as tests:
             _hbm.send_lint(tests)
-    except e:
+    except Exception:
         pass
 
 @hbm.command()
 @click.option('-k', '--key')
 @click.option('-u', '--uri')
 @click.option('-l', '--label')
 @click.pass_context
 def send_uri(ctx, key, uri, label):
     _hbm = harbomaster.Harbormaster(ctx=ctx.obj)
     _hbm.send_uri(key, uri, label)
 
 @hbm.command()
 @click.option('-f', '--filename')
 @click.option('-n', '--name')
 @click.option('-v', '--view_policy', default=None)
 @click.pass_context
 def upload_file(ctx, filename, name, view_policy):
     _hbm = harbomaster.Harbormaster(ctx=ctx.obj)
     _hbm.upload_file(filename, name, view_policy)
     
 @hbm.command()
 @click.pass_context
 def passed(ctx):
     _hbm = harbomaster.Harbormaster(ctx=ctx.obj)
     _hbm.passed()
 
 @hbm.command()
 @click.pass_context
 def failed(ctx):
     _hbm = harbomaster.Harbormaster(ctx=ctx.obj)
     _hbm.failed()
    
 
 if __name__ == '__main__':
     hbm(obj={})