/*
 * SPDX-License-Identifier: AGPL-3.0-or-later
 *
 * Copyright (©) 2016-2022 EPFL (École Polytechnique Fédérale de Lausanne),
 * Laboratory (LSMS - Laboratoire de Simulation en Mécanique des Solides)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published
 * by the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */
/* -------------------------------------------------------------------------- */
#include "grid.hh"
#include "grid_view.hh"
#include "loop.hh"
#include "mpi_interface.hh"
#include "static_types.hh"
#include "test.hh"
/* -------------------------------------------------------------------------- */
/* WARNING: here we cannot use lambdas for tests because GoogleTest declares */
/* test functions as private members of classes which is incompatible with */
/* cuda's extended lambdas. I know... it's f*cking stupid */
/* -------------------------------------------------------------------------- */
using namespace tamaas;
template <typename T>
struct AddOneInplace {
  CUDA_LAMBDA void operator()(T& x) { x += 1; }
};
// Testing loops on one grid
TEST(TestLoops, OneArgument) {
  Grid<Real, 1> grid({20}, 1);
  Grid<Real, 1> solution({20}, 1);
  auto add_one = [](auto x) { return x + 1; };
  std::iota(grid.begin(), grid.end(), 1);
  // Making solution
  std::transform(grid.begin(), grid.end(), solution.begin(), add_one);
  auto add_one_inplace = AddOneInplace<Real>();
  Loop::loop(add_one_inplace, grid);
  ASSERT_TRUE(compare(grid, solution, AreFloatEqual()))
      << "One argument loop failed";
}
struct PrimalTest {
  CUDA_LAMBDA void operator()(Int& primal, Int& val) {
    val = (primal > 0) ? -1 : 1;
  }
};
// Testing loops on two grids
TEST(TestLoops, TwoArguments) {
  // Why no ints?
  Grid<Int, 2> grid({20, 20}, 1);
  Grid<Int, 2> primal({20, 20}, 1);
  Grid<Int, 2> solution({20, 20}, 1);
  primal(0, 0) = 1;
  primal(0, 1) = 1;
  primal(1, 0) = 1;
  primal(1, 1) = 1;
  std::transform(primal.begin(), primal.end(), solution.begin(),
                 [](Int& primal) { return (primal > 0) ? -1 : 1; });
  auto primal_test = PrimalTest();
  Loop::loop(primal_test, primal, grid);
  ASSERT_TRUE(compare(solution, grid)) << "Two argument loop failed";
}
struct AssignUInt {
  CUDA_LAMBDA void operator()(UInt& x, UInt i) { x = i; }
};
// Testing an enumeration
TEST(TestLoops, Enumeration) {
  Grid<UInt, 1> grid({100}, 1);
  Grid<UInt, 1> solution({100}, 1);
  std::iota(solution.begin(), solution.end(), 0);
  auto assign_uint = AssignUInt();
  Loop::loop(assign_uint, grid, Loop::range(100));
  ASSERT_TRUE(compare(solution, grid)) << "Enumeration loop failed";
}
/* -------------------------------------------------------------------------- */
struct Identity {
  CUDA_LAMBDA UInt operator()(UInt& x) const { return x; }
};
// Testing one grid reductions
TEST(TestReductions, OneArgument) {
  Grid<UInt, 1> grid({6}, 1);
  std::iota(grid.begin(), grid.end(), 1);
  const auto id = Identity();
  // Sum reduction
  UInt sol = mpi::allreduce<operation::plus>(
      std::accumulate(grid.begin(), grid.end(), 0, std::plus<>()));
  UInt red = Loop::reduce<operation::plus>(id, grid);
  ASSERT_TRUE(sol == red) << "Addition reduction failed on one argument";
  // Product reduction
  sol = mpi::allreduce<operation::times>(
      std::accumulate(grid.begin(), grid.end(), 1, std::multiplies<>()));
  red = Loop::reduce<operation::times>(id, grid);
  ASSERT_TRUE(sol == red) << "Multiplication reduction failed on one argument";
  // Min reduction
  sol = mpi::allreduce<operation::min>(
      *std::min_element(grid.begin(), grid.end()));
  red = Loop::reduce<operation::min>(id, grid);
  ASSERT_TRUE(sol == red) << "Min reduction failed on one argument";
  // Max reduction
  sol = mpi::allreduce<operation::max>(
      *std::max_element(grid.begin(), grid.end()));
  red = Loop::reduce<operation::max>(id, grid);
  ASSERT_TRUE(sol == red) << "Max reduction failed on one argument";
}
struct AssignReduce {
  CUDA_LAMBDA UInt operator()(UInt& x, UInt i) {
    x = i;
    return x;
  }
};
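// Testing a reduction whose functor also transforms its argument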
TEST(TestReductions, ReduceAndTransform) {
  UInt n = 20;
  Grid<UInt, 1> grid({n}, 1), solution({n}, 1);
  std::iota(solution.begin(), solution.end(), 0);
  UInt sum_value = mpi::allreduce<operation::plus>((n - 1) * n / 2);
  auto assign_reduce = AssignReduce{};
  UInt res = Loop::reduce<operation::plus>(assign_reduce, grid, Loop::range(n));
  EXPECT_EQ(res, sum_value) << "Reduction failed";
  EXPECT_TRUE(compare(grid, solution)) << "Assign failed";
}
struct PrimalReduce {
  CUDA_LAMBDA UInt operator()(UInt& p, UInt& val) { return (p > 0) ? val : 0; }
};
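// Testing reductions on two grids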
TEST(TestReductions, TwoArguments) {
  Grid<UInt, 1> grid({20}, 1);
  Grid<UInt, 1> primal({20}, 1);
  grid = 1;
  primal(0) = 1;
  primal(1) = 1;
  auto primal_reduce = PrimalReduce();
  // Reduce on values where primal > 0
  UInt red = Loop::reduce<operation::plus>(primal_reduce, primal, grid);
  ASSERT_TRUE(red == mpi::allreduce<operation::plus>(UInt{2}))
      << "Two args reduction failed";
}
/* -------------------------------------------------------------------------- */
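// Testing the is_valid_container type trait of ranges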
TEST(TestRange, type_trait) {
  Grid<UInt, 1> grid({1}, 1);
  auto gridrange = range<VectorProxy<UInt, 1>>(grid);
  static_assert(decltype(gridrange)::is_valid_container<Grid<UInt, 1>>::value,
                "is_valid_container Type trait is wrong");
  static_assert(
      not decltype(gridrange)::is_valid_container<Grid<Real, 1>&>::value,
      "is_valid_container Type trait is wrong");
  static_assert(not Range<VectorProxy<Real, 1>, Real,
                          1>::is_valid_container<decltype(grid)>::value,
                "is_valid_container Type trait is wrong");
}
struct AssignOne {
  CUDA_LAMBDA void operator()(VectorProxy<UInt, 1> x) { x = 1; }
  CUDA_LAMBDA void operator()(UInt& x) { x = 1; }
  CUDA_LAMBDA void operator()(VectorProxy<UInt, 3> v) { v(2) = 1; }
  CUDA_LAMBDA void operator()(VectorProxy<UInt, 2> v) { v = 1; }
};
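// Testing headless ranges (the first element is left untouched)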
TEST(TestRange, headless) {
  if (mpi::rank() != 0)
    GTEST_SKIP() << "Skipping because not root process";
  Grid<UInt, 1> grid({10}, 1), solution({10}, 1);
  std::fill(++solution.begin(), solution.end(), 1);
  auto gridrange = range<VectorProxy<UInt, 1>>(grid).headless();
  auto assign_one = AssignOne{};
  Loop::loop(assign_one, gridrange);
  ASSERT_TRUE(compare(grid, solution)) << "Headless fail";
}
template <typename T>
using WrapVector = VectorProxy<T, 2>;
struct AddOneVector {
  CUDA_LAMBDA void operator()(WrapVector<UInt> x) { x(0) += 1; }
};
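// Testing strided loops with a static vector proxy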
TEST(TestStridedLoops, VectorStride) {
  Grid<UInt, 2> grid({10, 10}, 2);
  std::iota(grid.begin(), grid.end(), 1);
  Grid<UInt, 2> solution({10, 10}, 2);
  solution = grid;
  std::for_each(solution.begin(), solution.end(), [](UInt& x) {
    if (x % 2 == 1)
      x += 1;
  });
  auto add_one_inplace = AddOneVector();
  Loop::loop(add_one_inplace, range<WrapVector<UInt>>(grid));
  ASSERT_TRUE(compare(solution, grid)) << "Static vector strided loop failed";
}
template <typename T>
using WrapMatrix = MatrixProxy<T, 2, 2>;
struct SetOneMatrix {
  CUDA_LAMBDA void operator()(WrapMatrix<UInt> x) {
    x(0, 0) = 1;
    x(1, 1) = 1;
  }
};
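// Testing strided loops with a static matrix proxy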
TEST(TestStridedLoops, MatrixStride) {
  Grid<UInt, 2> grid({10, 10}, 4);
  Grid<UInt, 2> solution({10, 10}, 4);
  std::iota(solution.begin(), solution.end(), 0);
  std::for_each(solution.begin(), solution.end(), [](UInt& x) {
    if (x % 4 == 0 || x % 4 == 3)
      x = 1;
    else
      x = 0;
  });
  auto set_one = SetOneMatrix();
  Loop::loop(set_one, range<WrapMatrix<UInt>>(grid));
  ASSERT_TRUE(compare(solution, grid)) << "Static matrix strided loop failed";
}
struct VectorReduction {
  CUDA_LAMBDA Vector<UInt, 3> operator()(const VectorProxy<UInt, 3>& v) const {
    return v;
  }
};
struct BroadcastSet123 {
  CUDA_LAMBDA inline void operator()(VectorProxy<UInt, 3> v) const {
    v(0) = 1;
    v(1) = 2;
    v(2) = 3;
  }
};
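// Testing strided reductions that return a static vector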
TEST(TestStridedReduction, VectorReduce) {
  Grid<UInt, 2> grid({10, 10}, 3);
  Loop::loop(BroadcastSet123(), range<VectorProxy<UInt, 3>>(grid));
  auto res = Loop::reduce<operation::plus>(VectorReduction(),
                                           range<VectorProxy<UInt, 3>>(grid));
  auto reduce = [](UInt x) { return mpi::allreduce<operation::plus>(x); };
  ASSERT_EQ(res(0), reduce(100));
  ASSERT_EQ(res(1), reduce(200));
  ASSERT_EQ(res(2), reduce(300));
}
struct ScalarReduce {
  CUDA_LAMBDA UInt operator()(UInt& x) { return x; }
};
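// Testing scalar reductions on a component view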
TEST(TestViewReduction, ScalarReduce) {
  Grid<UInt, 2> grid({10, 10}, 3);
  Loop::loop(BroadcastSet123(), range<VectorProxy<UInt, 3>>(grid));
  auto view = make_component_view(grid, 2);
  auto scalar_reduce = ScalarReduce{};
  UInt res = Loop::reduce<operation::plus>(scalar_reduce, view);
  EXPECT_EQ(res, mpi::allreduce<operation::plus>(UInt{300}))
      << "Reduce on component view fail";
}
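// Testing strided reductions on a grid view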
TEST(TestViewReduction, VectorReduce) {
  Grid<UInt, 2> grid({10, 10}, 3);
  auto view2 = make_view(grid, 0);
  Loop::loop(BroadcastSet123(), range<VectorProxy<UInt, 3>>(view2));
  auto res2 = Loop::reduce<operation::plus>(VectorReduction(),
                                            range<VectorProxy<UInt, 3>>(view2));
  auto reduce = [](UInt x) { return mpi::allreduce<operation::plus>(x); };
  EXPECT_EQ(res2(0), reduce(10));
  EXPECT_EQ(res2(1), reduce(20));
  EXPECT_EQ(res2(2), reduce(30));
}
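// Testing scalar loops on a component view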
TEST(TestViewLoop, ScalarLoop) {
  Grid<UInt, 2> grid({10, 10}, 3), solution({10, 10}, 3);
  auto view = make_component_view(grid, 2);
  auto assign_one = AssignOne{};
  Loop::loop(assign_one, view);
  Loop::loop(assign_one, range<VectorProxy<UInt, 3>>(solution));
  ASSERT_TRUE(compare(grid, solution)) << "View loop fail";
}
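// Testing that loops check the number of components of their arguments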
TEST(TestLoopChecks, Components) {
  Grid<UInt, 2> grid({10, 10}, 3);
  auto assign_one = AssignOne();
  EXPECT_THROW(Loop::loop(assign_one, range<VectorProxy<UInt, 2>>(grid)),
               Exception)
      << "Broken check on number of components";
}
struct CopyValues {
  CUDA_LAMBDA auto operator()(UInt& x, UInt& y) { x = y; }
  CUDA_LAMBDA auto operator()(VectorProxy<UInt, 2> x, VectorProxy<UInt, 1> y) {
    x(0) = y(0);
  }
};
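// Testing that loops check that all arguments have matching sizes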
TEST(TestLoopChecks, LoopSize) {
  Grid<UInt, 1> grid({10}, 2), other({10}, 1);
  CopyValues func;
  EXPECT_THROW(Loop::loop(func, grid, other), Exception)
      << "Check on loop size without ranges fail";
  other.resize({11});
  EXPECT_THROW(Loop::loop(func, range<VectorProxy<UInt, 2>>(grid),
                          range<VectorProxy<UInt, 1>>(other)),
               Exception)
      << "Check on loop size with ranges fail";
  Grid<UInt, 2> twod({10, 11}, 2);
  auto view = make_view(twod, 0);
  EXPECT_THROW(Loop::loop(func, grid, view), Exception)
      << "Check on loop size with view fail";
}
struct ReduceAndTransform {
  CUDA_LAMBDA UInt operator()(VectorProxy<UInt, 2> x, UInt /*i*/) {
    x += 1;
    return x(0) + x(1);
  }
};
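// Testing a reduce-and-transform loop with vector proxies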
TEST(TestReductions, ReduceAndTransformVector) {
  UInt n = 20;
  Grid<UInt, 1> grid({n}, 2), solution({n}, 2);
  std::iota(solution.begin(), solution.end(), 1);
  std::iota(grid.begin(), grid.end(), 0);
  UInt sum_value = mpi::allreduce<operation::plus>((2 * n + 1) * 2 * n / 2);
  auto reduce_transform = ReduceAndTransform();
  UInt res = Loop::reduce<operation::plus>(
      reduce_transform, range<VectorProxy<UInt, 2>>(grid), Loop::range(n));
  EXPECT_EQ(res, sum_value) << "Reduction failed";
  EXPECT_TRUE(compare(grid, solution)) << "Assign failed";
}
