diff --git a/test/common/test_suite.hpp b/test/common/test_suite.hpp
index 7c30fea89b5d3782a5263526da8327a63bfcfa97..cc79103785148c5da7493334c21fe17d1a7c6c8f 100644
--- a/test/common/test_suite.hpp
+++ b/test/common/test_suite.hpp
@@ -12,7 +12,8 @@
 #include
 #include
 #include
-#include
+
+#include "matrix_portion.hpp"
 
 // clang-format off
 #define UKERNEL_MATMUL_VARIANT(name) \
@@ -79,6 +80,7 @@ struct MatMulShape {
 
 /// Matrix multiplication test information.
 using MatMulTestParams = std::tuple<size_t, MatMulShape>;
+using MatMulTestPortionedParams = std::tuple<size_t, MatMulShape, MatrixPortion>;
 
 class UkernelVariantTest : public ::testing::TestWithParam {};
diff --git a/test/tests/matmul_clamp_f32_qai8dxp_qsi8cxp_test.cpp b/test/tests/matmul_clamp_f32_qai8dxp_qsi8cxp_test.cpp
index ccdc10851e1fa525e69da1589334e779a23f27a9..4dd69e4ba9179f547595932e5ea708d43b096950 100644
--- a/test/tests/matmul_clamp_f32_qai8dxp_qsi8cxp_test.cpp
+++ b/test/tests/matmul_clamp_f32_qai8dxp_qsi8cxp_test.cpp
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi8cxp/kai_matmul_clamp_f32_qai8dxp1x4_qsi8cxp4x4_1x4_neon_dotprod.h"
@@ -24,13 +25,11 @@
 #include "kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi8cxp_qsi8cx_neon.h"
 #include "kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi8cxp_qsi8cx_neon.h"
 #include "test/common/cpu_info.hpp"
+#include "test/common/matrix_portion.hpp"
 #include "test/common/memory.hpp"
-#include "test/common/round.hpp"
 #include "test/common/test_suite.hpp"
-#include "test/reference/cast.hpp"
 #include "test/reference/fill.hpp"
 #include "test/reference/matmul.hpp"
-#include "test/reference/pad.hpp"
 #include "test/reference/quantize.hpp"
 #include "test/reference/transpose.hpp"
@@ -47,10 +46,10 @@ static const std::array {};
 
 TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, Offset_RHS) {
-    const auto& [variant_index, matmul_shape] = GetParam();
+    const auto& [variant_index, matmul_shape, portion] = GetParam();
     const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_index);
 
     if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) {
@@ -74,7 +73,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, Offset_RHS) {
 }
 
 TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, Offset_LHS) {
-    const auto& [variant_index, matmul_shape] = GetParam();
+    const auto& [variant_index, matmul_shape, portion] = GetParam();
     const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_index);
 
     if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) {
@@ -95,7 +94,7 @@
 }
 
 TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_nxk_qsi8cx) {
-    auto& [variant_index, matmul_shape] = GetParam();
+    auto& [variant_index, matmul_shape, portion] = GetParam();
     const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_index);
 
     if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) {
@@ -132,11 +131,30 @@
         ref_rhs_scales.data(), nullptr, K, ref_biases.data(), std::numeric_limits<float>::lowest(),
         std::numeric_limits<float>::max());
 
+    auto m_step = ukernel_variant.interface.get_m_step();
+    ASSERT_TRUE(m_step % mr == 0);
+
+    auto n_step = ukernel_variant.interface.get_n_step();
+    ASSERT_TRUE(n_step % nr == 0);
+
+    const auto rect = portion.compute_portion(M, N, m_step, n_step);
+    if (rect.height() == 0 || rect.width() == 0) {
+        GTEST_SKIP() << "Skipping empty portion.";
+    }
+
     // Runs the LHS packing micro-kernel.
     const auto imp_packed_lhs_size = kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32(M, K, mr, kr, sr);
     std::vector<uint8_t> imp_packed_lhs(imp_packed_lhs_size);
+
+    const auto lhs_start_row = rect.start_row();
+    size_t lhs_stride = K * sizeof(float);
+
+    auto lhs_offset = kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32(lhs_start_row, lhs_stride);
+    auto lhs_packed_offset = kai_get_lhs_packed_offset_lhs_quant_pack_qai8dxp_f32(lhs_start_row, K, mr, kr, sr);
+
     kai_run_lhs_quant_pack_qai8dxp_f32(
-        M, K, mr, kr, sr, 0, reinterpret_cast<const float*>(ref_lhs.data()), K * sizeof(float), imp_packed_lhs.data());
+        rect.height(), K, mr, kr, sr, 0, reinterpret_cast<const float*>(ref_lhs.data() + lhs_offset), lhs_stride,
+        imp_packed_lhs.data() + lhs_packed_offset);
 
     // Runs the RHS packing micro-kernel.
     // * Generates the 8-bit signed symmetric quantized input for the micro-kernel.
@@ -150,19 +168,36 @@
         reinterpret_cast<const float*>(ref_biases.data()), reinterpret_cast<const float*>(ref_rhs_scales.data()),
         imp_packed_rhs.data(), 0, &params);
 
+    const auto packed_rhs_start_row = rect.start_col();
+    auto rhs_packed_offset =
+        kai_get_rhs_packed_offset_rhs_pack_nxk_qsi8cxp_qsi8cx_neon(packed_rhs_start_row, K, nr, kr, sr);
+
+    const auto dst_stride = N * sizeof(float);
+    const auto dst_offset = ukernel_variant.interface.get_dst_offset(rect.start_row(), rect.start_col(), dst_stride);
+    const auto ref_dst_offset = rect.start_row() * dst_stride + rect.start_col() * sizeof(float);
+    ASSERT_EQ(dst_offset, ref_dst_offset);
+
+    const auto matmul_lhs_packed_offset = ukernel_variant.interface.get_lhs_packed_offset(rect.start_row(), K);
+    ASSERT_EQ(lhs_packed_offset, matmul_lhs_packed_offset);
+    const auto matmul_rhs_packed_offset = ukernel_variant.interface.get_rhs_packed_offset(rect.start_col(), K);
+    ASSERT_EQ(rhs_packed_offset, matmul_rhs_packed_offset);
+
     // Runs the GEMM micro-kernel.
     const auto imp_dst_size = ukernel_variant.interface.get_dst_size(M, N);
     ASSERT_EQ(imp_dst_size, ref_dst.size());
     std::vector<uint8_t> imp_dst(imp_dst_size);
 
     ukernel_variant.interface.run_matmul(
-        M, N, K, imp_packed_lhs.data(), imp_packed_rhs.data(), reinterpret_cast<float*>(imp_dst.data()),
+        rect.height(), rect.width(), K, imp_packed_lhs.data() + matmul_lhs_packed_offset,
+        imp_packed_rhs.data() + matmul_rhs_packed_offset, reinterpret_cast<float*>(imp_dst.data() + dst_offset),
         N * sizeof(float), sizeof(float), std::numeric_limits<float>::lowest(), std::numeric_limits<float>::max());
 
     // Compares the output of the micro-kernels against the output of the reference implementation.
-    for (size_t y = 0; y < M; ++y) {
-        for (size_t x = 0; x < N; ++x) {
-            const auto imp_value = read_array<float>(imp_dst.data(), y * N + x);
-            const auto ref_value = read_array<float>(ref_dst.data(), y * N + x);
+    for (size_t y = 0; y < rect.height(); ++y) {
+        for (size_t x = 0; x < rect.width(); ++x) {
+            const auto imp_value =
+                read_array<float>(imp_dst.data(), (rect.start_row() + y) * N + (x + rect.start_col()));
+            const auto ref_value =
+                read_array<float>(ref_dst.data(), (rect.start_row() + y) * N + (x + rect.start_col()));
             const auto rel_error = ref_value != 0 ? std::abs((imp_value - ref_value) / ref_value) : std::abs(imp_value);
             if (rel_error > 0.0001F) {
@@ -173,7 +208,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_nxk_qsi8cx) {
 }
 
 TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_kxn_qsi8cx) {
-    auto& [variant_index, matmul_shape] = GetParam();
+    auto& [variant_index, matmul_shape, portion] = GetParam();
     const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_index);
 
     if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) {
@@ -221,11 +256,29 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_kxn_qsi8cx) {
         ref_rhs_scales.data(), nullptr, K, ref_biases.data(), std::numeric_limits<float>::lowest(),
         std::numeric_limits<float>::max());
 
+    auto m_step = ukernel_variant.interface.get_m_step();
+    ASSERT_TRUE(m_step % mr == 0);
+
+    auto n_step = ukernel_variant.interface.get_n_step();
+    ASSERT_TRUE(n_step % nr == 0);
+
+    const auto rect = portion.compute_portion(M, N, m_step, n_step);
+    if (rect.height() == 0 || rect.width() == 0) {
+        GTEST_SKIP() << "Skipping empty portion.";
+    }
+
+    const auto lhs_start_row = rect.start_row();
+    size_t const lhs_stride = K * sizeof(float);
+
+    auto lhs_offset = kai_get_lhs_offset_lhs_quant_pack_qai8dxp_f32(lhs_start_row, lhs_stride);
+    auto lhs_packed_offset = kai_get_lhs_packed_offset_lhs_quant_pack_qai8dxp_f32(lhs_start_row, K, mr, kr, sr);
+
     // Runs the LHS packing micro-kernel.
     const auto imp_packed_lhs_size = kai_get_lhs_packed_size_lhs_quant_pack_qai8dxp_f32(M, K, mr, kr, sr);
     std::vector<uint8_t> imp_packed_lhs(imp_packed_lhs_size);
     kai_run_lhs_quant_pack_qai8dxp_f32(
-        M, K, mr, kr, sr, 0, reinterpret_cast<const float*>(ref_lhs.data()), K * sizeof(float), imp_packed_lhs.data());
+        rect.height(), K, mr, kr, sr, 0, reinterpret_cast<const float*>(ref_lhs.data() + lhs_offset), K * sizeof(float),
+        imp_packed_lhs.data() + lhs_packed_offset);
 
     // Runs the RHS packing micro-kernel.
     // * Generates the 8-bit signed symmetric quantized input for the micro-kernel.
@@ -239,19 +292,36 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_kxn_qsi8cx) {
         reinterpret_cast<const float*>(ref_biases.data()), reinterpret_cast<const float*>(ref_rhs_scales.data()),
         imp_packed_rhs.data(), 0, &params);
 
+    const auto packed_rhs_start_row = rect.start_col();
+    auto rhs_packed_offset =
+        kai_get_rhs_packed_offset_rhs_pack_kxn_qsi8cxp_qsi8cx_neon(packed_rhs_start_row, K, nr, kr, sr);
+
+    const auto dst_stride = N * sizeof(float);
+    const auto dst_offset = ukernel_variant.interface.get_dst_offset(rect.start_row(), rect.start_col(), dst_stride);
+    const auto ref_dst_offset = rect.start_row() * dst_stride + rect.start_col() * sizeof(float);
+    ASSERT_EQ(dst_offset, ref_dst_offset);
+
+    const auto matmul_lhs_packed_offset = ukernel_variant.interface.get_lhs_packed_offset(rect.start_row(), K);
+    ASSERT_EQ(lhs_packed_offset, matmul_lhs_packed_offset);
+    const auto matmul_rhs_packed_offset = ukernel_variant.interface.get_rhs_packed_offset(rect.start_col(), K);
+    ASSERT_EQ(rhs_packed_offset, matmul_rhs_packed_offset);
+
     // Runs the GEMM micro-kernel.
     const auto imp_dst_size = ukernel_variant.interface.get_dst_size(M, N);
     ASSERT_EQ(imp_dst_size, ref_dst.size());
     std::vector<uint8_t> imp_dst(imp_dst_size);
 
     ukernel_variant.interface.run_matmul(
-        M, N, K, imp_packed_lhs.data(), imp_packed_rhs.data(), reinterpret_cast<float*>(imp_dst.data()),
+        rect.height(), rect.width(), K, imp_packed_lhs.data() + matmul_lhs_packed_offset,
+        imp_packed_rhs.data() + matmul_rhs_packed_offset, reinterpret_cast<float*>(imp_dst.data() + dst_offset),
         N * sizeof(float), sizeof(float), std::numeric_limits<float>::lowest(), std::numeric_limits<float>::max());
 
     // Compares the output of the micro-kernels against the output of the reference implementation.
-    for (size_t y = 0; y < M; ++y) {
-        for (size_t x = 0; x < N; ++x) {
-            const auto imp_value = read_array<float>(imp_dst.data(), y * N + x);
-            const auto ref_value = read_array<float>(ref_dst.data(), y * N + x);
+    for (size_t y = 0; y < rect.height(); ++y) {
+        for (size_t x = 0; x < rect.width(); ++x) {
+            const auto imp_value =
+                read_array<float>(imp_dst.data(), (rect.start_row() + y) * N + (x + rect.start_col()));
+            const auto ref_value =
+                read_array<float>(ref_dst.data(), (rect.start_row() + y) * N + (x + rect.start_col()));
             const auto rel_error = ref_value != 0 ? std::abs((imp_value - ref_value) / ref_value) : std::abs(imp_value);
             if (rel_error > 0.0001F) {
@@ -265,14 +335,31 @@ INSTANTIATE_TEST_SUITE_P(
     MatMul, MatMulTest_f32_qai8dxp_qsi8cxp,
     testing::Combine(
         testing::Range<size_t>(0, variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.size()),
-        testing::Values(MatMulShape{17, 33, 67}, MatMulShape{19, 35, 63}, MatMulShape{1, 27, 31})),
+        testing::Values(
+            MatMulShape{17, 33, 67},  //
+            MatMulShape{19, 35, 63},  //
+            MatMulShape{1, 27, 31}),
+        testing::Values(
+            MatrixPortion(0, 0, 1, 1),         // Full matrix.
+            MatrixPortion(0, 0, 1, 0.25),      // Leftmost portion.
+            MatrixPortion(0, 0.75, 1, 1),      // Rightmost portion.
+            MatrixPortion(0, 0.5, 1, 0.8),     // Somewhere Middle
+            MatrixPortion(0.75, 0.75, 1, 1),   // Bottom-right corner.
+            MatrixPortion(0.75, 0, 1, 1),      // Partial rows
+            MatrixPortion(0.4, 0.5, 0.6, 0.8)  // Somewhere Middle
+            )),
     [](const auto& info) {
         const auto variant_idx = std::get<0>(info.param);
         const std::string name{variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_idx).name};
         const auto shape = std::get<MatMulShape>(info.param);
+        const auto portion = std::get<MatrixPortion>(info.param);
 
         std::stringstream sstream;
-        sstream << name << "__M_" << shape.m << "__N_" << shape.n << "__K_" << shape.k;
+        sstream << name << "__M_" << shape.m << "__N_" << shape.n << "__K_" << shape.k  //
+                << "__PortionStartRow_" << static_cast<int>(portion.start_row() * 1000)  //
+                << "__PortionStartCol_" << static_cast<int>(portion.start_col() * 1000)  //
+                << "__PortionHeight_" << static_cast<int>(portion.height() * 1000)       //
+                << "__PortionWidth_" << static_cast<int>(portion.width() * 1000);
 
         return sstream.str();
    });
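Note on the helper these tests depend on: the diff only shows how MatrixPortion is used, not how it is implemented. The sketch below is an assumption-based illustration of the small surface the tests rely on (a constructor taking start-row/start-col/height/width ratios, the ratio accessors used for test naming, and compute_portion(), which maps ratios to a block-aligned rectangle). The Rect class name and the rounding/clamping details are guesses inferred from the usage above, not the contents of test/common/matrix_portion.hpp.

// Hypothetical sketch only -- not the real test/common/matrix_portion.hpp.
#include <algorithm>
#include <cstddef>

class Rect {
public:
    Rect(size_t start_row, size_t start_col, size_t height, size_t width) :
        row_(start_row), col_(start_col), height_(height), width_(width) {}
    size_t start_row() const { return row_; }
    size_t start_col() const { return col_; }
    size_t height() const { return height_; }
    size_t width() const { return width_; }

private:
    size_t row_, col_, height_, width_;
};

class MatrixPortion {
public:
    // All arguments are ratios in [0, 1] of the full matrix dimensions.
    MatrixPortion(float start_row, float start_col, float height, float width) :
        start_row_(start_row), start_col_(start_col), height_(height), width_(width) {}

    float start_row() const { return start_row_; }
    float start_col() const { return start_col_; }
    float height() const { return height_; }
    float width() const { return width_; }

    // Converts the ratios into a concrete rectangle. The origin is rounded down to a
    // block boundary (m_step x n_step) so packed-buffer offsets stay valid, and the
    // extent is clamped so the rectangle never runs past the full matrix.
    Rect compute_portion(size_t full_height, size_t full_width, size_t m_step, size_t n_step) const {
        const size_t row = static_cast<size_t>(start_row_ * static_cast<float>(full_height)) / m_step * m_step;
        const size_t col = static_cast<size_t>(start_col_ * static_cast<float>(full_width)) / n_step * n_step;
        const size_t height =
            std::min<size_t>(static_cast<size_t>(height_ * static_cast<float>(full_height)), full_height - row);
        const size_t width =
            std::min<size_t>(static_cast<size_t>(width_ * static_cast<float>(full_width)), full_width - col);
        return Rect(row, col, height, width);
    }

private:
    float start_row_, start_col_, height_, width_;
};

Under these assumptions, MatrixPortion(0.75, 0.75, 1, 1) would select a block-aligned bottom-right corner, and a portion whose clamped height or width comes out as zero is skipped by the tests via GTEST_SKIP.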