diff --git a/test/tests/bfloat16_test.cpp b/test/tests/bfloat16_test.cpp index a8d4bb881b803be8a7435a907bdb15a1b104aafc..8a2886c84788a03189b58fc727f3e7fabc9c5320 100644 --- a/test/tests/bfloat16_test.cpp +++ b/test/tests/bfloat16_test.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -14,7 +14,7 @@ namespace kai::test { TEST(BFloat16, SimpleTest) { if (!cpu_has_bf16()) { - GTEST_SKIP(); + GTEST_SKIP() << "No CPU support for BFloat16"; } ASSERT_EQ(static_cast<float>(BFloat16()), 0.0F); diff --git a/test/tests/float16_test.cpp b/test/tests/float16_test.cpp index 66abe88cc156aec07358fe55f4eeed92f275f3bb..ea919fdd5a1a38d885ccac9dcdd3a55f2b7d4080 100644 --- a/test/tests/float16_test.cpp +++ b/test/tests/float16_test.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -14,7 +14,7 @@ namespace kai::test { TEST(Float16, SimpleTest) { if (!cpu_has_fp16()) { - GTEST_SKIP(); + GTEST_SKIP() << "No CPU support for FP16"; } ASSERT_EQ(static_cast<float>(Float16()), 0.0F); diff --git a/test/tests/matmul_clamp_f16_bf16p_bf16p_test.cpp b/test/tests/matmul_clamp_f16_bf16p_bf16p_test.cpp index cb73302dfd2e8f940d491ba29b97fad22685737f..038e361569949441f7540600f23c214fff423fde 100644 --- a/test/tests/matmul_clamp_f16_bf16p_bf16p_test.cpp +++ b/test/tests/matmul_clamp_f16_bf16p_bf16p_test.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -242,11 +242,11 @@ TEST_P(MatMulTestBf16OutFp16, Output) { const auto& [method, info, portion] = GetParam(); if (method.fn_is_supported && 
!method.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (!method.has_main_kernel()) { - GTEST_SKIP(); + GTEST_SKIP() << "No main kernel available"; } const auto& data = test_data(); @@ -260,7 +260,7 @@ TEST_P(MatMulTestBf16OutFp16, Output) { const auto rect = portion.compute_portion(info.m, info.n, method.m0, method.n0); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const size_t lhs_w = info.k; diff --git a/test/tests/matmul_clamp_f32_bf16p_bf16p_test.cpp b/test/tests/matmul_clamp_f32_bf16p_bf16p_test.cpp index f59174a60961e362c57e267343e8cc8caa92267d..9fff8005fb61150844e0c99a01a29fc00a3cee45 100644 --- a/test/tests/matmul_clamp_f32_bf16p_bf16p_test.cpp +++ b/test/tests/matmul_clamp_f32_bf16p_bf16p_test.cpp @@ -497,11 +497,11 @@ TEST_P(MatMulTestBf16, Output) { const auto& [method, info, portion] = GetParam(); if (method.fn_is_supported && !method.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (!method.has_main_kernel()) { - GTEST_SKIP(); + GTEST_SKIP() << "No main kernel available"; } const auto& data = test_data(); @@ -514,7 +514,7 @@ TEST_P(MatMulTestBf16, Output) { const auto rect = portion.compute_portion(info.m, info.n, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const size_t lhs_w = info.k; diff --git a/test/tests/matmul_clamp_f32_f32_f32p_test.cpp b/test/tests/matmul_clamp_f32_f32_f32p_test.cpp index 4ac1f986d56322667d2d538807b265aa196f3b53..2f6fd131e1cfc6fae97cec5351d813722c84369d 100644 --- a/test/tests/matmul_clamp_f32_f32_f32p_test.cpp +++ b/test/tests/matmul_clamp_f32_f32_f32p_test.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates +// 
SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -68,7 +68,7 @@ TEST_P(MatMulTest_f32_f32_f32p, EndToEnd) // NOLINT(google-readability-avoid-un const auto& ukernel_variant = ukernel_variants.at(variant_idx); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } constexpr uint32_t seed = 0; diff --git a/test/tests/matmul_clamp_f32_qai8dxp_qsi4c32p_test.cpp b/test/tests/matmul_clamp_f32_qai8dxp_qsi4c32p_test.cpp index 06b50043a8685b8888edda680ec8d2162541c5c1..954496bfef3d2108ae6592df0f62a270ea6587d6 100644 --- a/test/tests/matmul_clamp_f32_qai8dxp_qsi4c32p_test.cpp +++ b/test/tests/matmul_clamp_f32_qai8dxp_qsi4c32p_test.cpp @@ -91,7 +91,7 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, Offset_RHS) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4c32p.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const size_t M = matmul_shape.m; @@ -103,7 +103,7 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, Offset_RHS) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto nr = ukernel_variant.interface.get_nr(); @@ -128,7 +128,7 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, Offset_LHS) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4c32p.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const size_t M = matmul_shape.m; @@ -140,7 +140,7 @@ 
TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, Offset_LHS) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto mr = ukernel_variant.interface.get_mr(); @@ -159,7 +159,7 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, EndToEnd_RHS_nxk) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4c32p.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } constexpr uint32_t seed = 0; @@ -201,7 +201,7 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, EndToEnd_RHS_nxk) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_start_row = rect.start_row(); @@ -293,7 +293,7 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, EndToEnd_RHS_kxn) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4c32p.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const uint32_t seed = 0; @@ -347,7 +347,7 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, EndToEnd_RHS_kxn) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_start_row = rect.start_row(); diff --git a/test/tests/matmul_clamp_f32_qai8dxp_qsi4cxp_test.cpp b/test/tests/matmul_clamp_f32_qai8dxp_qsi4cxp_test.cpp index 
22edc8c1219131ece921ef1d4b8f1d63fc587075..eaaa6b06acebd7e0382fdf75bc8772b3641a0d87 100644 --- a/test/tests/matmul_clamp_f32_qai8dxp_qsi4cxp_test.cpp +++ b/test/tests/matmul_clamp_f32_qai8dxp_qsi4cxp_test.cpp @@ -210,7 +210,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, Offset_RHS) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const size_t M = matmul_shape.m; @@ -222,7 +222,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, Offset_RHS) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto nr = ukernel_variant.interface.get_nr(); @@ -240,7 +240,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, Offset_LHS) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const size_t M = matmul_shape.m; @@ -252,7 +252,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, Offset_LHS) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto mr = ukernel_variant.interface.get_mr(); @@ -271,10 +271,10 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, EndToEnd_RHS_nxk_qsi4cx) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if 
(ukernel_variant.rhs_pack_type == RhsPackType::KxN) { - GTEST_SKIP(); // Wrong type. This test for NxK + GTEST_SKIP() << "Wrong type. This test for NxK"; } if (!ukernel_variant.signed_integer_support) { GTEST_SKIP() << "Signed integer input unsupported"; @@ -318,7 +318,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, EndToEnd_RHS_nxk_qsi4cx) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_start_row = rect.start_row(); @@ -398,10 +398,10 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, EndToEnd_RHS_nxk_qsu4cx) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (ukernel_variant.rhs_pack_type == RhsPackType::KxN) { - GTEST_SKIP(); // Wrong type. This test for NxK + GTEST_SKIP() << "Wrong type. This test for NxK"; } const uint32_t seed = 0; @@ -442,7 +442,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, EndToEnd_RHS_nxk_qsu4cx) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_start_row = rect.start_row(); @@ -522,10 +522,10 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, EndToEnd_RHS_kxn_qsi4cx) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (ukernel_variant.rhs_pack_type == RhsPackType::NxK) { - GTEST_SKIP(); // Wrong type. This test for KxN + GTEST_SKIP() << "Wrong type. 
This test for KxN"; } if (!ukernel_variant.signed_integer_support) { GTEST_SKIP() << "Signed integer input unsupported"; @@ -580,7 +580,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, EndToEnd_RHS_kxn_qsi4cx) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_start_row = rect.start_row(); @@ -653,10 +653,10 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, EndToEnd_RHS_kxn_qsu4cx) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (ukernel_variant.rhs_pack_type == RhsPackType::NxK) { - GTEST_SKIP(); // Wrong type. This test for KxN + GTEST_SKIP() << "Wrong type. This test for KxN"; } const uint32_t seed = 0; @@ -709,7 +709,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi4cxp, EndToEnd_RHS_kxn_qsu4cx) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_start_row = rect.start_row(); diff --git a/test/tests/matmul_clamp_f32_qai8dxp_qsi8cxp_test.cpp b/test/tests/matmul_clamp_f32_qai8dxp_qsi8cxp_test.cpp index eab1e6a7691c4f86d2e780437db2b8eb7275f12f..f55213de2c13428772eff462d339c90cefb806f5 100644 --- a/test/tests/matmul_clamp_f32_qai8dxp_qsi8cxp_test.cpp +++ b/test/tests/matmul_clamp_f32_qai8dxp_qsi8cxp_test.cpp @@ -53,7 +53,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, Offset_RHS) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not 
supported by current CPU"; } const size_t K = matmul_shape.k; @@ -77,7 +77,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, Offset_LHS) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const size_t K = matmul_shape.k; @@ -98,7 +98,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_nxk_qsi8cx) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const uint32_t seed = 0; @@ -139,7 +139,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_nxk_qsi8cx) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP() << "Skipping empty portion."; + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } // Runs the LHS packing micro-kernel. 
@@ -212,7 +212,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_kxn_qsi8cx) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi8cxp.at(variant_index); if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const uint32_t seed = 0; @@ -264,7 +264,7 @@ TEST_P(MatMulTest_f32_qai8dxp_qsi8cxp, EndToEnd_RHS_kxn_qsi8cx) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP() << "Skipping empty portion."; + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_start_row = rect.start_row(); diff --git a/test/tests/matmul_clamp_f32_qsi8d32p_qsi4c32p_test.cpp b/test/tests/matmul_clamp_f32_qsi8d32p_qsi4c32p_test.cpp index b91c1fbc62ee11260e51baa4d29f921850f61e3d..153decb53e1c839e68eafbadf67c60dcf9c387d5 100644 --- a/test/tests/matmul_clamp_f32_qsi8d32p_qsi4c32p_test.cpp +++ b/test/tests/matmul_clamp_f32_qsi8d32p_qsi4c32p_test.cpp @@ -102,7 +102,7 @@ TEST_P(MatMulTest_f32_qsi8d32p_qsi4c32p, Offset_RHS) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qsi8d32p_qsi4c32p.at(variant_index); if (ukernel_variant.ukernel.fn_is_supported && !ukernel_variant.ukernel.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const size_t bl = 32; @@ -118,7 +118,7 @@ TEST_P(MatMulTest_f32_qsi8d32p_qsi4c32p, Offset_RHS) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP() << "Test Portion size is 0!"; + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto rhs_start_row = rect.start_col(); @@ -132,7 +132,7 @@ TEST_P(MatMulTest_f32_qsi8d32p_qsi4c32p, Offset_LHS) { const auto& ukernel_variant = 
variants_kai_matmul_clamp_f32_qsi8d32p_qsi4c32p.at(variant_index); if (ukernel_variant.ukernel.fn_is_supported && !ukernel_variant.ukernel.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const size_t bl = 32; @@ -149,7 +149,7 @@ TEST_P(MatMulTest_f32_qsi8d32p_qsi4c32p, Offset_LHS) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP() << "Test Portion size is 0!"; + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_start_row = rect.start_row(); @@ -164,7 +164,7 @@ TEST_P(MatMulTest_f32_qsi8d32p_qsi4c32p, EndToEnd) { const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qsi8d32p_qsi4c32p.at(variant_index); if (ukernel_variant.ukernel.fn_is_supported && !ukernel_variant.ukernel.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } const std::uint32_t seed = 0; @@ -191,7 +191,7 @@ TEST_P(MatMulTest_f32_qsi8d32p_qsi4c32p, EndToEnd) { const auto rect = portion.compute_portion(M, N, m_step, n_step); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP() << "Test Portion size is 0!"; + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } // Generates input data. 
const auto ref_lhs = fill_random<float>(M * K, seed + 0); diff --git a/test/tests/matmul_test.cpp b/test/tests/matmul_test.cpp index 387f9e6ee0c7f806fa3a98992a52bd4f9b8818a4..dd4b0dd8eef53e327dc58cc9106666c79d3a521d 100644 --- a/test/tests/matmul_test.cpp +++ b/test/tests/matmul_test.cpp @@ -1,5 +1,5 @@ // -// SPDX-FileCopyrightText: Copyright 2024 Arm Limited and/or its affiliates +// SPDX-FileCopyrightText: Copyright 2024-2025 Arm Limited and/or its affiliates // // SPDX-License-Identifier: Apache-2.0 // @@ -456,11 +456,11 @@ TEST_P(MatMulTest, PackedLhs) { const auto& [method, info, portion] = GetParam(); if (method.fn_is_supported && !method.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (!method.is_pack_lhs_needed()) { - GTEST_SKIP(); + GTEST_SKIP() << "Test not valid w/o LHS pack"; } const auto& data = test_data(); @@ -472,7 +472,7 @@ TEST_P(MatMulTest, PackedLhs) { lhs_w); // LHS packing micro-kernel API doesn't support scheduling over K dimension. 
if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto mr = method.fn_get_mr(); @@ -509,11 +509,11 @@ TEST_P(MatMulTest, PackedRhs) { const auto& [method, info, portion] = GetParam(); if (method.fn_is_supported && !method.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (!method.is_pack_rhs_needed()) { - GTEST_SKIP(); + GTEST_SKIP() << "Test not valid w/o RHS pack"; } const auto& data = test_data(); @@ -526,7 +526,7 @@ TEST_P(MatMulTest, PackedRhs) { const Rect rect = portion.compute_portion(rhs_full_width, rhs_full_height, block_height, block_width); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto rhs_start_row = rect.start_row(); @@ -581,11 +581,11 @@ TEST_P(MatMulTest, PackedTransposedRhs) { const auto& [method, info, portion] = GetParam(); if (method.fn_is_supported && !method.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (!method.is_pack_rhs_nxk_needed()) { - GTEST_SKIP(); + GTEST_SKIP() << "Test not valid w/o pre-processing of transposed RHS matrix"; } const auto& data = test_data(); @@ -598,7 +598,7 @@ TEST_P(MatMulTest, PackedTransposedRhs) { method.packed_rhs_format.scheduler_block_width(info.k)); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto ref_rhs_row_stride = method.rhs_format.default_row_stride(info.k); @@ -643,11 +643,11 @@ TEST_P(MatMulTest, Output) { const auto& [method, info, portion] = GetParam(); if (method.fn_is_supported && !method.fn_is_supported()) { - GTEST_SKIP(); + GTEST_SKIP() << "CPU features are not supported by current CPU"; } if (!method.has_main_kernel()) 
{ - GTEST_SKIP(); + GTEST_SKIP() << "No main kernel available"; } const auto& data = test_data(); @@ -660,7 +660,7 @@ TEST_P(MatMulTest, Output) { const auto rect = portion.compute_portion(info.m, info.n, method.m0, method.n0); if (rect.height() == 0 || rect.width() == 0) { - GTEST_SKIP(); + GTEST_SKIP() << "Empty dimension of matrix(" << rect.width() << "," << rect.height() << ")"; } const auto lhs_w = info.k;