diff --git a/test/common/cpu_info.cpp b/test/common/cpu_info.cpp index 6fa59677fd6e02d07ca1aa7d2f6657bfa3b61a72..4a9d4f9664f56ed9ada145feb647cf8435a6b806 100644 --- a/test/common/cpu_info.cpp +++ b/test/common/cpu_info.cpp @@ -6,9 +6,11 @@ #include "test/common/cpu_info.hpp" +#include #include #include #include +#include #include "kai/kai_common.h" @@ -25,55 +27,155 @@ namespace kai::test { namespace { -#if defined(__aarch64__) && defined(__linux__) -constexpr uint64_t A64_HWCAP2_SME = 1UL << 23; -constexpr uint64_t A64_HWCAP2_SME2 = 1UL << 37; -#endif // defined(__aarch64__) && defined(__linux__) - -#if defined(__aarch64__) && defined(__APPLE__) -template -T get_sysctl_by_name(std::string_view name) { - T value{}; - size_t size = sizeof(T); - - KAI_ASSERT(sysctlbyname(name.data(), nullptr, &size, nullptr, 0) == 0); - KAI_ASSERT(size == sizeof(T)); +enum CpuFeatures { + ADVSIMD = 0, // + DOTPROD, // + I8MM, // + FP16, // + BF16, // + SVE, // + SVE2, // + SME, // + SME2, // + LAST_ELEMENT // This should be last element, please add new CPU capabilities before it +}; - [[maybe_unused]] int status = sysctlbyname(name.data(), &value, &size, nullptr, 0); - KAI_ASSERT(status == 0); +#if defined(__aarch64__) && defined(__linux__) +/// Define recent CPU capabilities not available in toolchain definitions yet +#ifndef HWCAP2_SME +constexpr uint64_t HWCAP2_SME = 1UL << 23; +#endif +#ifndef HWCAP2_SME2 +constexpr uint64_t HWCAP2_SME2 = 1UL << 37; +#endif + +const std::array, CpuFeatures::LAST_ELEMENT> cpu_caps{{ + {CpuFeatures::ADVSIMD, AT_HWCAP, HWCAP_ASIMD}, // + {CpuFeatures::DOTPROD, AT_HWCAP, HWCAP_ASIMDDP}, // + {CpuFeatures::I8MM, AT_HWCAP2, HWCAP2_I8MM}, // + {CpuFeatures::FP16, AT_HWCAP, HWCAP_FPHP | HWCAP_ASIMDHP}, // + {CpuFeatures::BF16, AT_HWCAP2, HWCAP2_BF16}, // + {CpuFeatures::SVE, AT_HWCAP, HWCAP_SVE}, // + {CpuFeatures::SVE2, AT_HWCAP2, HWCAP2_SVE2}, // + {CpuFeatures::SME, AT_HWCAP2, HWCAP2_SME}, // + {CpuFeatures::SME2, AT_HWCAP2, HWCAP2_SME2}, // +}}; + 
+bool get_cap_support(CpuFeatures feature) {
+    KAI_ASSERT(feature < cpu_caps.size());
+
+    auto [cpu_feature, cap_id, cap_bits] = cpu_caps[static_cast<size_t>(feature)];
+    // Make sure CPU feature is correctly initialized
+    KAI_ASSERT(feature == cpu_feature);
+
+    const uint64_t hwcaps = getauxval(cap_id);
+
+    return (hwcaps & cap_bits) == cap_bits;
+}
+#elif defined(__aarch64__) && defined(__APPLE__)
+const std::array<std::tuple<CpuFeatures, std::string_view>, CpuFeatures::LAST_ELEMENT> cpu_caps{{
+    {CpuFeatures::ADVSIMD, "hw.optional.AdvSIMD"},
+    {CpuFeatures::DOTPROD, "hw.optional.arm.FEAT_DotProd"},
+    {CpuFeatures::I8MM, "hw.optional.arm.FEAT_I8MM"},
+    {CpuFeatures::FP16, "hw.optional.arm.FEAT_FP16"},
+    {CpuFeatures::BF16, "hw.optional.arm.FEAT_BF16"},
+    {CpuFeatures::SVE, ""},   // not supported
+    {CpuFeatures::SVE2, ""},  // not supported
+    {CpuFeatures::SME, "hw.optional.arm.FEAT_SME"},
+    {CpuFeatures::SME2, "hw.optional.arm.FEAT_SME2"},
+}};
+
+bool get_cap_support(CpuFeatures feature) {
+    KAI_ASSERT(feature < CpuFeatures::LAST_ELEMENT);
+
+    auto [cpu_feature, cap_name] = cpu_caps[static_cast<size_t>(feature)];
+    KAI_ASSERT(feature == cpu_feature);
+
+    uint32_t value{};
+
+    if (cap_name.length() > 0) {
+        size_t size = sizeof(value);
+
+        KAI_ASSERT(sysctlbyname(cap_name.data(), nullptr, &size, nullptr, 0) == 0);
+        KAI_ASSERT(size == sizeof(value));
+
+        [[maybe_unused]] int status = sysctlbyname(cap_name.data(), &value, &size, nullptr, 0);
+        KAI_ASSERT(status == 0);
+    }
 
-    return value;
+    return value == 1;
 }
-#endif  // defined(__aarch64__) && defined(__APPLE__)
+#elif defined(__aarch64__)
+#error Please add a way how to check implemented CPU features
+#else
+bool get_cap_support(CpuFeatures feature) {
+    KAI_UNUSED(feature);
+    return false;
+}
+#endif
 
 /// Information about the CPU that is executing the program.
 struct CpuInfo {
-    CpuInfo() {
-#if defined(__aarch64__) && defined(__linux__)
-        const uint64_t hwcaps2 = getauxval(AT_HWCAP2);
-
-        has_sme = (hwcaps2 & A64_HWCAP2_SME) != 0;
-        has_sme2 = (hwcaps2 & A64_HWCAP2_SME2) != 0;
-#endif  // defined(__aarch64__) && defined(__linux__)
-
-#if defined(__aarch64__) && defined(__APPLE__)
-        has_sme = get_sysctl_by_name<uint32_t>("hw.optional.arm.FEAT_SME") == 1;
-        has_sme2 = get_sysctl_by_name<uint32_t>("hw.optional.arm.FEAT_SME2") == 1;
-#endif  // defined(__aarch64__) && defined(__APPLE__)
+    CpuInfo() :
+        has_advsimd(get_cap_support(CpuFeatures::ADVSIMD)),
+        has_dotprod(get_cap_support(CpuFeatures::DOTPROD)),
+        has_i8mm(get_cap_support(CpuFeatures::I8MM)),
+        has_fp16(get_cap_support(CpuFeatures::FP16)),
+        has_bf16(get_cap_support(CpuFeatures::BF16)),
+        has_sve(get_cap_support(CpuFeatures::SVE)),
+        has_sve2(get_cap_support(CpuFeatures::SVE2)),
+        has_sme(get_cap_support(CpuFeatures::SME)),
+        has_sme2(get_cap_support(CpuFeatures::SME2)) {
     }
 
     /// Gets the singleton @ref CpuInfo object.
-    static CpuInfo& current() {
-        static CpuInfo cpu_info{};
+    static const CpuInfo& current() {
+        static const CpuInfo cpu_info{};
         return cpu_info;
     }
 
-    bool has_sme{};   ///< FEAT_SME is supported.
-    bool has_sme2{};  ///< FEAT_SME2 is supported.
+    const bool has_advsimd{};  ///< AdvSIMD is supported.
+    const bool has_dotprod{};  ///< DotProd is supported.
+    const bool has_i8mm{};     ///< I8MM is supported.
+    const bool has_fp16{};     ///< FP16 is supported.
+    const bool has_bf16{};     ///< BF16 is supported.
+    const bool has_sve{};      ///< SVE is supported.
+    const bool has_sve2{};     ///< SVE2 is supported.
+    const bool has_sme{};      ///< SME is supported.
+    const bool has_sme2{};     ///< SME2 is supported.
}; } // namespace +/// Helper functions +bool cpu_has_advsimd() { + return CpuInfo::current().has_advsimd; +} + +bool cpu_has_dotprod() { + return CpuInfo::current().has_dotprod; +} + +bool cpu_has_i8mm() { + return CpuInfo::current().has_i8mm; +} + +bool cpu_has_fp16() { + return CpuInfo::current().has_fp16; +} + +bool cpu_has_bf16() { + return CpuInfo::current().has_bf16; +} + +bool cpu_has_sve() { + return CpuInfo::current().has_sve; +} + +bool cpu_has_sve2() { + return CpuInfo::current().has_sve2; +} + bool cpu_has_sme() { return CpuInfo::current().has_sme; } diff --git a/test/common/cpu_info.hpp b/test/common/cpu_info.hpp index b231ecea2d651f20b6b340ed5fa3a1dadaef1b42..298a358a7bb2cab80b2f9082791178d36b8de358 100644 --- a/test/common/cpu_info.hpp +++ b/test/common/cpu_info.hpp @@ -8,6 +8,27 @@ namespace kai::test { +/// Returns a value indicating whether the current CPU supports FEAT_AdvSIMD. +bool cpu_has_advsimd(); + +/// Returns a value indicating whether the current CPU supports FEAT_DotProd. +bool cpu_has_dotprod(); + +/// Returns a value indicating whether the current CPU supports FEAT_I8MM. +bool cpu_has_i8mm(); + +/// Returns a value indicating whether the current CPU supports FEAT_FP16. +bool cpu_has_fp16(); + +/// Returns a value indicating whether the current CPU supports FEAT_BF16. +bool cpu_has_bf16(); + +/// Returns a value indicating whether the current CPU supports FEAT_SVE. +bool cpu_has_sve(); + +/// Returns a value indicating whether the current CPU supports FEAT_SVE2. +bool cpu_has_sve2(); + /// Returns a value indicating whether the current CPU supports FEAT_SME. 
 bool cpu_has_sme();
diff --git a/test/common/test_suite.hpp b/test/common/test_suite.hpp
index e59c0c50a48707b566977a1523155a1dd50c1217..8bedf0949e681d723a1d60b5200d158cd55d4669 100644
--- a/test/common/test_suite.hpp
+++ b/test/common/test_suite.hpp
@@ -9,31 +9,40 @@
 #include
 #include
-#include <string>
+#include <functional>
+#include <string_view>
 #include
 
-#define UKERNEL_MATMUL_VARIANT(name)                   \
-    {                                                  \
-        {kai_get_m_step_matmul_##name,                 \
-         kai_get_n_step_matmul_##name,                 \
-         kai_get_mr_matmul_##name,                     \
-         kai_get_nr_matmul_##name,                     \
-         kai_get_kr_matmul_##name,                     \
-         kai_get_sr_matmul_##name,                     \
-         kai_get_lhs_packed_offset_matmul_##name,      \
-         kai_get_rhs_packed_offset_matmul_##name,      \
-         kai_get_dst_offset_matmul_##name,             \
-         kai_get_dst_size_matmul_##name,               \
-         kai_run_matmul_##name},                       \
-        "kai_matmul_" #name                            \
-    }
+#define UKERNEL_MATMUL_VARIANT(name, features_check)    \
+    {                                                   \
+        {kai_get_m_step_matmul_##name,                  \
+         kai_get_n_step_matmul_##name,                  \
+         kai_get_mr_matmul_##name,                      \
+         kai_get_nr_matmul_##name,                      \
+         kai_get_kr_matmul_##name,                      \
+         kai_get_sr_matmul_##name,                      \
+         kai_get_lhs_packed_offset_matmul_##name,       \
+         kai_get_rhs_packed_offset_matmul_##name,       \
+         kai_get_dst_offset_matmul_##name,              \
+         kai_get_dst_size_matmul_##name,                \
+         kai_run_matmul_##name},                        \
+        "kai_matmul_" #name, (features_check)           \
+    }
 
 namespace kai::test {
 
 template <typename T>
 struct UkernelVariant {
+    /// Interface for testing variant.
     T interface;
-    std::string name{};
+
+    /// Name of the test variant.
+    std::string_view name{};
+
+    /// Check if CPU supports required features.
+    ///
+    /// @return Supported (true) or not supported (false).
+    std::function<bool()> fn_is_supported;
 };
 
 /// Matrix multiplication shape.
diff --git a/test/tests/matmul_clamp_f32_qai8dxp_qsi4c32p_test.cpp b/test/tests/matmul_clamp_f32_qai8dxp_qsi4c32p_test.cpp index 080ebbbfdf9658140c003be6ad7ed74765b65d1e..95d357ec0a817c926363da069eaf09245381cb16 100644 --- a/test/tests/matmul_clamp_f32_qai8dxp_qsi4c32p_test.cpp +++ b/test/tests/matmul_clamp_f32_qai8dxp_qsi4c32p_test.cpp @@ -23,6 +23,7 @@ #include "kai/ukernels/matmul/pack/kai_rhs_pack_kxn_qsi4c32p_qsu4c32s1s0.h" #include "kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32p_qsu4c32s1s0.h" #include "test/common/bfloat16.hpp" +#include "test/common/cpu_info.hpp" #include "test/common/data_type.hpp" #include "test/common/int4.hpp" #include "test/common/memory.hpp" @@ -38,10 +39,10 @@ namespace kai::test { static const std::array, 4> variants_kai_matmul_clamp_f32_qai8dxp_qsi4c32p = {{ - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod), - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp1x8_qsi4c32p8x8_1x8x32_neon_dotprod), - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm), - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8x32_neon_i8mm), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp1x8_qsi4c32p4x8_1x4x32_neon_dotprod, cpu_has_dotprod), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp1x8_qsi4c32p8x8_1x8x32_neon_dotprod, cpu_has_dotprod), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4c32p4x8_8x4x32_neon_i8mm, cpu_has_i8mm), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4c32p8x8_4x8x32_neon_i8mm, cpu_has_i8mm), }}; class MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p : public UkernelVariantTest {}; @@ -50,6 +51,10 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, EndToEnd_RHS_Transpose const auto& [variant_index, matmul_shape] = GetParam(); const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4c32p.at(variant_index); + if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { + GTEST_SKIP(); + } + constexpr uint64_t seed = 0; const size_t M = 
matmul_shape.m; @@ -131,6 +136,10 @@ TEST_P(MatMulTest_f32_qmatmul_clamp_f32_qai8dxp_qsi4c32p, EndToEnd_RHS_NonTransp const auto& [variant_index, matmul_shape] = GetParam(); const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4c32p.at(variant_index); + if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { + GTEST_SKIP(); + } + const uint64_t seed = 0; const size_t M = matmul_shape.m; diff --git a/test/tests/matmul_clamp_f32_qai8dxp_qsi4cxp_test.cpp b/test/tests/matmul_clamp_f32_qai8dxp_qsi4cxp_test.cpp index 95ff03f43bd15e471f3fe45893675b634877c690..532270887ae53c0c1423986bd0ddaecb6486cca2 100644 --- a/test/tests/matmul_clamp_f32_qai8dxp_qsi4cxp_test.cpp +++ b/test/tests/matmul_clamp_f32_qai8dxp_qsi4cxp_test.cpp @@ -22,6 +22,7 @@ #include "kai/ukernels/matmul/matmul_clamp_f32_qai8dxp_qsi4cxp/kai_matmul_clamp_f32_qai8dxp_qsi4cxp_interface.h" #include "kai/ukernels/matmul/pack/kai_lhs_quant_pack_qai8dxp_f32.h" #include "kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4cxp_qsu4cxs1s0.h" +#include "test/common/cpu_info.hpp" #include "test/common/int4.hpp" #include "test/common/memory.hpp" #include "test/common/test_suite.hpp" @@ -34,12 +35,12 @@ namespace kai::test { static const std::array, 6> variants_kai_matmul_clamp_f32_qai8dxp_qsi4cxp = {{ - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp1x8_qsi4cxp4x8_1x4x32_neon_dotprod), - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp1x8_qsi4cxp8x8_1x8x32_neon_dotprod), - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4cxp4x8_4x4x32_neon_i8mm), - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm), - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4cxp8x8_4x8x32_neon_i8mm), - UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4cxp8x8_8x8x32_neon_i8mm), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp1x8_qsi4cxp4x8_1x4x32_neon_dotprod, cpu_has_dotprod), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp1x8_qsi4cxp8x8_1x8x32_neon_dotprod, cpu_has_dotprod), + 
UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4cxp4x8_4x4x32_neon_i8mm, cpu_has_i8mm), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4cxp4x8_8x4x32_neon_i8mm, cpu_has_i8mm), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4cxp8x8_4x8x32_neon_i8mm, cpu_has_i8mm), + UKERNEL_MATMUL_VARIANT(clamp_f32_qai8dxp4x8_qsi4cxp8x8_8x8x32_neon_i8mm, cpu_has_i8mm), }}; class MatMulTest_f32_qai8dxp4x8_qsi4cxp8x8 : public UkernelVariantTest {}; @@ -48,6 +49,10 @@ TEST_P(MatMulTest_f32_qai8dxp4x8_qsi4cxp8x8, EndToEnd) { auto& [variant_index, matmul_shape] = GetParam(); const auto& ukernel_variant = variants_kai_matmul_clamp_f32_qai8dxp_qsi4cxp.at(variant_index); + if (ukernel_variant.fn_is_supported && !ukernel_variant.fn_is_supported()) { + GTEST_SKIP(); + } + const uint64_t seed = 0; const size_t M = matmul_shape.m; diff --git a/test/tests/matmul_clamp_f32_qsi8d32p_qsi4c32p_test.cpp b/test/tests/matmul_clamp_f32_qsi8d32p_qsi4c32p_test.cpp index 8dba1ac3dc7653d9ae8c340c003d7c69b1f8c040..45a5bb6233dee9bad27b9c99c71b96914d91732b 100644 --- a/test/tests/matmul_clamp_f32_qsi8d32p_qsi4c32p_test.cpp +++ b/test/tests/matmul_clamp_f32_qsi8d32p_qsi4c32p_test.cpp @@ -14,6 +14,7 @@ #include "kai/ukernels/matmul/matmul_clamp_f32_qsi8d32p_qsi4c32p/kai_matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_8x4x32_neon_i8mm.h" #include "kai/ukernels/matmul/pack/kai_lhs_quant_pack_qsi8d32p_f32.h" #include "kai/ukernels/matmul/pack/kai_rhs_pack_nxk_qsi4c32pscalef16_qsu4c32s16s0.h" +#include "test/common/cpu_info.hpp" #include "test/common/float16.hpp" #include "test/common/int4.hpp" #include "test/common/memory.hpp" @@ -26,6 +27,10 @@ namespace kai::test { TEST(matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_8x4x32_neon_i8mm, EndToEnd) { + if (!cpu_has_i8mm()) { + GTEST_SKIP(); + } + const std::uint64_t seed = 0; const size_t M = 32; @@ -94,6 +99,10 @@ TEST(matmul_clamp_f32_qsi8d32p4x8_qsi4c32p4x8_8x4x32_neon_i8mm, EndToEnd) { } TEST(matmul_clamp_f32_qsi8d32p1x8_qsi4c32p4x8_1x4x32_neon_dotprod, EndToEnd) { + if 
(!cpu_has_dotprod()) { + GTEST_SKIP(); + } + const std::uint64_t seed = 0; const size_t M = 32; diff --git a/test/tests/matmul_test.cpp b/test/tests/matmul_test.cpp index bec296200dea82235584f5dbf6b02891152215d5..f2d5e54423ae8cffeecc9fb2d31bb5647fc463f8 100644 --- a/test/tests/matmul_test.cpp +++ b/test/tests/matmul_test.cpp @@ -58,8 +58,6 @@ struct MatMulMethod { bool lhs_transposed; ///< LHS matrix is transposed. bool rhs_transposed; ///< RHS matrix is transposed. - bool is_sme2; ///< Test is a sme2 test - DataFormat dst_format; ///< Data format of the destination matrix. DataFormat lhs_format; ///< Data format of the LHS matrix. DataFormat packed_lhs_format; ///< Data format of the packed LHS matrix. @@ -67,6 +65,11 @@ struct MatMulMethod { DataFormat packed_rhs_format; ///< Data format of the packed RHS matrix. DataFormat bias_format; ///< Data format of the bias vector. + /// Check if CPU supports required features. + /// + /// @return Supported (true) or not supported (false). + std::function fn_is_supported; + /// Gets mr value. /// /// This is the packing parameter which must be used to pack the LHS matrix (if necessary). 
@@ -344,8 +347,6 @@ static const std::array matmul_methods = { .lhs_transposed = false, .rhs_transposed = false, - .is_sme2 = false, - .dst_format = DataFormat(DataType::FP16), .lhs_format = DataFormat(DataType::FP16), .packed_lhs_format = DataFormat(DataType::UNKNOWN), @@ -354,6 +355,7 @@ static const std::array matmul_methods = { DataType::FP16, 16, 0, DataFormat::PackFormat::BIAS_PER_ROW, DataType::FP16, DataType::UNKNOWN, 16, 1), .bias_format = DataFormat(DataType::FP16), + .fn_is_supported = cpu_has_fp16, .fn_get_mr = nullptr, .fn_get_nr = kai_get_nr_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla, .fn_get_kr = kai_get_kr_matmul_clamp_f16_f16_f16p16x1biasf16_6x16x8_neon_mla, @@ -393,8 +395,6 @@ static const std::array matmul_methods = { .lhs_transposed = false, .rhs_transposed = false, - .is_sme2 = false, - .dst_format = DataFormat(DataType::FP32), .lhs_format = DataFormat(DataType::FP32), .packed_lhs_format = DataFormat(DataType::UNKNOWN), @@ -403,6 +403,7 @@ static const std::array matmul_methods = { DataType::FP32, 8, 0, DataFormat::PackFormat::BIAS_PER_ROW, DataType::FP32, DataType::UNKNOWN, 8, 1), .bias_format = DataFormat(DataType::FP32), + .fn_is_supported = cpu_has_advsimd, .fn_get_mr = nullptr, .fn_get_nr = kai_get_nr_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla, .fn_get_kr = kai_get_kr_matmul_clamp_f32_f32_f32p8x1biasf32_6x8x4_neon_mla, @@ -442,8 +443,6 @@ static const std::array matmul_methods = { .lhs_transposed = false, .rhs_transposed = false, - .is_sme2 = true, - .dst_format = DataFormat(DataType::FP32), .lhs_format = DataFormat(DataType::FP32), .packed_lhs_format = DataFormat(DataType::FP32, 2 * get_sme_vector_length(), 1), @@ -453,6 +452,7 @@ static const std::array matmul_methods = { DataType::UNKNOWN, 2 * get_sme_vector_length(), 1), .bias_format = DataFormat(DataType::FP32), + .fn_is_supported = cpu_has_sme2, .fn_get_mr = kai_get_mr_matmul_clamp_f32_f32p2vlx1_f32p2vlx1biasf32_sme2_mopa, .fn_get_nr = 
kai_get_nr_matmul_clamp_f32_f32p2vlx1_f32p2vlx1biasf32_sme2_mopa, .fn_get_kr = kai_get_kr_matmul_clamp_f32_f32p2vlx1_f32p2vlx1biasf32_sme2_mopa, @@ -621,7 +621,7 @@ TEST_P(MatMulTest, PackedLhs) { const auto& data = test_data(); const auto& method = matmul_methods.at(method_no); - if (method.is_sme2 && !cpu_has_sme2()) { + if (method.fn_is_supported && !method.fn_is_supported()) { GTEST_SKIP(); } @@ -672,7 +672,7 @@ TEST_P(MatMulTest, PackedRhs) { const auto& data = test_data(); const auto& method = matmul_methods.at(method_no); - if (method.is_sme2 && !cpu_has_sme2()) { + if (method.fn_is_supported && !method.fn_is_supported()) { GTEST_SKIP(); } @@ -743,7 +743,7 @@ TEST_P(MatMulTest, Output) { const auto& data = test_data(); const auto& method = matmul_methods.at(method_no); - if (method.is_sme2 && !cpu_has_sme2()) { + if (method.fn_is_supported && !method.fn_is_supported()) { GTEST_SKIP(); }