// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
#define BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_

#include <cassert>
#include <limits>
#include <type_traits>

#include "base/numerics/safe_conversions.h"

namespace base {
namespace internal {

template <typename T, typename U>
struct CheckedMulFastAsmOp {
  static const bool is_supported =
      FastIntegerArithmeticPromotion<T, U>::is_contained;

  // The following is much more efficient than the Clang and GCC builtins for
  // performing overflow-checked multiplication when a twice wider type is
  // available. The below compiles down to 2-3 instructions, depending on the
  // width of the types in use.
  // As an example, an int32_t multiply compiles to:
  //    smull   r0, r1, r0, r1
  //    cmp     r1, r1, asr #31
  // And an int16_t multiply compiles to:
  //    smulbb  r1, r1, r0
  //    asr     r2, r1, #16
  //    cmp     r2, r1, asr #15
  template <typename V>
  __attribute__((always_inline)) static bool Do(T x, U y, V* result) {
    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
    Promotion presult;

    presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
    *result = static_cast<V>(presult);
    return IsValueInRangeForNumericType<V>(presult);
  }
};

template <typename T, typename U>
struct ClampedAddFastAsmOp {
  static const bool is_supported =
      BigEnoughPromotion<T, U>::is_contained &&
      IsTypeInRangeForNumericType<
          int32_t,
          typename BigEnoughPromotion<T, U>::type>::value;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if (IsIntegerArithmeticSafe<int, T, U>::value)
      return saturated_cast<V>(x + y);

    int32_t result;
    int32_t x_i32 = x;
    int32_t y_i32 = y;

    asm("qadd %[result], %[first], %[second]"
        : [result] "=r"(result)
        : [first] "r"(x_i32), [second] "r"(y_i32));
    return saturated_cast<V>(result);
  }
};

template <typename T, typename U>
struct ClampedSubFastAsmOp {
  static const bool is_supported =
      BigEnoughPromotion<T, U>::is_contained &&
      IsTypeInRangeForNumericType<
          int32_t,
          typename BigEnoughPromotion<T, U>::type>::value;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // This will get promoted to an int, so let the compiler do whatever is
    // clever and rely on the saturated cast to bounds check.
    if (IsIntegerArithmeticSafe<int, T, U>::value)
      return saturated_cast<V>(x - y);

    int32_t result;
    int32_t x_i32 = x;
    int32_t y_i32 = y;

    asm("qsub %[result], %[first], %[second]"
        : [result] "=r"(result)
        : [first] "r"(x_i32), [second] "r"(y_i32));
    return saturated_cast<V>(result);
  }
};

template <typename T, typename U>
struct ClampedMulFastAsmOp {
  static const bool is_supported = CheckedMulFastAsmOp<T, U>::is_supported;

  template <typename V>
  __attribute__((always_inline)) static V Do(T x, U y) {
    // Use the CheckedMulFastAsmOp for full-width 32-bit values, because
    // it's fewer instructions than promoting and then saturating.
    if (!IsIntegerArithmeticSafe<int32_t, T, U>::value &&
        !IsIntegerArithmeticSafe<uint32_t, T, U>::value) {
      V result;
      if (CheckedMulFastAsmOp<T, U>::Do(x, y, &result))
        return result;
      // Overflow: saturate toward the extreme implied by the operands' signs.
      return CommonMaxOrMin<V>(IsValueNegative(x) ^ IsValueNegative(y));
    }

    assert((FastIntegerArithmeticPromotion<T, U>::is_contained));
    using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
    return saturated_cast<V>(static_cast<Promotion>(x) *
                             static_cast<Promotion>(y));
  }
};

}  // namespace internal
}  // namespace base

#endif  // BASE_NUMERICS_SAFE_MATH_ARM_IMPL_H_
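
// Illustrative usage sketch (not part of the original header): these asm fast
// paths are not called directly. They are selected by the generic dispatch in
// the safe-math machinery and reached through the public CheckedNumeric /
// ClampedNumeric helpers. The header names below (checked_math.h,
// clamped_math.h) and the exact dispatch path are assumptions based on the
// surrounding base/numerics library, not something this file defines.
//
//   #include "base/numerics/checked_math.h"
//   #include "base/numerics/clamped_math.h"
//
//   int32_t product;
//   if (base::CheckMul(x, y).AssignIfValid(&product)) {
//     // On ARM, the overflow check above can lower to CheckedMulFastAsmOp
//     // when a twice-wider type is available.
//   }
//   int32_t sum = base::ClampAdd(x, y);  // May saturate via ClampedAddFastAsmOp.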