/*
* QEMU float support
*
* The code in this source file is derived from release 2a of the SoftFloat
* IEC/IEEE Floating-point Arithmetic Package. Those parts of the code (and
* some later contributions) are provided under that license, as detailed below.
* It has subsequently been modified by contributors to the QEMU Project,
* so some portions are provided under:
* the SoftFloat-2a license
* the BSD license
* GPL-v2-or-later
*
* Any future contributions to this file after December 1st 2014 will be
* taken to be licensed under the Softfloat-2a license unless specifically
* indicated otherwise.
*/
/*
===============================================================================
This C source file is part of the SoftFloat IEC/IEEE Floating-point
Arithmetic Package, Release 2a.
Written by John R. Hauser. This work was made possible in part by the
International Computer Science Institute, located at Suite 600, 1947 Center
Street, Berkeley, California 94704. Funding was partially provided by the
National Science Foundation under grant MIP-9311980. The original version
of this code was written as part of a project to build a fixed-point vector
processor in collaboration with the University of California at Berkeley,
overseen by Profs. Nelson Morgan and John Wawrzynek. More information
is available through the Web page `http://HTTP.CS.Berkeley.EDU/~jhauser/
arithmetic/SoftFloat.html'.
THIS SOFTWARE IS DISTRIBUTED AS IS, FOR FREE. Although reasonable effort
has been made to avoid it, THIS SOFTWARE MAY CONTAIN FAULTS THAT WILL AT
TIMES RESULT IN INCORRECT BEHAVIOR. USE OF THIS SOFTWARE IS RESTRICTED TO
PERSONS AND ORGANIZATIONS WHO CAN AND WILL TAKE FULL RESPONSIBILITY FOR ANY
AND ALL LOSSES, COSTS, OR OTHER PROBLEMS ARISING FROM ITS USE.
Derivative works are acceptable, even for commercial purposes, so long as
(1) they include prominent notice that the work is derivative, and (2) they
include prominent notice akin to these four paragraphs for those parts of
this code that are retained.
===============================================================================
*/
/* BSD licensing:
* Copyright (c) 2006, Fabrice Bellard
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
/* Portions of this work are licensed under the terms of the GNU GPL,
* version 2 or later. See the COPYING file in the top-level directory.
*/
/* softfloat (and in particular the code in softfloat-specialize.h) is
* target-dependent and needs the TARGET_* macros.
*/
#include "qemu/osdep.h"
#include <math.h>
#include "qemu/bitops.h"
#include "fpu/softfloat.h"
/*----------------------------------------------------------------------------
| Primitive arithmetic functions, including multi-word arithmetic, and
| division and square root approximations. (Can be specialized to target if
| desired.)
*----------------------------------------------------------------------------*/
#include "fpu/softfloat-macros.h"
/*
* Hardfloat
*
* Fast emulation of guest FP instructions is challenging for two reasons.
* First, FP instruction semantics are similar but not identical, particularly
* when handling NaNs. Second, emulating the guest FP exception flags at
* reasonable speed is not trivial: reading the host's flags register with a
* feclearexcept & fetestexcept pair is slow [slightly slower than soft-fp],
* and trapping on every FP exception is neither fast nor pleasant to work with.
*
* We address these challenges by leveraging the host FPU for a subset of the
* operations. To do this we expand on the idea presented in this paper:
*
* Guo, Yu-Chuan, et al. "Translating the ARM Neon and VFP instructions in a
* binary translator." Software: Practice and Experience 46.12 (2016):1591-1615.
*
* The idea is thus to leverage the host FPU to (1) compute FP operations
* and (2) identify whether FP exceptions occurred while avoiding
* expensive exception flag register accesses.
*
* An important optimization shown in the paper is that, since exception
* flags are rarely cleared by the guest, we can avoid recomputing some flags.
* This is particularly useful for the inexact flag, which is very frequently
* raised in floating-point workloads.
*
* We optimize the code further by deferring to soft-fp whenever FP exception
* detection might get hairy. Two examples: (1) when at least one operand is
* denormal/inf/NaN; (2) when operands are not guaranteed to lead to a 0 result
* and the result is < the minimum normal.
*/
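/*
* Input-flushing helpers: when flush_inputs_to_zero is set, replace denormal
* inputs with a same-signed zero and raise float_flag_input_denormal. The
* __nocheck variants assume the caller has already tested flush_inputs_to_zero.
*/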
#define GEN_INPUT_FLUSH__NOCHECK(name, soft_t) \
static inline void name(soft_t *a, float_status *s) \
{ \
if (unlikely(soft_t ## _is_denormal(*a))) { \
*a = soft_t ## _set_sign(soft_t ## _zero, \
soft_t ## _is_neg(*a)); \
s->float_exception_flags |= float_flag_input_denormal; \
} \
}
GEN_INPUT_FLUSH__NOCHECK(float32_input_flush__nocheck, float32)
GEN_INPUT_FLUSH__NOCHECK(float64_input_flush__nocheck, float64)
#undef GEN_INPUT_FLUSH__NOCHECK
#define GEN_INPUT_FLUSH1(name, soft_t) \
static inline void name(soft_t *a, float_status *s) \
{ \
if (likely(!s->flush_inputs_to_zero)) { \
return; \
} \
soft_t ## _input_flush__nocheck(a, s); \
}
GEN_INPUT_FLUSH1(float32_input_flush1, float32)
GEN_INPUT_FLUSH1(float64_input_flush1, float64)
#undef GEN_INPUT_FLUSH1
#define GEN_INPUT_FLUSH2(name, soft_t) \
static inline void name(soft_t *a, soft_t *b, float_status *s) \
{ \
if (likely(!s->flush_inputs_to_zero)) { \
return; \
} \
soft_t ## _input_flush__nocheck(a, s); \
soft_t ## _input_flush__nocheck(b, s); \
}
GEN_INPUT_FLUSH2(float32_input_flush2, float32)
GEN_INPUT_FLUSH2(float64_input_flush2, float64)
#undef GEN_INPUT_FLUSH2
#define GEN_INPUT_FLUSH3(name, soft_t) \
static inline void name(soft_t *a, soft_t *b, soft_t *c, float_status *s) \
{ \
if (likely(!s->flush_inputs_to_zero)) { \
return; \
} \
soft_t ## _input_flush__nocheck(a, s); \
soft_t ## _input_flush__nocheck(b, s); \
soft_t ## _input_flush__nocheck(c, s); \
}
GEN_INPUT_FLUSH3(float32_input_flush3, float32)
GEN_INPUT_FLUSH3(float64_input_flush3, float64)
#undef GEN_INPUT_FLUSH3
/*
* Choose whether to use fpclassify or float32/64_* primitives in the generated
* hardfloat functions. Each combination of number of inputs and float size
* gets its own value.
*/
#if defined(__x86_64__)
# define QEMU_HARDFLOAT_1F32_USE_FP 0
# define QEMU_HARDFLOAT_1F64_USE_FP 1
# define QEMU_HARDFLOAT_2F32_USE_FP 0
# define QEMU_HARDFLOAT_2F64_USE_FP 1
# define QEMU_HARDFLOAT_3F32_USE_FP 0
# define QEMU_HARDFLOAT_3F64_USE_FP 1
#else
# define QEMU_HARDFLOAT_1F32_USE_FP 0
# define QEMU_HARDFLOAT_1F64_USE_FP 0
# define QEMU_HARDFLOAT_2F32_USE_FP 0
# define QEMU_HARDFLOAT_2F64_USE_FP 0
# define QEMU_HARDFLOAT_3F32_USE_FP 0
# define QEMU_HARDFLOAT_3F64_USE_FP 0
#endif
/*
* QEMU_HARDFLOAT_USE_ISINF chooses whether to use isinf() over
* float{32,64}_is_infinity when !USE_FP.
* On x86_64/aarch64, using the former over the latter can yield a ~6% speedup.
* On power64, however, using isinf() reduces fp-bench performance by up to 50%.
*/
#if defined(__x86_64__) || defined(__aarch64__)
# define QEMU_HARDFLOAT_USE_ISINF 1
#else
# define QEMU_HARDFLOAT_USE_ISINF 0
#endif
/*
* Some targets clear the FP flags before most FP operations. This prevents
* the use of hardfloat, since hardfloat relies on the inexact flag being
* already set.
*/
#if defined(TARGET_PPC) || defined(__FAST_MATH__)
# if defined(__FAST_MATH__)
# warning disabling hardfloat due to -ffast-math: hardfloat requires an exact \
IEEE implementation
# endif
# define QEMU_NO_HARDFLOAT 1
# define QEMU_SOFTFLOAT_ATTR QEMU_FLATTEN
#else
# define QEMU_NO_HARDFLOAT 0
# define QEMU_SOFTFLOAT_ATTR QEMU_FLATTEN __attribute__((noinline))
#endif
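/*
* Hardfloat is usable only when the inexact flag is already set (so we do not
* need to detect it again) and the rounding mode is the host's default
* round-to-nearest-even.
*/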
static inline bool can_use_fpu(const float_status *s)
{
if (QEMU_NO_HARDFLOAT) {
return false;
}
return likely(s->float_exception_flags & float_flag_inexact &&
s->float_rounding_mode == float_round_nearest_even);
}
/*
* Hardfloat generation functions. Each operation can have two flavors:
* either using softfloat primitives (e.g. float32_is_zero_or_normal) for
* most condition checks, or native ones (e.g. fpclassify).
*
* The flavor is chosen by the callers. Instead of using macros, we rely on the
* compiler to propagate constants and inline everything into the callers.
*
* We only generate functions for operations with two inputs, since only
* these are common enough to justify consolidating them into common code.
*/
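/*
* Unions to reinterpret a softfloat value (s) as the corresponding host FP
* type (h) without conversion, plus the function pointer types used by the
* generator functions below.
*/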
typedef union {
float32 s;
float h;
} union_float32;
typedef union {
float64 s;
double h;
} union_float64;
typedef bool (*f32_check_fn)(union_float32 a, union_float32 b);
typedef bool (*f64_check_fn)(union_float64 a, union_float64 b);
typedef float32 (*soft_f32_op2_fn)(float32 a, float32 b, float_status *s);
typedef float64 (*soft_f64_op2_fn)(float64 a, float64 b, float_status *s);
typedef float (*hard_f32_op2_fn)(float a, float b);
typedef double (*hard_f64_op2_fn)(double a, double b);
/* 2-input is-zero-or-normal */
static inline bool f32_is_zon2(union_float32 a, union_float32 b)
{
if (QEMU_HARDFLOAT_2F32_USE_FP) {
/*
* Not using a temp variable for consecutive fpclassify calls ends up
* generating faster code.
*/
return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) &&
(fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO);
}
return float32_is_zero_or_normal(a.s) &&
float32_is_zero_or_normal(b.s);
}
static inline bool f64_is_zon2(union_float64 a, union_float64 b)
{
if (QEMU_HARDFLOAT_2F64_USE_FP) {
return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) &&
(fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO);
}
return float64_is_zero_or_normal(a.s) &&
float64_is_zero_or_normal(b.s);
}
/* 3-input is-zero-or-normal */
static inline
bool f32_is_zon3(union_float32 a, union_float32 b, union_float32 c)
{
if (QEMU_HARDFLOAT_3F32_USE_FP) {
return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) &&
(fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO) &&
(fpclassify(c.h) == FP_NORMAL || fpclassify(c.h) == FP_ZERO);
}
return float32_is_zero_or_normal(a.s) &&
float32_is_zero_or_normal(b.s) &&
float32_is_zero_or_normal(c.s);
}
static inline
bool f64_is_zon3(union_float64 a, union_float64 b, union_float64 c)
{
if (QEMU_HARDFLOAT_3F64_USE_FP) {
return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) &&
(fpclassify(b.h) == FP_NORMAL || fpclassify(b.h) == FP_ZERO) &&
(fpclassify(c.h) == FP_NORMAL || fpclassify(c.h) == FP_ZERO);
}
return float64_is_zero_or_normal(a.s) &&
float64_is_zero_or_normal(b.s) &&
float64_is_zero_or_normal(c.s);
}
static inline bool f32_is_inf(union_float32 a)
{
if (QEMU_HARDFLOAT_USE_ISINF) {
return isinf(a.h);
}
return float32_is_infinity(a.s);
}
static inline bool f64_is_inf(union_float64 a)
{
if (QEMU_HARDFLOAT_USE_ISINF) {
return isinf(a.h);
}
return float64_is_infinity(a.s);
}
/*
* Generic two-operand hardfloat wrapper: use the host op @hard when
* can_use_fpu() allows it and @pre accepts the (flushed) inputs; otherwise
* fall back to @soft. An infinite hard result raises overflow; a tiny result
* (at or below the minimum normal) is redone in @soft unless @post shows the
* host result is exact. @fast_test/@fast_op provide an optional shortcut for
* trivially exact cases. Note: @fast_test and @post can be NULL.
*/
static inline float32
float32_gen2(float32 xa, float32 xb, float_status *s,
hard_f32_op2_fn hard, soft_f32_op2_fn soft,
f32_check_fn pre, f32_check_fn post,
f32_check_fn fast_test, soft_f32_op2_fn fast_op)
{
union_float32 ua, ub, ur;
ua.s = xa;
ub.s = xb;
if (unlikely(!can_use_fpu(s))) {
goto soft;
}
float32_input_flush2(&ua.s, &ub.s, s);
if (unlikely(!pre(ua, ub))) {
goto soft;
}
if (fast_test && fast_test(ua, ub)) {
return fast_op(ua.s, ub.s, s);
}
ur.h = hard(ua.h, ub.h);
if (unlikely(f32_is_inf(ur))) {
s->float_exception_flags |= float_flag_overflow;
} else if (unlikely(fabsf(ur.h) <= FLT_MIN)) {
if (post == NULL || post(ua, ub)) {
goto soft;
}
}
return ur.s;
soft:
return soft(ua.s, ub.s, s);
}
static inline float64
float64_gen2(float64 xa, float64 xb, float_status *s,
hard_f64_op2_fn hard, soft_f64_op2_fn soft,
f64_check_fn pre, f64_check_fn post,
f64_check_fn fast_test, soft_f64_op2_fn fast_op)
{
union_float64 ua, ub, ur;
ua.s = xa;
ub.s = xb;
if (unlikely(!can_use_fpu(s))) {
goto soft;
}
float64_input_flush2(&ua.s, &ub.s, s);
if (unlikely(!pre(ua, ub))) {
goto soft;
}
if (fast_test && fast_test(ua, ub)) {
return fast_op(ua.s, ub.s, s);
}
ur.h = hard(ua.h, ub.h);
if (unlikely(f64_is_inf(ur))) {
s->float_exception_flags |= float_flag_overflow;
} else if (unlikely(fabs(ur.h) <= DBL_MIN)) {
if (post == NULL || post(ua, ub)) {
goto soft;
}
}
return ur.s;
soft:
return soft(ua.s, ub.s, s);
}
/*----------------------------------------------------------------------------
| Returns the fraction bits of the single-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
static inline uint32_t extractFloat32Frac(float32 a)
{
return float32_val(a) & 0x007FFFFF;
}
/*----------------------------------------------------------------------------
| Returns the exponent bits of the single-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
static inline int extractFloat32Exp(float32 a)
{
return (float32_val(a) >> 23) & 0xFF;
}
/*----------------------------------------------------------------------------
| Returns the sign bit of the single-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
static inline flag extractFloat32Sign(float32 a)
{
return float32_val(a) >> 31;
}
/*----------------------------------------------------------------------------
| Returns the fraction bits of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
static inline uint64_t extractFloat64Frac(float64 a)
{
return float64_val(a) & UINT64_C(0x000FFFFFFFFFFFFF);
}
/*----------------------------------------------------------------------------
| Returns the exponent bits of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
static inline int extractFloat64Exp(float64 a)
{
return (float64_val(a) >> 52) & 0x7FF;
}
/*----------------------------------------------------------------------------
| Returns the sign bit of the double-precision floating-point value `a'.
*----------------------------------------------------------------------------*/
static inline flag extractFloat64Sign(float64 a)
{
return float64_val(a) >> 63;
}
/*
* Classify a floating-point number. Everything at or above float_class_qnan
* is a NaN, so cls >= float_class_qnan matches any NaN.
*/
typedef enum __attribute__ ((__packed__)) {
float_class_unclassified,
float_class_zero,
float_class_normal,
float_class_inf,
float_class_qnan, /* all NaNs from here */
float_class_snan,
} FloatClass;
/* Simple helpers for checking if, or what kind of, NaN we have */
static inline __attribute__((unused)) bool is_nan(FloatClass c)
{
return unlikely(c >= float_class_qnan);
}
static inline __attribute__((unused)) bool is_snan(FloatClass c)
{
return c == float_class_snan;
}
static inline __attribute__((unused)) bool is_qnan(FloatClass c)
{
return c == float_class_qnan;
}
/*
* Structure holding all of the decomposed parts of a float. The
* exponent is unbiased and the fraction is normalized. All
* calculations are done with a 64-bit fraction and then rounded as
* appropriate for the final format.
*
* Thanks to the packed FloatClass a decent compiler should be able to
* fit the whole structure into registers and avoid using the stack
* for parameter passing.
*/
typedef struct {
uint64_t frac;
int32_t exp;
FloatClass cls;
bool sign;
} FloatParts;
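/*
* The fraction is kept normalised with the implicit (integer) bit at
* DECOMPOSED_BINARY_POINT, e.g. 1.0 decomposes to frac == DECOMPOSED_IMPLICIT_BIT
* with exp == 0. The bit above it catches carry-out during addition and rounding.
*/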
#define DECOMPOSED_BINARY_POINT (64 - 2)
#define DECOMPOSED_IMPLICIT_BIT (1ull << DECOMPOSED_BINARY_POINT)
#define DECOMPOSED_OVERFLOW_BIT (DECOMPOSED_IMPLICIT_BIT << 1)
/* Structure holding all of the relevant parameters for a format.
* exp_size: the size of the exponent field
* exp_bias: the offset applied to the exponent field
* exp_max: the maximum normalised exponent
* frac_size: the size of the fraction field
* frac_shift: shift to normalise the fraction with DECOMPOSED_BINARY_POINT
* The following are computed based on the size of the fraction:
* frac_lsb: least significant bit of fraction
* frac_lsbm1: the bit below the least significant bit (for rounding)
* round_mask/roundeven_mask: masks used for rounding
* The following optional modifiers are available:
* arm_althp: handle ARM Alternative Half Precision
*/
typedef struct {
int exp_size;
int exp_bias;
int exp_max;
int frac_size;
int frac_shift;
uint64_t frac_lsb;
uint64_t frac_lsbm1;
uint64_t round_mask;
uint64_t roundeven_mask;
bool arm_althp;
} FloatFmt;
/* Expand fields based on the size of exponent and fraction */
#define FLOAT_PARAMS(E, F) \
.exp_size = E, \
.exp_bias = ((1 << E) - 1) >> 1, \
.exp_max = (1 << E) - 1, \
.frac_size = F, \
.frac_shift = DECOMPOSED_BINARY_POINT - F, \
.frac_lsb = 1ull << (DECOMPOSED_BINARY_POINT - F), \
.frac_lsbm1 = 1ull << ((DECOMPOSED_BINARY_POINT - F) - 1), \
.round_mask = (1ull << (DECOMPOSED_BINARY_POINT - F)) - 1, \
.roundeven_mask = (2ull << (DECOMPOSED_BINARY_POINT - F)) - 1
static const FloatFmt float16_params = {
FLOAT_PARAMS(5, 10)
};
static const FloatFmt float16_params_ahp = {
FLOAT_PARAMS(5, 10),
.arm_althp = true
};
static const FloatFmt float32_params = {
FLOAT_PARAMS(8, 23)
};
static const FloatFmt float64_params = {
FLOAT_PARAMS(11, 52)
};
/* Unpack a float to parts, but do not canonicalize. */
static inline FloatParts unpack_raw(FloatFmt fmt, uint64_t raw)
{
const int sign_pos = fmt.frac_size + fmt.exp_size;
return (FloatParts) {
.cls = float_class_unclassified,
.sign = extract64(raw, sign_pos, 1),
.exp = extract64(raw, fmt.frac_size, fmt.exp_size),
.frac = extract64(raw, 0, fmt.frac_size),
};
}
static inline FloatParts float16_unpack_raw(float16 f)
{
return unpack_raw(float16_params, f);
}
static inline FloatParts float32_unpack_raw(float32 f)
{
return unpack_raw(float32_params, f);
}
static inline FloatParts float64_unpack_raw(float64 f)
{
return unpack_raw(float64_params, f);
}
/* Pack a float from parts, but do not canonicalize. */
static inline uint64_t pack_raw(FloatFmt fmt, FloatParts p)
{
const int sign_pos = fmt.frac_size + fmt.exp_size;
uint64_t ret = deposit64(p.frac, fmt.frac_size, fmt.exp_size, p.exp);
return deposit64(ret, sign_pos, 1, p.sign);
}
static inline float16 float16_pack_raw(FloatParts p)
{
return make_float16(pack_raw(float16_params, p));
}
static inline float32 float32_pack_raw(FloatParts p)
{
return make_float32(pack_raw(float32_params, p));
}
static inline float64 float64_pack_raw(FloatParts p)
{
return make_float64(pack_raw(float64_params, p));
}
/*----------------------------------------------------------------------------
| Functions and definitions to determine: (1) whether tininess for underflow
| is detected before or after rounding by default, (2) what (if anything)
| happens when exceptions are raised, (3) how signaling NaNs are distinguished
| from quiet NaNs, (4) the default generated quiet NaNs, and (5) how NaNs
| are propagated from function inputs to output. These details are target-
| specific.
*----------------------------------------------------------------------------*/
#include "softfloat-specialize.inc.c"
/* Canonicalize EXP and FRAC, setting CLS. */
static FloatParts sf_canonicalize(FloatParts part, const FloatFmt *parm,
float_status *status)
{
if (part.exp == parm->exp_max && !parm->arm_althp) {
if (part.frac == 0) {
part.cls = float_class_inf;
} else {
part.frac <<= parm->frac_shift;
part.cls = (parts_is_snan_frac(part.frac, status)
? float_class_snan : float_class_qnan);
}
} else if (part.exp == 0) {
if (likely(part.frac == 0)) {
part.cls = float_class_zero;
} else if (status->flush_inputs_to_zero) {
float_raise(float_flag_input_denormal, status);
part.cls = float_class_zero;
part.frac = 0;
} else {
int shift = clz64(part.frac) - 1;
part.cls = float_class_normal;
part.exp = parm->frac_shift - parm->exp_bias - shift + 1;
part.frac <<= shift;
}
} else {
part.cls = float_class_normal;
part.exp -= parm->exp_bias;
part.frac = DECOMPOSED_IMPLICIT_BIT + (part.frac << parm->frac_shift);
}
return part;
}
/* Round and uncanonicalize a floating-point number by parts. There
* are FRAC_SHIFT bits that may require rounding at the bottom of the
* fraction; these bits will be removed. The exponent will be biased
* by EXP_BIAS and must be bounded by [0, EXP_MAX-1].
*/
static FloatParts round_canonical(FloatParts p, float_status *s,
const FloatFmt *parm)
{
const uint64_t frac_lsb = parm->frac_lsb;
const uint64_t frac_lsbm1 = parm->frac_lsbm1;
const uint64_t round_mask = parm->round_mask;
const uint64_t roundeven_mask = parm->roundeven_mask;
const int exp_max = parm->exp_max;
const int frac_shift = parm->frac_shift;
uint64_t frac, inc;
int exp, flags = 0;
bool overflow_norm;
frac = p.frac;
exp = p.exp;
switch (p.cls) {
case float_class_normal:
switch (s->float_rounding_mode) {
case float_round_nearest_even:
overflow_norm = false;
inc = ((frac & roundeven_mask) != frac_lsbm1 ? frac_lsbm1 : 0);
break;
case float_round_ties_away:
overflow_norm = false;
inc = frac_lsbm1;
break;
case float_round_to_zero:
overflow_norm = true;
inc = 0;
break;
case float_round_up:
inc = p.sign ? 0 : round_mask;
overflow_norm = p.sign;
break;
case float_round_down:
inc = p.sign ? round_mask : 0;
overflow_norm = !p.sign;
break;
case float_round_to_odd:
overflow_norm = true;
inc = frac & frac_lsb ? 0 : round_mask;
break;
default:
g_assert_not_reached();
}
exp += parm->exp_bias;
if (likely(exp > 0)) {
if (frac & round_mask) {
flags |= float_flag_inexact;
frac += inc;
if (frac & DECOMPOSED_OVERFLOW_BIT) {
frac >>= 1;
exp++;
}
}
frac >>= frac_shift;
if (parm->arm_althp) {
/* ARM Alt HP eschews Inf and NaN for a wider exponent. */
if (unlikely(exp > exp_max)) {
/* Overflow. Return the maximum normal. */
flags = float_flag_invalid;
exp = exp_max;
frac = -1;
}
} else if (unlikely(exp >= exp_max)) {
flags |= float_flag_overflow | float_flag_inexact;
if (overflow_norm) {
exp = exp_max - 1;
frac = -1;
} else {
p.cls = float_class_inf;
goto do_inf;
}
}
} else if (s->flush_to_zero) {
flags |= float_flag_output_denormal;
p.cls = float_class_zero;
goto do_zero;
} else {
bool is_tiny = (s->float_detect_tininess
== float_tininess_before_rounding)
|| (exp < 0)
|| !((frac + inc) & DECOMPOSED_OVERFLOW_BIT);
shift64RightJamming(frac, 1 - exp, &frac);
if (frac & round_mask) {
/* Need to recompute round-to-even. */
switch (s->float_rounding_mode) {
case float_round_nearest_even:
inc = ((frac & roundeven_mask) != frac_lsbm1
? frac_lsbm1 : 0);
break;
case float_round_to_odd:
inc = frac & frac_lsb ? 0 : round_mask;
break;
}
flags |= float_flag_inexact;
frac += inc;
}
exp = (frac & DECOMPOSED_IMPLICIT_BIT ? 1 : 0);
frac >>= frac_shift;
if (is_tiny && (flags & float_flag_inexact)) {
flags |= float_flag_underflow;
}
if (exp == 0 && frac == 0) {
p.cls = float_class_zero;
}
}
break;
case float_class_zero:
do_zero:
exp = 0;
frac = 0;
break;
case float_class_inf:
do_inf:
assert(!parm->arm_althp);
exp = exp_max;
frac = 0;
break;
case float_class_qnan:
case float_class_snan:
assert(!parm->arm_althp);
exp = exp_max;
frac >>= parm->frac_shift;
break;
default:
g_assert_not_reached();
}
float_raise(flags, s);
p.exp = exp;
p.frac = frac;
return p;
}
/* Explicit FloatFmt version */
static FloatParts float16a_unpack_canonical(float16 f, float_status *s,
const FloatFmt *params)
{
return sf_canonicalize(float16_unpack_raw(f), params, s);
}
static FloatParts float16_unpack_canonical(float16 f, float_status *s)
{
return float16a_unpack_canonical(f, s, &float16_params);
}
static float16 float16a_round_pack_canonical(FloatParts p, float_status *s,
const FloatFmt *params)
{
return float16_pack_raw(round_canonical(p, s, params));
}
static float16 float16_round_pack_canonical(FloatParts p, float_status *s)
{
return float16a_round_pack_canonical(p, s, &float16_params);
}
static FloatParts float32_unpack_canonical(float32 f, float_status *s)
{
return sf_canonicalize(float32_unpack_raw(f), &float32_params, s);
}
static float32 float32_round_pack_canonical(FloatParts p, float_status *s)
{
return float32_pack_raw(round_canonical(p, s, &float32_params));
}
static FloatParts float64_unpack_canonical(float64 f, float_status *s)
{
return sf_canonicalize(float64_unpack_raw(f), &float64_params, s);
}
static float64 float64_round_pack_canonical(FloatParts p, float_status *s)
{
return float64_pack_raw(round_canonical(p, s, &float64_params));
}
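/*
* Turn an input NaN into the NaN to return: a signaling NaN raises invalid and
* is silenced; in default_nan_mode the default NaN is returned instead.
*/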
static FloatParts return_nan(FloatParts a, float_status *s)
{
switch (a.cls) {
case float_class_snan:
s->float_exception_flags |= float_flag_invalid;
a = parts_silence_nan(a, s);
/* fall through */
case float_class_qnan:
if (s->default_nan_mode) {
return parts_default_nan(s);
}
break;
default:
g_assert_not_reached();
}
return a;
}
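/*
* Select which of two NaN operands to propagate, via the target-specific
* pickNaN(), silencing it if needed. Signaling NaNs raise invalid; in
* default_nan_mode the default NaN is returned.
*/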
static FloatParts pick_nan(FloatParts a, FloatParts b, float_status *s)
{
if (is_snan(a.cls) || is_snan(b.cls)) {
s->float_exception_flags |= float_flag_invalid;
}
if (s->default_nan_mode) {
return parts_default_nan(s);
} else {
if (pickNaN(a.cls, b.cls,
a.frac > b.frac ||
(a.frac == b.frac && a.sign < b.sign))) {
a = b;
}
if (is_snan(a.cls)) {
return parts_silence_nan(a, s);
}
}
return a;
}
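/*
* As pick_nan(), but for the three muladd operands. pickNaNMulAdd() is also
* told about the Inf * 0 case so the target can decide whether it raises
* invalid and which NaN it returns.
*/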
static FloatParts pick_nan_muladd(FloatParts a, FloatParts b, FloatParts c,
bool inf_zero, float_status *s)
{
int which;
if (is_snan(a.cls) || is_snan(b.cls) || is_snan(c.cls)) {
s->float_exception_flags |= float_flag_invalid;
}
which = pickNaNMulAdd(a.cls, b.cls, c.cls, inf_zero, s);
if (s->default_nan_mode) {
/* Note that this check is after pickNaNMulAdd so that function
* has an opportunity to set the Invalid flag.
*/
which = 3;
}
switch (which) {
case 0:
break;
case 1:
a = b;
break;
case 2:
a = c;
break;
case 3:
return parts_default_nan(s);
default:
g_assert_not_reached();
}
if (is_snan(a.cls)) {
return parts_silence_nan(a, s);
}
return a;
}
/*
* Returns the result of adding or subtracting the floating-point
* values `a' and `b'. The operation is performed
* according to the IEC/IEEE Standard for Binary Floating-Point
* Arithmetic.
*/
static FloatParts addsub_floats(FloatParts a, FloatParts b, bool subtract,
float_status *s)
{
bool a_sign = a.sign;
bool b_sign = b.sign ^ subtract;
if (a_sign != b_sign) {
/* Subtraction */
if (a.cls == float_class_normal && b.cls == float_class_normal) {
if (a.exp > b.exp || (a.exp == b.exp && a.frac >= b.frac)) {
shift64RightJamming(b.frac, a.exp - b.exp, &b.frac);
a.frac = a.frac - b.frac;
} else {
shift64RightJamming(a.frac, b.exp - a.exp, &a.frac);
a.frac = b.frac - a.frac;
a.exp = b.exp;
a_sign ^= 1;
}
if (a.frac == 0) {
a.cls = float_class_zero;
a.sign = s->float_rounding_mode == float_round_down;
} else {
int shift = clz64(a.frac) - 1;
a.frac = a.frac << shift;
a.exp = a.exp - shift;
a.sign = a_sign;
}
return a;
}
if (is_nan(a.cls) || is_nan(b.cls)) {
return pick_nan(a, b, s);
}
if (a.cls == float_class_inf) {
if (b.cls == float_class_inf) {
float_raise(float_flag_invalid, s);
return parts_default_nan(s);
}
return a;
}
if (a.cls == float_class_zero && b.cls == float_class_zero) {
a.sign = s->float_rounding_mode == float_round_down;
return a;
}
if (a.cls == float_class_zero || b.cls == float_class_inf) {
b.sign = a_sign ^ 1;
return b;
}
if (b.cls == float_class_zero) {
return a;
}
} else {
/* Addition */
if (a.cls == float_class_normal && b.cls == float_class_normal) {
if (a.exp > b.exp) {
shift64RightJamming(b.frac, a.exp - b.exp, &b.frac);
} else if (a.exp < b.exp) {
shift64RightJamming(a.frac, b.exp - a.exp, &a.frac);
a.exp = b.exp;
}
a.frac += b.frac;
if (a.frac & DECOMPOSED_OVERFLOW_BIT) {
shift64RightJamming(a.frac, 1, &a.frac);
a.exp += 1;
}
return a;
}
if (is_nan(a.cls) || is_nan(b.cls)) {
return pick_nan(a, b, s);
}
if (a.cls == float_class_inf || b.cls == float_class_zero) {
return a;
}
if (b.cls == float_class_inf || a.cls == float_class_zero) {
b.sign = b_sign;
return b;
}
}
g_assert_not_reached();
}
/*
* Returns the result of adding or subtracting the floating-point
* values `a' and `b'. The operation is performed according to the
* IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*/
float16 QEMU_FLATTEN float16_add(float16 a, float16 b, float_status *status)
{
FloatParts pa = float16_unpack_canonical(a, status);
FloatParts pb = float16_unpack_canonical(b, status);
FloatParts pr = addsub_floats(pa, pb, false, status);
return float16_round_pack_canonical(pr, status);
}
float16 QEMU_FLATTEN float16_sub(float16 a, float16 b, float_status *status)
{
FloatParts pa = float16_unpack_canonical(a, status);
FloatParts pb = float16_unpack_canonical(b, status);
FloatParts pr = addsub_floats(pa, pb, true, status);
return float16_round_pack_canonical(pr, status);
}
static float32 QEMU_SOFTFLOAT_ATTR
soft_f32_addsub(float32 a, float32 b, bool subtract, float_status *status)
{
FloatParts pa = float32_unpack_canonical(a, status);
FloatParts pb = float32_unpack_canonical(b, status);
FloatParts pr = addsub_floats(pa, pb, subtract, status);
return float32_round_pack_canonical(pr, status);
}
static inline float32 soft_f32_add(float32 a, float32 b, float_status *status)
{
return soft_f32_addsub(a, b, false, status);
}
static inline float32 soft_f32_sub(float32 a, float32 b, float_status *status)
{
return soft_f32_addsub(a, b, true, status);
}
static float64 QEMU_SOFTFLOAT_ATTR
soft_f64_addsub(float64 a, float64 b, bool subtract, float_status *status)
{
FloatParts pa = float64_unpack_canonical(a, status);
FloatParts pb = float64_unpack_canonical(b, status);
FloatParts pr = addsub_floats(pa, pb, subtract, status);
return float64_round_pack_canonical(pr, status);
}
static inline float64 soft_f64_add(float64 a, float64 b, float_status *status)
{
return soft_f64_addsub(a, b, false, status);
}
static inline float64 soft_f64_sub(float64 a, float64 b, float_status *status)
{
return soft_f64_addsub(a, b, true, status);
}
static float hard_f32_add(float a, float b)
{
return a + b;
}
static float hard_f32_sub(float a, float b)
{
return a - b;
}
static double hard_f64_add(double a, double b)
{
return a + b;
}
static double hard_f64_sub(double a, double b)
{
return a - b;
}
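/*
* Post-check for add/sub: a tiny hard result needs the softfloat path unless
* both operands were zero, in which case the zero result is exact.
*/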
static bool f32_addsub_post(union_float32 a, union_float32 b)
{
if (QEMU_HARDFLOAT_2F32_USE_FP) {
return !(fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO);
}
return !(float32_is_zero(a.s) && float32_is_zero(b.s));
}
static bool f64_addsub_post(union_float64 a, union_float64 b)
{
if (QEMU_HARDFLOAT_2F64_USE_FP) {
return !(fpclassify(a.h) == FP_ZERO && fpclassify(b.h) == FP_ZERO);
} else {
return !(float64_is_zero(a.s) && float64_is_zero(b.s));
}
}
static float32 float32_addsub(float32 a, float32 b, float_status *s,
hard_f32_op2_fn hard, soft_f32_op2_fn soft)
{
return float32_gen2(a, b, s, hard, soft,
f32_is_zon2, f32_addsub_post, NULL, NULL);
}
static float64 float64_addsub(float64 a, float64 b, float_status *s,
hard_f64_op2_fn hard, soft_f64_op2_fn soft)
{
return float64_gen2(a, b, s, hard, soft,
f64_is_zon2, f64_addsub_post, NULL, NULL);
}
float32 QEMU_FLATTEN
float32_add(float32 a, float32 b, float_status *s)
{
return float32_addsub(a, b, s, hard_f32_add, soft_f32_add);
}
float32 QEMU_FLATTEN
float32_sub(float32 a, float32 b, float_status *s)
{
return float32_addsub(a, b, s, hard_f32_sub, soft_f32_sub);
}
float64 QEMU_FLATTEN
float64_add(float64 a, float64 b, float_status *s)
{
return float64_addsub(a, b, s, hard_f64_add, soft_f64_add);
}
float64 QEMU_FLATTEN
float64_sub(float64 a, float64 b, float_status *s)
{
return float64_addsub(a, b, s, hard_f64_sub, soft_f64_sub);
}
/*
* Returns the result of multiplying the floating-point values `a' and
* `b'. The operation is performed according to the IEC/IEEE Standard
* for Binary Floating-Point Arithmetic.
*/
static FloatParts mul_floats(FloatParts a, FloatParts b, float_status *s)
{
bool sign = a.sign ^ b.sign;
if (a.cls == float_class_normal && b.cls == float_class_normal) {
uint64_t hi, lo;
int exp = a.exp + b.exp;
mul64To128(a.frac, b.frac, &hi, &lo);
shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo);
if (lo & DECOMPOSED_OVERFLOW_BIT) {
shift64RightJamming(lo, 1, &lo);
exp += 1;
}
/* Re-use a */
a.exp = exp;
a.sign = sign;
a.frac = lo;
return a;
}
/* handle all the NaN cases */
if (is_nan(a.cls) || is_nan(b.cls)) {
return pick_nan(a, b, s);
}
/* Inf * Zero == NaN */
if ((a.cls == float_class_inf && b.cls == float_class_zero) ||
(a.cls == float_class_zero && b.cls == float_class_inf)) {
s->float_exception_flags |= float_flag_invalid;
return parts_default_nan(s);
}
/* Multiply by 0 or Inf */
if (a.cls == float_class_inf || a.cls == float_class_zero) {
a.sign = sign;
return a;
}
if (b.cls == float_class_inf || b.cls == float_class_zero) {
b.sign = sign;
return b;
}
g_assert_not_reached();
}
float16 QEMU_FLATTEN float16_mul(float16 a, float16 b, float_status *status)
{
FloatParts pa = float16_unpack_canonical(a, status);
FloatParts pb = float16_unpack_canonical(b, status);
FloatParts pr = mul_floats(pa, pb, status);
return float16_round_pack_canonical(pr, status);
}
static float32 QEMU_SOFTFLOAT_ATTR
soft_f32_mul(float32 a, float32 b, float_status *status)
{
FloatParts pa = float32_unpack_canonical(a, status);
FloatParts pb = float32_unpack_canonical(b, status);
FloatParts pr = mul_floats(pa, pb, status);
return float32_round_pack_canonical(pr, status);
}
static float64 QEMU_SOFTFLOAT_ATTR
soft_f64_mul(float64 a, float64 b, float_status *status)
{
FloatParts pa = float64_unpack_canonical(a, status);
FloatParts pb = float64_unpack_canonical(b, status);
FloatParts pr = mul_floats(pa, pb, status);
return float64_round_pack_canonical(pr, status);
}
static float hard_f32_mul(float a, float b)
{
return a * b;
}
static double hard_f64_mul(double a, double b)
{
return a * b;
}
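/*
* Fast path for multiplication: if either (zero-or-normal) operand is zero,
* the result is a correctly-signed zero and no flags are raised, so the host
* op can be skipped entirely.
*/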
static bool f32_mul_fast_test(union_float32 a, union_float32 b)
{
return float32_is_zero(a.s) || float32_is_zero(b.s);
}
static bool f64_mul_fast_test(union_float64 a, union_float64 b)
{
return float64_is_zero(a.s) || float64_is_zero(b.s);
}
static float32 f32_mul_fast_op(float32 a, float32 b, float_status *s)
{
bool signbit = float32_is_neg(a) ^ float32_is_neg(b);
return float32_set_sign(float32_zero, signbit);
}
static float64 f64_mul_fast_op(float64 a, float64 b, float_status *s)
{
bool signbit = float64_is_neg(a) ^ float64_is_neg(b);
return float64_set_sign(float64_zero, signbit);
}
float32 QEMU_FLATTEN
float32_mul(float32 a, float32 b, float_status *s)
{
return float32_gen2(a, b, s, hard_f32_mul, soft_f32_mul,
f32_is_zon2, NULL, f32_mul_fast_test, f32_mul_fast_op);
}
float64 QEMU_FLATTEN
float64_mul(float64 a, float64 b, float_status *s)
{
return float64_gen2(a, b, s, hard_f64_mul, soft_f64_mul,
f64_is_zon2, NULL, f64_mul_fast_test, f64_mul_fast_op);
}
/*
* Returns the result of multiplying the floating-point values `a' and
* `b' then adding 'c', with no intermediate rounding step after the
* multiplication. The operation is performed according to the
* IEC/IEEE Standard for Binary Floating-Point Arithmetic 754-2008.
* The flags argument allows the caller to select negation of the
* addend, the intermediate product, or the final result. (The
* difference between this and having the caller do a separate
* negation is that negating externally will flip the sign bit on
* NaNs.)
*/
static FloatParts muladd_floats(FloatParts a, FloatParts b, FloatParts c,
int flags, float_status *s)
{
bool inf_zero = ((1 << a.cls) | (1 << b.cls)) ==
((1 << float_class_inf) | (1 << float_class_zero));
bool p_sign;
bool sign_flip = flags & float_muladd_negate_result;
FloatClass p_class;
uint64_t hi, lo;
int p_exp;
/* It is implementation-defined whether the cases of (0,inf,qnan)
* and (inf,0,qnan) raise InvalidOperation or not (and what QNaN
* they return if they do), so we have to hand this information
* off to the target-specific pick-a-NaN routine.
*/
if (is_nan(a.cls) || is_nan(b.cls) || is_nan(c.cls)) {
return pick_nan_muladd(a, b, c, inf_zero, s);
}
if (inf_zero) {
s->float_exception_flags |= float_flag_invalid;
return parts_default_nan(s);
}
if (flags & float_muladd_negate_c) {
c.sign ^= 1;
}
p_sign = a.sign ^ b.sign;
if (flags & float_muladd_negate_product) {
p_sign ^= 1;
}
if (a.cls == float_class_inf || b.cls == float_class_inf) {
p_class = float_class_inf;
} else if (a.cls == float_class_zero || b.cls == float_class_zero) {
p_class = float_class_zero;
} else {
p_class = float_class_normal;
}
if (c.cls == float_class_inf) {
if (p_class == float_class_inf && p_sign != c.sign) {
s->float_exception_flags |= float_flag_invalid;
return parts_default_nan(s);
} else {
a.cls = float_class_inf;
a.sign = c.sign ^ sign_flip;
return a;
}
}
if (p_class == float_class_inf) {
a.cls = float_class_inf;
a.sign = p_sign ^ sign_flip;
return a;
}
if (p_class == float_class_zero) {
if (c.cls == float_class_zero) {
if (p_sign != c.sign) {
p_sign = s->float_rounding_mode == float_round_down;
}
c.sign = p_sign;
} else if (flags & float_muladd_halve_result) {
c.exp -= 1;
}
c.sign ^= sign_flip;
return c;
}
/* a & b should be normals now... */
assert(a.cls == float_class_normal &&
b.cls == float_class_normal);
p_exp = a.exp + b.exp;
/* Multiplying two 62-bit numbers produces a (2*62) == 124-bit
* result.
*/
mul64To128(a.frac, b.frac, &hi, &lo);
/* binary point now at bit 124 */
/* check for overflow */
if (hi & (1ULL << (DECOMPOSED_BINARY_POINT * 2 + 1 - 64))) {
shift128RightJamming(hi, lo, 1, &hi, &lo);
p_exp += 1;
}
/* + add/sub */
if (c.cls == float_class_zero) {
/* move binary point back to 62 */
shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo);
} else {
int exp_diff = p_exp - c.exp;
if (p_sign == c.sign) {
/* Addition */
if (exp_diff <= 0) {
shift128RightJamming(hi, lo,
DECOMPOSED_BINARY_POINT - exp_diff,
&hi, &lo);
lo += c.frac;
p_exp = c.exp;
} else {
uint64_t c_hi, c_lo;
/* shift c to the same binary point as the product (124) */
c_hi = c.frac >> 2;
c_lo = 0;
shift128RightJamming(c_hi, c_lo,
exp_diff,
&c_hi, &c_lo);
add128(hi, lo, c_hi, c_lo, &hi, &lo);
/* move binary point back to 62 */
shift128RightJamming(hi, lo, DECOMPOSED_BINARY_POINT, &hi, &lo);
}
if (lo & DECOMPOSED_OVERFLOW_BIT) {
shift64RightJamming(lo, 1, &lo);
p_exp += 1;
}
} else {
/* Subtraction */
uint64_t c_hi, c_lo;
/* make C binary point match product at bit 124 */
c_hi = c.frac >> 2;
c_lo = 0;
if (exp_diff <= 0) {
shift128RightJamming(hi, lo, -exp_diff, &hi, &lo);
if (exp_diff == 0
&&
(hi > c_hi || (hi == c_hi && lo >= c_lo))) {
sub128(hi, lo, c_hi, c_lo, &hi, &lo);
} else {
sub128(c_hi, c_lo, hi, lo, &hi, &lo);
p_sign ^= 1;
p_exp = c.exp;
}
} else {
shift128RightJamming(c_hi, c_lo,
exp_diff,
&c_hi, &c_lo);
sub128(hi, lo, c_hi, c_lo, &hi, &lo);
}
if (hi == 0 && lo == 0) {
a.cls = float_class_zero;
a.sign = s->float_rounding_mode == float_round_down;
a.sign ^= sign_flip;
return a;
} else {
int shift;
if (hi != 0) {
shift = clz64(hi);
} else {
shift = clz64(lo) + 64;
}
/* Normalizing to a binary point of 124 is the
correct adjustment for the exponent. However, since we're
shifting, we might as well put the binary point back
at 62 where we really want it. Therefore shift as
if we're leaving 1 bit at the top of the word, but
adjust the exponent as if we're leaving 3 bits. */
shift -= 1;
if (shift >= 64) {
lo = lo << (shift - 64);
} else {
hi = (hi << shift) | (lo >> (64 - shift));
lo = hi | ((lo << shift) != 0);
}
p_exp -= shift - 2;
}
}
}
if (flags & float_muladd_halve_result) {
p_exp -= 1;
}
/* finally prepare our result */
a.cls = float_class_normal;
a.sign = p_sign ^ sign_flip;
a.exp = p_exp;
a.frac = lo;
return a;
}
float16 QEMU_FLATTEN float16_muladd(float16 a, float16 b, float16 c,
int flags, float_status *status)
{
FloatParts pa = float16_unpack_canonical(a, status);
FloatParts pb = float16_unpack_canonical(b, status);
FloatParts pc = float16_unpack_canonical(c, status);
FloatParts pr = muladd_floats(pa, pb, pc, flags, status);
return float16_round_pack_canonical(pr, status);
}
static float32 QEMU_SOFTFLOAT_ATTR
soft_f32_muladd(float32 a, float32 b, float32 c, int flags,
float_status *status)
{
FloatParts pa = float32_unpack_canonical(a, status);
FloatParts pb = float32_unpack_canonical(b, status);
FloatParts pc = float32_unpack_canonical(c, status);
FloatParts pr = muladd_floats(pa, pb, pc, flags, status);
return float32_round_pack_canonical(pr, status);
}
static float64 QEMU_SOFTFLOAT_ATTR
soft_f64_muladd(float64 a, float64 b, float64 c, int flags,
float_status *status)
{
FloatParts pa = float64_unpack_canonical(a, status);
FloatParts pb = float64_unpack_canonical(b, status);
FloatParts pc = float64_unpack_canonical(c, status);
FloatParts pr = muladd_floats(pa, pb, pc, flags, status);
return float64_round_pack_canonical(pr, status);
}
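/* When set, float32_muladd/float64_muladd always take the softfloat path. */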
static bool force_soft_fma;
float32 QEMU_FLATTEN
float32_muladd(float32 xa, float32 xb, float32 xc, int flags, float_status *s)
{
union_float32 ua, ub, uc, ur;
ua.s = xa;
ub.s = xb;
uc.s = xc;
if (unlikely(!can_use_fpu(s))) {
goto soft;
}
if (unlikely(flags & float_muladd_halve_result)) {
goto soft;
}
float32_input_flush3(&ua.s, &ub.s, &uc.s, s);
if (unlikely(!f32_is_zon3(ua, ub, uc))) {
goto soft;
}
if (unlikely(force_soft_fma)) {
goto soft;
}
/*
* When either a or b is zero, there's no need to check for under/overflow,
* since we know the addend is (normal || 0) and the product is 0.
*/
if (float32_is_zero(ua.s) || float32_is_zero(ub.s)) {
union_float32 up;
bool prod_sign;
prod_sign = float32_is_neg(ua.s) ^ float32_is_neg(ub.s);
prod_sign ^= !!(flags & float_muladd_negate_product);
up.s = float32_set_sign(float32_zero, prod_sign);
if (flags & float_muladd_negate_c) {
uc.h = -uc.h;
}
ur.h = up.h + uc.h;
} else {
union_float32 ua_orig = ua;
union_float32 uc_orig = uc;
if (flags & float_muladd_negate_product) {
ua.h = -ua.h;
}
if (flags & float_muladd_negate_c) {
uc.h = -uc.h;
}
ur.h = fmaf(ua.h, ub.h, uc.h);
if (unlikely(f32_is_inf(ur))) {
s->float_exception_flags |= float_flag_overflow;
} else if (unlikely(fabsf(ur.h) <= FLT_MIN)) {
ua = ua_orig;
uc = uc_orig;
goto soft;
}
}
if (flags & float_muladd_negate_result) {
return float32_chs(ur.s);
}
return ur.s;
soft:
return soft_f32_muladd(ua.s, ub.s, uc.s, flags, s);
}
float64 QEMU_FLATTEN
float64_muladd(float64 xa, float64 xb, float64 xc, int flags, float_status *s)
{
union_float64 ua, ub, uc, ur;
ua.s = xa;
ub.s = xb;
uc.s = xc;
if (unlikely(!can_use_fpu(s))) {
goto soft;
}
if (unlikely(flags & float_muladd_halve_result)) {
goto soft;
}
float64_input_flush3(&ua.s, &ub.s, &uc.s, s);
if (unlikely(!f64_is_zon3(ua, ub, uc))) {
goto soft;
}
if (unlikely(force_soft_fma)) {
goto soft;
}
/*
* When either a or b is zero, there's no need to check for under/overflow,
* since we know the addend is (normal || 0) and the product is 0.
*/
if (float64_is_zero(ua.s) || float64_is_zero(ub.s)) {
union_float64 up;
bool prod_sign;
prod_sign = float64_is_neg(ua.s) ^ float64_is_neg(ub.s);
prod_sign ^= !!(flags & float_muladd_negate_product);
up.s = float64_set_sign(float64_zero, prod_sign);
if (flags & float_muladd_negate_c) {
uc.h = -uc.h;
}
ur.h = up.h + uc.h;
} else {
union_float64 ua_orig = ua;
union_float64 uc_orig = uc;
if (flags & float_muladd_negate_product) {
ua.h = -ua.h;
}
if (flags & float_muladd_negate_c) {
uc.h = -uc.h;
}
ur.h = fma(ua.h, ub.h, uc.h);
if (unlikely(f64_is_inf(ur))) {
s->float_exception_flags |= float_flag_overflow;
} else if (unlikely(fabs(ur.h) <= DBL_MIN)) {
ua = ua_orig;
uc = uc_orig;
goto soft;
}
}
if (flags & float_muladd_negate_result) {
return float64_chs(ur.s);
}
return ur.s;
soft:
return soft_f64_muladd(ua.s, ub.s, uc.s, flags, s);
}
/*
* Returns the result of dividing the floating-point value `a' by the
* corresponding value `b'. The operation is performed according to
* the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*/
static FloatParts div_floats(FloatParts a, FloatParts b, float_status *s)
{
bool sign = a.sign ^ b.sign;
if (a.cls == float_class_normal && b.cls == float_class_normal) {
uint64_t n0, n1, q, r;
int exp = a.exp - b.exp;
/*
* We want a 2*N / N-bit division to produce exactly an N-bit
* result, so that we do not lose any precision and so that we
* do not have to renormalize afterward. If A.frac < B.frac,
* then division would produce an (N-1)-bit result; shift A left
* by one to produce an N-bit result, and decrement the
* exponent to match.
*
* The udiv_qrnnd algorithm that we're using requires normalization,
* i.e. the msb of the denominator must be set. Since we know that
* DECOMPOSED_BINARY_POINT is msb-1, the inputs must be shifted left
* by one (more), and the remainder must be shifted right by one.
*/
if (a.frac < b.frac) {
exp -= 1;
shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 2, &n1, &n0);
} else {
shift128Left(0, a.frac, DECOMPOSED_BINARY_POINT + 1, &n1, &n0);
}
q = udiv_qrnnd(&r, n1, n0, b.frac << 1);
/*
* Set lsb if there is a remainder, to set inexact.
* As mentioned above, to find the actual value of the remainder we
* would need to shift right, but (1) we are only concerned about
* non-zero-ness, and (2) the remainder will always be even because
* both inputs to the division primitive are even.
*/
a.frac = q | (r != 0);
a.sign = sign;
a.exp = exp;
return a;
}
/* handle all the NaN cases */
if (is_nan(a.cls) || is_nan(b.cls)) {
return pick_nan(a, b, s);
}
/* 0/0 or Inf/Inf */
if (a.cls == b.cls
&&
(a.cls == float_class_inf || a.cls == float_class_zero)) {
s->float_exception_flags |= float_flag_invalid;
return parts_default_nan(s);
}
/* Inf / x or 0 / x */
if (a.cls == float_class_inf || a.cls == float_class_zero) {
a.sign = sign;
return a;
}
/* Div 0 => Inf */
if (b.cls == float_class_zero) {
s->float_exception_flags |= float_flag_divbyzero;
a.cls = float_class_inf;
a.sign = sign;
return a;
}
/* Div by Inf */
if (b.cls == float_class_inf) {
a.cls = float_class_zero;
a.sign = sign;
return a;
}
g_assert_not_reached();
}
float16 float16_div(float16 a, float16 b, float_status *status)
{
FloatParts pa = float16_unpack_canonical(a, status);
FloatParts pb = float16_unpack_canonical(b, status);
FloatParts pr = div_floats(pa, pb, status);
return float16_round_pack_canonical(pr, status);
}
static float32 QEMU_SOFTFLOAT_ATTR
soft_f32_div(float32 a, float32 b, float_status *status)
{
FloatParts pa = float32_unpack_canonical(a, status);
FloatParts pb = float32_unpack_canonical(b, status);
FloatParts pr = div_floats(pa, pb, status);
return float32_round_pack_canonical(pr, status);
}
static float64 QEMU_SOFTFLOAT_ATTR
soft_f64_div(float64 a, float64 b, float_status *status)
{
FloatParts pa = float64_unpack_canonical(a, status);
FloatParts pb = float64_unpack_canonical(b, status);
FloatParts pr = div_floats(pa, pb, status);
return float64_round_pack_canonical(pr, status);
}
static float hard_f32_div(float a, float b)
{
return a / b;
}
static double hard_f64_div(double a, double b)
{
return a / b;
}
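/*
* Division checks: the pre-check additionally requires a normal divisor, which
* rules out division by zero; the post-check only defers a tiny result to
* softfloat when the dividend is non-zero, since zero divided by a normal is
* exactly zero.
*/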
static bool f32_div_pre(union_float32 a, union_float32 b)
{
if (QEMU_HARDFLOAT_2F32_USE_FP) {
return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) &&
fpclassify(b.h) == FP_NORMAL;
}
return float32_is_zero_or_normal(a.s) && float32_is_normal(b.s);
}
static bool f64_div_pre(union_float64 a, union_float64 b)
{
if (QEMU_HARDFLOAT_2F64_USE_FP) {
return (fpclassify(a.h) == FP_NORMAL || fpclassify(a.h) == FP_ZERO) &&
fpclassify(b.h) == FP_NORMAL;
}
return float64_is_zero_or_normal(a.s) && float64_is_normal(b.s);
}
static bool f32_div_post(union_float32 a, union_float32 b)
{
if (QEMU_HARDFLOAT_2F32_USE_FP) {
return fpclassify(a.h) != FP_ZERO;
}
return !float32_is_zero(a.s);
}
static bool f64_div_post(union_float64 a, union_float64 b)
{
if (QEMU_HARDFLOAT_2F64_USE_FP) {
return fpclassify(a.h) != FP_ZERO;
}
return !float64_is_zero(a.s);
}
float32 QEMU_FLATTEN
float32_div(float32 a, float32 b, float_status *s)
{
return float32_gen2(a, b, s, hard_f32_div, soft_f32_div,
f32_div_pre, f32_div_post, NULL, NULL);
}
float64 QEMU_FLATTEN
float64_div(float64 a, float64 b, float_status *s)
{
return float64_gen2(a, b, s, hard_f64_div, soft_f64_div,
f64_div_pre, f64_div_post, NULL, NULL);
}
/*
* Float to Float conversions
*
* Returns the result of converting one float format to another. The
* conversion is performed according to the IEC/IEEE Standard for
* Binary Floating-Point Arithmetic.
*
* The float_to_float helper only needs to take care of raising
* invalid exceptions and handling the conversion on NaNs.
*/
static FloatParts float_to_float(FloatParts a, const FloatFmt *dstf,
float_status *s)
{
if (dstf->arm_althp) {
switch (a.cls) {
case float_class_qnan:
case float_class_snan:
/* There is no NaN in the destination format. Raise Invalid
* and return a zero with the sign of the input NaN.
*/
s->float_exception_flags |= float_flag_invalid;
a.cls = float_class_zero;
a.frac = 0;
a.exp = 0;
break;
case float_class_inf:
/* There is no Inf in the destination format. Raise Invalid
* and return the maximum normal with the correct sign.
*/
s->float_exception_flags |= float_flag_invalid;
a.cls = float_class_normal;
a.exp = dstf->exp_max;
a.frac = ((1ull << dstf->frac_size) - 1) << dstf->frac_shift;
break;
default:
break;
}
} else if (is_nan(a.cls)) {
if (is_snan(a.cls)) {
s->float_exception_flags |= float_flag_invalid;
a = parts_silence_nan(a, s);
}
if (s->default_nan_mode) {
return parts_default_nan(s);
}
}
return a;
}
float32 float16_to_float32(float16 a, bool ieee, float_status *s)
{
const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp;
FloatParts p = float16a_unpack_canonical(a, s, fmt16);
FloatParts pr = float_to_float(p, &float32_params, s);
return float32_round_pack_canonical(pr, s);
}
float64 float16_to_float64(float16 a, bool ieee, float_status *s)
{
const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp;
FloatParts p = float16a_unpack_canonical(a, s, fmt16);
FloatParts pr = float_to_float(p, &float64_params, s);
return float64_round_pack_canonical(pr, s);
}
float16 float32_to_float16(float32 a, bool ieee, float_status *s)
{
const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp;
FloatParts p = float32_unpack_canonical(a, s);
FloatParts pr = float_to_float(p, fmt16, s);
return float16a_round_pack_canonical(pr, s, fmt16);
}
static float64 QEMU_SOFTFLOAT_ATTR
soft_float32_to_float64(float32 a, float_status *s)
{
FloatParts p = float32_unpack_canonical(a, s);
FloatParts pr = float_to_float(p, &float64_params, s);
return float64_round_pack_canonical(pr, s);
}
float64 float32_to_float64(float32 a, float_status *s)
{
if (likely(float32_is_normal(a))) {
/* Widening conversion can never produce inexact results. */
union_float32 uf;
union_float64 ud;
uf.s = a;
ud.h = uf.h;
return ud.s;
} else if (float32_is_zero(a)) {
return float64_set_sign(float64_zero, float32_is_neg(a));
} else {
return soft_float32_to_float64(a, s);
}
}
float16 float64_to_float16(float64 a, bool ieee, float_status *s)
{
const FloatFmt *fmt16 = ieee ? &float16_params : &float16_params_ahp;
FloatParts p = float64_unpack_canonical(a, s);
FloatParts pr = float_to_float(p, fmt16, s);
return float16a_round_pack_canonical(pr, s, fmt16);
}
float32 float64_to_float32(float64 a, float_status *s)
{
FloatParts p = float64_unpack_canonical(a, s);
FloatParts pr = float_to_float(p, &float32_params, s);
return float32_round_pack_canonical(pr, s);
}
/*
* Rounds the floating-point value `a' to an integer (after scaling by
* 2**scale), and returns the result as a floating-point value. The
* operation is performed according to the IEC/IEEE Standard for Binary
* Floating-Point Arithmetic.
*/
static FloatParts round_to_int(FloatParts a, int rmode,
int scale, float_status *s)
{
switch (a.cls) {
case float_class_qnan:
case float_class_snan:
return return_nan(a, s);
case float_class_zero:
case float_class_inf:
/* already "integral" */
break;
case float_class_normal:
scale = MIN(MAX(scale, -0x10000), 0x10000);
a.exp += scale;
if (a.exp >= DECOMPOSED_BINARY_POINT) {
/* already integral */
break;
}
if (a.exp < 0) {
bool one;
/* all fractional */
s->float_exception_flags |= float_flag_inexact;
switch (rmode) {
case float_round_nearest_even:
one = a.exp == -1 && a.frac > DECOMPOSED_IMPLICIT_BIT;
break;
case float_round_ties_away:
one = a.exp == -1 && a.frac >= DECOMPOSED_IMPLICIT_BIT;
break;
case float_round_to_zero:
one = false;
break;
case float_round_up:
one = !a.sign;
break;
case float_round_down:
one = a.sign;
break;
case float_round_to_odd:
one = true;
break;
default:
g_assert_not_reached();
}
if (one) {
a.frac = DECOMPOSED_IMPLICIT_BIT;
a.exp = 0;
} else {
a.cls = float_class_zero;
}
} else {
uint64_t frac_lsb = DECOMPOSED_IMPLICIT_BIT >> a.exp;
uint64_t frac_lsbm1 = frac_lsb >> 1;
uint64_t rnd_even_mask = (frac_lsb - 1) | frac_lsb;
uint64_t rnd_mask = rnd_even_mask >> 1;
uint64_t inc;
switch (rmode) {
case float_round_nearest_even:
inc = ((a.frac & rnd_even_mask) != frac_lsbm1 ? frac_lsbm1 : 0);
break;
case float_round_ties_away:
inc = frac_lsbm1;
break;
case float_round_to_zero:
inc = 0;
break;
case float_round_up:
inc = a.sign ? 0 : rnd_mask;
break;
case float_round_down:
inc = a.sign ? rnd_mask : 0;
break;
case float_round_to_odd:
inc = a.frac & frac_lsb ? 0 : rnd_mask;
break;
default:
g_assert_not_reached();
}
if (a.frac & rnd_mask) {
s->float_exception_flags |= float_flag_inexact;
a.frac += inc;
a.frac &= ~rnd_mask;
if (a.frac & DECOMPOSED_OVERFLOW_BIT) {
a.frac >>= 1;
a.exp++;
}
}
}
break;
default:
g_assert_not_reached();
}
return a;
}
float16 float16_round_to_int(float16 a, float_status *s)
{
FloatParts pa = float16_unpack_canonical(a, s);
FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s);
return float16_round_pack_canonical(pr, s);
}
float32 float32_round_to_int(float32 a, float_status *s)
{
FloatParts pa = float32_unpack_canonical(a, s);
FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s);
return float32_round_pack_canonical(pr, s);
}
float64 float64_round_to_int(float64 a, float_status *s)
{
FloatParts pa = float64_unpack_canonical(a, s);
FloatParts pr = round_to_int(pa, s->float_rounding_mode, 0, s);
return float64_round_pack_canonical(pr, s);
}
/*
* Returns the result of converting the floating-point value `a' to
* the two's complement integer format. The conversion is performed
* according to the IEC/IEEE Standard for Binary Floating-Point
* Arithmetic---which means in particular that the conversion is
* rounded according to the current rounding mode. If `a' is a NaN,
* the largest positive integer is returned. Otherwise, if the
* conversion overflows, the largest integer with the same sign as `a'
* is returned.
*/
static int64_t round_to_int_and_pack(FloatParts in, int rmode, int scale,
int64_t min, int64_t max,
float_status *s)
{
uint64_t r;
int orig_flags = get_float_exception_flags(s);
FloatParts p = round_to_int(in, rmode, scale, s);
switch (p.cls) {
case float_class_snan:
case float_class_qnan:
s->float_exception_flags = orig_flags | float_flag_invalid;
return max;
case float_class_inf:
s->float_exception_flags = orig_flags | float_flag_invalid;
return p.sign ? min : max;
case float_class_zero:
return 0;
case float_class_normal:
if (p.exp < DECOMPOSED_BINARY_POINT) {
r = p.frac >> (DECOMPOSED_BINARY_POINT - p.exp);
} else if (p.exp - DECOMPOSED_BINARY_POINT < 2) {
r = p.frac << (p.exp - DECOMPOSED_BINARY_POINT);
} else {
r = UINT64_MAX;
}
if (p.sign) {
if (r <= -(uint64_t) min) {
return -r;
} else {
s->float_exception_flags = orig_flags | float_flag_invalid;
return min;
}
} else {
if (r <= max) {
return r;
} else {
s->float_exception_flags = orig_flags | float_flag_invalid;
return max;
}
}
default:
g_assert_not_reached();
}
}
int16_t float16_to_int16_scalbn(float16 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float16_unpack_canonical(a, s),
rmode, scale, INT16_MIN, INT16_MAX, s);
}
int32_t float16_to_int32_scalbn(float16 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float16_unpack_canonical(a, s),
rmode, scale, INT32_MIN, INT32_MAX, s);
}
int64_t float16_to_int64_scalbn(float16 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float16_unpack_canonical(a, s),
rmode, scale, INT64_MIN, INT64_MAX, s);
}
int16_t float32_to_int16_scalbn(float32 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float32_unpack_canonical(a, s),
rmode, scale, INT16_MIN, INT16_MAX, s);
}
int32_t float32_to_int32_scalbn(float32 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float32_unpack_canonical(a, s),
rmode, scale, INT32_MIN, INT32_MAX, s);
}
int64_t float32_to_int64_scalbn(float32 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float32_unpack_canonical(a, s),
rmode, scale, INT64_MIN, INT64_MAX, s);
}
int16_t float64_to_int16_scalbn(float64 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float64_unpack_canonical(a, s),
rmode, scale, INT16_MIN, INT16_MAX, s);
}
int32_t float64_to_int32_scalbn(float64 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float64_unpack_canonical(a, s),
rmode, scale, INT32_MIN, INT32_MAX, s);
}
int64_t float64_to_int64_scalbn(float64 a, int rmode, int scale,
float_status *s)
{
return round_to_int_and_pack(float64_unpack_canonical(a, s),
rmode, scale, INT64_MIN, INT64_MAX, s);
}
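/*
 * The scale argument of the _scalbn converters effectively multiplies
 * the input by 2^scale before rounding (this matches the scale
 * handling visible in int_to_float()/uint_to_float() further down).
 * A sketch, with one_point_five standing for a float64 encoding 1.5:
 *
 *     // Produce a Q16.16 fixed-point value: 1.5 * 2^16 = 0x18000.
 *     int32_t fx = float64_to_int32_scalbn(one_point_five,
 *                                          float_round_to_zero, 16, &st);
 */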
int16_t float16_to_int16(float16 a, float_status *s)
{
return float16_to_int16_scalbn(a, s->float_rounding_mode, 0, s);
}
int32_t float16_to_int32(float16 a, float_status *s)
{
return float16_to_int32_scalbn(a, s->float_rounding_mode, 0, s);
}
int64_t float16_to_int64(float16 a, float_status *s)
{
return float16_to_int64_scalbn(a, s->float_rounding_mode, 0, s);
}
int16_t float32_to_int16(float32 a, float_status *s)
{
return float32_to_int16_scalbn(a, s->float_rounding_mode, 0, s);
}
int32_t float32_to_int32(float32 a, float_status *s)
{
return float32_to_int32_scalbn(a, s->float_rounding_mode, 0, s);
}
int64_t float32_to_int64(float32 a, float_status *s)
{
return float32_to_int64_scalbn(a, s->float_rounding_mode, 0, s);
}
int16_t float64_to_int16(float64 a, float_status *s)
{
return float64_to_int16_scalbn(a, s->float_rounding_mode, 0, s);
}
int32_t float64_to_int32(float64 a, float_status *s)
{
return float64_to_int32_scalbn(a, s->float_rounding_mode, 0, s);
}
int64_t float64_to_int64(float64 a, float_status *s)
{
return float64_to_int64_scalbn(a, s->float_rounding_mode, 0, s);
}
int16_t float16_to_int16_round_to_zero(float16 a, float_status *s)
{
return float16_to_int16_scalbn(a, float_round_to_zero, 0, s);
}
int32_t float16_to_int32_round_to_zero(float16 a, float_status *s)
{
return float16_to_int32_scalbn(a, float_round_to_zero, 0, s);
}
int64_t float16_to_int64_round_to_zero(float16 a, float_status *s)
{
return float16_to_int64_scalbn(a, float_round_to_zero, 0, s);
}
int16_t float32_to_int16_round_to_zero(float32 a, float_status *s)
{
return float32_to_int16_scalbn(a, float_round_to_zero, 0, s);
}
int32_t float32_to_int32_round_to_zero(float32 a, float_status *s)
{
return float32_to_int32_scalbn(a, float_round_to_zero, 0, s);
}
int64_t float32_to_int64_round_to_zero(float32 a, float_status *s)
{
return float32_to_int64_scalbn(a, float_round_to_zero, 0, s);
}
int16_t float64_to_int16_round_to_zero(float64 a, float_status *s)
{
return float64_to_int16_scalbn(a, float_round_to_zero, 0, s);
}
int32_t float64_to_int32_round_to_zero(float64 a, float_status *s)
{
return float64_to_int32_scalbn(a, float_round_to_zero, 0, s);
}
int64_t float64_to_int64_round_to_zero(float64 a, float_status *s)
{
return float64_to_int64_scalbn(a, float_round_to_zero, 0, s);
}
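/*
 * The _round_to_zero variants above ignore the rounding mode held in
 * float_status and always truncate: with 2.7 as input,
 * float64_to_int32() under nearest-even returns 3 while
 * float64_to_int32_round_to_zero() returns 2 (both raise inexact).
 */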
/*
* Returns the result of converting the floating-point value `a' to
* the unsigned integer format. The conversion is performed according
* to the IEC/IEEE Standard for Binary Floating-Point
* Arithmetic---which means in particular that the conversion is
* rounded according to the current rounding mode. If `a' is a NaN,
* the largest unsigned integer is returned. Otherwise, if the
 * conversion overflows, the largest unsigned integer is returned. If
 * `a' is negative, zero is returned; if `a' rounds to a non-zero
 * negative integer the invalid exception flag is raised, while
 * negative values that round to zero raise only the inexact
 * exception flag.
*/
static uint64_t round_to_uint_and_pack(FloatParts in, int rmode, int scale,
uint64_t max, float_status *s)
{
int orig_flags = get_float_exception_flags(s);
FloatParts p = round_to_int(in, rmode, scale, s);
uint64_t r;
switch (p.cls) {
case float_class_snan:
case float_class_qnan:
s->float_exception_flags = orig_flags | float_flag_invalid;
return max;
case float_class_inf:
s->float_exception_flags = orig_flags | float_flag_invalid;
return p.sign ? 0 : max;
case float_class_zero:
return 0;
case float_class_normal:
if (p.sign) {
s->float_exception_flags = orig_flags | float_flag_invalid;
return 0;
}
if (p.exp < DECOMPOSED_BINARY_POINT) {
r = p.frac >> (DECOMPOSED_BINARY_POINT - p.exp);
} else if (p.exp - DECOMPOSED_BINARY_POINT < 2) {
r = p.frac << (p.exp - DECOMPOSED_BINARY_POINT);
} else {
s->float_exception_flags = orig_flags | float_flag_invalid;
return max;
}
        /* For uint64 this check never fires: if p.exp were too large
         * to shift the decomposed fraction, we would already have
         * taken the else branch above and returned max.
         */
if (r > max) {
s->float_exception_flags = orig_flags | float_flag_invalid;
return max;
}
return r;
default:
g_assert_not_reached();
}
}
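/*
 * Behaviour sketch for the unsigned wrappers below (st uses the
 * nearest-even rounding mode):
 *
 *     float64_to_uint32(NaN, &st)    -> UINT32_MAX, invalid raised
 *     float64_to_uint32(-1.0, &st)   -> 0, invalid raised
 *     float64_to_uint32(-0.25, &st)  -> 0, only inexact raised, since
 *                                       the value rounds to zero
 */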
uint16_t float16_to_uint16_scalbn(float16 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float16_unpack_canonical(a, s),
rmode, scale, UINT16_MAX, s);
}
uint32_t float16_to_uint32_scalbn(float16 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float16_unpack_canonical(a, s),
rmode, scale, UINT32_MAX, s);
}
uint64_t float16_to_uint64_scalbn(float16 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float16_unpack_canonical(a, s),
rmode, scale, UINT64_MAX, s);
}
uint16_t float32_to_uint16_scalbn(float32 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float32_unpack_canonical(a, s),
rmode, scale, UINT16_MAX, s);
}
uint32_t float32_to_uint32_scalbn(float32 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float32_unpack_canonical(a, s),
rmode, scale, UINT32_MAX, s);
}
uint64_t float32_to_uint64_scalbn(float32 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float32_unpack_canonical(a, s),
rmode, scale, UINT64_MAX, s);
}
uint16_t float64_to_uint16_scalbn(float64 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float64_unpack_canonical(a, s),
rmode, scale, UINT16_MAX, s);
}
uint32_t float64_to_uint32_scalbn(float64 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float64_unpack_canonical(a, s),
rmode, scale, UINT32_MAX, s);
}
uint64_t float64_to_uint64_scalbn(float64 a, int rmode, int scale,
float_status *s)
{
return round_to_uint_and_pack(float64_unpack_canonical(a, s),
rmode, scale, UINT64_MAX, s);
}
uint16_t float16_to_uint16(float16 a, float_status *s)
{
return float16_to_uint16_scalbn(a, s->float_rounding_mode, 0, s);
}
uint32_t float16_to_uint32(float16 a, float_status *s)
{
return float16_to_uint32_scalbn(a, s->float_rounding_mode, 0, s);
}
uint64_t float16_to_uint64(float16 a, float_status *s)
{
return float16_to_uint64_scalbn(a, s->float_rounding_mode, 0, s);
}
uint16_t float32_to_uint16(float32 a, float_status *s)
{
return float32_to_uint16_scalbn(a, s->float_rounding_mode, 0, s);
}
uint32_t float32_to_uint32(float32 a, float_status *s)
{
return float32_to_uint32_scalbn(a, s->float_rounding_mode, 0, s);
}
uint64_t float32_to_uint64(float32 a, float_status *s)
{
return float32_to_uint64_scalbn(a, s->float_rounding_mode, 0, s);
}
uint16_t float64_to_uint16(float64 a, float_status *s)
{
return float64_to_uint16_scalbn(a, s->float_rounding_mode, 0, s);
}
uint32_t float64_to_uint32(float64 a, float_status *s)
{
return float64_to_uint32_scalbn(a, s->float_rounding_mode, 0, s);
}
uint64_t float64_to_uint64(float64 a, float_status *s)
{
return float64_to_uint64_scalbn(a, s->float_rounding_mode, 0, s);
}
uint16_t float16_to_uint16_round_to_zero(float16 a, float_status *s)
{
return float16_to_uint16_scalbn(a, float_round_to_zero, 0, s);
}
uint32_t float16_to_uint32_round_to_zero(float16 a, float_status *s)
{
return float16_to_uint32_scalbn(a, float_round_to_zero, 0, s);
}
uint64_t float16_to_uint64_round_to_zero(float16 a, float_status *s)
{
return float16_to_uint64_scalbn(a, float_round_to_zero, 0, s);
}
uint16_t float32_to_uint16_round_to_zero(float32 a, float_status *s)
{
return float32_to_uint16_scalbn(a, float_round_to_zero, 0, s);
}
uint32_t float32_to_uint32_round_to_zero(float32 a, float_status *s)
{
return float32_to_uint32_scalbn(a, float_round_to_zero, 0, s);
}
uint64_t float32_to_uint64_round_to_zero(float32 a, float_status *s)
{
return float32_to_uint64_scalbn(a, float_round_to_zero, 0, s);
}
uint16_t float64_to_uint16_round_to_zero(float64 a, float_status *s)
{
return float64_to_uint16_scalbn(a, float_round_to_zero, 0, s);
}
uint32_t float64_to_uint32_round_to_zero(float64 a, float_status *s)
{
return float64_to_uint32_scalbn(a, float_round_to_zero, 0, s);
}
uint64_t float64_to_uint64_round_to_zero(float64 a, float_status *s)
{
return float64_to_uint64_scalbn(a, float_round_to_zero, 0, s);
}
/*
* Integer to float conversions
*
* Returns the result of converting the two's complement integer `a'
* to the floating-point format. The conversion is performed according
* to the IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*/
static FloatParts int_to_float(int64_t a, int scale, float_status *status)
{
FloatParts r = { .sign = false };
if (a == 0) {
r.cls = float_class_zero;
} else {
uint64_t f = a;
int shift;
r.cls = float_class_normal;
if (a < 0) {
f = -f;
r.sign = true;
}
shift = clz64(f) - 1;
scale = MIN(MAX(scale, -0x10000), 0x10000);
r.exp = DECOMPOSED_BINARY_POINT - shift + scale;
r.frac = (shift < 0 ? DECOMPOSED_IMPLICIT_BIT : f << shift);
}
return r;
}
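/*
 * Worked example, assuming the decomposed layout with the implicit bit
 * at bit 62: int_to_float(6, 0, s) sees f = 6, clz64(6) = 61,
 * shift = 60, so
 *
 *     r.exp  = 62 - 60 = 2
 *     r.frac = 6 << 60      (bits 62 and 61 set, i.e. 1.5 in 2.62)
 *
 * which is 1.5 * 2^2 = 6.0.  INT64_MIN is the one input whose
 * magnitude cannot be shifted below the implicit bit; for it shift is
 * -1 and the fraction is pinned to DECOMPOSED_IMPLICIT_BIT, giving
 * 1.0 * 2^63.
 */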
float16 int64_to_float16_scalbn(int64_t a, int scale, float_status *status)
{
FloatParts pa = int_to_float(a, scale, status);
return float16_round_pack_canonical(pa, status);
}
float16 int32_to_float16_scalbn(int32_t a, int scale, float_status *status)
{
return int64_to_float16_scalbn(a, scale, status);
}
float16 int16_to_float16_scalbn(int16_t a, int scale, float_status *status)
{
return int64_to_float16_scalbn(a, scale, status);
}
float16 int64_to_float16(int64_t a, float_status *status)
{
return int64_to_float16_scalbn(a, 0, status);
}
float16 int32_to_float16(int32_t a, float_status *status)
{
return int64_to_float16_scalbn(a, 0, status);
}
float16 int16_to_float16(int16_t a, float_status *status)
{
return int64_to_float16_scalbn(a, 0, status);
}
float32 int64_to_float32_scalbn(int64_t a, int scale, float_status *status)
{
FloatParts pa = int_to_float(a, scale, status);
return float32_round_pack_canonical(pa, status);
}
float32 int32_to_float32_scalbn(int32_t a, int scale, float_status *status)
{
return int64_to_float32_scalbn(a, scale, status);
}
float32 int16_to_float32_scalbn(int16_t a, int scale, float_status *status)
{
return int64_to_float32_scalbn(a, scale, status);
}
float32 int64_to_float32(int64_t a, float_status *status)
{
return int64_to_float32_scalbn(a, 0, status);
}
float32 int32_to_float32(int32_t a, float_status *status)
{
return int64_to_float32_scalbn(a, 0, status);
}
float32 int16_to_float32(int16_t a, float_status *status)
{
return int64_to_float32_scalbn(a, 0, status);
}
float64 int64_to_float64_scalbn(int64_t a, int scale, float_status *status)
{
FloatParts pa = int_to_float(a, scale, status);
return float64_round_pack_canonical(pa, status);
}
float64 int32_to_float64_scalbn(int32_t a, int scale, float_status *status)
{
return int64_to_float64_scalbn(a, scale, status);
}
float64 int16_to_float64_scalbn(int16_t a, int scale, float_status *status)
{
return int64_to_float64_scalbn(a, scale, status);
}
float64 int64_to_float64(int64_t a, float_status *status)
{
return int64_to_float64_scalbn(a, 0, status);
}
float64 int32_to_float64(int32_t a, float_status *status)
{
return int64_to_float64_scalbn(a, 0, status);
}
float64 int16_to_float64(int16_t a, float_status *status)
{
return int64_to_float64_scalbn(a, 0, status);
}
/*
* Unsigned Integer to float conversions
*
* Returns the result of converting the unsigned integer `a' to the
* floating-point format. The conversion is performed according to the
* IEC/IEEE Standard for Binary Floating-Point Arithmetic.
*/
static FloatParts uint_to_float(uint64_t a, int scale, float_status *status)
{
FloatParts r = { .sign = false };
if (a == 0) {
r.cls = float_class_zero;
} else {
scale = MIN(MAX(scale, -0x10000), 0x10000);
r.cls = float_class_normal;
if ((int64_t)a < 0) {
r.exp = DECOMPOSED_BINARY_POINT + 1 + scale;
shift64RightJamming(a, 1, &a);
r.frac = a;
} else {
int shift = clz64(a) - 1;
r.exp = DECOMPOSED_BINARY_POINT - shift + scale;
r.frac = a << shift;
}
}
return r;
}
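/*
 * The (int64_t)a < 0 branch above handles inputs with bit 63 set,
 * which cannot be shifted left into the fraction.  Sketch: for
 * a = 2^63 + 1, shift64RightJamming(a, 1, &a) yields 2^62 with the
 * dropped bit jammed into bit 0, so the fraction keeps a sticky low
 * bit and later rounding still sees the value as inexact, while
 * r.exp = DECOMPOSED_BINARY_POINT + 1 makes the implicit bit worth
 * 2^63.
 */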
float16 uint64_to_float16_scalbn(uint64_t a, int scale, float_status *status)
{
FloatParts pa = uint_to_float(a, scale, status);
return float16_round_pack_canonical(pa, status);
}
float16 uint32_to_float16_scalbn(uint32_t a, int scale, float_status *status)
{
return uint64_to_float16_scalbn(a, scale, status);
}
float16 uint16_to_float16_scalbn(uint16_t a, int scale, float_status *status)
{
return uint64_to_float16_scalbn(a, scale, status);
}
float16 uint64_to_float16(uint64_t a, float_status *status)
{
return uint64_to_float16_scalbn(a, 0, status);
}
float16 uint32_to_float16(uint32_t a, float_status *status)
{
return uint64_to_float16_scalbn(a, 0, status);
}
float16 uint16_to_float16(uint16_t a, float_status *status)
{
return uint64_to_float16_scalbn(a, 0, status);
}
float32 uint64_to_float32_scalbn(uint64_t a, int scale, float_status *status)
{
FloatParts pa = uint_to_float(a, scale, status);
return float32_round_pack_canonical(pa, status);
}
float32 uint32_to_float32_scalbn(uint32_t a, int scale, float_status *status)
{
return uint64_to_float32_scalbn(a, scale, status);
}
float32 uint16_to_float32_scalbn(uint16_t a, int scale, float_status *status)
{
return uint64_to_float32_scalbn(a, scale, status);
}
float32 uint64_to_float32(uint64_t a, float_status *status)
{
return uint64_to_float32_scalbn(a, 0, status);
}
float32 uint32_to_float32(uint32_t a, float_status *status)
{
return uint64_to_float32_scalbn(a, 0, status);
}
float32 uint16_to_float32(uint16_t a, float_status *status)
{
return uint64_to_float32_scalbn(a, 0, status);
}
float64 uint64_to_float64_scalbn(uint64_t a, int scale, float_status *status)
{
FloatParts pa = uint_to_float(a, scale, status);
return float64_round_pack_canonical(pa, status);
}
float64 uint32_to_float64_scalbn(uint32_t a, int scale, float_status *status)
{
return uint64_to_float64_scalbn(a, scale, status);
}
float64 uint16_to_float64_scalbn(uint16_t a, int scale, float_status *status)
{
return uint64_to_float64_scalbn(a, scale, status);
}
float64 uint64_to_float64(uint64_t a, float_status *status)
{
return uint64_to_float64_scalbn(a, 0, status);
}
float64 uint32_to_float64(uint32_t a, float_status *status)
{
return uint64_to_float64_scalbn(a, 0, status);
}
float64 uint16_to_float64(uint16_t a, float_status *status)
{
return uint64_to_float64_scalbn(a, 0, status);
}
/* Float Min/Max */
/* min() and max() functions. These can't be implemented as
* 'compare and pick one input' because that would mishandle
* NaNs and +0 vs -0.
*
* minnum() and maxnum() functions. These are similar to the min()
* and max() functions but if one of the arguments is a QNaN and
* the other is numerical then the numerical argument is returned.
* SNaNs will get quietened before being returned.
 * minnum() and maxnum() correspond to the IEEE 754-2008 minNum()
* and maxNum() operations. min() and max() are the typical min/max
* semantics provided by many CPUs which predate that specification.
*
 * minnummag() and maxnummag() functions correspond to the minNumMag()
 * and maxNumMag() operations from IEEE 754-2008.
*/
static FloatParts minmax_floats(FloatParts a, FloatParts b, bool ismin,
bool ieee, bool ismag, float_status *s)
{
if (unlikely(is_nan(a.cls) || is_nan(b.cls))) {
if (ieee) {
/* Takes two floating-point values `a' and `b', one of
* which is a NaN, and returns the appropriate NaN
* result. If either `a' or `b' is a signaling NaN,
* the invalid exception is raised.
*/
if (is_snan(a.cls) || is_snan(b.cls)) {
return pick_nan(a, b, s);
} else if (is_nan(a.cls) && !is_nan(b.cls)) {
return b;
} else if (is_nan(b.cls) && !is_nan(a.cls)) {
return a;
}
}
return pick_nan(a, b, s);
} else {
int a_exp, b_exp;
switch (a.cls) {
case float_class_normal:
a_exp = a.exp;
break;
case float_class_inf:
a_exp = INT_MAX;
break;
case float_class_zero:
a_exp = INT_MIN;
break;
default:
g_assert_not_reached();
break;
}
switch (b.cls) {
case float_class_normal:
b_exp = b.exp;
break;
case float_class_inf:
b_exp = INT_MAX;
break;
case float_class_zero:
b_exp = INT_MIN;
break;
default:
g_assert_not_reached();
break;
}
if (ismag && (a_exp != b_exp || a.frac != b.frac)) {
bool a_less = a_exp < b_exp;
if (a_exp == b_exp) {
a_less = a.frac < b.frac;
}
return a_less ^ ismin ? b : a;
}
if (a.sign == b.sign) {
bool a_less = a_exp < b_exp;
if (a_exp == b_exp) {
a_less = a.frac < b.frac;
}
return a.sign ^ a_less ^ ismin ? b : a;
} else {
return a.sign ^ ismin ? b : a;
}
}
}
#define MINMAX(sz, name, ismin, isieee, ismag)                          \
float ## sz float ## sz ## _ ## name(float ## sz a, float ## sz b, \
float_status *s) \
{ \
FloatParts pa = float ## sz ## _unpack_canonical(a, s); \
FloatParts pb = float ## sz ## _unpack_canonical(b, s); \
    FloatParts pr = minmax_floats(pa, pb, ismin, isieee, ismag, s);     \
\
return float ## sz ## _round_pack_canonical(pr, s); \
}
MINMAX(16, min, true, false, false)
MINMAX(16, minnum, true, true, false)
MINMAX(16, minnummag, true, true, true)
MINMAX(16, max, false, false, false)
MINMAX(16, maxnum, false, true, false)
MINMAX(16, maxnummag, false, true, true)
MINMAX(32, min, true, false, false)
MINMAX(32, minnum, true, true, false)
MINMAX(32, minnummag, true, true, true)
MINMAX(32, max, false, false, false)
MINMAX(32, maxnum, false, true, false)
MINMAX(32, maxnummag, false, true, true)
MINMAX(64, min, true, false, false)
MINMAX(64, minnum, true, true, false)
MINMAX(64, minnummag, true, true, true)
MINMAX(64, max, false, false, false)
MINMAX(64, maxnum, false, true, false)
MINMAX(64, maxnummag, false, true, true)
#undef MINMAX
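/*
 * Behaviour sketch for the generated functions; qnan, one and
 * minus_two stand for suitably encoded float32 operands:
 *
 *     float32_min(qnan, one, &st)            -> a NaN
 *     float32_minnum(qnan, one, &st)         -> one
 *     float32_minnummag(minus_two, one, &st) -> one   (|1.0| < |-2.0|)
 *
 * A signaling NaN operand raises invalid and yields a NaN in every
 * variant.
 */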
/* Floating point compare */
static int compare_floats(FloatParts a, FloatParts b, bool is_quiet,
float_status *s)
{
if (is_nan(a.cls) || is_nan(b.cls)) {
if (!is_quiet ||
a.cls == float_class_snan ||
b.cls == float_class_snan) {
s->float_exception_flags |= float_flag_invalid;
}
return float_relation_unordered;
}
if (a.cls == float_class_zero) {
if (b.cls == float_class_zero) {
return float_relation_equal;
}
return b.sign ? float_relation_greater : float_relation_less;
} else if (b.cls == float_class_zero) {
return a.sign ? float_relation_less : float_relation_greater;
}
/* The only really important thing about infinity is its sign. If
* both are infinities the sign marks the smallest of the two.
*/
if (a.cls == float_class_inf) {
if ((b.cls == float_class_inf) && (a.sign == b.sign)) {
return float_relation_equal;
}
return a.sign ? float_relation_less : float_relation_greater;
} else if (b.cls == float_class_inf) {
return b.sign ? float_relation_greater : float_relation_less;
}
if (a.sign != b.sign) {
return a.sign ? float_relation_less : float_relation_greater;
}
if (a.exp == b.exp) {
if (a.frac == b.frac) {
return float_relation_equal;
}
if (a.sign) {
return a.frac > b.frac ?
float_relation_less : float_relation_greater;
} else {
return a.frac > b.frac ?
float_relation_greater : float_relation_less;
}
} else {
if (a.sign) {
return a.exp > b.exp ? float_relation_less : float_relation_greater;
} else {
return a.exp > b.exp ? float_relation_greater : float_relation_less;
}
}
}
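/*
 * Relation sketch for the comparison wrappers below:
 *
 *     +0.0 vs -0.0           -> float_relation_equal
 *     -inf vs any finite x   -> float_relation_less
 *     anything vs a NaN      -> float_relation_unordered, with invalid
 *                               raised unless is_quiet is set and
 *                               neither operand is a signaling NaN
 */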
#define COMPARE(name, attr, sz) \
static int attr \
name(float ## sz a, float ## sz b, bool is_quiet, float_status *s) \
{ \
FloatParts pa = float ## sz ## _unpack_canonical(a, s); \
FloatParts pb = float ## sz ## _unpack_canonical(b, s); \
return compare_floats(pa, pb, is_quiet, s); \
}
COMPARE(soft_f16_compare, QEMU_FLATTEN, 16)
COMPARE(soft_f32_compare, QEMU_SOFTFLOAT_ATTR, 32)
COMPARE(soft_f64_compare, QEMU_SOFTFLOAT_ATTR, 64)
#undef COMPARE
int float16_compare(float16 a, float16 b, float_status *s)
{
return soft_f16_compare(a, b, false, s);
}
int float16_compare_quiet(float16 a, float16 b, float_status *s)
{
return soft_f16_compare(a,