Daniel | tags: floating-point, numerical-computing, ieee-754, numerical-analysis
I am using the following algorithm for double-precision division, and I am trying to make it correctly rounded in a software emulation of floating point. Let a be the dividend and b the divisor.
All operations are performed in Q2.62.
b/2 is the significand of b, plus its implicit bit, shifted right by one position. In what follows, whenever a or b is written, it means the significand of a or b plus its implicit bit.
This is approximately 0x17504f333f9de6 (0x5D413CCCFE779800 in Q2.62).
There are 6 such iterations for the reciprocal r. The quotient q is computed by multiplying r by (the significand of) a.
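Since the intermediate steps x0 through x5 are not reproduced above, the following is only a generic sketch of what one Newton-Raphson reciprocal step in Q2.62 can look like when each product is truncated back to 64 bits. The helper names (umul64hi, mul_q2_62, nr_step_q2_62) and the toy driver are my own choices, not taken from the question:

#include <stdio.h>
#include <stdint.h>

/* upper 64 bits of the full 128-bit product of two 64-bit operands */
static uint64_t umul64hi (uint64_t a, uint64_t b)
{
    uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
    uint64_t p0 = a_lo * b_lo, p1 = a_lo * b_hi;
    uint64_t p2 = a_hi * b_lo, p3 = a_hi * b_hi;
    uint64_t mid = (p0 >> 32) + (uint32_t)p1 + (uint32_t)p2;
    return p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}

/* multiply two Q2.62 operands: the exact product is Q4.124 in 128 bits;
   dropping the low 62 fraction bits truncates it back to Q2.62 */
static uint64_t mul_q2_62 (uint64_t a, uint64_t b)
{
    return (umul64hi (a, b) << 2) | ((a * b) >> 62);
}

/* one Newton-Raphson step for r ~= 1/(b/2): r' = r * (2 - (b/2) * r) */
static uint64_t nr_step_q2_62 (uint64_t r, uint64_t b_half)
{
    const uint64_t two = 2ull << 62; /* 2.0 in Q2.62 */
    return mul_q2_62 (r, two - mul_q2_62 (b_half, r));
}

int main (void)
{
    uint64_t b_half = 3ull << 60;  /* 0.75 in Q2.62, i.e. b = 1.5 */
    uint64_t r = 1ull << 62;       /* crude starting guess: 1.0 */
    for (int i = 0; i < 6; i++) r = nr_step_q2_62 (r, b_half);
    /* 1/0.75 = 4/3, which is 0x5555555555555555 in Q2.62 (truncated) */
    printf ("r = %016llx\n", (unsigned long long)r);
    return 0;
}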
The final rounded result is obtained as follows:
if a <= (a - q * b/2):
    result = final_biased_exponent | q
else:
    result = final_biased_exponent | adjusted_q
This works correctly except in two cases: a) the result is subnormal, or b) both a and b are subnormal. In those cases the result is not correctly rounded and is off by 1 ulp (compared with the x86 result). (The numbers a and b are normalized, and the exponent is scaled accordingly when either a or b is subnormal.)
What can I do to round correctly in these cases?
It looks to me like precision is lost at step x5. At step x4 the reciprocal approximation is good to ~54 bits and fits into a 64-bit variable, whereas at step x5 the reciprocal approximation is good to ~108 bits, so at step x5 the full width of the reciprocal no longer fits into 64 bits. I have the feeling that I am not accounting for this correctly when I truncate the 128-bit value after the multiplication down to 64 bits.
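To make the concern concrete, here is a quick way to look at the bits that such a truncation throws away, assuming a compiler that provides the unsigned __int128 extension (GCC/Clang). The operand values and variable names are mine, purely for illustration:

#include <stdint.h>
#include <stdio.h>

typedef unsigned __int128 u128;  /* GCC/Clang extension, not ISO C */

int main (void)
{
    /* two arbitrary Q2.62 operands close to 1.0 */
    uint64_t a = (1ull << 62) + 0x3FFFFFFFull;
    uint64_t b = (1ull << 62) + 0x7FFFFFFFull;
    u128 p = (u128)a * b;                        /* exact Q4.124 product */
    uint64_t kept = (uint64_t)(p >> 62);         /* truncated back to Q2.62 */
    uint64_t lost = (uint64_t)p & ((1ull << 62) - 1); /* discarded fraction */
    printf ("kept (Q2.62) = %016llx\n", (unsigned long long)kept);
    printf ("discarded low fraction bits = %016llx\n", (unsigned long long)lost);
    return 0;
}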
To examine the rounding issue (in round-to-nearest-or-even mode only), I built emulation code for IEEE-754 binary32 division from scratch, for ease of exposition. Once that worked correctly, I mechanically converted it into emulation code for IEEE-754 binary64 division. ISO-C99 code for both, including my test framework, is shown below. The approach differs slightly from the asker's algorithm in that it performs intermediate computations in Q1.63 arithmetic for maximum precision, and it uses a table of 8-bit or 16-bit entries for the starting approximation of the reciprocal.
The rounding step basically subtracts the product of the preliminary quotient and the divisor from the dividend to form the remainder rem_raw. It also forms the remainder rem_inc that results from incrementing the quotient by 1 ulp. By construction, we know the preliminary quotient is accurate enough that either it or its incremented value is the correctly rounded result. The remainders can be both positive, both negative, or mixed negative/positive. The remainder that is smaller in magnitude corresponds to the correctly rounded quotient.
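In isolation, the selection works as in the following toy sketch; the small integers stand in for the aligned fixed-point operands of the real code and are my own choices:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* pick between preliminary quotient q and q+1 ulp: the candidate whose
   remainder is smaller in magnitude is the correctly rounded result */
static uint32_t round_quotient (uint32_t q, int32_t rem_raw, int32_t rem_inc)
{
    return q + (abs (rem_inc) < abs (rem_raw));
}

int main (void)
{
    /* toy example: 225 / 13 = 17.307..., correctly rounded quotient is 17 */
    uint32_t x = 225, y = 13, q = 17;
    int32_t rem_raw = (int32_t)(x - q * y);   /* 225 - 221 =  4 */
    int32_t rem_inc = rem_raw - (int32_t)y;   /*   4 -  13 = -9 */
    printf ("%u\n", round_quotient (q, rem_raw, rem_inc)); /* prints 17 */
    return 0;
}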
The only difference between rounding normal and subnormal results (apart from the denormalization step inherent in the latter) is that the tie case cannot occur for normal results, while it can occur for subnormal results. See, for example:

Miloš D. Ercegovac and Tomás Lang, "Digital Arithmetic," Morgan Kaufmann, 2004, p. 452.
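In code form, the tie handling amounts to the nearest-even selection shown below, condensed from the subnormal path of the listing that follows; the toy driver is mine:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* round-to-nearest-even selection: on a tie (equal remainder magnitudes),
   increment only if the preliminary quotient is odd */
static uint32_t rne (uint32_t q, int32_t rem_raw, int32_t rem_inc)
{
    int inc = (abs (rem_inc) < abs (rem_raw)) ||
              ((abs (rem_inc) == abs (rem_raw)) && (q & 1));
    return q + inc;
}

int main (void)
{
    /* toy ties: 132/8 = 16.5 rounds to 16 (even), 140/8 = 17.5 rounds to 18 */
    printf ("%u\n", rne (16, 132 - 16 * 8, 132 - 17 * 8)); /* prints 16 */
    printf ("%u\n", rne (17, 140 - 17 * 8, 140 - 18 * 8)); /* prints 18 */
    return 0;
}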
When computing with fixed-point arithmetic, the product of the preliminary quotient and the divisor is a double-length product. In order to compute the remainder exactly, without losing any bits, we therefore change the fixed-point representation on the fly to provide additional fraction bits. To this end, the dividend is shifted left by an appropriate number of bits. But because we know from the construction of the algorithm that the preliminary quotient is very close to the true result, we also know that all the high-order bits will cancel in the subtraction from the dividend. So we only need to compute, and subtract, the low-order product bits to determine the two remainders.
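The reason discarding the high-order product bits is safe: two's-complement subtraction is exact modulo 2^64, so if the true remainder fits in a 64-bit word, identical wraparound in the shifted dividend and in the low 64 bits of the product cancels out. A small sketch with made-up numbers of my own:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
    /* the full product y*q would need 128 bits, but since the shifted
       dividend and y*q agree in their high-order bits, computing the
       difference with plain 64-bit (mod 2^64) arithmetic still yields
       the exact small remainder */
    uint64_t y = 0x123456789ABCDEFull;
    uint64_t q = 0x1FFFFFFFFFFFFFull;
    uint64_t x_shifted = y * q + 42;   /* construct a case with remainder 42 */
    uint64_t rem = x_shifted - y * q;  /* wraparound cancels: rem == 42 */
    printf ("%llu\n", (unsigned long long)rem);
    return 0;
}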
\n\n由于 [1,2) 中的两个值相除会得到 (0.5, 2) 中的商,因此商的计算可能涉及归一化步骤以返回区间 [1,2),并附有指数更正。在排列被除数以及商和除数的乘积以进行减法时,我们需要考虑到这一点,请参见normalization_shift下面的代码。
Since the code below is exploratory in nature, it was not written with extreme optimization in mind. Various tweaks are possible, such as replacing portable code with platform-specific intrinsics or inline assembly. Likewise, the basic test framework below could be strengthened by incorporating techniques from the literature for generating hard-to-round cases. For example, I have in the past used the division test vectors that accompany the following paper:
Brigitte Verdonk, Annie Cuyt, and Dennis Verschaeren, "A precision- and range-independent tool for testing floating-point arithmetic I: basic operations, square root, and remainder." ACM Transactions on Mathematical Software, Vol. 27, No. 1, March 2001, pp. 92-118.
The pattern-based test vectors in my test framework were inspired by the following publication:
N. L. Schryer, "A Test of a Computer's Floating-Point Arithmetic Unit." Computing Science Technical Report No. 89, AT&T Bell Laboratories, Murray Hill, NJ (1981).
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>

#define TEST_FP32_DIV (0) /* 0: binary64 division; 1: binary32 division */
#define PURELY_RANDOM (1)
#define PATTERN_BASED (2)
#define TEST_MODE (PATTERN_BASED)
#define ITO_TAKAGI_YAJIMA (1) /* more accurate recip. starting approximation */

uint32_t float_as_uint32 (float a)
{
    uint32_t r;
    memcpy (&r, &a, sizeof r);
    return r;
}

float uint32_as_float (uint32_t a)
{
    float r;
    memcpy (&r, &a, sizeof r);
    return r;
}

uint32_t umul32hi (uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a*b) >> 32);
}

int clz32 (uint32_t a)
{
    uint32_t r = 32;
    if (a >= 0x00010000) { a >>= 16; r -= 16; }
    if (a >= 0x00000100) { a >>= 8; r -= 8; }
    if (a >= 0x00000010) { a >>= 4; r -= 4; }
    if (a >= 0x00000004) { a >>= 2; r -= 2; }
    r -= a - (a & (a >> 1));
    return r;
}

#if ITO_TAKAGI_YAJIMA
/* Masayuki Ito, Naofumi Takagi, Shuzo Yajima, "Efficient Initial Approximation
   for Multiplicative Division and Square Root by a Multiplication with Operand
   Modification". IEEE Transactions on Computers, Vol. 46, No. 4, April 1997,
   pp. 495-498.
*/
#define LOG2_TAB_ENTRIES (6)
#define TAB_ENTRIES (1 << LOG2_TAB_ENTRIES)
#define TAB_ENTRY_BITS (16)
/* approx. err. ~= 5.146e-5 */
const uint16_t b1tab [64] =
{
    0xfc0f, 0xf46b, 0xed20, 0xe626, 0xdf7a, 0xd918, 0xd2fa, 0xcd1e,
    0xc77f, 0xc21b, 0xbcee, 0xb7f5, 0xb32e, 0xae96, 0xaa2a, 0xa5e9,
    0xa1d1, 0x9dde, 0x9a11, 0x9665, 0x92dc, 0x8f71, 0x8c25, 0x88f6,
    0x85e2, 0x82e8, 0x8008, 0x7d3f, 0x7a8e, 0x77f2, 0x756c, 0x72f9,
    0x709b, 0x6e4e, 0x6c14, 0x69eb, 0x67d2, 0x65c8, 0x63cf, 0x61e3,
    0x6006, 0x5e36, 0x5c73, 0x5abd, 0x5913, 0x5774, 0x55e1, 0x5458,
    0x52da, 0x5166, 0x4ffc, 0x4e9b, 0x4d43, 0x4bf3, 0x4aad, 0x496e,
    0x4837, 0x4708, 0x45e0, 0x44c0, 0x43a6, 0x4293, 0x4187, 0x4081
};
#else // ITO_TAKAGI_YAJIMA
#define LOG2_TAB_ENTRIES (7)
#define TAB_ENTRIES (1 << LOG2_TAB_ENTRIES)
#define TAB_ENTRY_BITS (8)
/* approx. err. ~= 5.585e-3 */
const uint8_t rcp_tab [TAB_ENTRIES] =
{
    0xff, 0xfd, 0xfb, 0xf9, 0xf7, 0xf5, 0xf4, 0xf2,
    0xf0, 0xee, 0xed, 0xeb, 0xe9, 0xe8, 0xe6, 0xe4,
    0xe3, 0xe1, 0xe0, 0xde, 0xdd, 0xdb, 0xda, 0xd8,
    0xd7, 0xd5, 0xd4, 0xd3, 0xd1, 0xd0, 0xcf, 0xcd,
    0xcc, 0xcb, 0xca, 0xc8, 0xc7, 0xc6, 0xc5, 0xc4,
    0xc2, 0xc1, 0xc0, 0xbf, 0xbe, 0xbd, 0xbc, 0xbb,
    0xba, 0xb9, 0xb8, 0xb7, 0xb6, 0xb5, 0xb4, 0xb3,
    0xb2, 0xb1, 0xb0, 0xaf, 0xae, 0xad, 0xac, 0xab,
    0xaa, 0xa9, 0xa8, 0xa8, 0xa7, 0xa6, 0xa5, 0xa4,
    0xa3, 0xa3, 0xa2, 0xa1, 0xa0, 0x9f, 0x9f, 0x9e,
    0x9d, 0x9c, 0x9c, 0x9b, 0x9a, 0x99, 0x99, 0x98,
    0x97, 0x97, 0x96, 0x95, 0x95, 0x94, 0x93, 0x93,
    0x92, 0x91, 0x91, 0x90, 0x8f, 0x8f, 0x8e, 0x8e,
    0x8d, 0x8c, 0x8c, 0x8b, 0x8b, 0x8a, 0x89, 0x89,
    0x88, 0x88, 0x87, 0x87, 0x86, 0x85, 0x85, 0x84,
    0x84, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80
};
#endif // ITO_TAKAGI_YAJIMA

#define FP32_MANT_BITS (23)
#define FP32_EXPO_BITS (8)
#define FP32_SIGN_MASK (0x80000000u)
#define FP32_MANT_MASK (0x007fffffu)
#define FP32_EXPO_MASK (0x7f800000u)
#define FP32_MAX_EXPO (0xff)
#define FP32_EXPO_BIAS (127)
#define FP32_INFTY (0x7f800000u)
#define FP32_QNAN_BIT (0x00400000u)
#define FP32_QNAN_INDEFINITE (0xffc00000u)
#define FP32_MANT_INT_BIT (0x00800000u)

uint32_t fp32_div_core (uint32_t x, uint32_t y)
{
    uint32_t expo_x, expo_y, expo_r, sign_r;
    uint32_t abs_x, abs_y, f, l, p, r, z, s;
    int x_is_zero, y_is_zero, normalization_shift;

    expo_x = (x & FP32_EXPO_MASK) >> FP32_MANT_BITS;
    expo_y = (y & FP32_EXPO_MASK) >> FP32_MANT_BITS;
    sign_r = (x ^ y) & FP32_SIGN_MASK;

    abs_x = x & ~FP32_SIGN_MASK;
    abs_y = y & ~FP32_SIGN_MASK;
    x_is_zero = (abs_x == 0);
    y_is_zero = (abs_y == 0);

    if ((expo_x == FP32_MAX_EXPO) || (expo_y == FP32_MAX_EXPO) || 
        x_is_zero || y_is_zero) {
        int x_is_nan = (abs_x > FP32_INFTY);
        int x_is_inf = (abs_x == FP32_INFTY);
        int y_is_nan = (abs_y > FP32_INFTY);
        int y_is_inf = (abs_y == FP32_INFTY);
        if (x_is_nan) {
            r = x | FP32_QNAN_BIT;
        } else if (y_is_nan) {
            r = y | FP32_QNAN_BIT;
        } else if ((x_is_zero && y_is_zero) || (x_is_inf && y_is_inf)) {
            r = FP32_QNAN_INDEFINITE;
        } else if (x_is_zero || y_is_inf) {
            r = sign_r;
        } else if (y_is_zero || x_is_inf) {
            r = sign_r | FP32_INFTY;
        }
    } else {
        /* normalize any subnormals */
        if (expo_x == 0) {
            s = clz32 (abs_x) - FP32_EXPO_BITS;
            x = x << s;
            expo_x = expo_x - (s - 1);
        }
        if (expo_y == 0) {
            s = clz32 (abs_y) - FP32_EXPO_BITS;
            y = y << s;
            expo_y = expo_y - (s - 1);
        }
        //
        expo_r = expo_x - expo_y + FP32_EXPO_BIAS;
        /* extract mantissas */
        x = x & FP32_MANT_MASK;
        y = y & FP32_MANT_MASK;
#if ITO_TAKAGI_YAJIMA
        /* initial approx based on 6 most significant stored mantissa bits */
        r = b1tab [y >> (FP32_MANT_BITS - LOG2_TAB_ENTRIES)];
        /* make implicit integer bit of mantissa explicit */
        x = x | FP32_MANT_INT_BIT;
        y = y | FP32_MANT_INT_BIT;
        r = r * ((y ^ ((1u << (FP32_MANT_BITS - LOG2_TAB_ENTRIES)) - 1)) >> 
                 (FP32_MANT_BITS + 1 + TAB_ENTRY_BITS - 32));
        /* pre-scale y for more efficient fixed-point computation */
        z = y << FP32_EXPO_BITS;
#else // ITO_TAKAGI_YAJIMA
        /* initial approx based on 7 most significant stored mantissa bits */
        r = rcp_tab [y >> (FP32_MANT_BITS - LOG2_TAB_ENTRIES)];
        /* make implicit integer bit of mantissa explicit */
        x = x | FP32_MANT_INT_BIT;
        y = y | FP32_MANT_INT_BIT;
        /* pre-scale y for more efficient fixed-point computation */
        z = y << FP32_EXPO_BITS;
        /* first NR iteration r1 = 2*r0-y*r0*r0 */
        f = r * r;
        p = umul32hi (z, f << (32 - 2 * TAB_ENTRY_BITS));
        r = (r << (32 - TAB_ENTRY_BITS)) - p;
#endif // ITO_TAKAGI_YAJIMA
        /* second NR iteration: r2 = r1*(2-y*r1) */
        p = umul32hi (z, r << 1);
        f = 0u - p;
        r = umul32hi (f, r << 1);
        /* compute quotient as wide product x*(1/y) = x*r */
        l = x * (r << 1);
        r = umul32hi (x, r << 1);
        /* normalize mantissa to be in [1,2) */
        normalization_shift = (r & FP32_MANT_INT_BIT) == 0;
        expo_r -= normalization_shift;
        r = (r << normalization_shift) | ((l >> 1) >> (32 - 1 - normalization_shift));
        if ((expo_r > 0) && (expo_r < FP32_MAX_EXPO)) { /* result is normal */
            int32_t rem_raw, rem_inc, inc;
            /* align x with product y*quotient */
            x = x << (FP32_MANT_BITS + normalization_shift + 1);
            /* compute product y*quotient */
            y = y << 1;
            p = y * r;
            /* compute x - y*quotient, for both raw and incremented quotient */
            rem_raw = x - p;
            rem_inc = rem_raw - y;
            /* round to nearest: tie case _cannot_ occur */
            inc = abs (rem_inc) < abs (rem_raw);
            /* build final results from sign, exponent, mantissa */
            r = sign_r | (((expo_r - 1) << FP32_MANT_BITS) + r + inc);
        } else if ((int)expo_r >= FP32_MAX_EXPO) { /* result overflowed */
            r = sign_r | FP32_INFTY;
        } else { /* result underflowed */
            int denorm_shift = 1 - expo_r;
            if (denorm_shift > (FP32_MANT_BITS + 1)) { /* massive underflow */
                r = sign_r;
            } else {
                int32_t rem_raw, rem_inc, inc;
                /* denormalize quotient */
                r = r >> denorm_shift;
                /* align x with product y*quotient */ 
                x = x << (FP32_MANT_BITS + normalization_shift + 1 - denorm_shift);
                /* compute product y*quotient */
                y = y << 1;
                p = y * r;
                /* compute x - y*quotient, for both raw & incremented quotient*/
                rem_raw = x - p;
                rem_inc = rem_raw - y;
                /* round to nearest or even: tie case _can_ occur */
                inc = ((abs (rem_inc) < abs (rem_raw)) ||
                       (abs (rem_inc) == abs (rem_raw) && (r & 1)));
                /* build final result from sign and mantissa */
                r = sign_r | (r + inc);
            }
        }
    }
    return r;
}

float fp32_div (float a, float b)
{
    uint32_t x, y, r;
    x = float_as_uint32 (a);
    y = float_as_uint32 (b);
    r = fp32_div_core (x, y);
    return uint32_as_float (r);
}

uint64_t double_as_uint64 (double a)
{
    uint64_t r;
    memcpy (&r, &a, sizeof r);
    return r;
}

double uint64_as_double (uint64_t a)
{
    double r;
    memcpy (&r, &a, sizeof r);
    return r;
}

uint64_t umul64hi (uint64_t a, uint64_t b)
{
    uint64_t a_lo = (uint64_t)(uint32_t)a;
    uint64_t a_hi = a >> 32;
    uint64_t b_lo = (uint64_t)(uint32_t)b;
    uint64_t b_hi = b >> 32;
    uint64_t p0 = a_lo * b_lo;
    uint64_t p1 = a_lo * b_hi;
    uint64_t p2 = a_hi * b_lo;
    uint64_t p3 = a_hi * b_hi;
    uint32_t cy = (uint32_t)(((p0 >> 32) + (uint32_t)p1 + (uint32_t)p2) >> 32);
    return p3 + (p1 >> 32) + (p2 >> 32) + cy;
}

int clz64 (uint64_t a)
{
    uint64_t r = 64;
    if (a >= 0x100000000ULL) { a >>= 32; r -= 32; }
    if (a >= 0x000010000ULL) { a >>= 16; r -= 16; }
    if (a >= 0x000000100ULL) { a >>= 8; r -= 8; }
    if (a >= 0x000000010ULL) { a >>= 4; r -= 4; }
    if (a >= 0x000000004ULL) { a >>= 2; r -= 2; }
    r -= a - (a & (a >> 1));
    return r;
}


#define FP64_MANT_BITS (52)
#define FP64_EXPO_BITS (11)
#define FP64_EXPO_MASK (0x7ff0000000000000ULL)
#define FP64_SIGN_MASK (0x8000000000000000ULL)
#define FP64_MANT_MASK (0x000fffffffffffffULL)
#define FP64_MAX_EXPO (0x7ff)
#define FP64_EXPO_BIAS (1023)
#define FP64_INFTY (0x7ff0000000000000ULL)
#define FP64_QNAN_BIT (0x0008000000000000ULL)
#define FP64_QNAN_INDEFINITE (0xfff8000000000000ULL)
#define FP64_MANT_INT_BIT (0x0010000000000000ULL)

uint64_t fp64_div_core (uint64_t x, uint64_t y)
{
    uint64_t expo_x, expo_y, expo_r, sign_r;
    uint64_t abs_x, abs_y, f, l, p, r, z, s;
    int x_is_zero, y_is_zero, normalization_shift;

    expo_x = (x & FP64_EXPO_MASK) >> FP64_MANT_BITS;
    expo_y = (y & FP64_EXPO_MASK) >> FP64_MANT_BITS;
    sign_r = (x ^ y) & FP64_SIGN_MASK;

    abs_x = x & ~FP64_SIGN_MASK;
    abs_y = y & ~FP64_SIGN_MASK;
    x_is_zero = (abs_x == 0);
    y_is_zero = (abs_y == 0);

    if ((expo_x == FP64_MAX_EXPO) || (expo_y == FP64_MAX_EXPO) || 
        x_is_zero || y_is_zero) {
        int x_is_nan = (abs_x > FP64_INFTY);
        int x_is_inf = (abs_x == FP64_INFTY);
        int y_is_nan = (abs_y > FP64_INFTY);
        int y_is_inf = (abs_y == FP64_INFTY);
        if (x_is_nan) {
            r = x | FP64_QNAN_BIT;
        } else if (y_is_nan) {
            r = y | FP64_QNAN_BIT;
        } else if ((x_is_zero && y_is_zero) || (x_is_inf && y_is_inf)) {
            r = FP64_QNAN_INDEFINITE;
        } else if (x_is_zero || y_is_inf) {
            r = sign_r;
        } else if (y_is_zero || x_is_inf) {
            r = sign_r | FP64_INFTY;
        }
    } else {
        /* normalize any subnormals */
        if (expo_x == 0) {
            s = clz64 (abs_x) - FP64_EXPO_BITS;
            x = x << s;
            expo_x = expo_x - (s - 1);
        }
        if (expo_y == 0) {
            s = clz64 (abs_y) - FP64_EXPO_BITS;
            y = y << s;
            expo_y = expo_y - (s - 1);
        }
        //
        expo_r = expo_x - expo_y + FP64_EXPO_BIAS;
        /* extract mantissas */
        x = x & FP64_MANT_MASK;
        y = y & FP64_MANT_MASK;
#if ITO_TAKAGI_YAJIMA
        /* initial approx based on 6 most significant stored mantissa bits */
        r = b1tab [y >> (FP64_MANT_BITS - LOG2_TAB_ENTRIES)];
        /* make implicit integer bit of mantissa explicit */
        x = x | FP64_MANT_INT_BIT;
        y = y | FP64_MANT_INT_BIT;
        r = r * ((y ^ ((1ULL << (FP64_MANT_BITS - LOG2_TAB_ENTRIES)) - 1)) >> 
                 (FP64_MANT_BITS + 1 + TAB_ENTRY_BITS - 64));
        /* pre-scale y for more efficient fixed-point computation */
        z = y << FP64_EXPO_BITS;
#else // ITO_TAKAGI_YAJIMA
        /* initial approx based on 7 most significant stored mantissa bits */
        r = rcp_tab [y >> (FP64_MANT_BITS - LOG2_TAB_ENTRIES)];
        /* make implicit integer bit of mantissa explicit */
        x = x | FP64_MANT_INT_BIT;
        y = y | FP64_MANT_INT_BIT;
        /* pre-scale y for more efficient fixed-point computation */
        z = y << FP64_EXPO_BITS;
        /* first NR iteration r1 = 2*r0-y*r0*r0 */
        f = r * r;
        p = umul64hi (z, f << (64 - 2 * TAB_ENTRY_BITS));
        r = (r << (64 - TAB_ENTRY_BITS)) - p;
#endif // ITO_TAKAGI_YAJIMA
        /* second NR iteration: r2 = r1*(2-y*r1) */
        p = umul64hi (z, r << 1);
        f = 0u - p;
        r = umul64hi (f, r << 1);
        /* third NR iteration: r3 = r2*(2-y*r2) */
        p = umul64hi (z, r << 1);
        f = 0u - p;
        r = umul64hi (f, r << 1);
        /* compute quotient as wide product x*(1/y) = x*r */
        l = x * (r << 1);
        r = umul64hi (x, r << 1);
        /* normalize mantissa to be in [1,2) */
        normalization_shift = (r & FP64_MANT_INT_BIT) == 0;
        expo_r -= normalization_shift;
        r = (r << normalization_shift) | ((l >> 1) >> (64 - 1 - normalization_shift));
        if ((expo_r > 0) && (expo_r < FP64_MAX_EXPO)) { /* result is normal */
            int64_t rem_raw, rem_inc;
            int inc;
            /* align x with product y*quotient */
            x = x << (FP64_MANT_BITS + 1 + normalization_shift);
            /* compute product y*quotient */
            y = y << 1;
            p = y * r;
            /* compute x - y*quotient, for both raw and incremented quotient */
            rem_raw = x - p;
            rem_inc = rem_raw - y;
            /* round to nearest: tie case _cannot_ occur */
            inc = llabs (rem_inc) < llabs (rem_raw);
            /* build final results from sign, exponent, mantissa */
            r = sign_r | (((expo_r - 1) << FP64_MANT_BITS) + r + inc);
        } else if ((int)expo_r >= FP64_MAX_EXPO) { /* result overflowed */
            r = sign_r | FP64_INFTY;
        } else { /* result underflowed */
            int denorm_shift = 1 - expo_r;
            if (denorm_shift > (FP64_MANT_BITS + 1)) { /* massive underflow */
                r = sign_r;
            } else {
                int64_t rem_raw, rem_inc;
                int inc;
                /* denormalize quotient */
                r = r >> denorm_shift;
                /* align x with product y*quotient */ 
                x = x << (FP64_MANT_BITS + 1 + normalization_shift - denorm_shift);
                /* compute product y*quotient */
                y = y << 1;
                p = y * r;
                /* compute x - y*quotient, for both raw & incremented quotient*/
                rem_raw = x - p;
                rem_inc = rem_raw - y;
                /* round to nearest or even: tie case _can_ occur */
                inc = ((llabs (rem_inc) < llabs (rem_raw)) ||
                       (llabs (rem_inc) == llabs (rem_raw) && (r & 1)));
                /* build final result from sign and mantissa */
                r = sign_r | (r + inc);
            }
        }
    }
    return r;
}

double fp64_div (double a, double b)
{
    uint64_t x, y, r;
    x = double_as_uint64 (a);
    y = double_as_uint64 (b);
    r = fp64_div_core (x, y);
    return uint64_as_double (r);
}

#if TEST_FP32_DIV

// Fixes via: Greg Rose, KISS: A Bit Too Simple. http://eprint.iacr.org/2011/007
static uint32_t kiss_z=362436069,kiss_w=521288629, kiss_jsr=362436069,kiss_jcong=123456789;
#define znew (kiss_z=36969*(kiss_z&0xffff)+(kiss_z>>16))
#define wnew (kiss_w=18000*(kiss_w&0xffff)+(kiss_w>>16))
#define MWC ((znew<<16)+wnew)
#define SHR3 (kiss_jsr^=(kiss_jsr<<13),kiss_jsr^=(kiss_jsr>>17),kiss_jsr^=(kiss_jsr<<5))
#define CONG (kiss_jcong=69069*kiss_jcong+13579)
#define KISS ((MWC^CONG)+SHR3)

uint32_t v[8192];

int main (void)
{
    uint64_t count = 0;
    float a, b, res, ref;
    uint32_t ires, iref, diff;
    uint32_t i, j, patterns, idx = 0, nbrBits = sizeof (v[0]) * CHAR_BIT;

    printf ("testing fp32 division\n");

    /* pattern class 1: 2**i */
    for (i = 0; i < nbrBits; i++) {
        v [idx] = ((uint32_t)1 << i);
        idx++;
    }
    /* pattern class 2: 2**i-1 */
    for (i = 0; i < nbrBits; i++) {
        v [idx] = (((uint32_t)1 << i) - 1);
        idx++;
    }
    /* pattern class 3: 2**i+1 */
    for (i = 0; i < nbrBits; i++) {
        v [idx] = (((uint32_t)1 << i) + 1);
        idx++;
    }
    /* pattern class 4: 2**i + 2**j */
    for (i = 0; i < nbrBits; i++) {
        for (j = 0; j < nbrBits; j++) {
            v [idx] = (((uint32_t)1 << i) + ((uint32_t)1 << j));
            idx++;
        }
    }
    /* pattern class 5: 2**i - 2**j */
    for (i = 0; i < nbrBits; i++) {
        for (j =