32-bit to 16-bit Floating Point Conversion

Mat*_*man 37 c++ networking ieee-754

I need a cross-platform library/algorithm for converting between 32-bit and 16-bit floating point numbers. I don't need to do math with the 16-bit numbers; I just need to shrink the size of the 32-bit floats so they can be sent over the network. I am working in C++.

I understand how much precision I would be losing, but that's OK for my application.

The IEEE 16-bit format would be great.

Phe*_*ost 51

Complete conversion from single precision to half precision. This is a direct copy of my SSE version, so it is branch-less. It makes use of the fact that in GCC -true == ~0; this may be true of Visual Studio as well, but I don't have a copy to check.
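
To illustrate the trick, here is a minimal, stand-alone sketch (not part of the answer's code) of the branch-free select idiom used throughout the listing below, where bits ^= -condition & ( other ^ bits ) picks between two values without a jump:

#include <cstdint>
#include <cassert>

// If cond is true, -uint32_t(cond) is all ones and the XOR swaps in b;
// if cond is false, the mask is zero and a is returned unchanged.
std::uint32_t select_if( bool cond, std::uint32_t a, std::uint32_t b ) {
    a ^= -std::uint32_t( cond ) & ( a ^ b ); // a = cond ? b : a, with no branch
    return a;
}

int main() {
    assert( select_if( true, 1u, 2u ) == 2u );
    assert( select_if( false, 1u, 2u ) == 1u );
    return 0;
}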

#include <cstdint>     // uint32_t, uint64_t, etc.
#include <cstring>     // memcpy
#include <climits>     // CHAR_BIT
#include <limits>      // numeric_limits
#include <type_traits> // is_integral_v, is_floating_point_v
#include <utility>     // forward

namespace std // pre-C++20 stand-in for std::bit_cast (strictly, user code may not add to namespace std, but it works here)
{
  template< typename T , typename U >
  T bit_cast( U&& u ) {
    static_assert( sizeof( T ) == sizeof( U ) );
    union { T t; }; // prevent construction
    std::memcpy( &t, &u, sizeof( t ) );
    return t;
  }
} // namespace std

template< typename T > struct native_float_bits;
template<> struct native_float_bits< float >{ using type = std::uint32_t; };
template<> struct native_float_bits< double >{ using type = std::uint64_t; };
template< typename T > using native_float_bits_t = typename native_float_bits< T >::type;

static_assert( sizeof( float ) == sizeof( native_float_bits_t< float > ) );
static_assert( sizeof( double ) == sizeof( native_float_bits_t< double > ) );

template< typename T, int SIG_BITS, int EXP_BITS >
struct raw_float_type_info {
  using raw_type = T;

  static constexpr int sig_bits = SIG_BITS;
  static constexpr int exp_bits = EXP_BITS;
  static constexpr int bits = sig_bits + exp_bits + 1;

  static_assert( std::is_integral_v< raw_type > );
  static_assert( sig_bits >= 0 );
  static_assert( exp_bits >= 0 );
  static_assert( bits <= sizeof( raw_type ) * CHAR_BIT );

  static constexpr int exp_max = ( 1 << exp_bits ) - 1;
  static constexpr int exp_bias = exp_max >> 1;

  static constexpr raw_type sign = raw_type( 1 ) << ( bits - 1 );
  static constexpr raw_type inf = raw_type( exp_max ) << sig_bits;
  static constexpr raw_type qnan = inf | ( inf >> 1 );

  static constexpr auto abs( raw_type v ) { return raw_type( v & ( sign - 1 ) ); }
  static constexpr bool is_nan( raw_type v ) { return abs( v ) > inf; }
  static constexpr bool is_inf( raw_type v ) { return abs( v ) == inf; }
  static constexpr bool is_zero( raw_type v ) { return abs( v ) == 0; }
};
using raw_flt16_type_info = raw_float_type_info< std::uint16_t, 10, 5 >;
using raw_flt32_type_info = raw_float_type_info< std::uint32_t, 23, 8 >;
using raw_flt64_type_info = raw_float_type_info< std::uint64_t, 52, 11 >;
//using raw_flt128_type_info = raw_float_type_info< uint128_t, 112, 15 >;

template< typename T, int SIG_BITS = std::numeric_limits< T >::digits - 1,
  int EXP_BITS = sizeof( T ) * CHAR_BIT - SIG_BITS - 1 >
struct float_type_info 
: raw_float_type_info< native_float_bits_t< T >, SIG_BITS, EXP_BITS > {
  using flt_type = T;
  static_assert( std::is_floating_point_v< flt_type > );
};

template< typename E >
struct raw_float_encoder
{
  using enc = E;
  using enc_type = typename enc::raw_type;

  template< bool DO_ROUNDING, typename F >
  static auto encode( F value )
  {
    using flt = float_type_info< F >;
    using raw_type = typename flt::raw_type;
    static constexpr auto sig_diff = flt::sig_bits - enc::sig_bits;
    static constexpr auto bit_diff = flt::bits - enc::bits;
    static constexpr auto do_rounding = DO_ROUNDING && sig_diff > 0;
    static constexpr auto bias_mul = raw_type( enc::exp_bias ) << flt::sig_bits;
    if constexpr( !do_rounding ) { // fix exp bias
      // when not rounding, fix exp first to avoid mixing float and binary ops
      value *= std::bit_cast< F >( bias_mul );
    }
    auto bits = std::bit_cast< raw_type >( value );
    auto sign = bits & flt::sign; // save sign
    bits ^= sign; // clear sign
    auto is_nan = flt::inf < bits; // compare before rounding!!
    if constexpr( do_rounding ) {
      static constexpr auto min_norm = raw_type( flt::exp_bias - enc::exp_bias + 1 ) << flt::sig_bits;
      static constexpr auto sub_rnd = enc::exp_bias < sig_diff
        ? raw_type( 1 ) << ( flt::sig_bits - 1 + enc::exp_bias - sig_diff )
        : raw_type( enc::exp_bias - sig_diff ) << flt::sig_bits;
      static constexpr auto sub_mul = raw_type( flt::exp_bias + sig_diff ) << flt::sig_bits;
      bool is_sub = bits < min_norm;
      auto norm = std::bit_cast< F >( bits );
      auto subn = norm;
      subn *= std::bit_cast< F >( sub_rnd ); // round subnormals
      subn *= std::bit_cast< F >( sub_mul ); // correct subnormal exp
      norm *= std::bit_cast< F >( bias_mul ); // fix exp bias
      bits = std::bit_cast< raw_type >( norm );
      bits += ( bits >> sig_diff ) & 1; // add tie breaking bias
      bits += ( raw_type( 1 ) << ( sig_diff - 1 ) ) - 1; // round up to half
      //if( is_sub ) bits = std::bit_cast< raw_type >( subn );
      bits ^= -is_sub & ( std::bit_cast< raw_type >( subn ) ^ bits );
    }
    bits >>= sig_diff; // truncate
    //if( enc::inf < bits ) bits = enc::inf; // fix overflow
    bits ^= -( enc::inf < bits ) & ( enc::inf ^ bits );
    //if( is_nan ) bits = enc::qnan;
    bits ^= -is_nan & ( enc::qnan ^ bits );
    bits |= sign >> bit_diff; // restore sign
    return enc_type( bits );
  }

  template< typename F >
  static F decode( enc_type value )
  {
    using flt = float_type_info< F >;
    using raw_type = typename flt::raw_type;
    static constexpr auto sig_diff = flt::sig_bits - enc::sig_bits;
    static constexpr auto bit_diff = flt::bits - enc::bits;
    static constexpr auto bias_mul = raw_type( 2 * flt::exp_bias - enc::exp_bias ) << flt::sig_bits;
    raw_type bits = value;
    auto sign = bits & enc::sign; // save sign
    bits ^= sign; // clear sign
    auto is_norm = bits < enc::inf;
    bits = ( sign << bit_diff ) | ( bits << sig_diff );
    auto val = std::bit_cast< F >( bits ) * std::bit_cast< F >( bias_mul );
    bits = std::bit_cast< raw_type >( val );
    //if( !is_norm ) bits |= flt::inf;
    bits |= -!is_norm & flt::inf;
    return std::bit_cast< F >( bits );
  }
};

using flt16_encoder = raw_float_encoder< raw_flt16_type_info >;

template< typename F >
auto quick_encode_flt16( F && value )
{ return flt16_encoder::encode< false >( std::forward< F >( value ) ); }

template< typename F >
auto encode_flt16( F && value )
{ return flt16_encoder::encode< true >( std::forward< F >( value ) ); }

template< typename F = float, typename X >
auto decode_flt16( X && value )
{ return flt16_encoder::decode< F >( std::forward< X >( value ) ); }
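
A minimal round-trip sketch with the helpers above (the sample value is arbitrary, and this assumes the listing is compiled in the same translation unit):

#include <cstdio>

int main() {
    const float f = 3.14159f;
    const auto h = encode_flt16( f );     // rounded encode to 16 bits
    const float back = decode_flt16( h ); // expand back to a 32-bit float
    std::printf( "%g -> 0x%04X -> %g\n", f, unsigned( h ), back );
    return 0;
}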

So that's a lot of code, but it handles all subnormal values, infinities, quiet NaNs, signaling NaNs and negative zero. Of course, full IEEE support isn't always needed; a simpler version can instead squeeze generic floats into an application-defined range.

That simpler version forces all values into the accepted range, with no support for NaN, infinities or negative zero. Epsilon is the smallest value allowed in the range, and Precision is the number of bits of precision kept from the float. While it has quite a few branches, they are all static and will be cached by the CPU's branch predictor.

Of course, that only matters if your values need logarithmic resolution approaching zero; if they don't, then, as already mentioned, linearizing them into a fixed-point format is much faster.

I use the FloatCompressor (SSE version) in a graphics library to reduce the size of linear float color values in memory. Compressed floats have the benefit of creating small lookup tables for time-consuming functions like gamma correction or transcendentals. Compressing linear sRGB values reduces them to a maximum of 12 bits, or a maximum value of 3011, which is a nice lookup-table size for converting to/from sRGB.
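
As a purely hypothetical illustration of the lookup-table idea (this is not the author's library code, and it uses the full 16-bit encoding from the first listing rather than the 12-bit sRGB compression described above):

#include <cstdint>
#include <cmath>
#include <vector>

// Map every 16-bit encoded value straight to its gamma-corrected result,
// so the expensive std::pow is paid once up front instead of per value.
std::vector< float > make_gamma_table( float gamma ) {
    std::vector< float > table( 1u << 16 );
    for( std::uint32_t h = 0; h < table.size(); ++h )
        table[ h ] = std::pow( decode_flt16( std::uint16_t( h ) ), 1.0f / gamma ); // NaN patterns and negative inputs simply yield NaN entries
    return table;
}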

  • The Unlicense (http://choosealicense.com/licenses/unlicense/) is public domain. (6 upvotes)
  • What is the license for your Float16Compressor class? (2 upvotes)

Ale*_*lli 18

std::frexp extracts the significand and exponent from normal floats or doubles — then you need to decide what to do with exponents that are too large to fit in a half-precision float (saturate...?), adjust accordingly, and put the half-precision number together. This article has C source code showing how to perform the conversion.
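
A rough sketch of that approach, assuming saturation is acceptable for out-of-range exponents (this is not the linked article's code, and it skips the implicit leading bit, so it wastes a little precision):

#include <cmath>
#include <cstdint>

// Pack into sign (1 bit) | biased exponent (5 bits) | significand (10 bits).
std::uint16_t pack16(float value) {
    int exp;
    float sig = std::frexp(value, &exp);                // value = sig * 2^exp, |sig| in [0.5, 1)
    std::uint16_t sign = sig < 0 ? 0x8000 : 0;
    if (exp < -14) return sign;                         // too small: flush to a signed zero
    if (exp > 15) { exp = 15; sig = sig < 0 ? -0.999f : 0.999f; } // saturate large exponents
    std::uint16_t e = std::uint16_t(exp + 15);          // bias so the exponent fits in 5 bits
    std::uint16_t m = std::uint16_t(std::fabs(sig) * 1024.0f) & 0x3FF; // truncate significand to 10 bits
    return std::uint16_t(sign | (e << 10) | m);
}

float unpack16(std::uint16_t bits) {
    int exp = int((bits >> 10) & 0x1F) - 15;            // undo the bias
    float sig = float(bits & 0x3FF) / 1024.0f;
    float value = std::ldexp(sig, exp);                 // value = sig * 2^exp
    return (bits & 0x8000) ? -value : value;
}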


Art*_*ius 18

Given your needs (-1000 to 1000), perhaps a fixed-point representation would be better.

#include <cmath>   // round
#include <climits> // SHRT_MAX

// change 20000 to SHRT_MAX if you don't mind whole numbers
// being turned into fractional ones
const int compact_range = 20000;

short compactFloat(double input) {
    return (short)std::round(input * compact_range / 1000);
}
double expandToFloat(short input) {
    return ((double)input) * 1000 / compact_range;
}

This will give you accuracy to the nearest 0.05. If you change 20000 to SHRT_MAX you'll get a bit more accuracy, but some whole numbers will end up as fractions on the other end.
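
A quick round-trip with the two functions above (arbitrary sample value) shows that quantization step in action:

#include <cstdio>
#include <cmath>

int main() {
    double original = 123.456;
    short packed = compactFloat(original);    // 123.456 * 20 rounds to 2469
    double restored = expandToFloat(packed);  // 2469 / 20 = 123.45
    std::printf("%.3f -> %d -> %.3f (error %.3f)\n",
                original, packed, restored, std::fabs(original - restored));
    return 0;
}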

  • It depends on the distribution over the [-1000, 1000] range. If most numbers are actually in the [-1, 1] range, the accuracy of 16-bit floats is better on average. (11 upvotes)
  • @Matt, is it possible to send your normalized values in a different format than the position vectors? Consider using an appropriate fixed-point scheme for each of them. (2 upvotes)

小智 17

Half to float:
float f = ((h&0x8000)<<16) | (((h&0x7c00)+0x1C000)<<13) | ((h&0x03FF)<<13);

Float to half:
uint32_t x = *((uint32_t*)&f);
uint16_t h = ((x>>16)&0x8000)|((((x&0x7f800000)-0x38000000)>>13)&0x7c00)|((x>>13)&0x03ff);
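
One possible way to package those one-liners into functions (the names are ours), using memcpy instead of the pointer cast so the type punning stays well-defined:

#include <cstdint>
#include <cstring>

float half_bits_to_float(std::uint16_t h) {
    std::uint32_t f = ((h & 0x8000u) << 16) | (((h & 0x7C00u) + 0x1C000u) << 13) | ((h & 0x03FFu) << 13);
    float out;
    std::memcpy(&out, &f, sizeof out);  // reinterpret the bits as a float
    return out;
}

std::uint16_t float_to_half_bits(float value) {
    std::uint32_t x;
    std::memcpy(&x, &value, sizeof x);  // grab the float's raw bits
    return ((x >> 16) & 0x8000u) | ((((x & 0x7F800000u) - 0x38000000u) >> 13) & 0x7C00u) | ((x >> 13) & 0x03FFu);
}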

  • But of course, keep in mind that this currently ignores any kind of overflow, underflow, denormalized values or infinite values. (3 upvotes)
  • This doesn't work for 0. (2 upvotes)

Pro*_*ysX 8

Why over-complicated? My implementation needs no additional library, complies with the IEEE-754 FP16 format, handles both normalized and denormalized numbers, is branch-less, takes about 40 clock cycles for a round-trip conversion, and ditches NaN and Inf in favor of an extended range. That's the magic power of bit operations.

typedef unsigned short ushort;
typedef unsigned int uint;

uint as_uint(const float x) {
    return *(uint*)&x;
}
float as_float(const uint x) {
    return *(float*)&x;
}

float half_to_float(const ushort x) { // IEEE-754 16-bit floating-point format (without infinity): 1-5-10, exp-15, +-131008.0, +-6.1035156E-5, +-5.9604645E-8, 3.311 digits
    const uint e = (x&0x7C00)>>10; // exponent
    const uint m = (x&0x03FF)<<13; // mantissa
    const uint v = as_uint((float)m)>>23; // evil log2 bit hack to count leading zeros in denormalized format
    return as_float((x&0x8000)<<16 | (e!=0)*((e+112)<<23|m) | ((e==0)&(m!=0))*((v-37)<<23|((m<<(150-v))&0x007FE000))); // sign : normalized : denormalized
}
ushort float_to_half(const float x) { // IEEE-754 16-bit floating-point format (without infinity): 1-5-10, exp-15, +-131008.0, +-6.1035156E-5, +-5.9604645E-8, 3.311 digits
    const uint b = as_uint(x)+0x00001000; // round-to-nearest-even: add last bit after truncated mantissa
    const uint e = (b&0x7F800000)>>23; // exponent
    const uint m = b&0x007FFFFF; // mantissa; in line below: 0x007FF000 = 0x00800000-0x00001000 = decimal indicator flag - initial rounding
    return (b&0x80000000)>>16 | (e>112)*((((e-112)<<10)&0x7C00)|m>>13) | ((e<113)&(e>101))*((((0x007FF000+m)>>(125-e))+1)>>1) | (e>143)*0x7FFF; // sign : normalized : denormalized : saturate
}

An example of how to use it, and how to check that the conversion is correct:

// assumes the typedefs (ushort, uint) and conversion functions from the listing above
#include <iostream>
using namespace std; // print_bits below uses unqualified cout/endl

void print_bits(const ushort x) {
    for(int i=15; i>=0; i--) {
        cout << ((x>>i)&1);
        if(i==15||i==10) cout << " ";
        if(i==10) cout << "      ";
    }
    cout << endl;
}
void print_bits(const float x) {
    uint b = *(uint*)&x;
    for(int i=31; i>=0; i--) {
        cout << ((b>>i)&1);
        if(i==31||i==23) cout << " ";
        if(i==23) cout << "   ";
    }
    cout << endl;
}

int main() {
    const float x = 1.0f;
    const ushort x_compressed = float_to_half(x);
    const float x_decompressed = half_to_float(x_compressed);
    print_bits(x);
    print_bits(x_compressed);
    print_bits(x_decompressed);
    return 0;
}

Output:

0 01111111    00000000000000000000000
0 01111       0000000000
0 01111111    00000000000000000000000
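
As an extra, hypothetical check of the denormal handling mentioned above, main() could be extended with the smallest positive half-precision subnormal from the listed limits:

    const float tiny = 5.9604645E-8f;               // smallest positive half-precision subnormal
    print_bits(tiny);
    print_bits(float_to_half(tiny));                // should show exponent 00000, mantissa 0000000001
    print_bits(half_to_float(float_to_half(tiny))); // should reproduce the original float bits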

  • In C++20, `std::bit_cast` can replace the `as_uint` and `as_float` functions. (4 upvotes)
  • This answer is the best. Thank you. (3 upvotes)

tsa*_*ter 5

If you're sending a stream of information across, you could probably do better than this, especially if everything is in a consistent range, as your application seems to have.

Send a small header that just consists of a float32 minimum and maximum, then you can send your information across as a 16-bit interpolation value between the two. Since you say precision isn't much of an issue, you could even send 8 bits at a time.

At reconstruction time your value would be something like:

float t = _t / numeric_limits<unsigned short>::max();  // With casting, naturally ;)
float val = h.min + t * (h.max - h.min);
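
A fuller sketch of both directions, with assumed names for the header and the packing function (none of these names come from the answer itself):

#include <limits>

// Hypothetical per-message header: the float32 range the 16-bit samples map into.
struct Header { float min, max; };

unsigned short pack_sample(float value, const Header& h) {
    float t = (value - h.min) / (h.max - h.min);            // normalize into [0, 1]
    if (t < 0.0f) t = 0.0f;                                 // clamp anything outside the advertised range
    if (t > 1.0f) t = 1.0f;
    return (unsigned short)(t * std::numeric_limits<unsigned short>::max() + 0.5f);
}

float unpack_sample(unsigned short _t, const Header& h) {
    float t = (float)_t / std::numeric_limits<unsigned short>::max(); // with casting, naturally ;)
    return h.min + t * (h.max - h.min);
}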

Hope that helps.

-Tom