@@ -48,29 +48,13 @@ int VarintLength(uint64_t v);
char* EncodeVarint32(char* dst, uint32_t value);
char* EncodeVarint64(char* dst, uint64_t value);
// TODO(costan): Remove port::kLittleEndian and the fast paths based on
// std::memcpy when clang learns to optimize the generic code, as
// described in https://bugs.llvm.org/show_bug.cgi?id=41761
//
// The platform-independent code in DecodeFixed{32,64}() gets optimized to mov
// on x86 and ldr on ARM64, by both clang and gcc. However, only gcc optimizes
// the platform-independent code in EncodeFixed{32,64}() to mov / str.
// Lower-level versions of Put... that write directly into a character buffer
// REQUIRES: dst has enough space for the value being written
inline void EncodeFixed32(char* dst, uint32_t value) {
  uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
  if (port::kLittleEndian) {
    // Fast path for little-endian CPUs. All major compilers optimize this to a
    // single mov (x86_64) / str (ARM) instruction.
    std::memcpy(buffer, &value, sizeof(uint32_t));
    return;
  }
  // Platform-independent code.
  // Currently, only gcc optimizes this to a single mov / str instruction.
  // Recent clang and gcc optimize this to a single mov / str instruction.
  buffer[0] = static_cast<uint8_t>(value);
  buffer[1] = static_cast<uint8_t>(value >> 8);
  buffer[2] = static_cast<uint8_t>(value >> 16);
@@ -80,15 +64,7 @@ inline void EncodeFixed32(char* dst, uint32_t value) {
inline void EncodeFixed64(char* dst, uint64_t value) {
  uint8_t* const buffer = reinterpret_cast<uint8_t*>(dst);
  if (port::kLittleEndian) {
    // Fast path for little-endian CPUs. All major compilers optimize this to a
    // single mov (x86_64) / str (ARM) instruction.
    std::memcpy(buffer, &value, sizeof(uint64_t));
    return;
  }
  // Platform-independent code.
  // Currently, only gcc optimizes this to a single mov / str instruction.
  // Recent clang and gcc optimize this to a single mov / str instruction.
  buffer[0] = static_cast<uint8_t>(value);
  buffer[1] = static_cast<uint8_t>(value >> 8);
  buffer[2] = static_cast<uint8_t>(value >> 16);
@@ -105,16 +81,7 @@ inline void EncodeFixed64(char* dst, uint64_t value) {
inline uint32_t DecodeFixed32(const char* ptr) {
  const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
  if (port::kLittleEndian) {
    // Fast path for little-endian CPUs. All major compilers optimize this to a
    // single mov (x86_64) / ldr (ARM) instruction.
    uint32_t result;
    std::memcpy(&result, buffer, sizeof(uint32_t));
    return result;
  }
  // Platform-independent code.
  // Clang and gcc optimize this to a single mov / ldr instruction.
  // Recent clang and gcc optimize this to a single mov / ldr instruction.
  return (static_cast<uint32_t>(buffer[0])) |
         (static_cast<uint32_t>(buffer[1]) << 8) |
         (static_cast<uint32_t>(buffer[2]) << 16) |
@@ -124,16 +91,7 @@ inline uint32_t DecodeFixed32(const char* ptr) {
inline uint64_t DecodeFixed64(const char* ptr) {
  const uint8_t* const buffer = reinterpret_cast<const uint8_t*>(ptr);
  if (port::kLittleEndian) {
    // Fast path for little-endian CPUs. All major compilers optimize this to a
    // single mov (x86_64) / ldr (ARM) instruction.
    uint64_t result;
    std::memcpy(&result, buffer, sizeof(uint64_t));
    return result;
  }
  // Platform-independent code.
  // Clang and gcc optimize this to a single mov / ldr instruction.
  // Recent clang and gcc optimize this to a single mov / ldr instruction.
  return (static_cast<uint64_t>(buffer[0])) |
         (static_cast<uint64_t>(buffer[1]) << 8) |
         (static_cast<uint64_t>(buffer[2]) << 16) |