
Base32 conversion in C++

Does anyone know of any commonly used C++ library that provides methods for encoding and decoding numbers from base 10 to base 32, and vice versa?

Thanks, Stefano

Answers


Do you mean "base 10 to base 32", or rather integer to base 32? The latter seems more likely and more useful; by default, the standard formatted I/O functions generate base 10 string representations of integers.

For base 32 to integer conversion, the standard library strtol() function will do that. For the reverse, you don't need a library; you can implement it yourself easily enough (not everything is a Lego brick).

Here is an example; not necessarily the most efficient, but simple:

#include <cstdlib>   // for std::strtol
#include <string>

long b32tol(std::string b32)
{
    // strtol() accepts bases 2..36, so base 32 is handled directly.
    return std::strtol(b32.c_str(), 0, 32) ;
}

std::string itob32(long i)
{
    // Reinterpret the bits as unsigned so the modulo/divide loop below is
    // well defined (note: a negative value is treated as its two's-complement
    // bit pattern, not as a signed magnitude).
    unsigned long u = *reinterpret_cast<unsigned long*>(&i) ;
    std::string b32 ;

    do
    {
        int d = u % 32 ;
        if(d < 10)
        {
            b32.insert(0, 1, '0' + d) ;
        }
        else
        {
            b32.insert(0, 1, 'a' + d - 10) ;
        }

        u /= 32 ;

    } while(u > 0);

    return b32 ;
}


#include <iostream> 

int main() 
{ 
    long i = 32*32*11 + 32*20 + 5 ; // "bk5" in base 32
    std::string b32 = itob32(i) ; 
    long ii = b32tol(b32) ; 

    std::cout << i << std::endl ; // Original 
    std::cout << b32 << std::endl ; // Converted to b32 
    std::cout << ii << std::endl ; // Converted back 

    return 0 ; 
} 

Thanks. As a note, the conversion in the code seems to work only for positive integers. – Spac 2009-12-16 20:09:49


I'm working on it. I made one correction, but it's still not right. Have a go at it yourself. – Clifford 2009-12-16 22:04:26
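One way to handle negative values, which the comments above leave open, is to convert the magnitude and prepend a '-' sign; strtol() (and hence b32tol() above) will parse that back. A minimal self-contained sketch along those lines (the name itob32s is hypothetical, using the same digit set as itob32 above):

#include <string>

// Sketch only: sign-and-magnitude variant of itob32 (hypothetical name).
std::string itob32s(long i)
{
    // Compute the magnitude in unsigned arithmetic; subtracting from 0UL
    // wraps correctly even for the most negative long value.
    unsigned long u = (i < 0) ? 0UL - static_cast<unsigned long>(i)
                              : static_cast<unsigned long>(i);
    std::string b32;
    do
    {
        int d = static_cast<int>(u % 32);
        b32.insert(0, 1, static_cast<char>(d < 10 ? '0' + d : 'a' + d - 10));
        u /= 32;
    } while(u > 0);

    if(i < 0)
        b32.insert(0, 1, '-');
    return b32;
}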


[Update] Apparently the C++ std::setbase() IO manipulator and the normal << and >> IO operators only handle bases 8, 10 and 16, and are therefore of no use for base 32.

So to solve your problem of converting:

  • strings holding a base 10/32 representation of a number, read from some input, into integers in the program
  • integers in the program into strings holding a base 10/32 representation for output

you will need to resort to other functions.

For converting C-style strings holding base 2..36 representations into integers, you can #include <cstdlib> and use the strtol() family of functions.

As for converting integers into strings in an arbitrary base... I can't find an easy answer. printf(3)-style format strings only handle bases 8, 10 and 16 AFAICS, just like std::setbase. Anyone?
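For what it's worth, the integer-to-string direction is easy enough to hand-roll for any base up to 36, along the lines of Clifford's itob32() above. A minimal sketch (the name to_base is mine, positive values only):

#include <cstdlib>
#include <iostream>
#include <string>

// Sketch of a hand-rolled formatter (hypothetical name to_base): formats an
// unsigned value in any base from 2 to 36, using the digits '0'-'9' then
// 'a'-'z' -- the same digit set strtol() understands.
std::string to_base(unsigned long value, unsigned int base)
{
    static const char digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
    std::string out;
    do
    {
        out.insert(0, 1, digits[value % base]);
        value /= base;
    } while(value > 0);
    return out;
}

int main()
{
    std::string s = to_base(11909, 32);          // integer -> base 32 string
    long back = std::strtol(s.c_str(), 0, 32);   // base 32 string -> integer
    std::cout << s << " " << back << std::endl;  // prints "bk5 11909"
}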


'std::setbase()' only takes the values 8, 10 and 16. – 2009-12-16 17:31:55


To answer the original (now old) question directly: I don't know of any common library for encoding byte arrays in base32, or for decoding them again afterward. However, last week I needed to decode SHA1 hash values represented in base32 back into their original byte arrays. Here is some C++ code (with some notable Windows/little-endian artifacts) that I wrote to do exactly that and to verify the results. Note that, in contrast with Clifford's code above, this code assumes the "base32" alphabet mentioned in RFC 4648 ('A-Z' and '2-7'), whereas his, if I'm not mistaken, assumes the "base32hex" alphabet.

// This program illustrates how SHA1 hash values in base32 encoded form can be decoded 
// and then re-encoded in base16. 

#include "stdafx.h" 
#include <string> 
#include <vector> 
#include <iostream> 
#include <cassert> 

using namespace std; 

unsigned char Base16EncodeNibble(unsigned char value)
{
    if(value <= 9)
        return value + 48;          // '0'..'9'
    else if(value >= 10 && value <= 15)
        return (value - 10) + 65;   // 'A'..'F'
    else //assert(false);
    {
        cout << "Error: trying to convert value: " << static_cast<int>(value) << endl;
    }

    return 42; // sentinel for error condition
}

void Base32DecodeBase16Encode(const string & input, string & output) 
{ 
    // Here's the base32 decoding: 

     // The "Base 32 Encoding" section of http://tools.ietf.org/html/rfc4648#page-8 
     // shows that every 8 bytes of base32 encoded data must be translated back into 5 bytes 
     // of original data during a decoding process. The following code does this. 

    int input_len = input.length(); 
    assert(input_len == 32); 
    const char * input_str = input.c_str(); 
    int output_len = (input_len*5)/8; 
    assert(output_len == 20); 
     // Because input strings are assumed to be SHA1 hash values in base32, it is also assumed 
     // that they will be 32 characters (and bytes in this case) in length, and so the output 
     // string should be 20 bytes in length. 
    unsigned char *output_str = new unsigned char[output_len]; 

    char curr_char, temp_char; 
    long long temp_buffer = 0; //formerly: __int64 temp_buffer = 0; 
    for(int i=0; i<input_len; i++) 
    { 
     curr_char = input_str[i]; 
     if(curr_char >= 'A' && curr_char <= 'Z') 
      temp_char = curr_char - 'A'; 
     if(curr_char >= '2' && curr_char <= '7') 
      temp_char = curr_char - '2' + 26; 

     if(temp_buffer) 
      temp_buffer <<= 5; //temp_buffer = (temp_buffer << 5); 
     temp_buffer |= temp_char; 

     // if 8 encoded characters have been decoded into the temp location, 
      // then copy them to the appropriate section of the final decoded location 
     if((i>0) && !((i+1) % 8)) 
     { 
      unsigned char * source = reinterpret_cast<unsigned char*>(&temp_buffer); 
      //strncpy(output_str+(5*(((i+1)/8)-1)), source, 5); 
      int start_index = 5*(((i+1)/8)-1); 
      int copy_index = 4; 
      for(int x=start_index; x<(start_index+5); x++, copy_index--) 
       output_str[x] = source[copy_index]; 
      temp_buffer = 0; 

      // I could be mistaken, but I'm guessing that the necessity of copying 
      // in "reverse" order results from temp_buffer's little endian byte order. 
     } 
    } 

    // Here's the base16 encoding (for human-readable output and the chosen validation tests): 

     // The "Base 16 Encoding" section of http://tools.ietf.org/html/rfc4648#page-10 
     // shows that every byte original data must be encoded as two characters from the 
     // base16 alphabet - one charactor for the original byte's high nibble, and one for 
     // its low nibble. 

    unsigned char out_temp, chr_temp; 
    for(int y=0; y<output_len; y++) 
    { 
     out_temp = Base16EncodeNibble(output_str[y] >> 4); //encode the high nibble 
     output.append(1, static_cast<char>(out_temp)); 
     out_temp = Base16EncodeNibble(output_str[y] & 0xF); //encode the low nibble 
     output.append(1, static_cast<char>(out_temp)); 
    } 

    delete [] output_str; 
} 

int _tmain(int argc, _TCHAR* argv[]) 
{ 
    //string input = "J3WEDSJDRMJHE2FUHERUR6YWLGE3USRH"; 
    vector<string> input_b32_strings, output_b16_strings, expected_b16_strings; 

    input_b32_strings.push_back("J3WEDSJDRMJHE2FUHERUR6YWLGE3USRH"); 
    expected_b16_strings.push_back("4EEC41C9238B127268B4392348FB165989BA4A27"); 
    input_b32_strings.push_back("2HPUCIVW2EVBANIWCXOIQZX6N5NDIUSX"); 
    expected_b16_strings.push_back("D1DF4122B6D12A10351615DC8866FE6F5A345257"); 
    input_b32_strings.push_back("U4BDNCBAQFCPVDBL4FBG3AANGWVESI5J"); 
    expected_b16_strings.push_back("A7023688208144FA8C2BE1426D800D35AA4923A9"); 

    // Use the base conversion tool at http://darkfader.net/toolbox/convert/ 
    // to verify that the above base32/base16 pairs are equivalent. 

    int num_input_strs = input_b32_strings.size(); 
    for(int i=0; i<num_input_strs; i++) 
    { 
     string temp; 
     Base32DecodeBase16Encode(input_b32_strings[i], temp); 
     output_b16_strings.push_back(temp); 
    } 

    for(int j=0; j<num_input_strs; j++) 
    { 
     cout << input_b32_strings[j] << endl; 
     cout << output_b16_strings[j] << endl; 
     cout << expected_b16_strings[j] << endl; 

     if(output_b16_strings[j] != expected_b16_strings[j]) 
     { 
      cout << "Error in conversion for string " << j << endl; 
     } 
    } 

    return 0; 
}
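For comparison, the decoding step above can also be written portably, without the little-endian byte reversal or the Windows-specific pieces (stdafx.h, _tmain), by accumulating 5 bits per input character and emitting a byte whenever 8 or more bits are available. A minimal sketch of that approach (my own variant, still assuming the RFC 4648 "base32" alphabet and unpadded input):

#include <stdexcept>
#include <string>
#include <vector>

// Sketch of a portable decoder for the RFC 4648 "base32" alphabet (A-Z, 2-7,
// no '=' padding): accumulate 5 bits per character and emit a byte whenever
// at least 8 bits are available. No endianness assumptions.
std::vector<unsigned char> Base32Decode(const std::string & input)
{
    std::vector<unsigned char> out;
    unsigned int buffer = 0;
    int bits = 0;
    for(std::string::size_type i = 0; i < input.size(); ++i)
    {
        char c = input[i];
        int value;
        if(c >= 'A' && c <= 'Z')
            value = c - 'A';
        else if(c >= '2' && c <= '7')
            value = c - '2' + 26;
        else
            throw std::runtime_error("invalid base32 character");

        buffer = (buffer << 5) | value;
        bits += 5;
        if(bits >= 8)
        {
            bits -= 8;
            out.push_back(static_cast<unsigned char>((buffer >> bits) & 0xFF));
        }
    }
    return out; // 20 bytes for a 32-character SHA1 hash, as above
}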