2014-06-10 22 views
7

我严格按照this Xamarin sample(基于this Apple sample)将LinearPCM文件转换为AAC文件,但AudioConverter#FillComplexBuffer返回-50并且不转换任何东西。

该示例运行良好,但在我的项目中实现后,FillComplexBuffer方法返回错误-50,InputData事件一次也没有触发,因此没有进行任何转换。

仅当在设备上测试时才会显示该错误。在模拟器上测试时,一切都很顺利,最后我得到一个好的编码AAC文件。

今天我尝试了很多东西,我没有看到我的代码和示例代码之间的任何区别。你知道这可能来自哪里吗?

我不知道这是否与Xamarin有关,它似乎不是如此,因为Xamarin示例很好。

这里是我的代码的相关部分:

/// <summary>
/// Converts the LinearPCM file at <c>TempWavFilePath</c> into an AAC (.m4a)
/// file written to <paramref name="path"/>, then deletes the temp file.
/// </summary>
/// <param name="path">Destination path for the encoded M4A file.</param>
/// <remarks>
/// NOTE(review): on-device, FillComplexBuffer can return -50 unless an
/// AVAudioSession category is activated before converting (see the accepted
/// answer below); this method assumes the session is already configured.
/// </remarks>
protected void Encode(string path) 
{ 
    // In class setup. File at TempWavFilePath has DecodedFormat as format. 
    // 
    // DecodedFormat = AudioStreamBasicDescription.CreateLinearPCM(); 
    // AudioStreamBasicDescription encodedFormat = new AudioStreamBasicDescription() 
    // { 
    // Format = AudioFormatType.MPEG4AAC, 
    // SampleRate = DecodedFormat.SampleRate, 
    // ChannelsPerFrame = DecodedFormat.ChannelsPerFrame, 
    // }; 
    // AudioStreamBasicDescription.GetFormatInfo (ref encodedFormat); 
    // EncodedFormat = encodedFormat; 

    // Setup converter 
    AudioStreamBasicDescription inputFormat = DecodedFormat; 
    AudioStreamBasicDescription outputFormat = EncodedFormat; 

    AudioConverterError converterCreateError; 
    AudioConverter converter = AudioConverter.Create(inputFormat, outputFormat, out converterCreateError); 
    if (converterCreateError != AudioConverterError.None) 
    { 
        Console.WriteLine("Converter creation error: " + converterCreateError); 
        // FIX: the original fell through and dereferenced an unusable converter.
        return; 
    } 
    converter.EncodeBitRate = 192000; // AAC 192kbps 

    // get the actual formats back from the Audio Converter 
    inputFormat = converter.CurrentInputStreamDescription; 
    outputFormat = converter.CurrentOutputStreamDescription; 

    AudioFile inputFile = null; 
    AudioFile outputFile = null; 
    IntPtr inputBufferPtr = IntPtr.Zero; 
    IntPtr outputBufferPtr = IntPtr.Zero; 

    // FIX: try/finally so native buffers and file handles are released on every
    // path (the original leaked them on exceptions and never disposed inputFile).
    try 
    { 
        /*** INPUT ***/ 

        inputFile = AudioFile.OpenRead(NSUrl.FromFilename(TempWavFilePath)); 

        // init buffer 
        const int inputBufferBytesSize = 32768; 
        inputBufferPtr = Marshal.AllocHGlobal(inputBufferBytesSize); 

        // calc number of packets per read 
        int inputSizePerPacket = inputFormat.BytesPerPacket; 
        int inputBufferPacketSize = inputBufferBytesSize / inputSizePerPacket; 
        AudioStreamPacketDescription[] inputPacketDescriptions = null; 

        // init position 
        long inputFilePosition = 0; 

        // define input delegate: pulled by FillComplexBuffer whenever the
        // converter needs more source packets
        converter.InputData += delegate(ref int numberDataPackets, AudioBuffers data, ref AudioStreamPacketDescription[] dataPacketDescription) 
        { 
            // how much to read — never more than the scratch buffer can hold
            if (numberDataPackets > inputBufferPacketSize) 
            { 
                numberDataPackets = inputBufferPacketSize; 
            } 

            // read from the file 
            int outNumBytes; 
            AudioFileError readError = inputFile.ReadPackets(false, out outNumBytes, inputPacketDescriptions, inputFilePosition, ref numberDataPackets, inputBufferPtr); 
            if (readError != 0) 
            { 
                Console.WriteLine("Read error: " + readError); 
            } 

            // advance input file packet position 
            inputFilePosition += numberDataPackets; 

            // put the data pointer into the buffer list 
            data.SetData(0, inputBufferPtr, outNumBytes); 

            // add packet descriptions if required; CBR PCM input carries none,
            // so inputPacketDescriptions is always null here
            if (dataPacketDescription != null) 
            { 
                dataPacketDescription = inputPacketDescriptions; 
            } 

            return AudioConverterError.None; 
        }; 

        /*** OUTPUT ***/ 

        // create the destination file 
        outputFile = AudioFile.Create(NSUrl.FromFilename(path), AudioFileType.M4A, outputFormat, AudioFileFlags.EraseFlags); 

        // init buffer 
        const int outputBufferBytesSize = 32768; 
        outputBufferPtr = Marshal.AllocHGlobal(outputBufferBytesSize); 

        AudioBuffers buffers = new AudioBuffers(1); 

        // calc number of packets per write 
        int outputSizePerPacket = outputFormat.BytesPerPacket; 
        AudioStreamPacketDescription[] outputPacketDescriptions = null; 

        if (outputSizePerPacket == 0) 
        { 
            // if the destination format is VBR, we need to get max size per packet from the converter 
            outputSizePerPacket = (int)converter.MaximumOutputPacketSize; 

            // allocate memory for the PacketDescription structures describing the layout of each packet 
            outputPacketDescriptions = new AudioStreamPacketDescription[outputBufferBytesSize / outputSizePerPacket]; 
        } 
        int outputBufferPacketSize = outputBufferBytesSize / outputSizePerPacket; 

        // init position 
        long outputFilePosition = 0; 

        long totalOutputFrames = 0; // used for debugging 

        // write magic cookie if necessary 
        if (converter.CompressionMagicCookie != null && converter.CompressionMagicCookie.Length != 0) 
        { 
            outputFile.MagicCookie = converter.CompressionMagicCookie; 
        } 

        // loop to convert data 
        Console.WriteLine ("Converting..."); 
        while (true) 
        { 
            // (re)arm the output buffer — FillComplexBuffer rewrites DataByteSize
            buffers[0] = new AudioBuffer() 
            { 
                NumberChannels = outputFormat.ChannelsPerFrame, 
                DataByteSize = outputBufferBytesSize, 
                Data = outputBufferPtr 
            }; 

            int writtenPackets = outputBufferPacketSize; 

            // LET'S CONVERT (it's about time...) 
            AudioConverterError converterFillError = converter.FillComplexBuffer(ref writtenPackets, buffers, outputPacketDescriptions); 
            if (converterFillError != AudioConverterError.None) 
            { 
                Console.WriteLine("FillComplexBuffer error: " + converterFillError); 
                // FIX: stop on a failing converter instead of looping forever
                // (the -50 case in the question would spin in this loop).
                break; 
            } 

            if (writtenPackets == 0) // EOF 
            { 
                break; 
            } 

            // write to output file 
            int inNumBytes = buffers[0].DataByteSize; 

            AudioFileError writeError = outputFile.WritePackets(false, inNumBytes, outputPacketDescriptions, outputFilePosition, ref writtenPackets, outputBufferPtr); 
            if (writeError != 0) 
            { 
                Console.WriteLine("WritePackets error: {0}", writeError); 
            } 

            // advance output file packet position 
            outputFilePosition += writtenPackets; 

            // FIX: the original read FlowFormat here although the packets just
            // written are in outputFormat — TODO confirm FlowFormat was not
            // intentionally something else (debug counter only).
            if (outputFormat.FramesPerPacket != 0) 
            { 
                // the format has constant frames per packet 
                totalOutputFrames += (writtenPackets * outputFormat.FramesPerPacket); 
            } 
            else 
            { 
                // variable frames per packet require doing this for each packet (adding up the number of sample frames of data in each packet) 
                for (var i = 0; i < writtenPackets; ++i) 
                { 
                    totalOutputFrames += outputPacketDescriptions[i].VariableFramesInPacket; 
                } 
            } 
        } 

        // write out any of the leading and trailing frames for compressed formats only 
        if (outputFormat.BitsPerChannel == 0) 
        { 
            Console.WriteLine("Total number of output frames counted: {0}", totalOutputFrames); 
            WritePacketTableInfo(converter, outputFile); 
        } 

        // write the cookie again - sometimes codecs will update cookies at the end of a conversion 
        if (converter.CompressionMagicCookie != null && converter.CompressionMagicCookie.Length != 0) 
        { 
            outputFile.MagicCookie = converter.CompressionMagicCookie; 
        } 
    } 
    finally 
    { 
        // Clean everything 
        if (inputBufferPtr != IntPtr.Zero) 
        { 
            Marshal.FreeHGlobal(inputBufferPtr); 
        } 
        if (outputBufferPtr != IntPtr.Zero) 
        { 
            Marshal.FreeHGlobal(outputBufferPtr); 
        } 
        converter.Dispose(); 
        if (outputFile != null) 
        { 
            outputFile.Dispose(); 
        } 
        // FIX: the original never disposed the input AudioFile.
        if (inputFile != null) 
        { 
            inputFile.Dispose(); 
        } 
    } 

    // Remove temp file 
    File.Delete(TempWavFilePath); 
} 

我已经看过this SO question,但那个没有详细展开的C/Obj-C++相关答案似乎并不适用于我的问题。

谢谢!

+1

有没有可能把包含失败代码的示例项目发布到GitHub上?实际调试比光用眼睛看代码要容易得多。 – bratsche

+0

[编辑]自从我找到答案后,GitHub repo被删除。 – Ingolmo

回答

0

我终于找到了解决方案!

我只是在转换文件之前声明AVAudioSession类别。

AVAudioSession.SharedInstance().SetCategory(AVAudioSessionCategory.AudioProcessing); 
AVAudioSession.SharedInstance().SetActive(true); 

因为我还使用了AudioQueueRenderOffline,实际上我必须把类别设置为AVAudioSessionCategory.PlayAndRecord,这样离线渲染和音频转换才能都正常工作。