
I am using the CoreAudio low-level audio capture APIs. The application targets Mac OSX, not iOS. The audio crackles due to corrupted audio data.

From time to time during testing we get a very annoying noise modulated onto the real audio. The phenomenon develops over time: it starts barely noticeable and becomes more and more dominant.

Analyzing the captured audio in Audacity shows that the end part of each audio packet is wrong.

Here is a sample image: [Audacity screenshot of the corrupted packet tail]

The corruption repeats every 40 ms, which is the configured packet time (as a sample count in the buffer).

Update: The gaps grow larger over time. Here is another snapshot from the same capture file, taken 10 minutes in: the gap now spans 1460 samples, i.e. 33 ms of the packet's total 40 ms! [second Audacity screenshot]
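For reference, a quick consistency check of those numbers (a sketch assuming a 44.1 kHz capture rate, which the question does not state but which matches the reported figures):

// Hypothetical sanity check of the reported packet and gap sizes,
// assuming a 44.1 kHz sample rate (not stated in the question).
const double sampleRate    = 44100.0;                      // assumed
const double packetSamples = 0.040 * sampleRate;           // = 1764 samples per 40 ms packet
const double gapMs         = 1460.0/sampleRate * 1000.0;   // ≈ 33.1 ms, matching the update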

CODE SNIPPETS:

Capture callback:

OSStatus MacOS_AudioDevice::captureCallback(void *inRefCon, 
              AudioUnitRenderActionFlags *ioActionFlags, 
              const AudioTimeStamp *inTimeStamp, 
              UInt32 inBusNumber, 
              UInt32 inNumberFrames, 
              AudioBufferList *ioData) 
{ 
    MacOS_AudioDevice* _this = static_cast<MacOS_AudioDevice*>(inRefCon); 

    // Get the new audio data 
    OSStatus err = AudioUnitRender(_this->m_AUHAL, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, _this->m_InputBuffer); 
    if (err != noErr) 
    { 
     ... 

     return err; 
    } 

    // ignore callback on unexpected buffer size 
    if (_this->m_params.bufferSizeSamples != inNumberFrames) 
    { 
     ... 

     return noErr; 
    } 

    // Deliver audio data 
    DeviceIOMessage message; 
    message.bufferSizeBytes = _this->m_deviceBufferSizeBytes; 
    message.buffer = _this->m_InputBuffer->mBuffers[0].mData; 
    if (_this->m_callbackFunc) 
    { 
     _this->m_callbackFunc(_this, message); 
    } 
    return noErr; // an OSStatus-returning callback must return a value
} 

Open and start the capture device:

void MacOS_AudioDevice::openAUHALCapture() 
{ 
    UInt32 enableIO; 
    AudioStreamBasicDescription streamFormat; 
    UInt32 size; 
    SInt32 *channelArr; 
    std::stringstream ss; 
    AudioObjectPropertyAddress deviceBufSizeProperty = 
    { 
     kAudioDevicePropertyBufferFrameSize, 
     kAudioDevicePropertyScopeInput, 
     kAudioObjectPropertyElementMaster 
    }; 

    // AUHAL 
    AudioComponentDescription cd = {kAudioUnitType_Output, kAudioUnitSubType_HALOutput, kAudioUnitManufacturer_Apple, 0, 0}; 
    AudioComponent HALOutput = AudioComponentFindNext(NULL, &cd); 
    verify_macosapi(AudioComponentInstanceNew(HALOutput, &m_AUHAL)); 

    verify_macosapi(AudioUnitInitialize(m_AUHAL)); 

    // enable input IO 
    enableIO = 1; 
    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, 1, &enableIO, sizeof(enableIO))); 

    // disable output IO 
    enableIO = 0; 
    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, 0, &enableIO, sizeof(enableIO))); 

    // Setup current device 
    size = sizeof(AudioDeviceID); 
    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_CurrentDevice, kAudioUnitScope_Global, 0, &m_MacDeviceID, sizeof(AudioDeviceID))); 

    // Set device native buffer length before setting AUHAL stream 
    size = sizeof(m_originalDeviceBufferTimeFrames); 
    verify_macosapi(AudioObjectSetPropertyData(m_MacDeviceID, &deviceBufSizeProperty, 0, NULL, size, &m_originalDeviceBufferTimeFrames)); 

    // Get device format 
    size = sizeof(AudioStreamBasicDescription); 
    verify_macosapi(AudioUnitGetProperty(m_AUHAL, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, 1, &streamFormat, &size)); 

    // Setup channel map 
    assert(m_params.numOfChannels <= streamFormat.mChannelsPerFrame); 
    channelArr = new SInt32[streamFormat.mChannelsPerFrame]; 
    for (int i = 0; i < streamFormat.mChannelsPerFrame; i++) 
     channelArr[i] = -1; 
    for (int i = 0; i < m_params.numOfChannels; i++) 
     channelArr[i] = i; 

    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_ChannelMap, kAudioUnitScope_Input, 1, channelArr, sizeof(SInt32) * streamFormat.mChannelsPerFrame)); 
    delete [] channelArr; 

    // Setup stream converters 
    streamFormat.mFormatID = kAudioFormatLinearPCM; 
    streamFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger; 
    streamFormat.mFramesPerPacket = m_SamplesPerPacket; 
    streamFormat.mBitsPerChannel = m_params.sampleDepthBits; 
    streamFormat.mSampleRate = m_deviceSampleRate; 
    streamFormat.mChannelsPerFrame = 1; 
    streamFormat.mBytesPerFrame = 2; 
    streamFormat.mBytesPerPacket = streamFormat.mFramesPerPacket * streamFormat.mBytesPerFrame; 

    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &streamFormat, size)); 

    // Setup callbacks 
    AURenderCallbackStruct input; 
    input.inputProc = captureCallback; 
    input.inputProcRefCon = this; 
    verify_macosapi(AudioUnitSetProperty(m_AUHAL, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &input, sizeof(input))); 

    // Calculate the size of the IO buffer (in samples) 
    if (m_params.bufferSizeMS != -1) 
    { 
     unsigned int desiredSignalsInBuffer = (m_params.bufferSizeMS/(double)1000) * m_deviceSampleRate; 

     // making sure the value stays in the device's supported range 
     desiredSignalsInBuffer = std::min<unsigned int>(desiredSignalsInBuffer, m_deviceBufferFramesRange.mMaximum); 
     desiredSignalsInBuffer = std::max<unsigned int>(m_deviceBufferFramesRange.mMinimum, desiredSignalsInBuffer); 

     m_deviceBufferFrames = desiredSignalsInBuffer; 
    } 

    // Set device buffer length 
    size = sizeof(m_deviceBufferFrames); 
    verify_macosapi(AudioObjectSetPropertyData(m_MacDeviceID, &deviceBufSizeProperty, 0, NULL, size, &m_deviceBufferFrames)); 

    m_deviceBufferSizeBytes = m_deviceBufferFrames * streamFormat.mBytesPerFrame; 
    m_deviceBufferTimeMS = 1000 * m_deviceBufferFrames/m_deviceSampleRate; 

    // Calculate the AudioBufferList allocation size for one buffer per channel 
    size = offsetof(AudioBufferList, mBuffers[0]) + (sizeof(AudioBuffer) * m_params.numOfChannels); 

    // Allocate input buffer 
    m_InputBuffer = (AudioBufferList *)malloc(size); 
    m_InputBuffer->mNumberBuffers = m_params.numOfChannels; 

    // Pre-malloc buffers for AudioBufferLists 
    for(UInt32 i = 0; i< m_InputBuffer->mNumberBuffers ; i++) 
    { 
     m_InputBuffer->mBuffers[i].mNumberChannels = 1; 
     m_InputBuffer->mBuffers[i].mDataByteSize = m_deviceBufferSizeBytes; 
     m_InputBuffer->mBuffers[i].mData = malloc(m_deviceBufferSizeBytes); 
    } 

    // Update class properties 
    m_params.sampleRateHz = streamFormat.mSampleRate; 
    m_params.bufferSizeSamples = m_deviceBufferFrames; 
    m_params.bufferSizeBytes = m_params.bufferSizeSamples * streamFormat.mBytesPerFrame; 

} 
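Note: m_deviceBufferFramesRange is used above but its initialization is not shown in the question. A minimal sketch of how it might be queried, assuming the standard kAudioDevicePropertyBufferFrameSizeRange property and the question's verify_macosapi macro:

// Hypothetical initialization (not shown in the question): ask the HAL for
// the device's supported buffer frame size range (an AudioValueRange).
AudioObjectPropertyAddress rangeProperty =
{
    kAudioDevicePropertyBufferFrameSizeRange,
    kAudioDevicePropertyScopeInput,
    kAudioObjectPropertyElementMaster
};
UInt32 rangeSize = sizeof(m_deviceBufferFramesRange);
verify_macosapi(AudioObjectGetPropertyData(m_MacDeviceID, &rangeProperty,
                                           0, NULL, &rangeSize,
                                           &m_deviceBufferFramesRange));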


eADMReturnCode MacOS_AudioDevice::start() 
{ 
    eADMReturnCode ret = OK; 
    LOGAPI(ret); 

    if (!m_isStarted && m_isOpen) 
    { 
     OSStatus err = AudioOutputUnitStart(m_AUHAL); 
     if (err == noErr) 
      m_isStarted = true; 
     else 
      ret = ERROR; 
    } 
    return ret; 
} 
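For completeness, a matching stop path is sketched below; it is not part of the question, but AudioOutputUnitStop is the documented counterpart of AudioOutputUnitStart:

// Hypothetical stop(), mirroring start() above.
eADMReturnCode MacOS_AudioDevice::stop()
{
    eADMReturnCode ret = OK;
    if (m_isStarted)
    {
        OSStatus err = AudioOutputUnitStop(m_AUHAL);
        if (err == noErr)
            m_isStarted = false;
        else
            ret = ERROR;
    }
    return ret;
}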

Any idea what causes this and how to fix it?

Thanks in advance!


Please show the code you are using. – dave234


I updated my question with the code, please see above. Thanks! – meirm


hotpaw2's answer looks right to me. – dave234

Answer


Periodic glitches or dropouts can be caused by not paying attention to, or not fully processing, the number of frames sent to each audio callback. Valid buffers do not always contain the expected or a constant number of samples (inNumberFrames might not equal bufferSizeSamples, or the previous inNumberFrames, in perfectly valid audio buffers).
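A minimal sketch of a capture callback that honors whatever inNumberFrames arrives instead of dropping mismatched buffers; m_bytesPerFrame is a hypothetical member holding streamFormat.mBytesPerFrame, and the per-channel buffers are assumed to be pre-allocated to the device's maximum buffer size:

// Hypothetical variant of the question's callback: render and deliver
// exactly inNumberFrames frames, whatever the HAL decides to send.
OSStatus MacOS_AudioDevice::captureCallback(void *inRefCon,
                                            AudioUnitRenderActionFlags *ioActionFlags,
                                            const AudioTimeStamp *inTimeStamp,
                                            UInt32 inBusNumber,
                                            UInt32 inNumberFrames,
                                            AudioBufferList * /*ioData*/)
{
    MacOS_AudioDevice *_this = static_cast<MacOS_AudioDevice*>(inRefCon);
    const UInt32 bytesNeeded = inNumberFrames * _this->m_bytesPerFrame; // hypothetical member

    // Reset each buffer's capacity before every render call; AudioUnitRender
    // updates mDataByteSize to the number of bytes actually written.
    for (UInt32 i = 0; i < _this->m_InputBuffer->mNumberBuffers; i++)
        _this->m_InputBuffer->mBuffers[i].mDataByteSize = bytesNeeded;

    OSStatus err = AudioUnitRender(_this->m_AUHAL, ioActionFlags, inTimeStamp,
                                   inBusNumber, inNumberFrames, _this->m_InputBuffer);
    if (err != noErr)
        return err;

    // Deliver what actually arrived, not a fixed packet size.
    DeviceIOMessage message;
    message.bufferSizeBytes = _this->m_InputBuffer->mBuffers[0].mDataByteSize;
    message.buffer          = _this->m_InputBuffer->mBuffers[0].mData;
    if (_this->m_callbackFunc)
        _this->m_callbackFunc(_this, message);

    return noErr;
}

The downstream consumer then has to repacketize into fixed 40 ms packets itself, buffering partial packets between callbacks.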

These kinds of glitches can also be caused by some models of iOS devices that only support 48k audio in hardware attempting to record at 44.1k.
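One way to verify that the hardware rate matches the configured stream (a sketch using kAudioDevicePropertyNominalSampleRate with the question's m_MacDeviceID and verify_macosapi; the asker reports already doing an equivalent check):

// Hypothetical check: read the device's nominal hardware sample rate and
// compare it with the rate used in the AUHAL stream format.
Float64 nominalRate = 0;
UInt32 rateSize = sizeof(nominalRate);
AudioObjectPropertyAddress rateProperty =
{
    kAudioDevicePropertyNominalSampleRate,
    kAudioObjectPropertyScopeGlobal,
    kAudioObjectPropertyElementMaster
};
verify_macosapi(AudioObjectGetPropertyData(m_MacDeviceID, &rateProperty,
                                           0, NULL, &rateSize, &nominalRate));
// nominalRate should equal streamFormat.mSampleRate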

Some kinds of glitches can also be caused by any non-hard-real-time code inside the m_callbackFunc function (such as any synchronous file reads/writes, OS calls, Objective-C message dispatch, GC, or memory allocation/deallocation).
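A common way to keep the render thread hard-real-time safe is to have the callback do nothing but copy into a pre-allocated lock-free ring buffer, and let a worker thread do the file I/O and OS calls. A minimal single-producer/single-consumer sketch (hypothetical; TPCircularBuffer is a well-known alternative):

#include <algorithm>
#include <atomic>
#include <cstring>
#include <vector>

// Hypothetical SPSC ring buffer: the audio thread is the only writer, one
// worker thread the only reader, so two atomic counters are enough.
struct SPSCRing
{
    explicit SPSCRing(size_t capacity) : data(capacity) {}
    std::vector<uint8_t> data;       // allocated once, before capture starts
    std::atomic<size_t>  head{0};    // total bytes written (audio thread)
    std::atomic<size_t>  tail{0};    // total bytes read (worker thread)
};

// Real-time-safe write: no locks, no allocation; drops on overflow rather
// than ever blocking the render thread.
bool ringWrite(SPSCRing &ring, const void *src, size_t bytes)
{
    const size_t cap  = ring.data.size();
    const size_t head = ring.head.load(std::memory_order_relaxed);
    const size_t tail = ring.tail.load(std::memory_order_acquire);
    if (bytes > cap - (head - tail))
        return false;                // ring full: drop, never wait
    const size_t pos   = head % cap;
    const size_t first = std::min(bytes, cap - pos);
    std::memcpy(ring.data.data() + pos, src, first);
    std::memcpy(ring.data.data(),
                static_cast<const uint8_t*>(src) + first, bytes - first);
    ring.head.store(head + bytes, std::memory_order_release);
    return true;
}

Inside the capture callback, delivery then reduces to ringWrite(ring, buf.mData, buf.mDataByteSize); the worker thread drains the ring and performs any file writes or OS calls there.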


The target is OSX, not iOS. I check the current device sample rate and configure the stream with the actual value. The captured file contains sound at the correct sample rate, not pitch-shifted, so this is not a wrong-sample-rate issue. – meirm


Please see my updated question, where I added the code snippets. As you can see, I verify inNumberFrames as expected. Maybe there are more checks I need to perform? – meirm


Your capture callback code does not appear to handle buffers with a different frame length when that verification fails. That can cause dropouts. – hotpaw2