2011-07-19 20 views
2

我想不通,为什么下面的回报OSStatus -10851:音频单元,设置格式失败-10581

status = AudioUnitSetProperty(*audioUnit, 
       kAudioUnitProperty_StreamFormat, 
       kAudioUnitScope_Output, 
       kInputBus, 
       &outFormat, 
       sizeof(outFormat)); 

工程在模拟器上而不是设备上。

这里是代码的其余部分:

#import "VoipRecorder.h" 
#import <AudioToolbox/AudioToolbox.h> 
#import <CoreAudio/CoreAudioTypes.h> 

#define kOutputBus 0 
#define kInputBus 1 

void SetAUCanonical(AudioStreamBasicDescription *format, UInt32 nChannels, bool interleaved) 
// note: leaves sample rate untouched 
{ 
    format->mFormatID = kAudioFormatLinearPCM; 
#if TARGET_IPHONE_SIMULATOR 
    int sampleSize = sizeof(Float32); 
    format->mFormatFlags = kAudioFormatFlagsNativeFloatPacked; 
#else 
    int sampleSize = sizeof(AudioSampleType); 
    format->mFormatFlags = kAudioFormatFlagsCanonical; 
#endif 
    format->mBitsPerChannel = 8 * sampleSize; 
    format->mChannelsPerFrame = nChannels; 
    format->mFramesPerPacket = 1; 
    if (interleaved) 
     format->mBytesPerPacket = format->mBytesPerFrame = nChannels * sampleSize; 
    else { 
     format->mBytesPerPacket = format->mBytesPerFrame = sampleSize; 
     format->mFormatFlags |= kAudioFormatFlagIsNonInterleaved; 
    } 
} 

int SetupRemoteIO (AudioUnit *audioUnit, AURenderCallbackStruct inRenderProc, AURenderCallbackStruct inOutputProc, AudioStreamBasicDescription * outFormat) 
{ 
    OSStatus status; 

    // Open the output unit 
    AudioComponentDescription desc; 
    desc.componentType = kAudioUnitType_Output; 
    desc.componentSubType = kAudioUnitSubType_RemoteIO; 
    desc.componentManufacturer = kAudioUnitManufacturer_Apple; 
    desc.componentFlags = 0; 
    desc.componentFlagsMask = 0; 

    AudioComponent comp = AudioComponentFindNext(NULL, &desc); 

    AudioComponentInstanceNew(comp, audioUnit); 

    UInt32 flag = 1; 
    // Enable IO for recording 
    status = AudioUnitSetProperty(*audioUnit, 
            kAudioOutputUnitProperty_EnableIO, 
            kAudioUnitScope_Input, 
            kInputBus, 
            &flag, 
            sizeof(flag)); 

    assert(status == 0); 

    // Enable IO for playback 
    status = AudioUnitSetProperty(*audioUnit, 
            kAudioOutputUnitProperty_EnableIO, 
            kAudioUnitScope_Output, 
            kOutputBus, 
            &flag, 
            sizeof(flag));  

    assert(status == 0); 

    // set our required format - Canonical AU format: LPCM non-interleaved 8.24 fixed point 
    SetAUCanonical(outFormat, 1, NO); 

    outFormat->mSampleRate = 44100.00; //8000; 

    //Apply format 
    status = AudioUnitSetProperty(*audioUnit, 
            kAudioUnitProperty_StreamFormat, 
            kAudioUnitScope_Output, 
            kInputBus, 
            &outFormat, 
            sizeof(outFormat)); 

    assert(status == 0); 

    status = AudioUnitSetProperty(*audioUnit, 
            kAudioUnitProperty_StreamFormat, 
            kAudioUnitScope_Input, 
            kOutputBus, 
            &outFormat, 
            sizeof(outFormat)); 

    // Setup callbacks 
    // Recording 
    status = AudioUnitSetProperty(*audioUnit, 
            kAudioOutputUnitProperty_SetInputCallback, 
            kAudioUnitScope_Input, 
            kInputBus, 
            &inRenderProc, 
            sizeof(inRenderProc)); 
    assert(status == 0); 

    // Playback 
    status = AudioUnitSetProperty(*audioUnit, 
            kAudioUnitProperty_SetRenderCallback, 
            kAudioUnitScope_Output, 
            kOutputBus, 
            &inOutputProc, 
            sizeof(inOutputProc)); 
    assert(status == 0); 

    status = AudioUnitInitialize(*audioUnit); 
    assert(status == 0); 

    return 0; 
} 

@implementation VoipRecorder 

@synthesize audioUnit; 

- (id)init 
{ 
    self = [super init]; 
    if (self) { 

    } 

    return self; 
} 

void rioInterruptionListener(void *inClientData, UInt32 inInterruption) 
{ 
    printf("Session interrupted! --- %s ---", inInterruption == kAudioSessionBeginInterruption ? "Begin Interruption" : "End Interruption"); 

    VoipRecorder *THIS = (VoipRecorder*)inClientData; 

    if (inInterruption == kAudioSessionEndInterruption) { 
     // make sure we are again the active session 
     AudioSessionSetActive(true); 
     AudioOutputUnitStart(THIS.audioUnit); 
    } 

    if (inInterruption == kAudioSessionBeginInterruption) { 
     AudioOutputUnitStop(THIS.audioUnit); 
    } 
} 

int buffer[1000000]; 
int bufferSize = 2; 

static OSStatus PerformSpeaker(
          void      *inRefCon, 
          AudioUnitRenderActionFlags *ioActionFlags, 
          const AudioTimeStamp  *inTimeStamp, 
          UInt32      inBusNumber, 
          UInt32      inNumberFrames, 
          AudioBufferList    *ioData) 
{ 
    NSLog(@"Speaker"); 

    if (bufferSize == 0) { 
     return 0; 
    } 

    if (ioData == NULL) { 
     NSLog(@"err"); 
     return 0; 
    } 

    return 0; 
} 

AudioBufferList *AllocateBuffers(UInt32 nBytes) 
{ 
    int channelCount = 2; 

    AudioBufferList *audioBufferList; 
    audioBufferList = (AudioBufferList *)malloc(sizeof(AudioBufferList)); 
    audioBufferList->mNumberBuffers = 1; 
    audioBufferList->mBuffers[0].mNumberChannels = channelCount; 
    audioBufferList->mBuffers[0].mDataByteSize = nBytes; 
    audioBufferList->mBuffers[0].mData = (AudioUnitSampleType *)malloc(nBytes); 

    return audioBufferList; 
} 

static OSStatus PerformThru(
          void      *inRefCon, 
          AudioUnitRenderActionFlags *ioActionFlags, 
          const AudioTimeStamp  *inTimeStamp, 
          UInt32      inBusNumber, 
          UInt32      inNumberFrames, 
          AudioBufferList    *ioData) 
{ 
    VoipRecorder *THIS = (VoipRecorder *)inRefCon; 

    AudioBufferList *bufferList = AllocateBuffers(inNumberFrames*2); 

    OSStatus err = AudioUnitRender(THIS.audioUnit, ioActionFlags, inTimeStamp, 1, inNumberFrames, bufferList); 
    if (err) { 
     printf("PerformThru: error %d\n", (int)err); 
     free(bufferList); 
     return err; 
    } 


    free(bufferList); 

    return 0; 
} 


- (void)setupAudio { 

    OSStatus status; 

    inputProc.inputProc = PerformThru; 
    inputProc.inputProcRefCon = self; 

    outputProc.inputProc = PerformSpeaker; 
    outputProc.inputProcRefCon = self; 

    buffer[0] = 0x4444; 
    buffer[1] = 0xffff; 
    status = AudioSessionInitialize(NULL, NULL, rioInterruptionListener, self); 
    assert(status == 0); 

    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord; 
    status = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(audioCategory), &audioCategory); 
    assert(status == 0); 

    Float32 preferredBufferSize = .005; 
    status = AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration, sizeof(preferredBufferSize), &preferredBufferSize); 
    assert(status == 0); 

    UInt32 size = sizeof(hwSampleRate); 
    status = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate, &size, &hwSampleRate); 
    assert(status == 0); 

    status = AudioSessionSetActive(true); 
    assert(status == 0); 

    status = SetupRemoteIO(&audioUnit, inputProc, outputProc, &thruFormat); 
    assert(status == 0); 

    status = AudioOutputUnitStart(audioUnit); 
    assert(status == 0); 

    size = sizeof(thruFormat); 
    status = AudioUnitGetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, 1, &thruFormat, &size); 
    assert(status == 0); 

    //NSLog(@"0x%X", status); 
} 

回答

3

两种可能的检查项目:您发布的代码混合使用AudioSampleType和AudioUnitSampleType的,这是两个不同大小的数据类型。您还只在1个数据通道上指定kAudioFormatFlagIsNonInterleaved标志,这可能不是必需的。