
FFT with MTAudioProcessingTap on iOS

So, I have tried to read everything I can find about doing an FFT with Accelerate.framework, and I have an example working together with an MTAudioProcessingTap, but I feel like I am doing something wrong and my plotted points shouldn't look like this:

[Image: plotted points from FFT]

#import "AudioTap.h" 


#pragma mark - TapContext 

typedef struct TapContext { 
    void *audioTap; 
    Float64 sampleRate; 
    UInt32 numSamples; 
    FFTSetup fftSetup; 
    COMPLEX_SPLIT split; 
    float *window; 
    float *inReal; 

} TapContext; 


#pragma mark - AudioTap Callbacks 

static void TapInit(MTAudioProcessingTapRef tap, void *clientInfo, void **tapStorageOut) 
{ 
    TapContext *context = calloc(1, sizeof(TapContext)); 
    context->audioTap = clientInfo; 
    context->sampleRate = NAN; 
    context->numSamples = 4096; 

    vDSP_Length log2n = log2f((float)context->numSamples); 

    int nOver2 = context->numSamples/2; 

    context->inReal = (float *) malloc(context->numSamples * sizeof(float)); 
    context->split.realp = (float *) malloc(nOver2*sizeof(float)); 
    context->split.imagp = (float *) malloc(nOver2*sizeof(float)); 

    context->fftSetup = vDSP_create_fftsetup(log2n, FFT_RADIX2); 

    context->window = (float *) malloc(context->numSamples * sizeof(float)); 
    vDSP_hann_window(context->window, context->numSamples, vDSP_HANN_DENORM); 



    *tapStorageOut = context; 
} 

static void TapPrepare(MTAudioProcessingTapRef tap, CMItemCount numberFrames, const AudioStreamBasicDescription *format) 
{ 
    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap); 
    context->sampleRate = format->mSampleRate; 

    if (format->mFormatFlags & kAudioFormatFlagIsNonInterleaved) { 
     NSLog(@"is Non Interleaved"); 
    } 

    if (format->mFormatFlags & kAudioFormatFlagIsSignedInteger) { 
     NSLog(@"dealing with integers"); 
    } 
} 


static void TapProcess(MTAudioProcessingTapRef tap, CMItemCount numberFrames, MTAudioProcessingTapFlags flags, 
         AudioBufferList *bufferListInOut, CMItemCount *numberFramesOut, MTAudioProcessingTapFlags *flagsOut) 
{ 


    OSStatus status; 

    status = MTAudioProcessingTapGetSourceAudio(tap, numberFrames, bufferListInOut, flagsOut, NULL, numberFramesOut); 
    if (status != noErr) { 
     NSLog(@"MTAudioProcessingTapGetSourceAudio: %d", (int)status); 
     return; 
    } 

    // Take the first buffer (index 0); with non-interleaved audio each
    // channel arrives in its own AudioBuffer.
    AudioBuffer *firstBuffer = &bufferListInOut->mBuffers[0];

    float *bufferData = firstBuffer->mData;

    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap); 

    // Apply the Hann window, then pack the windowed samples into the
    // split-complex layout that vDSP_fft_zrip expects.
    vDSP_vmul(bufferData, 1, context->window, 1, context->inReal, 1, context->numSamples);

    vDSP_ctoz((COMPLEX *)context->inReal, 2, &context->split, 1, context->numSamples/2);


    vDSP_Length log2n = log2f((float)context->numSamples);
    vDSP_fft_zrip(context->fftSetup, &context->split, 1, log2n, FFT_FORWARD);
    // vDSP_fft_zrip packs the Nyquist value into imagp[0]; zero it so bin 0 is pure DC.
    context->split.imagp[0] = 0.0;

    UInt32 i; 


    NSMutableArray *outData = [NSMutableArray array]; 

    [outData addObject:[NSNumber numberWithFloat:0]]; 
    for(i = 1; i < context->numSamples; i++) { 
     float power = context->split.realp[i] * context->split.realp[i] + context->split.imagp[i] * context->split.imagp[i]; 
     //amp[i] = sqrtf(power); 

     [outData addObject:[NSNumber numberWithFloat:sqrtf(power)]]; 
    } 


    AudioTap *audioTap = (__bridge AudioTap *)context->audioTap; 
    [audioTap updateSpectrum:outData]; 

} 

static void TapUnprepare(MTAudioProcessingTapRef tap) 
{ 

} 

static void TapFinalize(MTAudioProcessingTapRef tap) 
{ 
    TapContext *context = (TapContext *)MTAudioProcessingTapGetStorage(tap); 

    free(context->split.realp); 
    free(context->split.imagp); 
    free(context->inReal); 
    free(context->window); 

    vDSP_destroy_fftsetup(context->fftSetup);
    context->audioTap = NULL;
    free(context); 
} 






#pragma mark - AudioTap Implementation 


@implementation AudioTap 

- (id)initWithTrack:(AVAssetTrack *)track frameSize:(UInt32)frameSize 
{ 

    self = [super init]; 
    if (self) { 

     _assetTrack = track; 
     _frameSize = frameSize; 

     [self setupAudioTap]; 

    } 
    return self; 

} 

- (void)setupAudioTap 
{ 
    //MTAudioProcessingTap 
    MTAudioProcessingTapCallbacks callbacks; 

    callbacks.version = kMTAudioProcessingTapCallbacksVersion_0; 

    callbacks.init = TapInit; 
    callbacks.prepare = TapPrepare; 
    callbacks.process = TapProcess; 
    callbacks.unprepare = TapUnprepare; 
    callbacks.finalize = TapFinalize; 
    callbacks.clientInfo = (__bridge void *)self; 

    MTAudioProcessingTapRef tapRef; 
    OSStatus err = MTAudioProcessingTapCreate(kCFAllocatorDefault, &callbacks, 
               kMTAudioProcessingTapCreationFlag_PostEffects, &tapRef); 

    if (err || !tapRef) {
     NSLog(@"Unable to create AudioProcessingTap: %d", (int)err);
     return;
    }

    //Audio Mix 
    AVMutableAudioMixInputParameters *inputParams = [AVMutableAudioMixInputParameters 
                audioMixInputParametersWithTrack:_assetTrack]; 

    inputParams.audioTapProcessor = tapRef; 

    AVMutableAudioMix *audioMix = [AVMutableAudioMix audioMix]; 
    audioMix.inputParameters = @[inputParams]; 
    _audioMix = audioMix; 
} 



- (void)updateSpectrum:(NSArray *)data
{
    dispatch_async(dispatch_get_main_queue(), ^{
     // Forward the spectrum data to the delegate on the main queue.
     if (_delegate && [_delegate respondsToSelector:@selector(updateSpectrum:)]) {
      [_delegate updateSpectrum:data];
     }
    });
}

@end 
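
For completeness, this is roughly how I attach the tap to playback (a sketch; the asset/player setup is not shown above, so songURL is a placeholder, and I am assuming audioMix is declared as a property in AudioTap.h):

AVURLAsset *asset = [AVURLAsset URLAssetWithURL:songURL options:nil];
AVAssetTrack *audioTrack = [[asset tracksWithMediaType:AVMediaTypeAudio] firstObject];

AudioTap *audioTap = [[AudioTap alloc] initWithTrack:audioTrack frameSize:4096];

AVPlayerItem *playerItem = [AVPlayerItem playerItemWithAsset:asset];
playerItem.audioMix = audioTap.audioMix; // the mix built in setupAudioTap
AVPlayer *player = [AVPlayer playerWithPlayerItem:playerItem];
[player play];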

I have read that the audioBuffer->mData property might be something other than a float (i.e. SInt32, etc.?). If that is true, how do I make sure I convert it correctly before attempting the FFT?
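
If it does turn out to be signed integer data, I assume the conversion before the windowing step would look something like this (a sketch using vDSP_vflt16 plus a normalization; the 16-bit case and the scale constant are my assumptions):

// Hypothetical helper: convert 16-bit signed samples to normalized floats.
// In TapProcess this would run on firstBuffer->mData (count = context->numSamples)
// whenever TapPrepare reported kAudioFormatFlagIsSignedInteger.
static void ConvertSInt16ToFloat(const SInt16 *input, float *output, vDSP_Length count)
{
    vDSP_vflt16(input, 1, output, 1, count);         // SInt16 -> float
    float scale = 1.0f/32768.0f;                     // map full-scale SInt16 to [-1.0, 1.0)
    vDSP_vsmul(output, 1, &scale, output, 1, count);
}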

Answer

Your plot length and the actual FFT magnitude result length, (2^log2n)/2, are not the same: vDSP_fft_zrip on N real input samples yields only N/2 complex bins, but your loop pushes nearly N values into outData (and reads past the end of the split buffers).
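
In code terms: with numSamples = 4096 the split buffers hold only 2048 values each, so the magnitude loop in TapProcess should be bounded by numSamples/2, something like:

int nOver2 = context->numSamples/2; // 2048 bins for a 4096-point real FFT
for (i = 1; i < nOver2; i++) {
    float power = context->split.realp[i] * context->split.realp[i]
                + context->split.imagp[i] * context->split.imagp[i];
    [outData addObject:[NSNumber numberWithFloat:sqrtf(power)]];
}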


So the length of the NSArray I am creating is wrong? Should its length be (2^log2n)/2? Could you explain that a little more; this is all fairly new to me.


Also, am I doing this right if I want to see the magnitudes across the whole frequency range of the music being played?
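
(For reference: bin i of an N-point real FFT sits at i * sampleRate / N Hz, so those N/2 bins already span everything from DC up to the Nyquist frequency, sampleRate/2. An illustrative helper:)

// Illustrative helper: center frequency in Hz of bin i of an
// n-point real FFT at the given sample rate.
static inline float BinFrequency(vDSP_Length i, vDSP_Length n, Float64 sampleRate)
{
    return (float)(i * sampleRate/n); // e.g. i = 1, n = 4096 at 44100 Hz -> ~10.8 Hz
}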


Actually, I finally got some clarification on how the FFT frequencies end up being plotted. Now I just need to put more weight on the higher frequencies.
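
(One common way to do that weighting is to plot the magnitudes on a decibel scale; a sketch using vDSP_vdbcon, where the magnitudes array is assumed to hold the nOver2 values computed in the loop above:)

// Sketch: convert nOver2 linear magnitudes to decibels in place.
// The final 0 flag selects the amplitude form, 20*log10(magnitude/reference).
static void MagnitudesToDecibels(float *magnitudes, vDSP_Length nOver2)
{
    float reference = 1.0f; // assumed 0 dB reference for full-scale input
    vDSP_vdbcon(magnitudes, 1, &reference, magnitudes, 1, nOver2, 0);
}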