
iOS: Getting real time (ish) video data

I am trying to write a program that, as part of its functionality, continuously captures video and computes the average luminance of a given frame of video data in real time, or as close to real time as possible. This is my first venture into any video/iOS camera work, so aside from my own code, a lot of what I have here is pieced together from things I found online. Right now this code in my ViewController.m file compiles and runs on my device, but it doesn't seem to do anything:

- (void)viewDidLoad{ 
    [super viewDidLoad]; 
    _val = 0; 

    //Set up the video capture session. 
    NSLog(@"Setting up the capture session...\n"); 
    captureSession = [[AVCaptureSession alloc] init]; 

    //Add input. 
    NSLog(@"Adding video input...\n"); 
    AVCaptureDevice *captureDevice = [self frontFacingCameraIfAvailable]; 
    if(captureDevice){ 
     NSError *error; 
     videoInputDevice = [AVCaptureDeviceInput deviceInputWithDevice:captureDevice error:&error]; 
     if(!error){ 
      if([captureSession canAddInput:videoInputDevice]) 
       [captureSession addInput:videoInputDevice]; 
      else 
       NSLog(@"Couldn't add video input.\n"); 

     }else{ 
      NSLog(@"Couldn't create video input.\n"); 
     } 
    }else{ 
     NSLog(@"Couldn't create capture device.\n"); 
    } 

    //Add output. 
    NSLog(@"Adding video data output...\n"); 
    vidOutput = [[AVCaptureVideoDataOutput alloc] init]; 
    vidOutput.alwaysDiscardsLateVideoFrames = YES; 
    if([captureSession canAddOutput:vidOutput]) 
     [captureSession addOutput:vidOutput]; 
    else 
     NSLog(@"Couldn't add video output.\n"); 
    NSString* key = (NSString*)kCVPixelBufferPixelFormatTypeKey; 
    NSNumber* value = [NSNumber numberWithUnsignedInt:kCVPixelFormatType_420YpCbCr8BiPlanarFullRange]; 
    NSDictionary* videoSettings = [NSDictionary dictionaryWithObject:value forKey:key]; 
    [vidOutput setVideoSettings:videoSettings]; 
    dispatch_queue_t queue = dispatch_queue_create("MyQueue", NULL); 
    [vidOutput setSampleBufferDelegate:self queue:queue]; 

} 

- (void)viewDidUnload{ 
    [super viewDidUnload]; 
    // Release any retained subviews of the main view. 
} 

-(AVCaptureDevice *)frontFacingCameraIfAvailable{ 
    NSArray *videoDevices = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]; 
    AVCaptureDevice *captureDevice = nil; 
    for (AVCaptureDevice *device in videoDevices){ 
     if (device.position == AVCaptureDevicePositionFront){ 
      captureDevice = device; 
      break; 
     } 
    } 

    //couldn't find one on the front, so just get the default video device. 
    if (!captureDevice){ 
     captureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo]; 
    } 

    return captureDevice; 
} 

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{ 
    // Create autorelease pool because we are not in the main_queue 
    @autoreleasepool { 
     CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 
     // Lock the imagebuffer 
     CVPixelBufferLockBaseAddress(imageBuffer,0); 
     // Get information about the image 
     uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer); 
     // size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer); 
     size_t width = CVPixelBufferGetWidth(imageBuffer); 
     size_t height = CVPixelBufferGetHeight(imageBuffer); 
     size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer); 
     CVPlanarPixelBufferInfo_YCbCrBiPlanar *bufferInfo = (CVPlanarPixelBufferInfo_YCbCrBiPlanar *)baseAddress; 
     // This just moved the pointer past the offset 
     baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0); 
     // convert the image 
     UIImage *image = [self makeImage:baseAddress bufferInfo:bufferInfo width:width height:height bytesPerRow:bytesPerRow]; 
     // Update the display with the captured image for DEBUG purposes 
     //dispatch_async(dispatch_get_main_queue(), ^{ 
      //[self.vImage setImage:image]; 
     //}); 
     CGImageRef cgImage = [image CGImage]; 
     CGDataProviderRef provider = CGImageGetDataProvider(cgImage); 
     CFDataRef bitmapData = CGDataProviderCopyData(provider); 
     const UInt8* data = CFDataGetBytePtr(bitmapData); 
     int cols = width - 1; 
     int rows = height - 1; 
     float avgLuminance = 0.0; 
     for(int i = 0; i < cols; i++){ 
      for(int j = 0; j < rows; j++){ 
       const UInt8* pixel = data + j*bytesPerRow + i*4; 
       avgLuminance += pixel[0]*0.299 + pixel[1]*0.587 + pixel[2]*0.114; 
      } 
     } 
     avgLuminance /= (cols*rows); 
     NSLog(@"Average Luminance: %f\n", avgLuminance); 

    } 
} 

-(UIImage *)makeImage:(uint8_t *)inBaseAddress bufferInfo:(CVPlanarPixelBufferInfo_YCbCrBiPlanar *)inBufferInfo width:(size_t)Width height:(size_t)Height bytesPerRow:(size_t)BytesPerRow{ 
    NSUInteger yPitch = EndianU32_BtoN(inBufferInfo->componentInfoY.rowBytes); 
    uint8_t *rgbBuffer = (uint8_t *)malloc(Width * Height * 4); 
    uint8_t *yBuffer = (uint8_t *)inBaseAddress; 
    uint8_t val; 
    int bytesPerPixel = 4; 
    // for each byte in the input buffer, fill in the output buffer with four bytes 
    // the first byte is the Alpha channel, then the next three contain the same 
    // value of the input buffer 
    for(int y = 0; y < Height*Width; y++){ 
     val = yBuffer[y]; 
     // Alpha channel 
     rgbBuffer[(y*bytesPerPixel)] = 0xff; 
     // next three bytes same as input 
     rgbBuffer[(y*bytesPerPixel)+1] = rgbBuffer[(y*bytesPerPixel)+2] = rgbBuffer[y*bytesPerPixel+3] = val; 
    } 

    // Create a device-dependent RGB color space 
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 
    CGContextRef context = CGBitmapContextCreate(rgbBuffer, yPitch, Height, 8,yPitch*bytesPerPixel, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast); 
    CGImageRef quartzImage = CGBitmapContextCreateImage(context); 
    CGContextRelease(context); 
    //UIImage *image = [[UIImage alloc] initWithCGImage:quartzImage scale:(CGFloat)0.5 orientation:UIImageOrientationRight]; 
    UIImage *image = [UIImage imageWithCGImage:quartzImage]; 
    CGImageRelease(quartzImage); 
    free(rgbBuffer); 
    return image; 
} 

I have my .h file set up as an AVCaptureVideoDataOutputSampleBufferDelegate, but I get the feeling I don't fully understand what the code needs in order to continuously pull data from the camera, because the captureOutput method never gets called anywhere. How/where should I call it so that I get a constant stream of data?
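For reference, here is a minimal sketch of what the header declaration might look like; the ivar names mirror the code above, but the exact ivar/property layout and the type of _val are assumptions on my part:

// ViewController.h -- minimal sketch; declares conformance to the delegate protocol
// and the ivars referenced in the code above (layout and _val's type are guesses).
#import <UIKit/UIKit.h>
#import <AVFoundation/AVFoundation.h>

@interface ViewController : UIViewController <AVCaptureVideoDataOutputSampleBufferDelegate>
{
    AVCaptureSession          *captureSession;
    AVCaptureDeviceInput      *videoInputDevice;
    AVCaptureVideoDataOutput  *vidOutput;
    float                      _val;   // type is a guess; the question only shows _val = 0
}
@end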


What I would do is take a snapshot of an AVCaptureVideoPreviewLayer (see http://stackoverflow.com/questions/5002789/get-uiimage-from-views-layers) and then get the data from that. It may not be as clean as you'd like, though.
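A rough sketch of that suggestion is below, assuming a previewLayer property (an AVCaptureVideoPreviewLayer attached to the running session) that is not in the original question; note that rendering a live preview layer with renderInContext: can yield empty output on some iOS versions, so treat this as illustrative only:

// Hypothetical helper (not from the question). Renders self.previewLayer into an
// image context and returns the result as a UIImage snapshot.
- (UIImage *)snapshotOfPreviewLayer {
    UIGraphicsBeginImageContextWithOptions(self.previewLayer.bounds.size, YES, 0.0);
    [self.previewLayer renderInContext:UIGraphicsGetCurrentContext()];
    UIImage *snapshot = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    return snapshot;
}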

Answer


I'm an idiot. All I needed was:

[captureSession startRunning]; 

Imagine that. Heh.
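In other words, the session was fully configured but never started, so the delegate callback never fired. A minimal sketch of where the call could go; the viewWillDisappear teardown is an assumption of mine, not part of the original answer:

// At the end of viewDidLoad, after the delegate and queue have been set:
[vidOutput setSampleBufferDelegate:self queue:queue];
[captureSession startRunning];   // without this, captureOutput:didOutputSampleBuffer:fromConnection: is never called

// Assumed cleanup (not in the original answer): stop the session when the view goes away.
- (void)viewWillDisappear:(BOOL)animated{
    [super viewWillDisappear:animated];
    [captureSession stopRunning];
}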