2015-04-24 31 views
-1

我已经把我从Obj-C翻译成Swift的一团糟,所以我非常感谢重构/代码布局评论。花括号真的把我扔了。是否有任何Xcode插件或什么可以帮助我更好地管理我的代码块?> REQ Swift重构/代码布局/评论

我的一些函数和计算可能效率不高,所以如果您对这些方面有任何建议,那也太棒了。例如,如果你已经使用或看到更好的过滤算法等。

p.s.谢谢马丁。

import UIKit 
import Foundation 
import AVFoundation 
import CoreMedia 
import CoreVideo 

let minFramesForFilterToSettle = 10 


class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate { 

let captureSession = AVCaptureSession() 
// If we find a device we'll store it here for later use 
var captureDevice : AVCaptureDevice? 

var validFrameCounter: Int = 0 
var detector: Detector! 
var filter: Filter! 
// var currentState = CurrentState.stateSampling  // Is this initialized correctly? 

override func viewDidLoad() { 
    super.viewDidLoad() 
    self.detector = Detector() 
    self.filter = Filter() 
    // startCameraCapture() // call to un-used function. 

    captureSession.sessionPreset = AVCaptureSessionPresetHigh 

    let devices = AVCaptureDevice.devices() 

    // Loop through all the capture devices on this phone 
    for device in devices { 
     // Make sure this particular device supports video 
     if (device.hasMediaType(AVMediaTypeVideo)) { 
      // Finally check the position and confirm we've got the back camera 
      if(device.position == AVCaptureDevicePosition.Front) { 
       captureDevice = device as? AVCaptureDevice 
       if captureDevice != nil { 
        //println("Capture device found") 
        beginSession() 
       } 
      } 
     } 
    } 


} // end of viewDidLoad ??? 



// configure device for camera and focus mode // maybe not needed since we dont use focuc? 
func configureDevice() { 
    if let device = captureDevice { 
     device.lockForConfiguration(nil) 
     //device.focusMode = .Locked 
     device.unlockForConfiguration() 
    } 

} 


// start capturing frames 
func beginSession() { 
    // Create the AVCapture Session 

    configureDevice() 

    var err : NSError? = nil 
    captureSession.addInput(AVCaptureDeviceInput(device: captureDevice, error: &err)) 

    if err != nil { 
     println("error: \(err?.localizedDescription)") 
    } 

    // Automatic Switch ON torch mode 
    if captureDevice!.hasTorch { 
     // lock your device for configuration 
     captureDevice!.lockForConfiguration(nil) 
     // check if your torchMode is on or off. If on turns it off otherwise turns it on 
     captureDevice!.torchMode = captureDevice!.torchActive ? AVCaptureTorchMode.Off : AVCaptureTorchMode.On 
     // sets the torch intensity to 100% 
     captureDevice!.setTorchModeOnWithLevel(1.0, error: nil) 
     // unlock your device 
     captureDevice!.unlockForConfiguration() 
    } 

    // Create a AVCaptureInput with the camera device 
    var deviceInput : AVCaptureInput = AVCaptureDeviceInput.deviceInputWithDevice(captureDevice, error: &err) as! AVCaptureInput 
    if deviceInput == nil! { 
     println("error: \(err?.localizedDescription)") 
    } 

    // Set the output 
    var videoOutput : AVCaptureVideoDataOutput = AVCaptureVideoDataOutput() 

    // create a queue to run the capture on 
    var captureQueue : dispatch_queue_t = dispatch_queue_create("captureQueue", nil) 

    // setup ourself up as the capture delegate 
    videoOutput.setSampleBufferDelegate(self, queue: captureQueue) 

    // configure the pixel format 
    videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String : Int(kCVPixelFormatType_32BGRA)] // kCVPixelBufferPixelFormatTypeKey is a CFString btw. 

    // set the minimum acceptable frame rate to 10 fps 
    captureDevice!.activeVideoMinFrameDuration = CMTimeMake(1, 10) 

    // and the size of the frames we want - we'll use the smallest frame size available 
    captureSession.sessionPreset = AVCaptureSessionPresetLow 

    // Add the input and output 
    captureSession.addInput(deviceInput) 
    captureSession.addOutput(videoOutput) 


    // Start the session 
    captureSession.startRunning() 

    // we're now sampling from the camera 
    enum CurrentState { 
     case statePaused 
     case stateSampling 
     } 
    var currentState = CurrentState.statePaused 

    func setState(state: CurrentState){ 
     switch state 
     { 
     case .statePaused: 
      // what goes here? Something like this? 
      UIApplication.sharedApplication().idleTimerDisabled = false 
     case .stateSampling: 
      // what goes here? Something like this? 
      UIApplication.sharedApplication().idleTimerDisabled = true // singletons 

     } 
    } 

    // we're now sampling from the camera 
    currentState = CurrentState.stateSampling 


    // stop the app from sleeping 
    UIApplication.sharedApplication().idleTimerDisabled = true 

    // update our UI on a timer every 0.1 seconds 
    NSTimer.scheduledTimerWithTimeInterval(0.1, target: self, selector: Selector("update"), userInfo: nil, repeats: true) 

    func stopCameraCapture() { 
     captureSession.stopRunning() 
     captureSession = nil 
    } 


    // pragma mark Pause and Resume of detection 
    func pause() { 
     if currentState == CurrentState.statePaused { 
      return 
     } 

     // switch off the torch 
     if captureDevice!.isTorchModeSupported(AVCaptureTorchMode.On) { 
      captureDevice!.lockForConfiguration(nil) 
      captureDevice!.torchMode = AVCaptureTorchMode.Off 
      captureDevice!.unlockForConfiguration() 
    } 
    currentState = CurrentState.statePaused 
    // let the application go to sleep if the phone is idle 
    UIApplication.sharedApplication().idleTimerDisabled = false 
    } 


    func resume() { 
     if currentState != CurrentState.statePaused { 
      return 
     } 

     // switch on the torch 
     if captureDevice!.isTorchModeSupported(AVCaptureTorchMode.On) { 
      captureDevice!.lockForConfiguration(nil) 
      captureDevice!.torchMode = AVCaptureTorchMode.On 
      captureDevice!.unlockForConfiguration() 
    } 
    currentState = CurrentState.stateSampling 
    // stop the app from sleeping 
    UIApplication.sharedApplication().idleTimerDisabled = true 
    } 

    // beginning of paste 

    // r,g,b values are from 0 to 1 // h = [0,360], s = [0,1], v = [0,1] 
    // if s == 0, then h = -1 (undefined) 
    func RGBtoHSV(r : Float, g : Float, b : Float, inout h : Float, inout s : Float, inout v : Float) { 
     let rgbMin = min(r, g, b) 
     let rgbMax = max(r, g, b) 
     let delta = rgbMax - rgbMin 

     v = rgbMax 
     s = delta/rgbMax 
     h = Float(0.0) 



    // start of calculation 
     if (rgbMax != 0) { 
      s = delta/rgbMax 
     } 
     else{ 
      // r = g = b = 0 
      s = 0  
      h = -1 
      return 
     } 

     if r == rgbMax { 
      h = (g - b)/delta 
     } 
     else if (g == rgbMax) { 
      h = 2 + (b - r)/delta 
     } 
     else{ 
      h = 4 + (r - g)/delta 
      h = 60 
     } 
     if (h < 0) { 
      h += 360 
     } 
    } 



     // process the frame of video 
    func captureOutput(captureOutput:AVCaptureOutput, didOutputSampleBuffer sampleBuffer:CMSampleBuffer, fromConnection connection:AVCaptureConnection) { 
     // if we're paused don't do anything 
     if currentState == CurrentState.statePaused { 
      // reset our frame counter 
      self.validFrameCounter = 0 

      return 
     } 

    // this is the image buffer 
    var cvimgRef:CVImageBufferRef = CMSampleBufferGetImageBuffer(sampleBuffer) 
    // Lock the image buffer 
    CVPixelBufferLockBaseAddress(cvimgRef, 0) 
    // access the data 
    var width: size_t = CVPixelBufferGetWidth(cvimgRef) 
    var height:size_t = CVPixelBufferGetHeight(cvimgRef) 
    // get the raw image bytes 
    let buf = UnsafeMutablePointer<UInt8>(CVPixelBufferGetBaseAddress(cvimgRef)) 
    var bprow: size_t = CVPixelBufferGetBytesPerRow(cvimgRef) 


    var r = 0 
    var g = 0 
    var b = 0 

     for var y = 0; y < height; y++ { 
      for var x = 0; x < width * 4; x += 4 { 
       b+=buf[x](UnsafeMutablePointer(UInt8)) // fix 
       g+=buf[x + 1](UnsafeMutablePointer(Float)) // fix 
       r+=buf[x + 2](UnsafeMutablePointer(Int)) // fix 
      } 
      buf += bprow() 
     } 
     r /= 255 * (width*height) 
     g /= 255 * (width*height) 
     b /= 255 * (width*height) 


    } 


    // convert from rgb to hsv colourspace 
    var h = Float() 
    var s = Float() 
    var v = Float() 

    RGBtoHSV(r, g, b, &h, &s, &v) 

    // do a sanity check for blackness 
    if s > 0.5 && v > 0.5 { 
     // increment the valid frame count 
     validFrameCounter++ 
     // filter the hue value - the filter is a simple band pass filter that removes any DC component and any high frequency noise 
     var filtered: Float = filter.processValue(h) 
     // have we collected enough frames for the filter to settle? 
     if validFrameCounter > minFramesForFilterToSettle { 
      // add the new value to the detector 
      detector.addNewValue(filtered, atTime: CACurrentMediaTime()) 
     } 
    } else { 
     validFrameCounter = 0 
     // clear the detector - we only really need to do this once, just before we start adding valid samples 
     detector.reset() 
    } 
} 
+0

什么'RGBtoHSV(R,G,B,H,S,V)',正如我在[我的答案](HTTP建议:// stackoverflow.com/a/29808914/1187415)到你以前的问题? –

+0

它不起作用是一个糟糕的问题描述。它不会编译? (什么是错误信息?)还是它没有给出预期的结果? (你得到了什么结果,你期望什么)? - 请注意,在*'RGBtoHSV()'内定义*局部变量* r,g,b,h,s,v *根本没有意义,并且隐藏了函数参数。也许这是你的问题。 –

+2

我在回答您的上一个问题时建议的代码会进行编译,并按预期工作。但是您将'let r:Float = 0.3 ... var v:Float = 0.0' *复制到该函数中并且没有意义。 –

回答

0

实际上你可以做到这一点

RGBtoHSV(r: r, g: g, b: b, h: &h, s: &s, v: &v) 
+0

编号自由函数默认不带参数标签。 –

+0

Martin。我更新和清理。我编辑了我的问题,要求进行布局审查。请参阅上文。谢谢。 – Edward