
How do I create a CIImage from AVCaptureStillImageOutput in Swift?

I'm using code that does this in Objective-C, and I've translated it to Swift, but I'm having trouble creating a CIImage from the AVCaptureStillImageOutput. If someone could look at this code and tell me where I'm going wrong, that would be great.

Here is the Objective-C code:

- (void)captureImageWithCompletionHander:(void(^)(NSString *fullPath))completionHandler 
{ 
dispatch_suspend(_captureQueue); 

AVCaptureConnection *videoConnection = nil; 
for (AVCaptureConnection *connection in self.stillImageOutput.connections) 
{ 
    for (AVCaptureInputPort *port in connection.inputPorts) 
    { 
     if ([port.mediaType isEqual:AVMediaTypeVideo]) 
     { 
      videoConnection = connection; 
      break; 
     } 
    } 
    if (videoConnection) break; 
} 

__weak typeof(self) weakSelf = self; 

[self.stillImageOutput captureStillImageAsynchronouslyFromConnection:videoConnection completionHandler: ^(CMSampleBufferRef imageSampleBuffer, NSError *error) 
{ 
    if (error) 
    { 
     dispatch_resume(_captureQueue); 
     return; 
    } 

    __block NSArray *filePath = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES); //create an array and store result of our search for the documents directory in it 

    NSString *documentsDirectory = [filePath objectAtIndex:0]; //create NSString object, that holds our exact path to the documents directory 

    NSString *fullPath = [documentsDirectory stringByAppendingPathComponent:[NSString stringWithFormat:@"/iScan_img_%i.pdf",(int)[NSDate date].timeIntervalSince1970]]; 


    @autoreleasepool 
    { 
     NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageSampleBuffer]; 
     CIImage *enhancedImage = [[CIImage alloc] initWithData:imageData options:@{kCIImageColorSpace:[NSNull null]}]; 
     imageData = nil; 

     if (weakSelf.cameraViewType == DocScannerCameraViewTypeBlackAndWhite) 
     { 
      enhancedImage = [self filteredImageUsingEnhanceFilterOnImage:enhancedImage]; 
     } 
     else 
     { 
      enhancedImage = [self filteredImageUsingContrastFilterOnImage:enhancedImage]; 
     } 

     if (weakSelf.isBorderDetectionEnabled && rectangleDetectionConfidenceHighEnough(_imageDedectionConfidence)) 
     { 
      CIRectangleFeature *rectangleFeature = [self biggestRectangleInRectangles:[[self highAccuracyRectangleDetector] featuresInImage:enhancedImage]]; 

      if (rectangleFeature) 
      { 
       enhancedImage = [self correctPerspectiveForImage:enhancedImage withFeatures:rectangleFeature]; 
      } 
     } 

     CIFilter *transform = [CIFilter filterWithName:@"CIAffineTransform"]; 
     [transform setValue:enhancedImage forKey:kCIInputImageKey]; 
     NSValue *rotation = [NSValue valueWithCGAffineTransform:CGAffineTransformMakeRotation(-90 * (M_PI/180))]; 
     [transform setValue:rotation forKey:@"inputTransform"]; 
     enhancedImage = transform.outputImage; 

     if (!enhancedImage || CGRectIsEmpty(enhancedImage.extent)) return; 

     static CIContext *ctx = nil; 
     if (!ctx) 
     { 
      ctx = [CIContext contextWithOptions:@{kCIContextWorkingColorSpace:[NSNull null]}]; 
     } 

     CGSize bounds = enhancedImage.extent.size; 
     bounds = CGSizeMake(floorf(bounds.width/4) * 4,floorf(bounds.height/4) * 4); 
     CGRect extent = CGRectMake(enhancedImage.extent.origin.x, enhancedImage.extent.origin.y, bounds.width, bounds.height); 

     static int bytesPerPixel = 8; 
     uint rowBytes = bytesPerPixel * bounds.width; 
     uint totalBytes = rowBytes * bounds.height; 
     uint8_t *byteBuffer = malloc(totalBytes); 

     CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 

     [ctx render:enhancedImage toBitmap:byteBuffer rowBytes:rowBytes bounds:extent format:kCIFormatRGBA8 colorSpace:colorSpace]; 

     CGContextRef bitmapContext = CGBitmapContextCreate(byteBuffer,bounds.width,bounds.height,bytesPerPixel,rowBytes,colorSpace,kCGImageAlphaNoneSkipLast); 
     CGImageRef imgRef = CGBitmapContextCreateImage(bitmapContext); 
     CGColorSpaceRelease(colorSpace); 
     CGContextRelease(bitmapContext); 
     free(byteBuffer); 

     if (imgRef == NULL) 
     { 
      // nothing to release here; CFRelease(NULL) would crash 
      return; 
     } 
     saveCGImageAsJPEGToFilePath(imgRef, fullPath); 



     CFRelease(imgRef); 

     dispatch_async(dispatch_get_main_queue(),^
         { 
          completionHandler(fullPath); 

          dispatch_resume(_captureQueue); 
         }); 

     _imageDedectionConfidence = 0.0f; 
    } 
}]; 

}

Basically, it captures the still image and, if certain if statements pass, crops the content to the detected CIRectangleFeature, then converts the CIImage to a CGImage to hand to a save function.
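For reference, that final CIImage-to-CGImage step can also be done with CIContext's createCGImage(_:from:) instead of rendering into a manual byte buffer. A minimal sketch (the helper name is mine, not from the library):

    import CoreImage 

    // Render a filtered CIImage to a CGImage through a CIContext. 
    // The null working color space mirrors the Objective-C code above, 
    // which passes kCIContextWorkingColorSpace: NSNull(). 
    func makeCGImage(from ciImage: CIImage) -> CGImage? { 
        let context = CIContext(options: [kCIContextWorkingColorSpace: NSNull()]) 
        return context.createCGImage(ciImage, from: ciImage.extent) 
    } 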

I translated it to Swift like this:

func captureImage(completionHandler: @escaping (_ imageFilePath: String) -> Void) { 

    self.captureQueue?.suspend() 
    var videoConnection: AVCaptureConnection! 
    for connection in self.stillImageOutput.connections{ 
     for port in (connection as! AVCaptureConnection).inputPorts { 
      if (port as! AVCaptureInputPort).mediaType.isEqual(AVMediaTypeVideo) { 
       videoConnection = connection as! AVCaptureConnection 
       break 
      } 
     } 
     if videoConnection != nil { 
      break 
     } 
    } 
    weak var weakSelf = self 
    self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer, error) -> Void in 
     if error != nil { 
      self.captureQueue?.resume() 
      return 
     } 
     let filePath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true) 
     let documentsDirectory: String = filePath[0] 
     let fullPath: String = URL(fileURLWithPath: documentsDirectory).appendingPathComponent("iScan_img_\(Int(Date().timeIntervalSince1970)).pdf").absoluteString 
     autoreleasepool { 
      let imageData = Data(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)) 
      var enhancedImage = CIImage(data: imageData, options: [kCIImageColorSpace: NSNull()]) 


      if weakSelf?.cameraViewType == DocScannerCameraViewType.blackAndWhite { 
       enhancedImage = self.filteredImageUsingEnhanceFilter(on: enhancedImage!) 
      } 
      else { 
       enhancedImage = self.filteredImageUsingContrastFilter(on: enhancedImage!) 
      } 
      if (weakSelf?.isEnableBorderDetection == true) && self.rectangleDetectionConfidenceHighEnough(confidence: self.imageDedectionConfidence) { 
       let rectangleFeature: CIRectangleFeature? = self.biggestRectangles(rectangles: self.highAccuracyRectangleDetector().features(in: enhancedImage!)) 
       if rectangleFeature != nil { 
        enhancedImage = self.correctPerspective(for: enhancedImage!, withFeatures: rectangleFeature!) 
       } 
      } 
      let transform = CIFilter(name: "CIAffineTransform") 
      let rotation = NSValue(cgAffineTransform: CGAffineTransform(rotationAngle: -90 * (.pi/180))) 
      transform?.setValue(rotation, forKey: "inputTransform") 
      enhancedImage = transform?.outputImage 
      if (enhancedImage == nil) || (enhancedImage?.extent.isEmpty)! { 
       return 
      } 
      var ctx: CIContext? 
      if (ctx != nil) { 
       ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()]) 
      } 
      var bounds: CGSize = (enhancedImage?.extent.size)! 
      bounds = CGSize(width: CGFloat((floorf(Float(bounds.width))/4) * 4), height: CGFloat((floorf(Float(bounds.height))/4) * 4)) 
      let extent = CGRect(x: CGFloat((enhancedImage?.extent.origin.x)!), y: CGFloat((enhancedImage?.extent.origin.y)!), width: CGFloat(bounds.width), height: CGFloat(bounds.height)) 
      let bytesPerPixel: CGFloat = 8 
      let rowBytes = bytesPerPixel * bounds.width 
      let totalBytes = rowBytes * bounds.height 
      let byteBuffer = malloc(Int(totalBytes)) 
      let colorSpace = CGColorSpaceCreateDeviceRGB() 
      ctx!.render(enhancedImage!, toBitmap: byteBuffer!, rowBytes: Int(rowBytes), bounds: extent, format: kCIFormatRGBA8, colorSpace: colorSpace) 
      let bitmapContext = CGContext(data: byteBuffer, width: Int(bounds.width), height: Int(bounds.height), bitsPerComponent: Int(bytesPerPixel), bytesPerRow: Int(rowBytes), space: colorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue) 
      let imgRef = bitmapContext?.makeImage() 
      free(byteBuffer) 

      self.saveCGImageAsJPEGToFilePath(imgRef: imgRef!, filePath: fullPath) 
      DispatchQueue.main.async(execute: {() -> Void in 
       completionHandler(fullPath) 
       self.captureQueue?.resume() 
      }) 
      self.imageDedectionConfidence = 0.0 
     } 
    } 
} 

It takes the AVCaptureStillImageOutput, converts it to a CIImage for all the processing that's needed, then converts to a CGImage for saving. What am I doing wrong in the translation? Or is there a better way to do this?

I really didn't want to have to ask about this, but I can't find any question like it, or at least anything that relates to capturing a CIImage from AVCaptureStillImageOutput.

Thanks for any help!

Answer

This is the correct Swift translation. Thanks again to Prientus for helping me find my mistake: compared with my attempt above, this version gives the CIAffineTransform filter its input image via transform?.setValue(enhancedImage, forKey: kCIInputImageKey), and creates the CIContext when ctx == nil rather than when ctx != nil.

func captureImage(completionHandler: @escaping (_ imageFilePath: String) -> Void) { 

    self.captureQueue?.suspend() 
    var videoConnection: AVCaptureConnection! 
    for connection in self.stillImageOutput.connections{ 
     for port in (connection as! AVCaptureConnection).inputPorts { 
      if (port as! AVCaptureInputPort).mediaType.isEqual(AVMediaTypeVideo) { 
       videoConnection = connection as! AVCaptureConnection 
       break 
      } 
     } 
     if videoConnection != nil { 
      break 
     } 
    } 
    weak var weakSelf = self 
    self.stillImageOutput.captureStillImageAsynchronously(from: videoConnection) { (sampleBuffer: CMSampleBuffer?, error) -> Void in 
     if error != nil { 
      self.captureQueue?.resume() 
      return 
     } 
     let filePath = NSSearchPathForDirectoriesInDomains(.documentDirectory, .userDomainMask, true) 
     let documentsDirectory: String = filePath[0] 
     let fullPath: String = documentsDirectory.appending("/iScan_img_\(Int(Date().timeIntervalSince1970)).pdf") 
     autoreleasepool { 

      let imageData = Data(AVCaptureStillImageOutput.jpegStillImageNSDataRepresentation(sampleBuffer)) 
      var enhancedImage = CIImage(data: imageData, options: [kCIImageColorSpace: NSNull()]) 


      if weakSelf?.cameraViewType == DocScannerCameraViewType.blackAndWhite { 
       enhancedImage = self.filteredImageUsingEnhanceFilter(on: enhancedImage!) 
      } 
      else { 
       enhancedImage = self.filteredImageUsingContrastFilter(on: enhancedImage!) 
      } 
      if (weakSelf?.isEnableBorderDetection == true) && self.rectangleDetectionConfidenceHighEnough(confidence: self.imageDedectionConfidence) { 
       let rectangleFeature: CIRectangleFeature? = self.biggestRectangles(rectangles: self.highAccuracyRectangleDetector().features(in: enhancedImage!)) 
       if rectangleFeature != nil { 
        enhancedImage = self.correctPerspective(for: enhancedImage!, withFeatures: rectangleFeature!) 
       } 
      } 
      let transform = CIFilter(name: "CIAffineTransform") 
      transform?.setValue(enhancedImage, forKey: kCIInputImageKey) 
      let rotation = NSValue(cgAffineTransform: CGAffineTransform(rotationAngle: -90 * (.pi/180))) 
      transform?.setValue(rotation, forKey: "inputTransform") 
      enhancedImage = (transform?.outputImage)! 
      if (enhancedImage == nil) || (enhancedImage?.extent.isEmpty)! { 
       return 
      } 
      var ctx: CIContext? 
      if (ctx == nil) { 
       ctx = CIContext(options: [kCIContextWorkingColorSpace: NSNull()]) 
      } 
      var bounds: CGSize = (enhancedImage!.extent.size) 
      bounds = CGSize(width: CGFloat(floorf(Float(bounds.width)/4) * 4), height: CGFloat(floorf(Float(bounds.height)/4) * 4)) // round down to a multiple of 4, as in the Objective-C floorf(bounds.width/4) * 4 
      let extent = CGRect(x: CGFloat((enhancedImage?.extent.origin.x)!), y: CGFloat((enhancedImage?.extent.origin.y)!), width: CGFloat(bounds.width), height: CGFloat(bounds.height)) 
      let bytesPerPixel: CGFloat = 8 
      let rowBytes = bytesPerPixel * bounds.width 
      let totalBytes = rowBytes * bounds.height 
      let byteBuffer = malloc(Int(totalBytes)) 
      let colorSpace = CGColorSpaceCreateDeviceRGB() 
      ctx!.render(enhancedImage!, toBitmap: byteBuffer!, rowBytes: Int(rowBytes), bounds: extent, format: kCIFormatRGBA8, colorSpace: colorSpace) 
      let bitmapContext = CGContext(data: byteBuffer, width: Int(bounds.width), height: Int(bounds.height), bitsPerComponent: Int(bytesPerPixel), bytesPerRow: Int(rowBytes), space: colorSpace, bitmapInfo: CGImageAlphaInfo.noneSkipLast.rawValue) 
      let imgRef = bitmapContext?.makeImage() 
      free(byteBuffer) 
      if imgRef == nil { 
       return 
      } 
      self.saveCGImageAsJPEGToFilePath(imgRef: imgRef!, filePath: fullPath) 
      DispatchQueue.main.async(execute: {() -> Void in 
       completionHandler(fullPath) 
       self.captureQueue?.resume() 
      }) 
      self.imageDedectionConfidence = 0.0 
     } 
    } 
} 
Answer (from Prientus)

Try replacing the creation of your CIImage with the following lines:

guard let buffer = sampleBuffer, 
    let pixelBuffer = CMSampleBufferGetImageBuffer(buffer) else { 
     return 
} 
// CIImage(cvPixelBuffer:) is non-failable, so it cannot be bound in the guard 
let enhancedImage = CIImage(cvPixelBuffer: pixelBuffer) 
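One caveat, as an assumption about the capture setup rather than something stated in this thread: CMSampleBufferGetImageBuffer only returns a pixel buffer when the still image output delivers uncompressed frames; with AVCaptureStillImageOutput's default JPEG settings it returns nil. A sketch of the uncompressed configuration:

    import AVFoundation 

    // Request uncompressed BGRA frames so that CMSampleBufferGetImageBuffer 
    // yields a CVPixelBuffer (with the default JPEG settings it returns nil). 
    let stillImageOutput = AVCaptureStillImageOutput() 
    stillImageOutput.outputSettings = [ 
        kCVPixelBufferPixelFormatTypeKey as String: NSNumber(value: kCVPixelFormatType_32BGRA) 
    ] 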

@Prientus Wouldn't I need the CIImage to detect a CIRectangleFeature, and for all the filters I apply before actually capturing the image? (See the detection sketch after these comments.) I know I can capture as a CGImage and then convert, but I'd need the CIImage for everything I do to the image before the capture. Thanks for your answer! – CarpenterBlood


@CarpenterBlood Oh, I see, I misunderstood the priority. Sorry. I'll give it some more thought :) – Prientus


@CarpenterBlood Please see my updated answer. I think your code may not have worked because you may not have been retrieving the CIImage correctly inside your autoreleasepool. – Prientus
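For reference, here is a minimal sketch of the detection step CarpenterBlood's comment refers to, using the standard CIDetector rectangle API. This is presumably what highAccuracyRectangleDetector() and biggestRectangles(rectangles:) wrap; the helper name is assumed:

    import CoreImage 

    // Run high-accuracy rectangle detection on a CIImage and pick the 
    // largest feature, mirroring what the library's helpers appear to do. 
    func biggestRectangle(in image: CIImage) -> CIRectangleFeature? { 
        let detector = CIDetector(ofType: CIDetectorTypeRectangle, 
                                  context: nil, 
                                  options: [CIDetectorAccuracy: CIDetectorAccuracyHigh]) 
        let features = detector?.features(in: image) as? [CIRectangleFeature] ?? [] 
        return features.max { $0.bounds.width * $0.bounds.height < 
                              $1.bounds.width * $1.bounds.height } 
    } 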