Here is the best way I have found to do this (I will explain why at the end):
On an iOS device sending image data:
    - (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
    {
        CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
        CVPixelBufferLockBaseAddress(imageBuffer, 0);

        uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddress(imageBuffer);
        size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
        size_t width = CVPixelBufferGetWidth(imageBuffer);
        size_t height = CVPixelBufferGetHeight(imageBuffer);

        // These bitmap flags assume the capture output delivers
        // kCVPixelFormatType_32BGRA pixel buffers.
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace,
                                                        kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
        CGImageRef newImage = CGBitmapContextCreateImage(newContext);
        UIImage *image = [[UIImage alloc] initWithCGImage:newImage scale:1 orientation:UIImageOrientationUp];

        CGImageRelease(newImage);
        CGContextRelease(newContext);
        CGColorSpaceRelease(colorSpace);
        CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

        if (image) {
            // Compress the frame to JPEG and send it to every connected peer.
            NSData *data = UIImageJPEGRepresentation(image, 0.7);
            NSError *err = nil;
            [((ViewController *)self.parentViewController).session sendData:data
                                                                    toPeers:((ViewController *)self.parentViewController).session.connectedPeers
                                                                   withMode:MCSessionSendDataReliable
                                                                      error:&err];
        }
    }
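For the bitmap context above to produce correct colors, the video output has to vend BGRA pixel buffers. The answer doesn't show the capture setup, but a minimal sketch might look like this (the queue label and local variable names are my assumptions, not part of the original):

    // Hypothetical capture setup; the key point is forcing 32BGRA output,
    // which matches kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst.
    AVCaptureSession *captureSession = [[AVCaptureSession alloc] init];
    AVCaptureDevice *camera = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    NSError *error = nil;
    AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:camera error:&error];
    if (input) {
        [captureSession addInput:input];
    }

    AVCaptureVideoDataOutput *output = [[AVCaptureVideoDataOutput alloc] init];
    output.videoSettings = @{ (id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA) };
    // Deliver sample buffers on a dedicated serial queue, not the main queue.
    [output setSampleBufferDelegate:self
                              queue:dispatch_queue_create("camera.frames", DISPATCH_QUEUE_SERIAL)];
    [captureSession addOutput:output];
    [captureSession startRunning];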
On an iOS device receiving image data:
    typedef struct {
        size_t length;
        void *data;
    } ImageCacheDataStruct;

    // The address of this variable serves as the queue-specific key.
    static const char kMyKey;

    - (void)session:(nonnull MCSession *)session didReceiveData:(nonnull NSData *)data fromPeer:(nonnull MCPeerID *)peerID
    {
        dispatch_async(self.imageCacheDataQueue, ^{
            dispatch_semaphore_wait(self.semaphore, DISPATCH_TIME_FOREVER);

            // Copy the incoming bytes; the NSData object may be released
            // before the display queue gets around to reading them.
            ImageCacheDataStruct *imageCacheDataStruct = calloc(1, sizeof(ImageCacheDataStruct));
            imageCacheDataStruct->length = data.length;
            imageCacheDataStruct->data = malloc(data.length);
            memcpy(imageCacheDataStruct->data, data.bytes, data.length);

            // Stash the frame on the display queue as queue-specific context.
            dispatch_queue_set_specific(self.imageDisplayQueue, &kMyKey, imageCacheDataStruct, NULL);

            dispatch_sync(self.imageDisplayQueue, ^{
                ImageCacheDataStruct *cached = dispatch_queue_get_specific(self.imageDisplayQueue, &kMyKey);
                NSData *imageData = [NSData dataWithBytes:cached->data length:cached->length];
                free(cached->data);
                free(cached);

                UIImage *image = [UIImage imageWithData:imageData];
                if (image) {
                    dispatch_async(dispatch_get_main_queue(), ^{
                        [((ViewerViewController *)self.childViewControllers.lastObject).view.layer setContents:(__bridge id)image.CGImage];
                        // Only after the frame is handed to the layer do we
                        // let the next frame through.
                        dispatch_semaphore_signal(self.semaphore);
                    });
                } else {
                    // Don't deadlock the pipeline if a frame fails to decode.
                    dispatch_semaphore_signal(self.semaphore);
                }
            });
        });
    }
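The receiver relies on a few properties the answer never shows being created. A setup sketch under my assumptions (the queue labels are invented, and the initial semaphore count of 1 is my reading of the scheme, not stated in the original):

    // Hypothetical initialization, e.g. in viewDidLoad.
    self.imageCacheDataQueue = dispatch_queue_create("image.cache.data", DISPATCH_QUEUE_SERIAL);
    self.imageDisplayQueue = dispatch_queue_create("image.display", DISPATCH_QUEUE_SERIAL);
    // A count of 1 admits one frame at a time: the next frame waits until
    // the previous one has been handed to the main queue for display.
    self.semaphore = dispatch_semaphore_create(1);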
The reason for the semaphore and the separate GCD queues is simple: you want frames to be displayed at regular intervals. Without them, the video periodically appears to slow down and then speed back up as it catches up with the backlog. This scheme ensures each frame is displayed one after another at a steady pace, regardless of bottlenecks in network bandwidth.
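Stripped of the MultipeerConnectivity plumbing, the pacing pattern reduces to the following standalone sketch (the block name and target layer are illustrative, not from the answer above):

    // Distilled pacing pattern: the semaphore starts at 1, so frame N+1 is
    // not decoded until frame N has been committed on the main queue.
    dispatch_semaphore_t pacing = dispatch_semaphore_create(1);
    dispatch_queue_t frameQueue = dispatch_queue_create("com.example.frames", DISPATCH_QUEUE_SERIAL);
    CALayer *targetLayer = [CALayer layer]; // stands in for the real view hierarchy

    void (^displayFrame)(NSData *) = ^(NSData *frameData) {
        dispatch_async(frameQueue, ^{
            // Block here until the previous frame has actually been drawn.
            dispatch_semaphore_wait(pacing, DISPATCH_TIME_FOREVER);
            UIImage *frame = [UIImage imageWithData:frameData];
            dispatch_async(dispatch_get_main_queue(), ^{
                targetLayer.contents = (__bridge id)frame.CGImage;
                dispatch_semaphore_signal(pacing); // release the next frame
            });
        });
    };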