Convert image to grayscale

I am trying to convert the image to grayscale as follows:

    #define bytesPerPixel 4
    #define bitsPerComponent 8

    - (unsigned char *)getBytesForImage:(UIImage *)pImage
    {
        CGImageRef image = [pImage CGImage];
        NSUInteger width = CGImageGetWidth(image);
        NSUInteger height = CGImageGetHeight(image);
        NSUInteger bytesPerRow = bytesPerPixel * width;
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        unsigned char *rawData = malloc(height * width * 4);
        CGContextRef context = CGBitmapContextCreate(rawData, width, height,
            bitsPerComponent, bytesPerRow, colorSpace,
            kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
        CGColorSpaceRelease(colorSpace);
        CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
        CGContextRelease(context);
        return rawData;
    }

    - (UIImage *)processImage:(UIImage *)pImage
    {
        DebugLog(@"processing image");
        unsigned char *rawData = [self getBytesForImage:pImage];
        NSUInteger width = pImage.size.width;
        NSUInteger height = pImage.size.height;
        DebugLog(@"width: %d", width);
        DebugLog(@"height: %d", height);
        NSUInteger bytesPerRow = bytesPerPixel * width;

        for (int xCoordinate = 0; xCoordinate < width; xCoordinate++) {
            for (int yCoordinate = 0; yCoordinate < height; yCoordinate++) {
                int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;

                // Getting original colors
                float red   = rawData[byteIndex]     / 255.f;
                float green = rawData[byteIndex + 1] / 255.f;
                float blue  = rawData[byteIndex + 2] / 255.f;

                // Processing pixel data
                float averageColor = (red + green + blue) / 3.0f;
                red = averageColor;
                green = averageColor;
                blue = averageColor;

                // Assigning new color components
                rawData[byteIndex]     = (unsigned char) red * 255;
                rawData[byteIndex + 1] = (unsigned char) green * 255;
                rawData[byteIndex + 2] = (unsigned char) blue * 255;
            }
        }

        NSData *newPixelData = [NSData dataWithBytes:rawData length:height * width * 4];
        UIImage *newImage = [UIImage imageWithData:newPixelData];
        free(rawData);
        DebugLog(@"image processed");
        return newImage;
    }

So when I want to convert the image, I just call processImage:

    imageToDisplay.image = [self processImage:image];

But imageToDisplay is not displayed. What could be the problem?

Thanks.
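
The likely cause, for anyone hitting the same symptom: +[UIImage imageWithData:] expects encoded image data such as PNG or JPEG, not raw pixel bytes, so it returns nil here. There is also a precedence bug in the write-back: (unsigned char) red * 255 casts red, a value between 0 and 1, to an integer before multiplying, so every channel comes out as 0 or 255. A minimal sketch of one fix, creating the image straight from a bitmap context over the processed buffer (reusing the variable names from the code above):

    // Sketch: wrap the processed pixels in a bitmap context and create
    // the CGImage from it, instead of passing raw bytes to imageWithData:.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(rawData, width, height, 8, bytesPerRow,
        colorSpace, kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big);
    CGImageRef cgImage = CGBitmapContextCreateImage(context);
    UIImage *newImage = [UIImage imageWithCGImage:cgImage];
    CGImageRelease(cgImage);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);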

+49
iphone cocoa-touch cgimage
Aug 19 '09 at 9:55
12 answers

What exactly happens when using this function? Is the function returning an invalid image or is it displayed incorrectly?

This is the method that I use to convert to grayscale.

    - (UIImage *)convertToGreyscale:(UIImage *)i
    {
        int kRed = 1;
        int kGreen = 2;
        int kBlue = 4;
        int colors = kGreen | kBlue | kRed;

        int m_width = i.size.width;
        int m_height = i.size.height;

        uint32_t *rgbImage = (uint32_t *)malloc(m_width * m_height * sizeof(uint32_t));
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
        CGContextRef context = CGBitmapContextCreate(rgbImage, m_width, m_height, 8,
            m_width * 4, colorSpace,
            kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
        CGContextSetInterpolationQuality(context, kCGInterpolationHigh);
        CGContextSetShouldAntialias(context, NO);
        CGContextDrawImage(context, CGRectMake(0, 0, m_width, m_height), [i CGImage]);
        CGContextRelease(context);
        CGColorSpaceRelease(colorSpace);

        // now convert to grayscale
        uint8_t *m_imageData = (uint8_t *)malloc(m_width * m_height);
        for (int y = 0; y < m_height; y++) {
            for (int x = 0; x < m_width; x++) {
                uint32_t rgbPixel = rgbImage[y * m_width + x];
                uint32_t sum = 0, count = 0;
                if (colors & kRed)   { sum += (rgbPixel >> 24) & 255; count++; }
                if (colors & kGreen) { sum += (rgbPixel >> 16) & 255; count++; }
                if (colors & kBlue)  { sum += (rgbPixel >> 8)  & 255; count++; }
                m_imageData[y * m_width + x] = sum / count;
            }
        }
        free(rgbImage);

        // convert from a grayscale image back into a UIImage
        uint8_t *result = (uint8_t *)calloc(m_width * m_height * sizeof(uint32_t), 1);

        // process the image back to rgb
        for (int i = 0; i < m_height * m_width; i++) {
            result[i * 4] = 0;
            int val = m_imageData[i];
            result[i * 4 + 1] = val;
            result[i * 4 + 2] = val;
            result[i * 4 + 3] = val;
        }

        // create a UIImage
        colorSpace = CGColorSpaceCreateDeviceRGB();
        context = CGBitmapContextCreate(result, m_width, m_height, 8,
            m_width * sizeof(uint32_t), colorSpace,
            kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipLast);
        CGImageRef image = CGBitmapContextCreateImage(context);
        CGContextRelease(context);
        CGColorSpaceRelease(colorSpace);

        UIImage *resultUIImage = [UIImage imageWithCGImage:image];
        CGImageRelease(image);
        free(m_imageData);

        // make sure the result buffer will be released by handing it to an
        // autoreleased NSData (the buffer holds 4 bytes per pixel)
        [NSData dataWithBytesNoCopy:result length:m_width * m_height * sizeof(uint32_t)];

        return resultUIImage;
    }
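
A quick usage sketch, plus a note on the kRed/kGreen/kBlue mask: changing the colors variable inside the method lets you average only a subset of channels. The sourceImage name below is illustrative:

    // Convert using all three channels, as the method is written:
    UIImage *gray = [self convertToGreyscale:sourceImage];
    imageToDisplay.image = gray;

    // To gray using a single channel (e.g. for channel-isolation tests),
    // change the mask inside the method: int colors = kGreen;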
+30
Aug 19 '09 at 15:11

I needed a version that preserves the alpha channel, so I modified the code posted by Dutchie432:

    @implementation UIImage (grayscale)

    typedef enum {
        ALPHA = 0,
        BLUE = 1,
        GREEN = 2,
        RED = 3
    } PIXELS;

    - (UIImage *)convertToGrayscale
    {
        CGSize size = [self size];
        int width = size.width;
        int height = size.height;

        // the pixels will be painted to this array
        uint32_t *pixels = (uint32_t *)malloc(width * height * sizeof(uint32_t));

        // clear the pixels so any transparency is preserved
        memset(pixels, 0, width * height * sizeof(uint32_t));

        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

        // create a context with RGBA pixels
        CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8,
            width * sizeof(uint32_t), colorSpace,
            kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);

        // paint the bitmap to our context which will fill in the pixels array
        CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);

        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                uint8_t *rgbaPixel = (uint8_t *)&pixels[y * width + x];

                // convert to grayscale using recommended method:
                // http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
                uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE];

                // set the pixels to gray
                rgbaPixel[RED] = gray;
                rgbaPixel[GREEN] = gray;
                rgbaPixel[BLUE] = gray;
            }
        }

        // create a new CGImageRef from our context with the modified pixels
        CGImageRef image = CGBitmapContextCreateImage(context);

        // we're done with the context, color space, and pixels
        CGContextRelease(context);
        CGColorSpaceRelease(colorSpace);
        free(pixels);

        // make a new UIImage to return
        UIImage *resultUIImage = [UIImage imageWithCGImage:image];

        // we're done with image now too
        CGImageRelease(image);

        return resultUIImage;
    }

    @end
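
Since this is a category on UIImage, usage is a one-liner once the category is visible; the header name below is hypothetical, not from the original answer:

    #import "UIImage+Grayscale.h" // hypothetical header holding the category above

    UIImage *grayAvatar = [avatarImage convertToGrayscale];
    imageView.image = grayAvatar; // transparent areas stay transparent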
+50
08 Oct '09 at 23:14

Here is code using only UIKit and the luminosity blend mode. A bit of a hack, but it works well.

    // Transform the image to grayscale.
    - (UIImage *)grayishImage:(UIImage *)inputImage
    {
        // Create a graphics context.
        UIGraphicsBeginImageContextWithOptions(inputImage.size, YES, 1.0);
        CGRect imageRect = CGRectMake(0, 0, inputImage.size.width, inputImage.size.height);

        // Draw the image with the luminosity blend mode.
        // On top of a white background, this will give a black and white image.
        [inputImage drawInRect:imageRect blendMode:kCGBlendModeLuminosity alpha:1.0];

        // Get the resulting image.
        UIImage *filteredImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();

        return filteredImage;
    }

To maintain transparency, perhaps you can simply set the opaque parameter of UIGraphicsBeginImageContextWithOptions to NO. That needs checking.
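
A hedged sketch of that untested variant. Drawing only with luminosity over a transparent background tends not to be enough on its own, so this also re-applies the source alpha with kCGBlendModeDestinationIn, following the updated answer further down:

    // Sketch only: same approach, but with an alpha-preserving context.
    - (UIImage *)grayishTransparentImage:(UIImage *)inputImage
    {
        // NO keeps an alpha channel in the context.
        UIGraphicsBeginImageContextWithOptions(inputImage.size, NO, 1.0);
        CGRect imageRect = CGRectMake(0, 0, inputImage.size.width, inputImage.size.height);

        [inputImage drawInRect:imageRect blendMode:kCGBlendModeLuminosity alpha:1.0];
        // Restore the original transparency on top of the luminosity pass.
        [inputImage drawInRect:imageRect blendMode:kCGBlendModeDestinationIn alpha:1.0];

        UIImage *filteredImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
        return filteredImage;
    }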

+44
Jun 05 '11 at 19:29

Based on Cam's code, with the ability to work with the scale factor for Retina displays.

    - (UIImage *)toGrayscale
    {
        const int RED = 1;
        const int GREEN = 2;
        const int BLUE = 3;

        // Create image rectangle with current image width/height, in pixels
        CGRect imageRect = CGRectMake(0, 0, self.size.width * self.scale, self.size.height * self.scale);

        int width = imageRect.size.width;
        int height = imageRect.size.height;

        // the pixels will be painted to this array
        uint32_t *pixels = (uint32_t *)malloc(width * height * sizeof(uint32_t));

        // clear the pixels so any transparency is preserved
        memset(pixels, 0, width * height * sizeof(uint32_t));

        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

        // create a context with RGBA pixels
        CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8,
            width * sizeof(uint32_t), colorSpace,
            kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);

        // paint the bitmap to our context which will fill in the pixels array
        CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);

        for (int y = 0; y < height; y++) {
            for (int x = 0; x < width; x++) {
                uint8_t *rgbaPixel = (uint8_t *)&pixels[y * width + x];

                // convert to grayscale using recommended method:
                // http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
                uint8_t gray = (uint8_t)((30 * rgbaPixel[RED] + 59 * rgbaPixel[GREEN] + 11 * rgbaPixel[BLUE]) / 100);

                // set the pixels to gray
                rgbaPixel[RED] = gray;
                rgbaPixel[GREEN] = gray;
                rgbaPixel[BLUE] = gray;
            }
        }

        // create a new CGImageRef from our context with the modified pixels
        CGImageRef image = CGBitmapContextCreateImage(context);

        // we're done with the context, color space, and pixels
        CGContextRelease(context);
        CGColorSpaceRelease(colorSpace);
        free(pixels);

        // make a new UIImage to return, preserving the original scale
        UIImage *resultUIImage = [UIImage imageWithCGImage:image
                                                     scale:self.scale
                                               orientation:UIImageOrientationUp];

        // we're done with image now too
        CGImageRelease(image);

        return resultUIImage;
    }
+35
Mar 11 '11 at 18:06

I liked Mathieu Godart's answer, but it did not seem to work properly for Retina or alpha images. Here is an updated version that works for both for me:

    - (UIImage *)convertToGrayscale
    {
        UIGraphicsBeginImageContextWithOptions(self.size, NO, self.scale);
        CGRect imageRect = CGRectMake(0.0f, 0.0f, self.size.width, self.size.height);
        CGContextRef ctx = UIGraphicsGetCurrentContext();

        // Draw a white background
        CGContextSetRGBFillColor(ctx, 1.0f, 1.0f, 1.0f, 1.0f);
        CGContextFillRect(ctx, imageRect);

        // Draw the luminosity on top of the white background to get grayscale
        [self drawInRect:imageRect blendMode:kCGBlendModeLuminosity alpha:1.0f];

        // Apply the source image's alpha
        [self drawInRect:imageRect blendMode:kCGBlendModeDestinationIn alpha:1.0f];

        UIImage *grayscaleImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();

        return grayscaleImage;
    }
+30
Jan 15 '13 at 21:58

A different approach, using CIFilter. It preserves the alpha channel and works with a transparent background:

    + (UIImage *)convertImageToGrayScale:(UIImage *)image
    {
        CIImage *inputImage = [CIImage imageWithCGImage:image.CGImage];
        CIContext *context = [CIContext contextWithOptions:nil];

        CIFilter *filter = [CIFilter filterWithName:@"CIColorControls"];
        [filter setValue:inputImage forKey:kCIInputImageKey];
        [filter setValue:@(0.0) forKey:kCIInputSaturationKey];

        CIImage *outputImage = filter.outputImage;
        CGImageRef cgImageRef = [context createCGImage:outputImage fromRect:outputImage.extent];

        UIImage *result = [UIImage imageWithCGImage:cgImageRef];
        CGImageRelease(cgImageRef);

        return result;
    }
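
One caveat: creating a CIContext is relatively expensive, so if this method runs in a loop or during scrolling it can be worth reusing a single context across calls. A minimal sketch of the same method with a cached context; the shared-context pattern here is my addition, not part of the original answer:

    + (UIImage *)convertImageToGrayScale:(UIImage *)image
    {
        // Reuse one CIContext across calls; creating a context per call is costly.
        static CIContext *sharedContext = nil;
        static dispatch_once_t onceToken;
        dispatch_once(&onceToken, ^{
            sharedContext = [CIContext contextWithOptions:nil];
        });

        CIImage *inputImage = [CIImage imageWithCGImage:image.CGImage];
        CIFilter *filter = [CIFilter filterWithName:@"CIColorControls"];
        [filter setValue:inputImage forKey:kCIInputImageKey];
        [filter setValue:@(0.0) forKey:kCIInputSaturationKey];

        CIImage *outputImage = filter.outputImage;
        CGImageRef cgImageRef = [sharedContext createCGImage:outputImage fromRect:outputImage.extent];
        UIImage *result = [UIImage imageWithCGImage:cgImageRef];
        CGImageRelease(cgImageRef);
        return result;
    }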
+13
Jul 04 '15 at 18:04

A quick Swift extension on UIImage, preserving alpha:

    extension UIImage {
        private func convertToGrayScaleNoAlpha() -> CGImageRef {
            let colorSpace = CGColorSpaceCreateDeviceGray()
            let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.None.rawValue)
            let context = CGBitmapContextCreate(nil, UInt(size.width), UInt(size.height), 8, 0, colorSpace, bitmapInfo)
            CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.CGImage)
            return CGBitmapContextCreateImage(context)
        }

        /** Return a new image in shades of gray + alpha */
        func convertToGrayScale() -> UIImage {
            let bitmapInfo = CGBitmapInfo(CGImageAlphaInfo.Only.rawValue)
            let context = CGBitmapContextCreate(nil, UInt(size.width), UInt(size.height), 8, 0, nil, bitmapInfo)
            CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), self.CGImage)
            let mask = CGBitmapContextCreateImage(context)
            return UIImage(CGImage: CGImageCreateWithMask(convertToGrayScaleNoAlpha(), mask), scale: scale, orientation: imageOrientation)!
        }
    }
+11
Jan 15 '15 at 9:22

Here is another good solution as a category method on UIImage. It is based on this blog post and its comments. But I fixed the memory issue here:

    - (UIImage *)grayScaleImage
    {
        // Create image rectangle with current image width/height
        CGRect imageRect = CGRectMake(0, 0, self.size.width * self.scale, self.size.height * self.scale);

        // Grayscale color space
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();

        // Create bitmap context with current image size and grayscale colorspace
        CGContextRef context = CGBitmapContextCreate(nil, self.size.width * self.scale, self.size.height * self.scale, 8, 0, colorSpace, kCGImageAlphaNone);

        // Draw image into current context, with specified rectangle
        // using previously defined context (with grayscale colorspace)
        CGContextDrawImage(context, imageRect, [self CGImage]);

        // Create bitmap image info from pixel data in current context
        CGImageRef grayImage = CGBitmapContextCreateImage(context);

        // release the colorspace and graphics context
        CGColorSpaceRelease(colorSpace);
        CGContextRelease(context);

        // make a new alpha-only graphics context
        context = CGBitmapContextCreate(nil, self.size.width * self.scale, self.size.height * self.scale, 8, 0, nil, kCGImageAlphaOnly);

        // draw image into context with no colorspace
        CGContextDrawImage(context, imageRect, [self CGImage]);

        // create alpha bitmap mask from current context
        CGImageRef mask = CGBitmapContextCreateImage(context);

        // release graphics context
        CGContextRelease(context);

        // make UIImage from grayscale image with alpha mask
        CGImageRef cgImage = CGImageCreateWithMask(grayImage, mask);
        UIImage *grayScaleImage = [UIImage imageWithCGImage:cgImage scale:self.scale orientation:self.imageOrientation];

        // release the CG images
        CGImageRelease(cgImage);
        CGImageRelease(grayImage);
        CGImageRelease(mask);

        // return the new grayscale image
        return grayScaleImage;
    }
+9
Aug 14 '12 at 23:11

A fast and efficient Swift 3 implementation for iOS 9/10. I tried every image filtering method I could find for processing 100 images at a time (applied as an AlamofireImage ImageFilter on download), and this one came out far better than any other I tried for my use case, in terms of both memory and speed.

    func convertToGrayscale() -> UIImage? {
        UIGraphicsBeginImageContextWithOptions(self.size, false, self.scale)
        let imageRect = CGRect(x: 0.0, y: 0.0, width: self.size.width, height: self.size.height)
        let context = UIGraphicsGetCurrentContext()

        // Draw a white background
        context!.setFillColor(red: 1.0, green: 1.0, blue: 1.0, alpha: 1.0)
        context!.fill(imageRect)

        // Optional: increase contrast with colorDodge before applying luminosity
        // (my images were too dark with luminosity alone; you may not need this)
        self.draw(in: imageRect, blendMode: CGBlendMode.colorDodge, alpha: 0.7)

        // Draw the luminosity on top of the white background to get a grayscale of the original image
        self.draw(in: imageRect, blendMode: CGBlendMode.luminosity, alpha: 0.90)

        // Optional: re-apply alpha if your image has transparency, based on user1978534's answer
        // (I haven't tested this, as my images had no transparency; I just know this is the syntax)
        // self.draw(in: imageRect, blendMode: CGBlendMode.destinationIn, alpha: 1.0)

        let grayscaleImage = UIGraphicsGetImageFromCurrentImageContext()
        UIGraphicsEndImageContext()

        return grayscaleImage
    }


Re: using colorDodge: I initially had problems getting my images light enough to match the grayscale produced by CIFilter("CIPhotoEffectTonal"); my results were too dark. I was able to get a decent match by applying CGBlendMode.colorDodge at ~0.7 alpha, which seems to increase the overall contrast.

Other blend modes may work as well, but you will still want luminosity applied on top, since that is the blend mode that produces the grayscale effect. I found this page very useful as a reference for the different blend modes.



On the performance gains: I needed to process 100 thumbnails downloaded from a server (using AlamofireImage for async loading, caching, and applying the filter). I started crashing when the total size of my images exceeded the cache size, so I experimented with other methods.

The CoreImage-based CIFilter approach was the first I tried, and it was not memory-efficient enough for the number of images being processed.

I also tried applying the CIFilter on the GPU using EAGLContext(api: .openGLES3), which was actually even more memory-intensive; in fact, I got memory warnings at 450+ MB of usage while loading 200+ images.

I also tried processing a bitmap directly, i.e. CGContext(data: nil, width: width, height: height, bitsPerComponent: 8, bytesPerRow: 0, space: colorSpace, bitmapInfo: CGImageAlphaInfo.none.rawValue). That worked OK, except that I couldn't get a high enough resolution for a modern Retina device: images were very grainy even when I added context.scaleBy(x: scaleFactor, y: scaleFactor).

So of everything I tried, this method (drawing through a UIGraphics image context) was vastly more efficient in both speed and memory when applied as a filter for AlamofireImage. I stay under 200 MB while processing my 200+ images, and they load essentially instantly rather than in the 35 seconds the OpenGL/EAGL approach required. I know these are not very scientific benchmarks; I will profile it properly if anyone is very curious :)


And finally, if you need to pass this grayscale filter to AlamofireImage, here is how to do it (note that you must import AlamofireImage into your class to use ImageFilter):

    public struct GrayScaleFilter: ImageFilter {
        public init() {}

        public var filter: (UIImage) -> UIImage {
            return { image in
                return image.convertToGrayscale() ?? image
            }
        }
    }

To use it, create the filter and pass it to af_setImage as follows:

    let filter = GrayScaleFilter()
    imageView.af_setImage(withURL: url, filter: filter)
+5
Jan 14 '17 at 2:35
    @interface UIImageView (Settings)

    - (void)convertImageToGrayScale;

    @end

    @implementation UIImageView (Settings)

    - (void)convertImageToGrayScale
    {
        // Create image rectangle with current image width/height
        CGRect imageRect = CGRectMake(0, 0, self.image.size.width, self.image.size.height);

        // Grayscale color space
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();

        // Create bitmap context with current image size and grayscale colorspace
        CGContextRef context = CGBitmapContextCreate(nil, self.image.size.width, self.image.size.height, 8, 0, colorSpace, kCGImageAlphaNone);

        // Draw image into current context, with specified rectangle
        // using previously defined context (with grayscale colorspace)
        CGContextDrawImage(context, imageRect, [self.image CGImage]);

        // Create bitmap image info from pixel data in current context
        CGImageRef imageRef = CGBitmapContextCreateImage(context);

        // Create a new UIImage object
        UIImage *newImage = [UIImage imageWithCGImage:imageRef];

        // Release colorspace, context and bitmap information
        CGColorSpaceRelease(colorSpace);
        CGContextRelease(context);
        CFRelease(imageRef);

        // Replace the displayed image with the new grayscale image
        self.image = newImage;
    }

    @end
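
Usage mutates the image view in place. Two caveats worth hedging: the kCGImageAlphaNone context discards any transparency, and imageWithCGImage: without a scale parameter drops the original scale, so @2x Retina images may come back at reduced resolution unless you pass self.image.scale along as the earlier answers do.

    // In-place usage: replaces the view's image with a grayscale copy.
    [self.imageView convertImageToGrayScale];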
+3
Jan 09 '17 at 11:25

I have one more answer. It is very efficient and handles Retina graphics as well as transparency. It extends Sargis Gevorgyan's approach:

    + (UIImage *)grayScaleFromImage:(UIImage *)image opaque:(BOOL)opaque
    {
        // NSTimeInterval start = [NSDate timeIntervalSinceReferenceDate];

        CGSize size = image.size;
        CGRect bounds = CGRectMake(0, 0, size.width, size.height);

        // Create bitmap context with current image size and a grayscale colorspace
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
        size_t bitsPerComponent = 8;
        size_t bytesPerPixel = opaque ? 1 : 2;
        size_t bytesPerRow = bytesPerPixel * size.width * image.scale;
        CGContextRef context = CGBitmapContextCreate(nil, size.width, size.height,
            bitsPerComponent, bytesPerRow, colorSpace,
            opaque ? kCGImageAlphaNone : kCGImageAlphaPremultipliedLast);

        // create image from bitmap
        CGContextDrawImage(context, bounds, image.CGImage);
        CGImageRef cgImage = CGBitmapContextCreateImage(context);
        UIImage *result = [[UIImage alloc] initWithCGImage:cgImage
                                                     scale:image.scale
                                               orientation:UIImageOrientationUp];
        CGImageRelease(cgImage);
        CGContextRelease(context);
        // release the colorspace as well
        CGColorSpaceRelease(colorSpace);

        // performance results on iPhone 6S+ in Release mode.
        // Results are in photo pixels, not device pixels:
        // ~  5ms for  500px x 600px
        // ~ 15ms for 2200px x 600px
        // NSLog(@"generating %d x %d @ %dx grayscale took %f seconds", (int)size.width, (int)size.height, (int)image.scale, [NSDate timeIntervalSinceReferenceDate] - start);

        return result;
    }

Using blend modes is elegant, but copying into a grayscale bitmap is faster, since it touches only one or two color channels instead of four. The opaque BOOL is meant to take your UIView's opaque flag, so you can skip the alpha channel entirely if you know you will not need it.

I have not tried the Core Image-based solutions in this answer thread, but I would be very careful about using Core Image where performance matters.

+1
Jun 23 '17 at 17:20

This is my attempt at a fast conversion, drawing directly into a grayscale color space without enumerating every pixel. It runs 10 times faster than the CIFilter-based solutions.

    @implementation UIImage (Grayscale)

    static UIImage *grayscaleImageFromCIImage(CIImage *image, CGFloat scale)
    {
        CIImage *blackAndWhite = [CIFilter filterWithName:@"CIColorControls" keysAndValues:
            kCIInputImageKey, image,
            kCIInputBrightnessKey, @0.0,
            kCIInputContrastKey, @1.1,
            kCIInputSaturationKey, @0.0,
            nil].outputImage;
        CIImage *output = [CIFilter filterWithName:@"CIExposureAdjust" keysAndValues:
            kCIInputImageKey, blackAndWhite,
            kCIInputEVKey, @0.7,
            nil].outputImage;
        CGImageRef ref = [[CIContext contextWithOptions:nil] createCGImage:output fromRect:output.extent];
        UIImage *result = [UIImage imageWithCGImage:ref scale:scale orientation:UIImageOrientationUp];
        CGImageRelease(ref);
        return result;
    }

    static UIImage *grayscaleImageFromCGImage(CGImageRef imageRef, CGFloat scale)
    {
        NSInteger width = CGImageGetWidth(imageRef) * scale;
        NSInteger height = CGImageGetHeight(imageRef) * scale;

        NSMutableData *pixels = [NSMutableData dataWithLength:width * height];
        CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceGray();
        CGContextRef context = CGBitmapContextCreate(pixels.mutableBytes, width, height, 8, width, colorSpace, 0);

        CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
        CGImageRef ref = CGBitmapContextCreateImage(context);
        UIImage *result = [UIImage imageWithCGImage:ref scale:scale orientation:UIImageOrientationUp];

        CGContextRelease(context);
        CGColorSpaceRelease(colorSpace);
        CGImageRelease(ref);

        return result;
    }

    - (UIImage *)grayscaleImage
    {
        if (self.CIImage) {
            return grayscaleImageFromCIImage(self.CIImage, self.scale);
        } else if (self.CGImage) {
            return grayscaleImageFromCGImage(self.CGImage, self.scale);
        }
        return nil;
    }

    @end
0
Apr 15 '16 at 6:15


