UIColor* color = nil;
CGImageRef inImage = self.image.CGImage;
// Create an off-screen bitmap context to draw the image into. Format is ARGB:
// 4 bytes per pixel — Alpha, Red, Green, Blue.
CGContextRef cgctx = [self createARGBBitmapContextFromImage:inImage];
if (cgctx == NULL) { return nil; /* error creating context */ }

size_t w = CGImageGetWidth(inImage);
size_t h = CGImageGetHeight(inImage);
CGRect rect = {{0, 0}, {w, h}};

// Draw the image into the bitmap context. Once drawn, the memory allocated
// for the context contains the raw image data in the ARGB format requested.
CGContextDrawImage(cgctx, rect, inImage);

// Get a pointer to the image data associated with the bitmap context.
unsigned char* data = CGBitmapContextGetData(cgctx);
if (data != NULL) {
    size_t x = (size_t)round(point.x);
    size_t y = (size_t)round(point.y);
    // Bounds check: an out-of-range point would read past the buffer.
    // (The original wrapped this in @try/@catch, but Objective-C exceptions
    // cannot catch a C out-of-bounds read — an explicit guard is required.)
    if (x < w && y < h) {
        // offset locates the pixel in the data from (x, y):
        // 4 bytes per pixel, w pixels per row.
        size_t offset = 4 * ((w * y) + x);
        int alpha = data[offset];
        int red   = data[offset + 1];
        int green = data[offset + 2];
        int blue  = data[offset + 3];
        NSLog(@"offset: %lu colors: RGB A %i %i %i %i",
              (unsigned long)offset, red, green, blue, alpha);
        color = [UIColor colorWithRed:(red / 255.0f)
                                green:(green / 255.0f)
                                 blue:(blue / 255.0f)
                                alpha:(alpha / 255.0f)];
    }
}

// When finished, release the context and free the image data buffer that
// createARGBBitmapContextFromImage: malloc'd for it.
CGContextRelease(cgctx);
if (data) { free(data); }
return color;
}
CGContextRef    context = NULL;
CGColorSpaceRef colorSpace;
void *          bitmapData;
// size_t rather than int: width*height*4 overflows int for large images.
size_t          bitmapByteCount;
size_t          bitmapBytesPerRow;

// Get image width and height. We'll use the entire image.
size_t pixelsWide = CGImageGetWidth(inImage);
size_t pixelsHigh = CGImageGetHeight(inImage);

// Each pixel in the bitmap is represented by 4 bytes:
// 8 bits each of alpha, red, green, and blue.
bitmapBytesPerRow = (pixelsWide * 4);
bitmapByteCount   = (bitmapBytesPerRow * pixelsHigh);

// Use the device RGB color space.
colorSpace = CGColorSpaceCreateDeviceRGB();
if (colorSpace == NULL)
{
    fprintf(stderr, "Error allocating color space\n");
    return NULL;
}

// Allocate memory for the image data. This is the destination in memory
// where any drawing into the bitmap context will be rendered.
bitmapData = malloc(bitmapByteCount);
if (bitmapData == NULL)
{
    fprintf(stderr, "Memory not allocated!");
    CGColorSpaceRelease(colorSpace);
    return NULL;
}

// Create the bitmap context. We want pre-multiplied ARGB, 8 bits per
// component. Regardless of the source image format (CMYK, grayscale,
// and so on), CGBitmapContextCreate converts it to the format specified here.
context = CGBitmapContextCreate(bitmapData,
                                pixelsWide,
                                pixelsHigh,
                                8,              // bits per component
                                bitmapBytesPerRow,
                                colorSpace,
                                kCGImageAlphaPremultipliedFirst);
if (context == NULL)
{
    free(bitmapData);
    fprintf(stderr, "Context not created!");
}

// Make sure to release the colorspace before returning; the context
// (if created) holds its own reference.
CGColorSpaceRelease(colorSpace);
return context;
}
/// Returns the rendered color of this view at `point` by drawing the layer
/// into a 1x1 RGBA bitmap and reading back the single pixel.
- (UIColor *) colorOfPoint:(CGPoint)point
{
    unsigned char pixel[4] = {0};
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    // 1x1 bitmap, 8 bits per component, 4 bytes per row, premultiplied RGBA.
    CGContextRef context = CGBitmapContextCreate(pixel, 1, 1, 8, 4, colorSpace,
                                                 kCGImageAlphaPremultipliedLast);
    CGColorSpaceRelease(colorSpace);
    if (context == NULL) {
        return nil; // bitmap context could not be created
    }
    // Shift the layer so the requested point lands on the single pixel.
    CGContextTranslateCTM(context, -point.x, -point.y);
    [self.layer renderInContext:context];
    CGContextRelease(context);
    //NSLog(@"pixel: %d %d %d %d", pixel[0], pixel[1], pixel[2], pixel[3]);
    // The buffer is alpha-premultiplied, so for alpha < 1 the stored RGB
    // components are scaled by alpha; un-premultiply to recover the true color.
    CGFloat alpha = pixel[3] / 255.0;
    CGFloat red   = pixel[0] / 255.0;
    CGFloat green = pixel[1] / 255.0;
    CGFloat blue  = pixel[2] / 255.0;
    if (alpha > 0) {
        red /= alpha;
        green /= alpha;
        blue /= alpha;
    }
    return [UIColor colorWithRed:red green:green blue:blue alpha:alpha];
} //图片压缩
iOS 自带提供了一个图片压缩 API，如下：
- NSData *UIImageJPEGRepresentation(UIImage *image, CGFloat compressionQuality);
在 iPhone 上有两种读取图片数据的简单方法: UIImageJPEGRepresentation和UIImagePNGRepresentation. UIImageJPEGRepresentation函数需要两个参数:图片的引用和压缩系数.而UIImagePNGRepresentation只需要图片引用作为参数.通过在实际使用过程中比较发现: UIImagePNGRepresentation(UIImage* image) 要比UIImageJPEGRepresentation(UIImage* image, 1.0) 返回的图片数据量大很多.譬如,同样是读取摄像头拍摄的同样景色的照片, UIImagePNGRepresentation()返回的数据量大小为199KB,而 UIImageJPEGRepresentation(UIImage* image, 1.0)返回的数据量大小只为140KB,比前者少了50多KB.如果对图片的清晰度要求不高,还可以通过设置 UIImageJPEGRepresentation函数的第二个参数,大幅度降低图片数据量.譬如,刚才拍摄的图片, 通过调用UIImageJPEGRepresentation(UIImage* image, 1.0)读取数据时,返回的数据大小为140KB,但更改压缩系数后,通过调用UIImageJPEGRepresentation(UIImage* image, 0.5)读取数据时,返回的数据大小只有11KB多,大大压缩了图片的数据量,而且从视觉角度看,图片的质量并没有明显的降低.因此,在读取图片数据内容时,建议优先使用UIImageJPEGRepresentation,并可根据自己的实际使用场景,设置压缩系数,进一步降低图片数据量大小。
- UIImage *imageNew = [info objectForKey:@"UIImagePickerControllerOriginalImage"];
- imageNew = [self imageWithImage:imageNew scaledToSize:CGSizeMake(100, 100)];
- NSData *imageData = UIImageJPEGRepresentation(imageNew, 0.0001);
- m_selectImage = [UIImage imageWithData:imageData];
.h具体code
- #import <Foundation/Foundation.h>
- @interface UIImage (UIImageExt)
- - (UIImage *)scaleToSize:(UIImage *)img size:(CGSize)size;
- - (UIImage *)imageByScalingAndCroppingForSize:(CGSize)targetSize;
- @end
.m具体code
- #import "UIImageExt.h"
- @implementation UIImage (UIImageExt)
/// Draws `img` stretched into a new bitmap of the given size and returns the
/// resulting image. Aspect ratio is NOT preserved.
- (UIImage *)scaleToSize:(UIImage *)img size:(CGSize)size{
    // Push a bitmap context of the target size and make it current.
    UIGraphicsBeginImageContext(size);
    // Render the source image filling the whole target rect.
    [img drawInRect:CGRectMake(0, 0, size.width, size.height)];
    // Capture the resized image from the current context.
    UIImage *resized = UIGraphicsGetImageFromCurrentImageContext();
    // Pop the bitmap context off the stack.
    UIGraphicsEndImageContext();
    return resized;
}
/// Returns a copy of the receiver scaled (aspect-fill) and center-cropped to
/// `targetSize`. Logs and returns nil if the image could not be rendered.
- (UIImage*)imageByScalingAndCroppingForSize:(CGSize)targetSize
{
    UIImage *sourceImage = self;
    CGSize sourceSize = sourceImage.size;
    CGFloat targetWidth = targetSize.width;
    CGFloat targetHeight = targetSize.height;
    CGFloat drawWidth = targetWidth;
    CGFloat drawHeight = targetHeight;
    CGPoint drawOrigin = CGPointMake(0.0, 0.0);

    if (!CGSizeEqualToSize(sourceSize, targetSize)) {
        CGFloat widthFactor = targetWidth / sourceSize.width;
        CGFloat heightFactor = targetHeight / sourceSize.height;
        // Aspect-fill: use the larger factor so the scaled image completely
        // covers the target; the overflowing dimension is cropped below.
        CGFloat scaleFactor = (widthFactor > heightFactor) ? widthFactor
                                                           : heightFactor;
        drawWidth = sourceSize.width * scaleFactor;
        drawHeight = sourceSize.height * scaleFactor;
        // Center whichever dimension overflows the target rect.
        if (widthFactor > heightFactor) {
            drawOrigin.y = (targetHeight - drawHeight) * 0.5;
        } else if (widthFactor < heightFactor) {
            drawOrigin.x = (targetWidth - drawWidth) * 0.5;
        }
    }

    // Drawing outside the context bounds is discarded — this performs the crop.
    UIGraphicsBeginImageContext(targetSize);
    CGRect drawRect = CGRectZero;
    drawRect.origin = drawOrigin;
    drawRect.size.width = drawWidth;
    drawRect.size.height = drawHeight;
    [sourceImage drawInRect:drawRect];
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    if (newImage == nil) {
        NSLog(@"could not scale image");
    }
    // Pop the context to get back to the default.
    UIGraphicsEndImageContext();
    return newImage;
}
- @end