SDWebImage
SDWebImage relies on a number of utility categories for image handling, such as detecting the image format, scaling images up or down, handling GIFs, and force-decoding (decompressing) images. Let's look at these utility classes one by one.
1.NSData+ImageContentType
This category provides a single class method, sd_imageFormatForImageData. You pass in the image's NSData and it returns the detected image format.
/**
 Enum of the image formats that can be detected
 - SDImageFormatUndefined: unknown
 - SDImageFormatJPEG: JPEG (FF D8 FF ...)
 - SDImageFormatPNG: PNG (89 50 4E 47)
 - SDImageFormatGIF: GIF (47 49 46 38)
 - SDImageFormatTIFF: TIFF (49 49 2A 00 or 4D 4D 00 2A)
 - SDImageFormatWebP: WebP (e.g. 52 49 46 46 2A 73 01 00 57 45 42 50; 52 49 46 46 is ASCII "RIFF" and 57 45 42 50 is ASCII "WEBP". When the first byte is 0x52 but the data is shorter than 12 bytes, it cannot be an image, so SDImageFormatUndefined is returned.)
 */
typedef NS_ENUM(NSInteger, SDImageFormat) {
    SDImageFormatUndefined = -1,
    SDImageFormatJPEG = 0,
    SDImageFormatPNG,
    SDImageFormatGIF,
    SDImageFormatTIFF,
    SDImageFormatWebP
};

/**
 Detect the image format from the image's NSData

 @param data the image data
 @return the detected image format
 */
+ (SDImageFormat)sd_imageFormatForImageData:(nullable NSData *)data {
    if (!data) {
        return SDImageFormatUndefined;
    }
    uint8_t c;
    // Read the first byte of the image data
    [data getBytes:&c length:1];
    // Compare it against the known magic numbers
    switch (c) {
        case 0xFF:
            return SDImageFormatJPEG;
        case 0x89:
            return SDImageFormatPNG;
        case 0x47:
            return SDImageFormatGIF;
        case 0x49:
        case 0x4D:
            return SDImageFormatTIFF;
        case 0x52:
            // R as RIFF for WEBP
            if (data.length < 12) {
                return SDImageFormatUndefined;
            }
            NSString *testString = [[NSString alloc] initWithData:[data subdataWithRange:NSMakeRange(0, 12)] encoding:NSASCIIStringEncoding];
            if ([testString hasPrefix:@"RIFF"] && [testString hasSuffix:@"WEBP"]) {
                return SDImageFormatWebP;
            }
    }
    return SDImageFormatUndefined;
}
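To see how the category is used, here is a minimal sketch (the file path is only an example):

// Minimal usage sketch; the path is hypothetical.
NSData *data = [NSData dataWithContentsOfFile:@"/tmp/photo.webp"];
SDImageFormat format = [NSData sd_imageFormatForImageData:data];
if (format == SDImageFormatWebP) {
    NSLog(@"This is a WebP image");
} else if (format == SDImageFormatUndefined) {
    NSLog(@"Not a recognized image format");
}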
2.UIImage+MultiFormat
This category converts between NSData and UIImage in both directions, taking the image format into account. For example, a GIF UIImage is converted into GIF-formatted NSData.
/**
 Create a UIImage from image data
 @param data the image data
 @return the image object
 */
+ (nullable UIImage *)sd_imageWithData:(nullable NSData *)data {
    if (!data) {
        return nil;
    }
    UIImage *image;
    // Detect the format of the data: PNG, GIF, JPEG, WebP, ...
    SDImageFormat imageFormat = [NSData sd_imageFormatForImageData:data];
    if (imageFormat == SDImageFormatGIF) {
        // GIF handling: returns a UIImage built from the GIF's first frame only
        image = [UIImage sd_animatedGIFWithData:data];
    }
#ifdef SD_WEBP
    else if (imageFormat == SDImageFormatWebP)
    {
        image = [UIImage sd_imageWithWebPData:data];
    }
#endif
    else {
        image = [[UIImage alloc] initWithData:data];
#if SD_UIKIT || SD_WATCH
        // Read the orientation from the image metadata
        UIImageOrientation orientation = [self sd_imageOrientationFromImageData:data];
        // If the orientation is not "up", rebuild the image with the correct orientation
        if (orientation != UIImageOrientationUp) {
            image = [UIImage imageWithCGImage:image.CGImage
                                        scale:image.scale
                                  orientation:orientation];
        }
#endif
    }
    return image;
}

#if SD_UIKIT || SD_WATCH
/**
 Read the image orientation from the image data
 @param imageData the image data
 @return the orientation
 */
+ (UIImageOrientation)sd_imageOrientationFromImageData:(nonnull NSData *)imageData {
    // Default to "up"
    UIImageOrientation result = UIImageOrientationUp;
    CGImageSourceRef imageSource = CGImageSourceCreateWithData((__bridge CFDataRef)imageData, NULL);
    if (imageSource) {
        // Copy the image's property dictionary
        CFDictionaryRef properties = CGImageSourceCopyPropertiesAtIndex(imageSource, 0, NULL);
        if (properties) {
            CFTypeRef val;
            int exifOrientation;
            // Read the EXIF orientation value
            val = CFDictionaryGetValue(properties, kCGImagePropertyOrientation);
            if (val) {
                CFNumberGetValue(val, kCFNumberIntType, &exifOrientation);
                result = [self sd_exifOrientationToiOSOrientation:exifOrientation];
            } // else - if it's not set it remains at up
            CFRelease((CFTypeRef) properties);
        } else {
            //NSLog(@"NO PROPERTIES, FAIL");
        }
        CFRelease(imageSource);
    }
    return result;
}

#pragma mark EXIF orientation tag converter
// Convert an EXIF image orientation to an iOS one.
// reference see here: http://sylvana.net/jpegcrop/exif_orientation.html
/**
 Map an EXIF orientation value to the corresponding UIImageOrientation
 @param exifOrientation the EXIF orientation value
 @return the image orientation
 */
+ (UIImageOrientation) sd_exifOrientationToiOSOrientation:(int)exifOrientation {
    UIImageOrientation orientation = UIImageOrientationUp;
    switch (exifOrientation) {
        case 1:
            orientation = UIImageOrientationUp;
            break;
        case 3:
            orientation = UIImageOrientationDown;
            break;
        case 8:
            orientation = UIImageOrientationLeft;
            break;
        case 6:
            orientation = UIImageOrientationRight;
            break;
        case 2:
            orientation = UIImageOrientationUpMirrored;
            break;
        case 4:
            orientation = UIImageOrientationDownMirrored;
            break;
        case 5:
            orientation = UIImageOrientationLeftMirrored;
            break;
        case 7:
            orientation = UIImageOrientationRightMirrored;
            break;
        default:
            break;
    }
    return orientation;
}
#endif

- (nullable NSData *)sd_imageData {
    return [self sd_imageDataAsFormat:SDImageFormatUndefined];
}

/**
 Convert the image to NSData in the given format
 @param imageFormat the target image format
 @return the encoded image data
 */
- (nullable NSData *)sd_imageDataAsFormat:(SDImageFormat)imageFormat {
    NSData *imageData = nil;
    if (self) {
#if SD_UIKIT || SD_WATCH
        int alphaInfo = CGImageGetAlphaInfo(self.CGImage);
        // Does the image have an alpha channel?
        BOOL hasAlpha = !(alphaInfo == kCGImageAlphaNone ||
                          alphaInfo == kCGImageAlphaNoneSkipFirst ||
                          alphaInfo == kCGImageAlphaNoneSkipLast);
        // If the image has alpha, PNG is needed to preserve it
        BOOL usePNG = hasAlpha;

        // the imageFormat param has priority here. But if the format is undefined, we rely on the alpha channel
        if (imageFormat != SDImageFormatUndefined) {
            usePNG = (imageFormat == SDImageFormatPNG);
        }
        // Encode the image into the chosen format
        if (usePNG) {
            imageData = UIImagePNGRepresentation(self);
        } else {
            imageData = UIImageJPEGRepresentation(self, (CGFloat)1.0);
        }
#else
        NSBitmapImageFileType imageFileType = NSJPEGFileType;
        if (imageFormat == SDImageFormatGIF) {
            imageFileType = NSGIFFileType;
        } else if (imageFormat == SDImageFormatPNG) {
            imageFileType = NSPNGFileType;
        }
        imageData = [NSBitmapImageRep representationOfImageRepsInArray:self.representations
                                                             usingType:imageFileType
                                                            properties:@{}];
#endif
    }
    return imageData;
}
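A short round-trip sketch of this category (the URL is only an example):

// data -> UIImage -> NSData round trip; the URL is illustrative.
NSData *remoteData = [NSData dataWithContentsOfURL:[NSURL URLWithString:@"https://example.com/cat.png"]];
UIImage *image = [UIImage sd_imageWithData:remoteData];           // picks the right decoder based on the magic number
NSData *pngData = [image sd_imageDataAsFormat:SDImageFormatPNG];  // force PNG regardless of the alpha channel
NSData *autoData = [image sd_imageData];                          // SDImageFormatUndefined: PNG if there is alpha, otherwise JPEG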
3.UIImage+GIF
This category handles GIF NSData by extracting only the first frame of the GIF as a UIImage. If you need to display the actual animation, use FLAnimatedImageView instead.
/**
 Create a UIImage from GIF data (only the GIF's first frame is used)
 @param data the GIF data
 @return the resulting image. Only the first frame of the GIF is extracted; to render the full animation, use FLAnimatedImageView
 */
+ (UIImage *)sd_animatedGIFWithData:(NSData *)data {
    if (!data) {
        return nil;
    }
    CGImageSourceRef source = CGImageSourceCreateWithData((__bridge CFDataRef)data, NULL);
    // Number of frames contained in the data
    size_t count = CGImageSourceGetCount(source);
    UIImage *staticImage;
    // If there is at most one frame, create the image directly
    if (count <= 1) {
        staticImage = [[UIImage alloc] initWithData:data];
    } else {
        // we will only retrieve the 1st frame. the full GIF support is available via the FLAnimatedImageView category.
        // this here is only code to allow drawing animated images as static ones
#if SD_WATCH
        CGFloat scale = 1;
        scale = [WKInterfaceDevice currentDevice].screenScale;
#elif SD_UIKIT
        CGFloat scale = 1;
        scale = [UIScreen mainScreen].scale;
#endif
        // Create a CGImage for the first frame
        CGImageRef CGImage = CGImageSourceCreateImageAtIndex(source, 0, NULL);
#if SD_UIKIT || SD_WATCH
        // Wrap the first frame in a UIImage
        UIImage *frameImage = [UIImage imageWithCGImage:CGImage scale:scale orientation:UIImageOrientationUp];
        // Build an "animated" image that contains only this single frame
        staticImage = [UIImage animatedImageWithImages:@[frameImage] duration:0.0f];
#elif SD_MAC
        staticImage = [[UIImage alloc] initWithCGImage:CGImage size:NSZeroSize];
#endif
        CGImageRelease(CGImage);
    }
    CFRelease(source);
    return staticImage;
}

/**
 Check whether the image is an animated (GIF) image
 @return YES/NO
 */
- (BOOL)isGIF {
    return (self.images != nil);
}
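A quick sketch of the first-frame behaviour (the file path is hypothetical):

// The data comes from a hypothetical file path.
NSData *gifData = [NSData dataWithContentsOfFile:@"/tmp/animated.gif"];
UIImage *firstFrame = [UIImage sd_animatedGIFWithData:gifData];
// For a multi-frame GIF, firstFrame.images holds exactly one frame, so isGIF reports YES,
// but displaying it in a UIImageView will not play the full animation.
BOOL treatedAsGIF = [firstFrame isGIF];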
4.SDWebImageDecoder
This class implements image decoding (forced decompression). Images that are too large are first scaled down by a computed ratio and then decoded. At this point you may wonder: why decode at all?
4.1 Why decode?
In day-to-day development we usually load images with imageNamed:. By default the system performs the decoding work on the main thread, turning the compressed data into a bitmap that controls can use directly. Once a large number of imageNamed: calls pile up on the main thread, the UI starts to stutter. There are two ways to deal with this:
Don't load images with imageNamed:; use imageWithContentsOfFile: instead;
Decode the images yourself and move that decoding work onto a background thread (a rough sketch follows below).
For more on how images are stored and processed, see this article: 图片格式.
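As a rough illustration of the second approach, the sketch below redraws the image into a bitmap context on a background queue, which forces the decompression to happen there. It assumes an existing path string and imageView, and it is a simplified stand-in for what SDWebImageDecoder does, not the library's actual code:

// Simplified background-decoding sketch; path and imageView are assumed to exist.
dispatch_async(dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0), ^{
    UIImage *raw = [UIImage imageWithContentsOfFile:path];               // compressed data, not decoded yet
    UIGraphicsBeginImageContextWithOptions(raw.size, NO, raw.scale);
    [raw drawInRect:CGRectMake(0, 0, raw.size.width, raw.size.height)];  // drawing forces decompression here
    UIImage *decoded = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();
    dispatch_async(dispatch_get_main_queue(), ^{
        imageView.image = decoded;                                        // only the assignment stays on the main thread
    });
});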
4.2 SDWebImageDecoder source code analysis
#if SD_UIKIT || SD_WATCH
static const size_t kBytesPerPixel = 4;    //!< bytes per pixel (images are rendered pixel by pixel on iOS devices)
static const size_t kBitsPerComponent = 8; //!< bits per component (for RGBA a pixel is made of the four components R(red) G(green) B(blue) A(alpha); with 8 bits per component a pixel takes 8 * 4 = 32 bits)

/**
 Decode (decompress) an image
 @param image the UIImage to decode
 @return the decoded image
 */
+ (nullable UIImage *)decodedImageWithImage:(nullable UIImage *)image {
    // Check whether this image can (and should) be decoded
    if (![UIImage shouldDecodeImage:image]) {
        return image;
    }

    // autorelease the bitmap context and all vars to help system to free memory when there are memory warning.
    // on iOS7, do not forget to call [[SDImageCache sharedImageCache] clearMemory];
    // The decode work is wrapped in an autorelease pool so temporaries are released promptly
    @autoreleasepool{
        // Gather the parameters of the image
        CGImageRef imageRef = image.CGImage;
        // Color space of the image
        CGColorSpaceRef colorspaceRef = [UIImage colorSpaceForImageRef:imageRef];
        // Width and height
        size_t width = CGImageGetWidth(imageRef);
        size_t height = CGImageGetHeight(imageRef);
        // Bytes per row of the bitmap
        size_t bytesPerRow = kBytesPerPixel * width;

        // kCGImageAlphaNone is not supported in CGBitmapContextCreate.
        // Since the original image here has no alpha info, use kCGImageAlphaNoneSkipLast
        // to create bitmap graphics contexts without alpha info.
        // The context created here carries no alpha. When the UI is rendered, layers are composited
        // pixel by pixel, which requires RGBA blending for every pixel. When a layer is opaque
        // (opaque == YES), the GPU can simply ignore whatever lies below it, which saves a lot of work.
        CGContextRef context = CGBitmapContextCreate(NULL,
                                                     width,
                                                     height,
                                                     kBitsPerComponent,
                                                     bytesPerRow,
                                                     colorspaceRef,
                                                     kCGBitmapByteOrderDefault|kCGImageAlphaNoneSkipLast);
        if (context == NULL) {
            return image;
        }

        // Draw the image into the context and retrieve the new bitmap image without alpha
        CGContextDrawImage(context, CGRectMake(0, 0, width, height), imageRef);
        // Create a new image without an alpha channel from the context
        CGImageRef imageRefWithoutAlpha = CGBitmapContextCreateImage(context);
        // Wrap the decoded bitmap in a UIImage
        UIImage *imageWithoutAlpha = [UIImage imageWithCGImage:imageRefWithoutAlpha
                                                         scale:image.scale
                                                   orientation:image.imageOrientation];
        CGContextRelease(context);
        CGImageRelease(imageRefWithoutAlpha);
        return imageWithoutAlpha;
    }
}

/*
* Defines the maximum size in MB of the decoded image when the flag `SDWebImageScaleDownLargeImages` is set
* Suggested value for iPad1 and iPhone 3GS: 60.
* Suggested value for iPad2 and iPhone 4: 120.
* Suggested value for iPhone 3G and iPod 2 and earlier devices: 30.
*/
static const CGFloat kDestImageSizeMB = 60.0f; //!< maximum size in MB of the decoded (destination) image, 60 MB by default; before scaling down we must first put an upper bound on the output

/*
 * Defines the maximum size in MB of a tile used to decode image when the flag `SDWebImageScaleDownLargeImages` is set
 * Suggested value for iPad1 and iPhone 3GS: 20.
 * Suggested value for iPad2 and iPhone 4: 40.
 * Suggested value for iPhone 3G and iPod 2 and earlier devices: 10.
 */
static const CGFloat kSourceImageTileSizeMB = 20.0f; //!< size in MB of one source tile; the source image is split into tiles of this size

static const CGFloat kBytesPerMB = 1024.0f * 1024.0f;                          //!< bytes per MB
static const CGFloat kPixelsPerMB = kBytesPerMB / kBytesPerPixel;              //!< pixels per MB
static const CGFloat kDestTotalPixels = kDestImageSizeMB * kPixelsPerMB;       //!< total pixels allowed in the destination image (60 * 1024 * 1024 / 4, about 15.7 million pixels by default)
static const CGFloat kTileTotalPixels = kSourceImageTileSizeMB * kPixelsPerMB; //!< total pixels per source tile

static const CGFloat kDestSeemOverlap = 2.0f; //!< overlap in pixels between adjacent destination tiles

/**
 If the original image is too large, decode it at a reduced scale so the decoded bitmap does not take up too much memory
 @param image the UIImage to decode
 @return the decoded (and possibly scaled-down) image
 */
+ (nullable UIImage *)decodedAndScaledDownImageWithImage:(nullable UIImage *)image {
    // Check whether this image can be decoded at all
    if (![UIImage shouldDecodeImage:image]) {
        return image;
    }
    // If the image does not need to be scaled down, decode it directly
    if (![UIImage shouldScaleDownImage:image]) {
        return [UIImage decodedImageWithImage:image];
    }

    CGContextRef destContext;

    // autorelease the bitmap context and all vars to help system to free memory when there are memory warning.
    // on iOS7, do not forget to call [[SDImageCache sharedImageCache] clearMemory];
    @autoreleasepool {
        // Gather the parameters of the source image
        CGImageRef sourceImageRef = image.CGImage;
        // Resolution of the source image
        CGSize sourceResolution = CGSizeZero;
        sourceResolution.width = CGImageGetWidth(sourceImageRef);
        sourceResolution.height = CGImageGetHeight(sourceImageRef);
        // Total number of pixels in the source image
        float sourceTotalPixels = sourceResolution.width * sourceResolution.height;
        // Determine the scale ratio to apply to the input image
        // that results in an output image of the defined size.
        // see kDestImageSizeMB, and how it relates to destTotalPixels.
        float imageScale = kDestTotalPixels / sourceTotalPixels;
        // Resolution of the destination image
        CGSize destResolution = CGSizeZero;
        destResolution.width = (int)(sourceResolution.width * imageScale);
        destResolution.height = (int)(sourceResolution.height * imageScale);

        // current color space of the source image (defaults to RGB if the original space is unsupported)
        CGColorSpaceRef colorspaceRef = [UIImage colorSpaceForImageRef:sourceImageRef];
        // Number of bytes each row of the destination bitmap occupies
        size_t bytesPerRow = kBytesPerPixel * destResolution.width;

        // Allocate enough pixel data to hold the output image:
        // bytes per row * number of rows
        void *destBitmapData = malloc(bytesPerRow * destResolution.height);
        if (destBitmapData == NULL) {
            return image;
        }

        // kCGImageAlphaNone is not supported in CGBitmapContextCreate.
        // Since the original image here has no alpha info, use kCGImageAlphaNoneSkipLast
        // to create bitmap graphics contexts without alpha info.
        // Create the destination bitmap context with the settings above
        destContext = CGBitmapContextCreate(destBitmapData,
                                            destResolution.width,
                                            destResolution.height,
                                            kBitsPerComponent,
                                            bytesPerRow,
                                            colorspaceRef,
                                            kCGBitmapByteOrderDefault|kCGImageAlphaNoneSkipLast);
        if (destContext == NULL) {
            free(destBitmapData);
            return image;
        }
        // Use high interpolation quality when scaling into the destination context
        CGContextSetInterpolationQuality(destContext, kCGInterpolationHigh);

        // Now define the size of the rectangle to be used for the
        // incremental blits from the input image to the output image.
        // we use a source tile width equal to the width of the source
        // image due to the way that iOS retrieves image data from disk.
        // iOS must decode an image from disk in full width 'bands', even
        // if current graphics context is clipped to a subrect within that
        // band. Therefore we fully utilize all of the pixel data that results
        // from a decoding operation by anchoring our tile size to the full
        // width of the input image.
        // First source tile: as wide as the source image, with its height derived from the tile capacity
        CGRect sourceTile = CGRectZero;
        sourceTile.size.width = sourceResolution.width;
        // The source tile height is dynamic. Since we specified the size
        // of the source tile in MB, see how many rows of pixels high it
        // can be given the input image width.
        sourceTile.size.height = (int)(kTileTotalPixels / sourceTile.size.width);
        sourceTile.origin.x = 0.0f;
        // The output tile is the same proportions as the input tile, but
        // scaled to image scale.
        // Corresponding destination tile
        CGRect destTile;
        destTile.size.width = destResolution.width;
        destTile.size.height = sourceTile.size.height * imageScale;
        destTile.origin.x = 0.0f;
        // The source seam overlap is proportionate to the destination seam overlap.
        // this is the amount of pixels to overlap each tile as we assemble the output image.
        float sourceSeemOverlap = (int)((kDestSeemOverlap / destResolution.height) * sourceResolution.height);
        CGImageRef sourceTileImageRef;
        // calculate the number of read/write operations required to assemble the
        // output image, i.e. how many tiles the source image is split into
        int iterations = (int)(sourceResolution.height / sourceTile.size.height);
        // If tile height doesn't divide the image height evenly, add another iteration
        // to account for the remaining pixels.
        int remainder = (int)sourceResolution.height % (int)sourceTile.size.height;
        if (remainder) {
            iterations++;
        }
        // Add seam overlaps to the tiles, but save the original tile height for y coordinate calculations.
        // Each iteration then reads one tile from the source image and draws it into the matching destination tile.
        float sourceTileHeightMinusOverlap = sourceTile.size.height;
        sourceTile.size.height += sourceSeemOverlap;
        destTile.size.height += kDestSeemOverlap;
        for (int y = 0; y < iterations; ++y) {
            @autoreleasepool {
                sourceTile.origin.y = y * sourceTileHeightMinusOverlap + sourceSeemOverlap;
                destTile.origin.y = destResolution.height - ((y + 1) * sourceTileHeightMinusOverlap * imageScale + kDestSeemOverlap);
                sourceTileImageRef = CGImageCreateWithImageInRect(sourceImageRef, sourceTile);
                // The last tile may be shorter; adjust the destination tile accordingly
                if (y == iterations - 1 && remainder) {
                    float dify = destTile.size.height;
                    destTile.size.height = CGImageGetHeight(sourceTileImageRef) * imageScale;
                    dify -= destTile.size.height;
                    destTile.origin.y += dify;
                }
                CGContextDrawImage(destContext, destTile, sourceTileImageRef);
                CGImageRelease(sourceTileImageRef);
            }
        }

        CGImageRef destImageRef = CGBitmapContextCreateImage(destContext);
        CGContextRelease(destContext);
        if (destImageRef == NULL) {
            return image;
        }
        // Build the final UIImage from the destination bitmap
        UIImage *destImage = [UIImage imageWithCGImage:destImageRef scale:image.scale orientation:image.imageOrientation];
        CGImageRelease(destImageRef);
        if (destImage == nil) {
            return image;
        }
        return destImage;
    }
}
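To make the tile math concrete, here is a rough worked example using the default constants above (the image size is made up): for a 10,000 x 10,000 source image, sourceTotalPixels is 100,000,000 while kDestTotalPixels is 60 * 262,144 = 15,728,640, so imageScale is about 0.157 and the destination bitmap ends up around 1,572 x 1,572. One source tile holds kTileTotalPixels = 20 * 262,144 = 5,242,880 pixels, i.e. about 524 full-width rows, so the source is read in (int)(10,000 / 524) = 19 bands plus one extra iteration for the 44 remaining rows.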
/**
 Whether the image can be decoded
 @param image the image
 @return YES if the image can be decoded
 */
+ (BOOL)shouldDecodeImage:(nullable UIImage *)image {
    // Prevent "CGBitmapContextCreateImage: invalid context 0x0" error
    if (image == nil) {
        return NO;
    }

    // do not decode animated images
    if (image.images != nil) {
        return NO;
    }

    CGImageRef imageRef = image.CGImage;
    // Read the image's alpha info
    CGImageAlphaInfo alpha = CGImageGetAlphaInfo(imageRef);
    BOOL anyAlpha = (alpha == kCGImageAlphaFirst ||
                     alpha == kCGImageAlphaLast ||
                     alpha == kCGImageAlphaPremultipliedFirst ||
                     alpha == kCGImageAlphaPremultipliedLast);
    // do not decode images with alpha
    if (anyAlpha) {
        return NO;
    }
    return YES;
}

/**
 Whether the original image should be scaled down (only images larger than the target size need scaling)
 @param image the UIImage
 @return YES if the image should be scaled down
 */
+ (BOOL)shouldScaleDownImage:(nonnull UIImage *)image {
    BOOL shouldScaleDown = YES;

    CGImageRef sourceImageRef = image.CGImage;
    CGSize sourceResolution = CGSizeZero;
    sourceResolution.width = CGImageGetWidth(sourceImageRef);
    sourceResolution.height = CGImageGetHeight(sourceImageRef);
    // Total number of pixels in the image
    float sourceTotalPixels = sourceResolution.width * sourceResolution.height;
    // If the image has more pixels than the destination limit, it needs to be scaled down
    float imageScale = kDestTotalPixels / sourceTotalPixels;
    if (imageScale < 1) {
        shouldScaleDown = YES;
    } else {
        shouldScaleDown = NO;
    }
    return shouldScaleDown;
}

/**
 Get the color space of an image
 @param imageRef the image
 @return the color space
 */
+ (CGColorSpaceRef)colorSpaceForImageRef:(CGImageRef)imageRef {
    // current color space of the image
    CGColorSpaceModel imageColorSpaceModel = CGColorSpaceGetModel(CGImageGetColorSpace(imageRef));
    CGColorSpaceRef colorspaceRef = CGImageGetColorSpace(imageRef);

    // Fall back to device RGB for color spaces that CGBitmapContextCreate does not support
    BOOL unsupportedColorSpace = (imageColorSpaceModel == kCGColorSpaceModelUnknown ||
                                  imageColorSpaceModel == kCGColorSpaceModelMonochrome ||
                                  imageColorSpaceModel == kCGColorSpaceModelCMYK ||
                                  imageColorSpaceModel == kCGColorSpaceModelIndexed);
    if (unsupportedColorSpace) {
        colorspaceRef = CGColorSpaceCreateDeviceRGB();
        CFAutorelease(colorspaceRef);
    }
    return colorspaceRef;
}
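A hedged usage sketch of the decoder as it might be called after a download; downloadedData and imageView are assumed to exist already:

// Decode off the main thread, then hand the finished bitmap to the UI.
dispatch_async(dispatch_get_global_queue(QOS_CLASS_UTILITY, 0), ^{
    UIImage *image = [UIImage sd_imageWithData:downloadedData];
    // decodedAndScaledDownImageWithImage: falls back to decodedImageWithImage: for small images
    UIImage *decoded = [UIImage decodedAndScaledDownImageWithImage:image];
    dispatch_async(dispatch_get_main_queue(), ^{
        imageView.image = decoded;
    });
});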
5.UIView+WebCacheOperation
UIView+WebCacheOperation is mainly used to keep track of the load operations attached to a UIView. In most cases a view owns a single operation, and the default key is the name of the current class; if different keys are set, different operations are stored. A UIButton, for example, can be given a different image for each state and therefore needs to track several operations. A dictionary is used to hold all of the operations.
/**
 The dictionary of operations associated with this view
 @return the associated operations dictionary
 */
- (SDOperationsDictionary *)operationDictionary {
    SDOperationsDictionary *operations = objc_getAssociatedObject(self, &loadOperationKey);
    if (operations) {
        return operations;
    }
    operations = [NSMutableDictionary dictionary];
    objc_setAssociatedObject(self, &loadOperationKey, operations, OBJC_ASSOCIATION_RETAIN_NONATOMIC);
    return operations;
}

/**
 Associate an operation with a key
 @param operation the operation object
 @param key the key
 */
- (void)sd_setImageLoadOperation:(nullable id)operation forKey:(nullable NSString *)key {
    if (key) {
        // Cancel any operation previously stored for this key
        [self sd_cancelImageLoadOperationWithKey:key];
        if (operation) {
            SDOperationsDictionary *operationDictionary = [self operationDictionary];
            operationDictionary[key] = operation;
        }
    }
}

/**
 Cancel all operations (objects conforming to SDWebImageOperation) stored for the given key
 @param key the key the operations were stored under
 */
- (void)sd_cancelImageLoadOperationWithKey:(nullable NSString *)key {
    // Cancel in progress downloader from queue
    // Get the operation dictionary attached to this view
    SDOperationsDictionary *operationDictionary = [self operationDictionary];
    // Look up the operation(s) stored for this key
    id operations = operationDictionary[key];
    // Cancel every operation found for this key
    if (operations) {
        if ([operations isKindOfClass:[NSArray class]]) {
            for (id <SDWebImageOperation> operation in operations) {
                if (operation) {
                    // e.g. the cancel method of SDWebImageCombinedOperation
                    [operation cancel];
                }
            }
        } else if ([operations conformsToProtocol:@protocol(SDWebImageOperation)]) {
            [(id<SDWebImageOperation>) operations cancel];
        }
        [operationDictionary removeObjectForKey:key];
    }
}

/**
 Remove the operation stored for the given key
 @param key the key
 */
- (void)sd_removeImageLoadOperationWithKey:(nullable NSString *)key {
    if (key) {
        SDOperationsDictionary *operationDictionary = [self operationDictionary];
        [operationDictionary removeObjectForKey:key];
    }
}
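Finally, a small sketch of how a view might track and cancel its load operation; the key string and URL are only examples, and imageView stands for an existing UIImageView:

// Store the operation under a key, and cancel it later (e.g. on cell reuse).
NSURL *url = [NSURL URLWithString:@"https://example.com/avatar.png"];
id <SDWebImageOperation> operation =
    [[SDWebImageManager sharedManager] loadImageWithURL:url
                                                options:0
                                               progress:nil
                                              completed:^(UIImage *image, NSData *data, NSError *error,
                                                          SDImageCacheType cacheType, BOOL finished, NSURL *imageURL) {
        // update the UI with image on the main queue
    }];
[imageView sd_setImageLoadOperation:operation forKey:@"UIImageView image"]; // cancels any previous operation stored for this key
// Later, for example in prepareForReuse:
[imageView sd_cancelImageLoadOperationWithKey:@"UIImageView image"];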