iOS - Implementing Image Blur Effects

Below are three ways to implement an image blur effect on iOS.

Method 1: Blurring with Core Image

- (UIImage *)blurryImage:(UIImage *)image withBlurLevel:(CGFloat)blur {
    CIImage *inputImage = [CIImage imageWithCGImage:image.CGImage];
    CIFilter *filter = [CIFilter filterWithName:@"CIGaussianBlur"
                                  keysAndValues:kCIInputImageKey, inputImage,
                                                @"inputRadius", @(blur),
                                                nil]; // the key/value list must be nil-terminated
    CIImage *outputImage = filter.outputImage;
    // self.context is a reusable CIContext (see the sketch below)
    CGImageRef outImage = [self.context createCGImage:outputImage
                                             fromRect:[outputImage extent]];
    UIImage *result = [UIImage imageWithCGImage:outImage];
    CGImageRelease(outImage); // createCGImage: returns a +1 CGImageRef, so release it
    return result;
}
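The method relies on a self.context property holding a reusable CIContext, since creating a new context on every call is expensive. A minimal sketch of such a property is shown below; the property name comes from the snippet above, but this lazy-initialization setup is an assumption, not part of the original post:

// Reusable Core Image context; creating a CIContext is costly, so keep one instance around.
@property (nonatomic, strong) CIContext *context;

- (CIContext *)context {
    if (!_context) {
        _context = [CIContext contextWithOptions:nil]; // assumed setup; a GPU-backed context also works
    }
    return _context;
}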


Method 2: Blurring with the vImage API

#import <Accelerate/Accelerate.h> // vImage lives in the Accelerate framework

- (UIImage *)blurryImage:(UIImage *)image withBlurLevel:(CGFloat)blur {
    if (blur < 0.f || blur > 1.f) {
        blur = 0.5f;
    }

    // Map the 0..1 blur level to an odd box-convolution kernel size.
    int boxSize = (int)(blur * 100);
    boxSize = boxSize - (boxSize % 2) + 1;

    CGImageRef img = image.CGImage;

    vImage_Buffer inBuffer, outBuffer;
    vImage_Error error;
    void *pixelBuffer;

    // Wrap the source image's pixel data in a vImage buffer.
    CGDataProviderRef inProvider = CGImageGetDataProvider(img);
    CFDataRef inBitmapData = CGDataProviderCopyData(inProvider);

    inBuffer.width = CGImageGetWidth(img);
    inBuffer.height = CGImageGetHeight(img);
    inBuffer.rowBytes = CGImageGetBytesPerRow(img);
    inBuffer.data = (void *)CFDataGetBytePtr(inBitmapData);

    // Allocate a destination buffer with the same geometry.
    pixelBuffer = malloc(CGImageGetBytesPerRow(img) * CGImageGetHeight(img));
    if (pixelBuffer == NULL) {
        NSLog(@"No pixelbuffer");
    }

    outBuffer.data = pixelBuffer;
    outBuffer.width = CGImageGetWidth(img);
    outBuffer.height = CGImageGetHeight(img);
    outBuffer.rowBytes = CGImageGetBytesPerRow(img);

    error = vImageBoxConvolve_ARGB8888(&inBuffer,
                                       &outBuffer,
                                       NULL,
                                       0,
                                       0,
                                       boxSize,
                                       boxSize,
                                       NULL,
                                       kvImageEdgeExtend);
    if (error) {
        NSLog(@"error from convolution %ld", error);
    }

    // Turn the blurred buffer back into a CGImage / UIImage.
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef ctx = CGBitmapContextCreate(outBuffer.data,
                                             outBuffer.width,
                                             outBuffer.height,
                                             8,
                                             outBuffer.rowBytes,
                                             colorSpace,
                                             (CGBitmapInfo)kCGImageAlphaNoneSkipLast);
    CGImageRef imageRef = CGBitmapContextCreateImage(ctx);
    UIImage *returnImage = [UIImage imageWithCGImage:imageRef];

    // Clean up.
    CGContextRelease(ctx);
    CGColorSpaceRelease(colorSpace);
    free(pixelBuffer);
    CFRelease(inBitmapData);
    CGImageRelease(imageRef);

    return returnImage;
}
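Both versions share the same signature, so a call site looks the same either way. A hypothetical example follows; the asset name "photo" and the imageView property are placeholders, not part of the original post:

UIImage *sourceImage = [UIImage imageNamed:@"photo"];                      // hypothetical asset name
UIImage *blurredImage = [self blurryImage:sourceImage withBlurLevel:0.5f]; // blur level in 0.0 - 1.0
self.imageView.image = blurredImage;                                       // hypothetical UIImageView property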



Method 3: A frosted-glass effect (code found online)

// Internal method, core code: encapsulates the frosted-glass effect.
// Parameters: blur radius, tint color, saturation delta factor, optional mask image.
- (UIImage *)imageBluredWithRadius:(CGFloat)blurRadius
                         tintColor:(UIColor *)tintColor
             saturationDeltaFactor:(CGFloat)saturationDeltaFactor
                         maskImage:(UIImage *)maskImage {
    CGRect imageRect = { CGPointZero, self.size };
    UIImage *effectImage = self;

    BOOL hasBlur = blurRadius > __FLT_EPSILON__;
    BOOL hasSaturationChange = fabs(saturationDeltaFactor - 1.) > __FLT_EPSILON__;
    if (hasBlur || hasSaturationChange) {
        // Draw the source image into a bitmap context and wrap it in a vImage buffer.
        UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
        CGContextRef effectInContext = UIGraphicsGetCurrentContext();
        CGContextScaleCTM(effectInContext, 1.0, -1.0);
        CGContextTranslateCTM(effectInContext, 0, -self.size.height);
        CGContextDrawImage(effectInContext, imageRect, self.CGImage);

        vImage_Buffer effectInBuffer;
        effectInBuffer.data = CGBitmapContextGetData(effectInContext);
        effectInBuffer.width = CGBitmapContextGetWidth(effectInContext);
        effectInBuffer.height = CGBitmapContextGetHeight(effectInContext);
        effectInBuffer.rowBytes = CGBitmapContextGetBytesPerRow(effectInContext);

        UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
        CGContextRef effectOutContext = UIGraphicsGetCurrentContext();
        vImage_Buffer effectOutBuffer;
        effectOutBuffer.data = CGBitmapContextGetData(effectOutContext);
        effectOutBuffer.width = CGBitmapContextGetWidth(effectOutContext);
        effectOutBuffer.height = CGBitmapContextGetHeight(effectOutContext);
        effectOutBuffer.rowBytes = CGBitmapContextGetBytesPerRow(effectOutContext);

        if (hasBlur) {
            CGFloat inputRadius = blurRadius * [[UIScreen mainScreen] scale];
            NSUInteger radius = floor(inputRadius * 3. * sqrt(2 * M_PI) / 4 + 0.5);
            if (radius % 2 != 1) {
                radius += 1; // force radius to be odd so that the three box-blur methodology works.
            }
            // Three box blurs in sequence approximate a Gaussian blur.
            vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, NULL, 0, 0, (short)radius, (short)radius, 0, kvImageEdgeExtend);
            vImageBoxConvolve_ARGB8888(&effectOutBuffer, &effectInBuffer, NULL, 0, 0, (short)radius, (short)radius, 0, kvImageEdgeExtend);
            vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, NULL, 0, 0, (short)radius, (short)radius, 0, kvImageEdgeExtend);
        }

        BOOL effectImageBuffersAreSwapped = NO;
        if (hasSaturationChange) {
            CGFloat s = saturationDeltaFactor;
            CGFloat floatingPointSaturationMatrix[] = {
                0.0722 + 0.9278 * s,  0.0722 - 0.0722 * s,  0.0722 - 0.0722 * s,  0,
                0.7152 - 0.7152 * s,  0.7152 + 0.2848 * s,  0.7152 - 0.7152 * s,  0,
                0.2126 - 0.2126 * s,  0.2126 - 0.2126 * s,  0.2126 + 0.7873 * s,  0,
                0,                    0,                    0,                    1,
            };
            const int32_t divisor = 256;
            NSUInteger matrixSize = sizeof(floatingPointSaturationMatrix) / sizeof(floatingPointSaturationMatrix[0]);
            int16_t saturationMatrix[matrixSize];
            for (NSUInteger i = 0; i < matrixSize; ++i) {
                saturationMatrix[i] = (int16_t)roundf(floatingPointSaturationMatrix[i] * divisor);
            }
            if (hasBlur) {
                vImageMatrixMultiply_ARGB8888(&effectOutBuffer, &effectInBuffer, saturationMatrix, divisor, NULL, NULL, kvImageNoFlags);
                effectImageBuffersAreSwapped = YES;
            } else {
                vImageMatrixMultiply_ARGB8888(&effectInBuffer, &effectOutBuffer, saturationMatrix, divisor, NULL, NULL, kvImageNoFlags);
            }
        }

        if (!effectImageBuffersAreSwapped) {
            effectImage = UIGraphicsGetImageFromCurrentImageContext();
        }
        UIGraphicsEndImageContext();

        if (effectImageBuffersAreSwapped) {
            effectImage = UIGraphicsGetImageFromCurrentImageContext();
        }
        UIGraphicsEndImageContext();
    }

    // Open a graphics context for the output image.
    UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
    CGContextRef outputContext = UIGraphicsGetCurrentContext();
    CGContextScaleCTM(outputContext, 1.0, -1.0);
    CGContextTranslateCTM(outputContext, 0, -self.size.height);

    // Draw the base image.
    CGContextDrawImage(outputContext, imageRect, self.CGImage);

    // Draw the blurred image on top.
    if (hasBlur) {
        CGContextSaveGState(outputContext);
        if (maskImage) {
            CGContextClipToMask(outputContext, imageRect, maskImage.CGImage);
        }
        CGContextDrawImage(outputContext, imageRect, effectImage.CGImage);
        CGContextRestoreGState(outputContext);
    }

    // Add the tint color.
    if (tintColor) {
        CGContextSaveGState(outputContext);
        CGContextSetFillColorWithColor(outputContext, tintColor.CGColor);
        CGContextFillRect(outputContext, imageRect);
        CGContextRestoreGState(outputContext);
    }

    // Produce the final image and close the context.
    UIImage *outputImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return outputImage;
}
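Since the method operates on self, it is meant to be declared in a UIImage category. A hypothetical call site follows; the asset name and the parameter values here are illustrative assumptions, not values from the original post:

UIImage *source = [UIImage imageNamed:@"background"];   // hypothetical asset name
UIImage *frosted = [source imageBluredWithRadius:20.0
                                       tintColor:[UIColor colorWithWhite:1.0 alpha:0.3]
                           saturationDeltaFactor:1.8
                                       maskImage:nil];  // nil = blur the whole image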

