#import "UIImage+ImageEffects.h"

#import <Accelerate/Accelerate.h>
#import <float.h>

@implementation UIImage (ImageEffects)
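
// Illustrative usage (hypothetical call site, not taken from this repository):
//
//     UIImage *snapshot = ...; // e.g. a rendered copy of the view hierarchy to blur
//     UIImage *light    = [snapshot applyLightEffect];
//     UIImage *custom   = [snapshot applyBlurWithRadius:8.0f
//                                             tintColor:[UIColor colorWithWhite:1.0f alpha:0.3f]
//                                 saturationDeltaFactor:1.8f
//                                             maskImage:nil];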

// Light blur: 20 pt radius, translucent white tint, boosted saturation.
- (UIImage *)applyLightEffect
{
    UIColor *tintColor = [UIColor colorWithWhite:1.0f alpha:0.3f];
    return [self applyBlurWithRadius:20.0f tintColor:tintColor saturationDeltaFactor:1.8f maskImage:nil];
}

// Extra-light blur: 20 pt radius, near-opaque near-white tint, boosted saturation.
- (UIImage *)applyExtraLightEffect
{
    UIColor *tintColor = [UIColor colorWithWhite:0.97f alpha:0.82f];
    return [self applyBlurWithRadius:20.0f tintColor:tintColor saturationDeltaFactor:1.8f maskImage:nil];
}

// Dark blur: 20 pt radius, translucent dark tint, boosted saturation.
- (UIImage *)applyDarkEffect
{
    UIColor *tintColor = [UIColor colorWithWhite:0.11f alpha:0.73f];
    return [self applyBlurWithRadius:20.0f tintColor:tintColor saturationDeltaFactor:1.8f maskImage:nil];
}

// Tint blur: 10 pt radius, tinted with the given color at a fixed 0.6 alpha.
// A two-component color (white + alpha) is treated as grayscale; otherwise the
// RGB components are reused with the fixed effect alpha.
- (UIImage *)applyTintEffectWithColor:(UIColor *)tintColor
{
    const CGFloat EffectColorAlpha = 0.6f;
    UIColor *effectColor = tintColor;
    int componentCount = (int)CGColorGetNumberOfComponents(tintColor.CGColor);
    if (componentCount == 2) {
        CGFloat b;
        if ([tintColor getWhite:&b alpha:NULL]) {
            effectColor = [UIColor colorWithWhite:b alpha:EffectColorAlpha];
        }
    }
    else {
        CGFloat r, g, b;
        if ([tintColor getRed:&r green:&g blue:&b alpha:NULL]) {
            effectColor = [UIColor colorWithRed:r green:g blue:b alpha:EffectColorAlpha];
        }
    }
    return [self applyBlurWithRadius:10.0f tintColor:effectColor saturationDeltaFactor:-1.0f maskImage:nil];
}

// Core routine: blurs the receiver with a Gaussian approximation, optionally adjusts
// saturation, clips the effect to maskImage if one is given, and overlays tintColor.
// Returns nil if the receiver (or maskImage) is not CGImage-backed or has a degenerate size.
- (UIImage *)applyBlurWithRadius:(CGFloat)blurRadius tintColor:(UIColor *)tintColor saturationDeltaFactor:(CGFloat)saturationDeltaFactor maskImage:(UIImage *)maskImage
{
    // Check pre-conditions.
    if (self.size.width < 1 || self.size.height < 1) {
        NSLog(@"*** error: invalid size: (%.2f x %.2f). Both dimensions must be >= 1: %@", self.size.width, self.size.height, self);
        return nil;
    }
    if (!self.CGImage) {
        NSLog(@"*** error: image must be backed by a CGImage: %@", self);
        return nil;
    }
    if (maskImage && !maskImage.CGImage) {
        NSLog(@"*** error: maskImage must be backed by a CGImage: %@", maskImage);
        return nil;
    }

    CGRect imageRect = { CGPointZero, self.size };
    UIImage *effectImage = self;

    BOOL hasBlur = blurRadius > __FLT_EPSILON__;
    BOOL hasSaturationChange = fabs(saturationDeltaFactor - 1.) > __FLT_EPSILON__;
    if (hasBlur || hasSaturationChange) {
        // Draw the source image into a bitmap context; its backing store becomes the vImage input buffer.
        UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
        CGContextRef effectInContext = UIGraphicsGetCurrentContext();
        CGContextScaleCTM(effectInContext, 1.0, -1.0);
        CGContextTranslateCTM(effectInContext, 0, -self.size.height);
        CGContextDrawImage(effectInContext, imageRect, self.CGImage);

        vImage_Buffer effectInBuffer;
        effectInBuffer.data = CGBitmapContextGetData(effectInContext);
        effectInBuffer.width = CGBitmapContextGetWidth(effectInContext);
        effectInBuffer.height = CGBitmapContextGetHeight(effectInContext);
        effectInBuffer.rowBytes = CGBitmapContextGetBytesPerRow(effectInContext);

        UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
        CGContextRef effectOutContext = UIGraphicsGetCurrentContext();
        vImage_Buffer effectOutBuffer;
        effectOutBuffer.data = CGBitmapContextGetData(effectOutContext);
        effectOutBuffer.width = CGBitmapContextGetWidth(effectOutContext);
        effectOutBuffer.height = CGBitmapContextGetHeight(effectOutContext);
        effectOutBuffer.rowBytes = CGBitmapContextGetBytesPerRow(effectOutContext);

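        // Both image contexts stay open for the rest of this block; effectInBuffer and
        // effectOutBuffer alias their bitmap backing stores, so the vImage calls below operate
        // on the contexts' pixels in place. The blur passes ping-pong between the two buffers,
        // and the saturation pass may leave the final result in either one, which is what
        // effectImageBuffersAreSwapped (declared below) tracks when the result is read back.
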
        if (hasBlur) {
            // A description of how to compute the box kernel width from the Gaussian
            // radius (aka standard deviation) appears in the SVG spec:
            // http://www.w3.org/TR/SVG/filters.html#feGaussianBlurElement
            //
            // For larger values of 's' (s >= 2.0), an approximation can be used: Three
            // successive box-blurs build a piece-wise quadratic convolution kernel, which
            // approximates the Gaussian kernel to within roughly 3%.
            //
            // let d = floor(s * 3*sqrt(2*pi)/4 + 0.5)
            //
            // ... if d is odd, use three box-blurs of size 'd', centered on the output pixel.
            //
            CGFloat inputRadius = blurRadius * [[UIScreen mainScreen] scale];
            NSUInteger radius = (NSUInteger)(CGFloor(inputRadius * 3.0f * ((CGFloat)sqrt(2 * M_PI)) / 4 + 0.5f));
            if (radius % 2 != 1) {
                radius += 1; // force radius to be odd so that the three box-blur methodology works.
            }
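            // Worked example (illustrative, assuming a 2x Retina screen): blurRadius = 20
            // gives inputRadius = 40, so radius = floor(40 * 3 * sqrt(2*pi) / 4 + 0.5) = 75,
            // which is already odd and is used directly as the box kernel width below.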
            vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, NULL, 0, 0, (uint32_t)radius, (uint32_t)radius, 0, kvImageEdgeExtend);
            vImageBoxConvolve_ARGB8888(&effectOutBuffer, &effectInBuffer, NULL, 0, 0, (uint32_t)radius, (uint32_t)radius, 0, kvImageEdgeExtend);
            vImageBoxConvolve_ARGB8888(&effectInBuffer, &effectOutBuffer, NULL, 0, 0, (uint32_t)radius, (uint32_t)radius, 0, kvImageEdgeExtend);
        }
        BOOL effectImageBuffersAreSwapped = NO;
        if (hasSaturationChange) {
            CGFloat s = saturationDeltaFactor;
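            // Saturation is applied as a 4x4 color matrix that blends each pixel between its
            // Rec. 709 luminance (weights 0.0722, 0.7152, 0.2126, laid out in the bitmap's
            // channel order) and its original value: conceptually M = (1 - s) * Luma + s * Identity,
            // so s = 1 is a no-op, s > 1 boosts saturation and s = 0 yields grayscale. The entries
            // are scaled by 256 into the fixed-point int16_t form vImageMatrixMultiply_ARGB8888 expects.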
            CGFloat floatingPointSaturationMatrix[] = {
                0.0722f + 0.9278f * s, 0.0722f - 0.0722f * s, 0.0722f - 0.0722f * s, 0,
                0.7152f - 0.7152f * s, 0.7152f + 0.2848f * s, 0.7152f - 0.7152f * s, 0,
                0.2126f - 0.2126f * s, 0.2126f - 0.2126f * s, 0.2126f + 0.7873f * s, 0,
                0,                     0,                     0,                     1,
            };
            const int32_t divisor = 256;
            NSUInteger matrixSize = sizeof(floatingPointSaturationMatrix) / sizeof(floatingPointSaturationMatrix[0]);
            int16_t saturationMatrix[matrixSize];
            for (NSUInteger i = 0; i < matrixSize; ++i) {
                saturationMatrix[i] = (int16_t)CGRound(floatingPointSaturationMatrix[i] * divisor);
            }
            if (hasBlur) {
                vImageMatrixMultiply_ARGB8888(&effectOutBuffer, &effectInBuffer, saturationMatrix, divisor, NULL, NULL, kvImageNoFlags);
                effectImageBuffersAreSwapped = YES;
            }
            else {
                vImageMatrixMultiply_ARGB8888(&effectInBuffer, &effectOutBuffer, saturationMatrix, divisor, NULL, NULL, kvImageNoFlags);
            }
        }
        // Read the result from whichever context holds the output of the final pass,
        // then tear down both effect contexts.
        if (!effectImageBuffersAreSwapped)
            effectImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();

        if (effectImageBuffersAreSwapped)
            effectImage = UIGraphicsGetImageFromCurrentImageContext();
        UIGraphicsEndImageContext();
    }

    // Set up output context.
    UIGraphicsBeginImageContextWithOptions(self.size, NO, [[UIScreen mainScreen] scale]);
    CGContextRef outputContext = UIGraphicsGetCurrentContext();
    CGContextScaleCTM(outputContext, 1.0, -1.0);
    CGContextTranslateCTM(outputContext, 0, -self.size.height);

    // Draw base image.
    CGContextDrawImage(outputContext, imageRect, self.CGImage);

    // Draw effect image.
    if (hasBlur) {
        CGContextSaveGState(outputContext);
        if (maskImage) {
            // With a mask, the blurred image is composited only inside the masked area;
            // everywhere else the sharp base image drawn above shows through.
            CGContextClipToMask(outputContext, imageRect, maskImage.CGImage);
        }
        CGContextDrawImage(outputContext, imageRect, effectImage.CGImage);
        CGContextRestoreGState(outputContext);
    }
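
    // The tint below is a plain translucent fill over the whole image rect (default blend mode);
    // it is the only thing that differentiates the light / extra-light / dark variants above.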
    // Add in color tint.
    if (tintColor) {
        CGContextSaveGState(outputContext);
        CGContextSetFillColorWithColor(outputContext, tintColor.CGColor);
        CGContextFillRect(outputContext, imageRect);
        CGContextRestoreGState(outputContext);
    }

    // Output image is ready.
    UIImage *outputImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return outputImage;
}

@end