first commit

This commit is contained in:
2026-03-10 16:18:05 +00:00
commit 11f9c069b5
31635 changed files with 3187747 additions and 0 deletions

37
node_modules/expo-image/ios/AnimatedImage.swift generated vendored Normal file
View File

@@ -0,0 +1,37 @@
// Copyright 2024-present 650 Industries. All rights reserved.
import SDWebImage
/**
 Custom `SDAnimatedImage` that fixes issues with `images` and `duration` not being available.
 */
final class AnimatedImage: SDAnimatedImage {
  /// Lazily-populated frame list; stays `nil` until `preloadAllFrames()` runs.
  var frames: [SDImageFrame]?

  // MARK: - UIImage

  override var images: [UIImage]? {
    preloadAllFrames()
    return frames?.map(\.image)
  }

  override var duration: TimeInterval {
    preloadAllFrames()
    guard let frames else {
      return 0.0
    }
    return frames.reduce(0) { $0 + $1.duration }
  }

  // MARK: - SDAnimatedImage

  override func preloadAllFrames() {
    // Already preloaded — nothing to do.
    guard frames == nil else {
      return
    }
    var loadedFrames = [SDImageFrame]()
    for index in UInt(0)..<animatedImageFrameCount {
      // Frames that fail to decode are silently skipped, mirroring `compactMap`.
      if let image = animatedImageFrame(at: index) {
        loadedFrames.append(SDImageFrame(image: image, duration: animatedImageDuration(at: index)))
      }
    }
    frames = loadedFrames
  }
}

39
node_modules/expo-image/ios/Coders/PSDCoder.swift generated vendored Normal file
View File

@@ -0,0 +1,39 @@
// Copyright 2024-present 650 Industries. All rights reserved.
import SDWebImage
/**
 An `SDImageCoder` that decodes Adobe Photoshop (PSD) files.
 Decoding is delegated to `UIImage`, which natively understands PSD data; encoding is unsupported.
 */
internal final class PSDCoder: NSObject, SDImageCoder {
  nonisolated(unsafe) static let shared = PSDCoder()

  /// Returns `true` when the data begins with the PSD magic bytes ("8BPS").
  func canDecode(from data: Data?) -> Bool {
    guard let data, data.count >= 4 else {
      return false
    }
    // Verify PSD magic bytes. Use `prefix(4)` rather than `data[0..<4]`:
    // a `Data` value may be a slice whose indices do not start at 0, in which
    // case absolute-range subscripting would read the wrong bytes or trap.
    let signature = String(data: data.prefix(4), encoding: .ascii)
    return signature == "8BPS"
  }

  /// Decodes PSD data into a `UIImage`, honoring the optional scale factor option.
  func decodedImage(with data: Data?, options: [SDImageCoderOption : Any]? = nil) -> UIImage? {
    guard let data else {
      return nil
    }
    if let scale = options?[SDImageCoderOption.decodeScaleFactor] as? CGFloat {
      return UIImage(data: data, scale: scale)
    }
    // UIImage is able to directly handle PSD data
    return UIImage(data: data)
  }

  /// Encoding PSDs is not supported.
  func canEncode(to format: SDImageFormat) -> Bool {
    return false
  }

  /// Always `nil` — see `canEncode(to:)`.
  func encodedData(with image: UIImage?, format: SDImageFormat, options: [SDImageCoderOption : Any]? = nil) -> Data? {
    return nil
  }
}

73
node_modules/expo-image/ios/Coders/WebPCoder.swift generated vendored Normal file
View File

@@ -0,0 +1,73 @@
// Copyright 2015-present 650 Industries. All rights reserved.
import SDWebImage
import SDWebImageWebPCoder
internal let imageCoderOptionUseAppleWebpCodec = SDImageCoderOption(rawValue: "useAppleWebpCodec")

/**
 A composite WebP coder that uses either `SDImageAWebPCoder` or `SDImageWebPCoder`
 based on the passed `imageCoderOptionUseAppleWebpCodec` option.
 */
internal final class WebPCoder: NSObject, SDAnimatedImageCoder {
  nonisolated(unsafe) static let shared = WebPCoder()

  /// Whether the Apple-provided WebP codec backs this coder. Defaults to `true`.
  private var useAppleWebpCodec: Bool = true

  /// The concrete coder created by `init(animatedImageData:options:)`, if any.
  private var instantiatedCoder: SDAnimatedImageCoder?

  /// The underlying coder every call is forwarded to — the instantiated one when
  /// present, otherwise the shared coder selected by `useAppleWebpCodec`.
  private var coder: SDAnimatedImageCoder {
    instantiatedCoder ?? (useAppleWebpCodec ? SDImageAWebPCoder.shared : SDImageWebPCoder.shared)
  }

  override init() {
    super.init()
  }

  // MARK: - SDAnimatedImageCoder implementations

  convenience init(animatedImageData data: Data?, options: [SDImageCoderOption: Any]? = nil) {
    self.init()
    useAppleWebpCodec = options?[imageCoderOptionUseAppleWebpCodec] as? Bool ?? true
    instantiatedCoder = useAppleWebpCodec
      ? SDImageAWebPCoder(animatedImageData: data, options: options)
      : SDImageWebPCoder(animatedImageData: data, options: options)
  }

  func canDecode(from data: Data?) -> Bool {
    coder.canDecode(from: data)
  }

  func decodedImage(with data: Data?, options: [SDImageCoderOption: Any]? = nil) -> UIImage? {
    coder.decodedImage(with: data, options: options)
  }

  func canEncode(to format: SDImageFormat) -> Bool {
    coder.canEncode(to: format)
  }

  func encodedData(with image: UIImage?, format: SDImageFormat, options: [SDImageCoderOption: Any]? = nil) -> Data? {
    coder.encodedData(with: image, format: format, options: options)
  }

  var animatedImageData: Data? {
    coder.animatedImageData
  }

  var animatedImageFrameCount: UInt {
    coder.animatedImageFrameCount
  }

  var animatedImageLoopCount: UInt {
    coder.animatedImageLoopCount
  }

  func animatedImageFrame(at index: UInt) -> UIImage? {
    coder.animatedImageFrame(at: index)
  }

  func animatedImageDuration(at index: UInt) -> TimeInterval {
    coder.animatedImageDuration(at: index)
  }
}

55
node_modules/expo-image/ios/ContentFit.swift generated vendored Normal file
View File

@@ -0,0 +1,55 @@
import ExpoModulesCore
/**
 Describes how the image should be resized to fit its container.
 - Note: It mirrors the CSS [`object-fit`](https://developer.mozilla.org/en-US/docs/Web/CSS/object-fit) property.
 */
enum ContentFit: String, Enumerable {
  /// Scales the image, preserving its aspect ratio, so the whole image fits
  /// inside the container — "letterboxing" it when the aspect ratios differ.
  case contain

  /// Scales the image, preserving its aspect ratio, so it fills the container
  /// entirely, clipping whatever does not fit.
  case cover

  /// Stretches the image to exactly the container's size, ignoring its aspect ratio.
  case fill

  /// Leaves the image at its intrinsic size, centered by default.
  /// The exact placement can be controlled with `ContentPosition`.
  case none

  /// Behaves like `none` or `contain`, whichever produces the smaller concrete image size.
  case scaleDown = "scale-down"

  /// Translates the case to the closest native `UIView.ContentMode`.
  /// `scaleDown` has no UIKit equivalent, so it maps to `.center` here and is
  /// handled differently at a later step of rendering.
  func toContentMode() -> UIView.ContentMode {
    switch self {
    case .contain: return .scaleAspectFit
    case .cover: return .scaleAspectFill
    case .fill: return .scaleToFill
    case .none, .scaleDown: return .center
    }
  }
}

108
node_modules/expo-image/ios/ContentPosition.swift generated vendored Normal file
View File

@@ -0,0 +1,108 @@
import ExpoModulesCore
/**
 Represents a position value that might be either `Double` or `String`.
 */
typealias ContentPositionValue = Either<Double, String>

/**
 Specifies the alignment of the image within the container's box.
 - Note: Its intention is to behave like the CSS [`object-position`](https://developer.mozilla.org/en-US/docs/Web/CSS/object-position) property.
 */
struct ContentPosition: Record {
  static let center = Self()

  @Field
  var top: ContentPositionValue?

  @Field
  var bottom: ContentPositionValue?

  @Field
  var right: ContentPositionValue?

  @Field
  var left: ContentPositionValue?

  /**
   Returns a horizontal content offset based on the `left` or `right` field.
   */
  func offsetX(contentWidth: Double, containerWidth: Double) -> Double {
    return offset(diff: containerWidth - contentWidth, leading: left, trailing: right)
  }

  /**
   Returns a vertical content offset based on the `top` or `bottom` field.
   */
  func offsetY(contentHeight: Double, containerHeight: Double) -> Double {
    return offset(diff: containerHeight - contentHeight, leading: top, trailing: bottom)
  }

  /**
   A `CGPoint` with horizontal and vertical content offsets.
   */
  func offset(contentSize: CGSize, containerSize: CGSize) -> CGPoint {
    return CGPoint(
      x: offsetX(contentWidth: contentSize.width, containerWidth: containerSize.width),
      y: offsetY(contentHeight: contentSize.height, containerHeight: containerSize.height)
    )
  }

  /**
   Shared per-axis implementation — both axes used to duplicate this exact logic.
   `leading` is the edge measured from the negative direction (`left`/`top`),
   `trailing` from the positive one (`right`/`bottom`). Absolute distances take
   precedence over percentage factors, and on each tier the leading edge takes
   precedence over the trailing one, matching the original per-axis behavior.
   */
  private func offset(diff: Double, leading: ContentPositionValue?, trailing: ContentPositionValue?) -> Double {
    if let leadingDistance = distance(from: leading) {
      return -diff / 2 + leadingDistance
    }
    if let trailingDistance = distance(from: trailing) {
      return diff / 2 - trailingDistance
    }
    if let factor = factor(from: leading) {
      return -diff / 2 + diff * factor
    }
    if let factor = factor(from: trailing) {
      return diff / 2 - diff * factor
    }
    // No positioning fields were provided for this axis — keep the content centered.
    return 0
  }
}
/**
 Returns a static offset from the given position value or `nil` when it cannot be cast to a `Double`.
 */
private func distance(from value: ContentPositionValue?) -> Double? {
  guard let value else {
    return nil
  }
  if let number: Double = value.get() {
    return number
  }
  // Fall back to parsing a plain numeric string (e.g. "12").
  let string: String? = value.get()
  return string.flatMap(Double.init)
}
/**
 Returns a factor from the percentage value from the given position.
 The value must be a string containing a number and `%` character, or equal to `"center"` which is an equivalent to `50%`.
 */
private func factor(from value: ContentPositionValue?) -> Double? {
  guard let string: String = value?.get() else {
    return nil
  }
  if string == "center" {
    return 0.5
  }
  guard string.contains("%") else {
    return nil
  }
  let numericPart = string.replacingOccurrences(of: "%", with: "")
  guard let percentage = Double(numericPart) else {
    return nil
  }
  return percentage / 100
}

50
node_modules/expo-image/ios/ExpoImage.podspec generated vendored Normal file
View File

@@ -0,0 +1,50 @@
require 'json'

# Package metadata (version, description, author, …) is sourced from package.json.
package = JSON.parse(File.read(File.join(__dir__, '..', 'package.json')))

# Optional per-app overrides; `rescue {}` falls back to an empty hash when the file is absent.
podfile_properties = JSON.parse(File.read("#{Pod::Config.instance.installation_root}/Podfile.properties.json")) rescue {}

# libdav1d (the AV1 decoder used by libavif) can be disabled either via the
# 'expo-image.disable-libdav1d' Podfile property or the EXPO_IMAGE_DISABLE_LIBDAV1D
# environment variable. The Podfile property, when present, wins over the env var.
property_override = podfile_properties['expo-image.disable-libdav1d']
env_override = ENV['EXPO_IMAGE_DISABLE_LIBDAV1D']

disable_libdav1d =
  if property_override.nil?
    env_override == '1' || env_override == 'true'
  else
    property_override == 'true'
  end

Pod::Spec.new do |s|
  s.name = 'ExpoImage'
  s.version = package['version']
  s.summary = package['description']
  s.description = package['description']
  s.license = package['license']
  s.author = package['author']
  s.homepage = package['homepage']
  s.platforms = {
    :ios => '15.1',
    :tvos => '15.1'
  }
  s.swift_version = '6.0'
  s.source = { git: 'https://github.com/expo/expo.git' }
  s.static_framework = true

  s.dependency 'ExpoModulesCore'
  s.dependency 'SDWebImage', '~> 5.21.0'
  s.dependency 'SDWebImageAVIFCoder', '~> 0.11.0'
  s.dependency 'SDWebImageSVGCoder', '~> 1.7.0'
  s.dependency 'SDWebImageWebPCoder', '~> 0.14.6'
  s.dependency 'libavif/libdav1d' unless disable_libdav1d

  # Swift/Objective-C compatibility
  s.pod_target_xcconfig = {
    'DEFINES_MODULE' => 'YES',
  }

  s.source_files = "**/*.{h,m,swift}"
  s.exclude_files = 'Tests/'

  s.test_spec 'Tests' do |test_spec|
    test_spec.dependency 'ExpoModulesTestCore'
    test_spec.source_files = 'Tests/**/*.{m,swift}'
  end
end

20
node_modules/expo-image/ios/Image.swift generated vendored Normal file
View File

@@ -0,0 +1,20 @@
// Copyright 2024-present 650 Industries. All rights reserved.
import ExpoModulesCore
/// A shared reference wrapping a native `UIImage` so it can be passed to/from JS.
internal final class Image: SharedRef<UIImage> {
  /// Identifies this shared-ref type as "image".
  override var nativeRefType: String {
    "image"
  }

  /// `true` when the underlying `UIImage` carries a non-empty `images` array.
  var isAnimated: Bool {
    guard let frames = ref.images else {
      return false
    }
    return !frames.isEmpty
  }

  /// Approximates the decoded bitmap's footprint (bytes-per-row × height),
  /// reported as additional memory pressure; `0` when there is no `CGImage`.
  override func getAdditionalMemoryPressure() -> Int {
    return ref.cgImage.map { $0.bytesPerRow * $0.height } ?? 0
  }
}

12
node_modules/expo-image/ios/ImageCacheConfig.swift generated vendored Normal file
View File

@@ -0,0 +1,12 @@
import ExpoModulesCore
// Options accepted by `configureCache` — each `nil` field leaves the corresponding setting untouched.
struct ImageCacheConfig: Record {
  // Maximum size of the disk cache. NOTE(review): presumably bytes, matching
  // SDWebImage's `SDImageCacheConfig.maxDiskSize` — confirm against its docs.
  @Field
  var maxDiskSize: UInt?

  // Maximum total cost of the in-memory cache (forwarded to `maxMemoryCost`).
  @Field
  var maxMemoryCost: UInt?

  // Maximum number of images kept in the in-memory cache (forwarded to `maxMemoryCount`).
  @Field
  var maxMemoryCount: UInt?
}

24
node_modules/expo-image/ios/ImageCachePolicy.swift generated vendored Normal file
View File

@@ -0,0 +1,24 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import SDWebImage
import ExpoModulesCore
/// Where loaded images may be cached. Mirrors the JS-side `cachePolicy` prop values.
enum ImageCachePolicy: String, Enumerable {
  // Raw values default to the case name; only `memoryAndDisk` needs an explicit one.
  case none
  case disk
  case memory
  case memoryAndDisk = "memory-disk"

  /// Maps this policy onto SDWebImage's `SDImageCacheType`.
  func toSdCacheType() -> SDImageCacheType {
    switch self {
    case .none: return .none
    case .disk: return .disk
    case .memory: return .memory
    case .memoryAndDisk: return .all
    }
  }
}

24
node_modules/expo-image/ios/ImageCacheType.swift generated vendored Normal file
View File

@@ -0,0 +1,24 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import SDWebImage
import ExpoModulesCore
// Cache origin reported back to JS for a loaded image.
enum ImageCacheType: String, Enumerable {
  case none
  case disk
  case memory

  /**
   Converts SDWebImage's cache type into this enum.
   NOTE(review): `.all` is collapsed into `.disk` here — confirm that is the
   intended JS-facing semantics for images present in both caches.
   */
  static func fromSdCacheType(_ sdImageCacheType: SDImageCacheType) -> ImageCacheType {
    switch sdImageCacheType {
    case .none:
      return .none
    case .disk, .all:
      return .disk
    case .memory:
      return .memory
    @unknown default:
      // Future-proofing: log loudly and degrade to `none` if SDWebImage adds a case.
      log.error("Unhandled `SDImageCacheType` value: \(sdImageCacheType), returning `none` as fallback. Add the missing case as soon as possible.")
      return .none
    }
  }
}

19
node_modules/expo-image/ios/ImageLoadOptions.swift generated vendored Normal file
View File

@@ -0,0 +1,19 @@
// Copyright 2024-present 650 Industries. All rights reserved.
import ExpoModulesCore
/// Options for `Image.loadAsync` — optional downscaling bounds and a tint color.
internal struct ImageLoadOptions: Record {
  @Field var maxWidth: Int?
  @Field var maxHeight: Int?
  @Field var tintColor: UIColor? = nil

  /// The maximum decode size, or `nil` when neither dimension limit was requested.
  func getMaxSize() -> CGSize? {
    // If none of max dimensions are provided, just use the original image without the upper limit.
    // This is important for vector images, where using `CGSize(.max, .max)`
    // would actually try to create a bitmap of that size and cause a crash.
    guard maxWidth != nil || maxHeight != nil else {
      return nil
    }
    return CGSize(width: maxWidth ?? .max, height: maxHeight ?? .max)
  }
}

27
node_modules/expo-image/ios/ImageLoadTask.swift generated vendored Normal file
View File

@@ -0,0 +1,27 @@
// Copyright 2024-present 650 Industries. All rights reserved.
import ExpoModulesCore
/// A shared object representing one in-flight image load that JS can await or abort.
internal final class ImageLoadTask: SharedObject {
  private let source: ImageSource
  private let options: ImageLoadOptions

  /// Created lazily on the first `load()` call; subsequent calls await the same task.
  private var task: Task<UIImage, any Error>?

  init(_ source: ImageSource, options: ImageLoadOptions) {
    self.source = source
    self.options = options
    super.init()
  }

  /// Starts loading on the first call and returns the resulting image.
  func load() async throws -> UIImage {
    let activeTask: Task<UIImage, any Error>
    if let existingTask = task {
      activeTask = existingTask
    } else {
      activeTask = Task { [source, options] in
        try await ImageLoader.shared.load(source, options: options)
      }
      task = activeTask
    }
    return try await activeTask.value
  }

  /// Cancels the underlying task, if one was started.
  func abort() {
    task?.cancel()
  }
}

46
node_modules/expo-image/ios/ImageLoader.swift generated vendored Normal file
View File

@@ -0,0 +1,46 @@
// Copyright 2024-present 650 Industries. All rights reserved.
import SDWebImage
import ExpoModulesCore
// Loads images for the shared-ref API (`Image.loadAsync`), bypassing the view layer.
internal final class ImageLoader {
  nonisolated(unsafe) static let shared = ImageLoader()

  // Uses the shared loaders manager so the custom loaders registered by
  // `ImageModule.registerLoaders()` (blurhash, thumbhash, photo library, SF symbols) apply here too.
  lazy var imageManager = SDWebImageManager(
    cache: SDImageCache.shared,
    loader: SDImageLoadersManager.shared
  )

  /**
   Loads an image for the given source.
   - Parameter options: optional max size (downscaling) and tint color.
   - Throws: `ImageLoadingFailed` when the manager completes without an image.
   */
  func load(_ source: ImageSource, options: ImageLoadOptions) async throws -> UIImage {
    // This loader uses only the disk cache. We may want to give more control on this, but the memory cache
    // doesn't make much sense for shared refs as they're kept in memory as long as their JS objects.
    var context = createSDWebImageContext(forSource: source, cachePolicy: .disk)

    if let maxSize = options.getMaxSize() {
      // Note that setting the thumbnail size rasterizes vector images into a bitmap.
      context[.imageThumbnailPixelSize] = maxSize
    }
    context[.imagePreserveAspectRatio] = true

    // Bridge SDWebImage's callback API to async/await; the continuation resumes
    // exactly once — either with the image or with a wrapped error.
    let image = try await withCheckedThrowingContinuation { continuation in
      imageManager.loadImage(with: source.uri, context: context, progress: nil) { image, _, error, _, _, _ in
        if let image {
          continuation.resume(returning: image)
        } else {
          continuation.resume(throwing: ImageLoadingFailed().causedBy(error))
        }
      }
    }
    if let tintColor = options.tintColor {
      return image.withTintColor(tintColor)
    }
    return image
  }
}
/// Exception thrown by `ImageLoader.load` when SDWebImage completes without an image.
internal final class ImageLoadingFailed: Exception, @unchecked Sendable {
  override var reason: String {
    return "Failed to load an image"
  }
}

308
node_modules/expo-image/ios/ImageModule.swift generated vendored Normal file
View File

@@ -0,0 +1,308 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import ExpoModulesCore
import SDWebImage
import SDWebImageAVIFCoder
import SDWebImageSVGCoder
// The Expo module definition for `expo-image`: registers coders/loaders,
// declares the `ImageView` props/events, and exposes cache/prefetch/placeholder functions to JS.
public final class ImageModule: Module {
  lazy var prefetcher = SDWebImagePrefetcher.shared

  public func definition() -> ModuleDefinition {
    Name("ExpoImage")

    OnCreate {
      // Register custom image coders and loaders once, when the module is created.
      ImageModule.registerCoders()
      ImageModule.registerLoaders()
    }

    View(ImageView.self) {
      Events(
        "onLoadStart",
        "onProgress",
        "onError",
        "onLoad",
        "onDisplay"
      )

      // `source` accepts either an array of source descriptors or a shared image ref.
      Prop("source") { (view: ImageView, sources: Either<[ImageSource], SharedRef<UIImage>>?) in
        if let imageRef: SharedRef<UIImage> = sources?.get() {
          // Unset an array of traditional sources and just render the image ref right away.
          view.sources = nil
          view.renderSourceImage(imageRef.ref)
        } else {
          // Update an array of sources. Image will start loading once the all props are updated.
          view.sources = sources?.get()
          view.sourceImage = nil
        }
      }

      Prop("placeholder") { (view, placeholders: [ImageSource]?) in
        view.placeholderSources = placeholders ?? []
      }

      Prop("contentFit") { (view, contentFit: ContentFit?) in
        view.contentFit = contentFit ?? .cover
      }

      Prop("placeholderContentFit") { (view, placeholderContentFit: ContentFit?) in
        view.placeholderContentFit = placeholderContentFit ?? .scaleDown
      }

      Prop("contentPosition") { (view, contentPosition: ContentPosition?) in
        view.contentPosition = contentPosition ?? .center
      }

      Prop("transition") { (view, transition: ImageTransition?) in
        view.transition = transition
      }

      Prop("blurRadius") { (view, blurRadius: Double?) in
        let radius = blurRadius ?? .zero
        // the implementation uses Apple's CIGaussianBlur internally
        // we divide the radius to achieve more consistent cross-platform appearance
        // the value was found experimentally
        view.blurRadius = radius / 2.0
      }

      Prop("tintColor") { (view, tintColor: UIColor?) in
        view.imageTintColor = tintColor
      }

      // Priority is a bitmask on the view's loading options: clear both bits, then set the requested one.
      Prop("priority") { (view, priority: ImagePriority?) in
        view.loadingOptions.remove([.lowPriority, .highPriority])
        if let priority = priority?.toSDWebImageOptions() {
          view.loadingOptions.insert(priority)
        }
      }

      Prop("cachePolicy") { (view, cachePolicy: ImageCachePolicy?) in
        view.cachePolicy = cachePolicy ?? .disk
      }

      Prop("enableLiveTextInteraction") { (view, enableLiveTextInteraction: Bool?) in
        // Live Text (VisionKit) is not available on tvOS.
        #if !os(tvOS)
        view.enableLiveTextInteraction = enableLiveTextInteraction ?? false
        #endif
      }

      Prop("accessible") { (view, accessible: Bool?) in
        view.sdImageView.isAccessibilityElement = accessible ?? false
      }

      Prop("accessibilityLabel") { (view, label: String?) in
        view.sdImageView.accessibilityLabel = label
      }

      Prop("recyclingKey") { (view, key: String?) in
        view.recyclingKey = key
      }

      Prop("allowDownscaling") { (view, allowDownscaling: Bool?) in
        view.allowDownscaling = allowDownscaling ?? true
      }

      Prop("autoplay") { (view, autoplay: Bool?) in
        view.autoplay = autoplay ?? true
      }

      Prop("sfEffect") { (view, sfEffect: [SFSymbolEffect]?) in
        view.sfEffect = sfEffect
      }

      Prop("symbolWeight") { (view, symbolWeight: String?) in
        view.symbolWeight = symbolWeight
      }

      Prop("symbolSize") { (view, symbolSize: Double?) in
        view.symbolSize = symbolSize
      }

      Prop("useAppleWebpCodec", true) { (view, useAppleWebpCodec: Bool) in
        view.useAppleWebpCodec = useAppleWebpCodec
      }

      Prop("enforceEarlyResizing", false) { (view, enforceEarlyResizing: Bool) in
        view.enforceEarlyResizing = enforceEarlyResizing
      }

      Prop("preferHighDynamicRange", false) { (view, preferHighDynamicRange: Bool) in
        // HDR rendering is only available starting with iOS/tvOS 17.
        if #available(iOS 17.0, macCatalyst 17.0, tvOS 17.0, *) {
          view.sdImageView.preferredImageDynamicRange = preferHighDynamicRange ? .constrainedHigh : .unspecified
        }
      }

      // SF Symbol sources animate through the Symbols framework; everything else via SDAnimatedImageView.
      AsyncFunction("startAnimating") { (view: ImageView) in
        if view.isSFSymbolSource {
          view.startSymbolAnimation()
        } else {
          view.sdImageView.startAnimating()
        }
      }

      AsyncFunction("stopAnimating") { (view: ImageView) in
        if view.isSFSymbolSource {
          view.stopSymbolAnimation()
        } else {
          view.sdImageView.stopAnimating()
        }
      }

      AsyncFunction("lockResourceAsync") { (view: ImageView) in
        view.lockResource = true
      }

      AsyncFunction("unlockResourceAsync") { (view: ImageView) in
        view.lockResource = false
      }

      AsyncFunction("reloadAsync") { (view: ImageView) in
        view.reload(force: true)
      }

      OnViewDidUpdateProps { view in
        view.reload()
      }
    }

    Function("configureCache") { (config: ImageCacheConfig) in
      ImageModule.configureCache(config: config)
    }

    // Prefetches the given URLs into the requested cache. Resolves `true` when all
    // URLs loaded, `false` on the first failure.
    // NOTE(review): after a failure resolves `false`, later successful completions can
    // still push `imagesLoaded` up to `urls.count` and resolve the promise a second
    // time — confirm `Promise` tolerates double resolution. Also, an empty `urls`
    // array never resolves the promise at all.
    AsyncFunction("prefetch") { (urls: [URL], cachePolicy: ImageCachePolicy, headersMap: [String: String]?, promise: Promise) in
      var context = SDWebImageContext()
      let sdCacheType = cachePolicy.toSdCacheType().rawValue
      // Skip the "transformed" cache entirely; only the original image is queried/stored.
      context[.queryCacheType] = SDImageCacheType.none.rawValue
      context[.storeCacheType] = SDImageCacheType.none.rawValue
      context[.originalQueryCacheType] = sdCacheType
      context[.originalStoreCacheType] = sdCacheType

      var imagesLoaded = 0
      var failed = false

      if headersMap != nil {
        context[.downloadRequestModifier] = SDWebImageDownloaderRequestModifier(headers: headersMap)
      }

      urls.forEach { url in
        SDWebImagePrefetcher.shared.prefetchURLs([url], context: context, progress: nil, completed: { _, skipped in
          if skipped > 0 && !failed {
            failed = true
            promise.resolve(false)
          } else {
            imagesLoaded = imagesLoaded + 1
            if imagesLoaded == urls.count {
              promise.resolve(true)
            }
          }
        })
      }
    }

    // Generates a blurhash string for an image or a URL; `numberOfComponents` is
    // passed as a CGSize whose width/height are the X/Y component counts.
    AsyncFunction("generateBlurhashAsync") { (source: Either<Image, URL>, numberOfComponents: CGSize, promise: Promise) in
      let parsedNumberOfComponents = (width: Int(numberOfComponents.width), height: Int(numberOfComponents.height))
      generatePlaceholder(source: source) { (image: UIImage) in
        if let blurhashString = blurhash(fromImage: image, numberOfComponents: parsedNumberOfComponents) {
          promise.resolve(blurhashString)
        } else {
          promise.reject(BlurhashGenerationException())
        }
      }
    }

    AsyncFunction("generateThumbhashAsync") { (source: Either<Image, URL>, promise: Promise) in
      generatePlaceholder(source: source) { (image: UIImage) in
        let blurhashString = thumbHash(fromImage: image)
        promise.resolve(blurhashString.base64EncodedString())
      }
    }

    AsyncFunction("clearMemoryCache") { () -> Bool in
      SDImageCache.shared.clearMemory()
      return true
    }

    AsyncFunction("clearDiskCache") { (promise: Promise) in
      SDImageCache.shared.clearDisk {
        promise.resolve(true)
      }
    }

    AsyncFunction("getCachePathAsync") { (cacheKey: String, promise: Promise) in
      /*
      We need to check if the image exists in the cache first since `cachePath` will
      return a path regardless of whether or not the image exists.
      */
      SDImageCache.shared.diskImageExists(withKey: cacheKey) { exists in
        if exists {
          let cachePath = SDImageCache.shared.cachePath(forKey: cacheKey)
          promise.resolve(cachePath)
        } else {
          promise.resolve(nil)
        }
      }
    }

    AsyncFunction("loadAsync") { (source: ImageSource, options: ImageLoadOptions?) -> Image? in
      let image = try await ImageLoadTask(source, options: options ?? ImageLoadOptions()).load()
      return Image(image)
    }

    // JS-visible properties of the shared `Image` ref.
    Class(Image.self) {
      Property("width", \.ref.size.width)
      Property("height", \.ref.size.height)
      Property("scale", \.ref.scale)
      Property("isAnimated", \.isAnimated)
      Property("mediaType") { image in
        return imageFormatToMediaType(image.ref.sd_imageFormat)
      }
    }
  }

  // Resolves a placeholder input to a UIImage: shared refs are used directly,
  // URLs are downloaded first.
  // NOTE(review): when the download fails (`image == nil`) the generator is never
  // invoked, so the caller's promise is left pending — confirm that is intended.
  func generatePlaceholder(
    source: Either<Image, URL>,
    generator: @escaping (UIImage) -> Void
  ) {
    if let image: Image = source.get() {
      generator(image.ref)
    } else if let url: URL = source.get() {
      let downloader = SDWebImageDownloader()
      downloader.downloadImage(with: url, progress: nil, completed: { image, _, _, _ in
        // Hop off the downloader's callback queue before running the (potentially expensive) generator.
        DispatchQueue.global().async {
          if let downloadedImage = image {
            generator(downloadedImage)
          }
        }
      })
    }
  }

  // Registers the decoders expo-image supports on top of SDWebImage's defaults.
  static func registerCoders() {
    SDImageCodersManager.shared.addCoder(WebPCoder.shared)
    SDImageCodersManager.shared.addCoder(PSDCoder.shared)
    SDImageCodersManager.shared.addCoder(SDImageAVIFCoder.shared)
    SDImageCodersManager.shared.addCoder(SDImageSVGCoder.shared)
    SDImageCodersManager.shared.addCoder(SDImageHEICCoder.shared)
  }

  // Registers loaders for the custom URL schemes (blurhash:, thumbhash:, photo library, sf:).
  static func registerLoaders() {
    SDImageLoadersManager.shared.addLoader(BlurhashLoader())
    SDImageLoadersManager.shared.addLoader(ThumbhashLoader())
    SDImageLoadersManager.shared.addLoader(PhotoLibraryAssetLoader())
    SDImageLoadersManager.shared.addLoader(SFSymbolLoader())
  }

  // Applies the provided cache limits to the shared SDImageCache; `nil` fields are left unchanged.
  static func configureCache(config: ImageCacheConfig) {
    if let maxMemoryCount = config.maxMemoryCount {
      SDImageCache.shared.config.maxMemoryCount = maxMemoryCount
    }
    if let maxDiskSize = config.maxDiskSize {
      SDImageCache.shared.config.maxDiskSize = maxDiskSize
    }
    if let maxMemoryCost = config.maxMemoryCost {
      SDImageCache.shared.config.maxMemoryCost = maxMemoryCost
    }
  }
}

22
node_modules/expo-image/ios/ImagePriority.swift generated vendored Normal file
View File

@@ -0,0 +1,22 @@
import ExpoModulesCore
import SDWebImage
/// Download priority for an image request. Mirrors the JS-side `priority` prop values.
enum ImagePriority: String, Enumerable {
  case low
  case normal
  case high

  /**
   Maps the priority to `SDWebImageOptions` which is a bitmask thus has only low and high priority options.
   */
  func toSDWebImageOptions() -> SDWebImageOptions? {
    // Exhaustive switch (no `default:`) so adding a new case is a compile error here.
    switch self {
    case .low:
      return .lowPriority
    case .high:
      return .highPriority
    case .normal:
      // `normal` has no dedicated bit — the absence of both `.lowPriority`
      // and `.highPriority` already means normal priority.
      return nil
    }
  }
}

47
node_modules/expo-image/ios/ImageSource.swift generated vendored Normal file
View File

@@ -0,0 +1,47 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import ExpoModulesCore
// Describes a single image source passed from JS (URL, dimensions, headers, cache key).
struct ImageSource: Record {
  // Source width in its own units (used with `scale` to compute `pixelCount`).
  @Field
  var width: Double = 0.0

  @Field
  var height: Double = 0.0

  // The image URL. Custom schemes ("blurhash", "thumbhash", "sf", photo library)
  // are routed to dedicated loaders — see the computed flags below.
  @Field
  var uri: URL? = nil

  @Field
  var scale: Double = 1.0

  // Extra HTTP headers to send when downloading the image.
  @Field
  var headers: [String: String]?

  // Overrides the cache key derived from the URL, when provided.
  @Field
  var cacheKey: String?

  // Total pixel count of the source (width × height × scale²).
  var pixelCount: Double {
    return width * height * scale * scale
  }

  var isBlurhash: Bool {
    return uri?.scheme == "blurhash"
  }

  var isThumbhash: Bool {
    return uri?.scheme == "thumbhash"
  }

  var isPhotoLibraryAsset: Bool {
    return isPhotoLibraryAssetUrl(uri)
  }

  var isSFSymbol: Bool {
    return uri?.scheme == "sf"
  }

  // Photo library assets are not stored in the original-image cache.
  var cacheOriginalImage: Bool {
    return !isPhotoLibraryAsset
  }
}

84
node_modules/expo-image/ios/ImageTransition.swift generated vendored Normal file
View File

@@ -0,0 +1,84 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import ExpoModulesCore
import Symbols
/// Timing curve of the image transition animation. Raw values mirror CSS timing-function names.
enum ImageTransitionTiming: String, Enumerable {
  case easeInOut = "ease-in-out"
  case easeIn = "ease-in"
  case easeOut = "ease-out"
  case linear = "linear"

  /// The `UIView.AnimationOptions` curve corresponding to this timing function.
  func toAnimationOption() -> UIView.AnimationOptions {
    switch self {
    case .easeInOut: return .curveEaseInOut
    case .easeIn: return .curveEaseIn
    case .easeOut: return .curveEaseOut
    case .linear: return .curveLinear
    }
  }
}
/// Visual effect used when swapping images. Raw values mirror the JS-side prop values.
enum ImageTransitionEffect: String, Enumerable {
  case crossDissolve = "cross-dissolve"
  case flipFromTop = "flip-from-top"
  case flipFromRight = "flip-from-right"
  case flipFromBottom = "flip-from-bottom"
  case flipFromLeft = "flip-from-left"
  case curlUp = "curl-up"
  case curlDown = "curl-down"

  // SF Symbol replace effects (iOS 17+)
  case sfReplace = "sf:replace"
  case sfDownUp = "sf:down-up"
  case sfUpUp = "sf:up-up"
  case sfOffUp = "sf:off-up"

  /// `true` for the SF Symbol "replace" family of effects.
  /// Both switches below are exhaustive (no `default:`) so adding a new case
  /// forces a decision here at compile time.
  var isSFReplaceEffect: Bool {
    switch self {
    case .sfReplace, .sfDownUp, .sfUpUp, .sfOffUp:
      return true
    case .crossDissolve, .flipFromTop, .flipFromRight, .flipFromBottom, .flipFromLeft, .curlUp, .curlDown:
      return false
    }
  }

  /// Maps the effect to a `UIView.AnimationOptions` transition.
  func toAnimationOption() -> UIView.AnimationOptions {
    switch self {
    case .crossDissolve:
      return .transitionCrossDissolve
    case .flipFromLeft:
      return .transitionFlipFromLeft
    case .flipFromRight:
      return .transitionFlipFromRight
    case .flipFromTop:
      return .transitionFlipFromTop
    case .flipFromBottom:
      return .transitionFlipFromBottom
    case .curlUp:
      return .transitionCurlUp
    case .curlDown:
      return .transitionCurlDown
    case .sfReplace, .sfDownUp, .sfUpUp, .sfOffUp:
      // SF Symbol effects have no UIKit transition equivalent; fall back to
      // cross-dissolve (same behavior as the previous `default:` branch).
      return .transitionCrossDissolve
    }
  }
}
// Transition configuration passed from JS via the `transition` prop.
struct ImageTransition: Record {
  // Transition duration. NOTE(review): presumably milliseconds — confirm how consumers interpret it.
  @Field
  var duration: Double = 100

  // Timing curve of the animation.
  @Field
  var timing: ImageTransitionTiming = .easeInOut

  // The visual effect used when swapping images.
  @Field
  var effect: ImageTransitionEffect = .crossDissolve

  // Combines the timing curve and transition effect into one options bitmask.
  func toAnimationOptions() -> UIView.AnimationOptions {
    return [timing.toAnimationOption(), effect.toAnimationOption()]
  }
}

843
node_modules/expo-image/ios/ImageView.swift generated vendored Normal file
View File

@@ -0,0 +1,843 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import SDWebImage
import ExpoModulesCore
import Symbols
#if !os(tvOS)
import VisionKit
#endif
typealias SDWebImageContext = [SDWebImageContextOption: Any]
// swiftlint:disable:next type_body_length
public final class ImageView: ExpoView {
nonisolated static let contextSourceKey = SDWebImageContextOption(rawValue: "source")
nonisolated static let screenScaleKey = SDWebImageContextOption(rawValue: "screenScale")
nonisolated static let contentFitKey = SDWebImageContextOption(rawValue: "contentFit")
nonisolated static let frameSizeKey = SDWebImageContextOption(rawValue: "frameSize")
let sdImageView = SDAnimatedImageView(frame: .zero)
// Custom image manager doesn't use shared loaders managers by default,
// so make sure it is provided here.
let imageManager = SDWebImageManager(
cache: SDImageCache.shared,
loader: SDImageLoadersManager.shared
)
var loadingOptions: SDWebImageOptions = [
.retryFailed, // Don't blacklist URLs that failed downloading
.handleCookies, // Handle cookies stored in the shared `HTTPCookieStore`
// Images from cache are `AnimatedImage`s. BlurRadius is done via a SDImageBlurTransformer
// so this flag needs to be enabled. Beware most transformers cannot manage animated images.
.transformAnimatedImage
]
/**
An array of sources from which the view will asynchronously load one of them that fits best into the view bounds.
*/
var sources: [ImageSource]?
/**
An image that has been loaded from one of the `sources` or set by the shared ref to an image.
*/
var sourceImage: UIImage?
var pendingOperation: SDWebImageCombinedOperation?
var contentFit: ContentFit = .cover
var contentPosition: ContentPosition = .center
var transition: ImageTransition?
var blurRadius: CGFloat = 0.0
var imageTintColor: UIColor?
var cachePolicy: ImageCachePolicy = .disk
var allowDownscaling: Bool = true
var lockResource: Bool = false
var enforceEarlyResizing: Bool = false
var recyclingKey: String? {
didSet {
if oldValue != nil && recyclingKey != oldValue {
sdImageView.image = nil
}
}
}
var autoplay: Bool = true
var sfEffect: [SFSymbolEffect]?
var symbolWeight: String?
var symbolSize: Double?
var useAppleWebpCodec: Bool = true
/**
Tracks whether the current image is an SF Symbol for animation control.
*/
var isSFSymbolSource: Bool = false
/**
The ideal image size that fills in the container size while maintaining the source aspect ratio.
*/
var imageIdealSize: CGSize = .zero
// MARK: - Events
let onLoadStart = EventDispatcher()
let onProgress = EventDispatcher()
let onError = EventDispatcher()
let onLoad = EventDispatcher()
let onDisplay = EventDispatcher()
// MARK: - View
// Reloading on bounds changes lets the view pick the best-fitting source for the new size.
public override var bounds: CGRect {
  didSet {
    // Reload the image when the bounds size has changed and is not empty.
    if oldValue.size != bounds.size && bounds.size != .zero {
      reload()
    }
  }
}
// Sets up the embedded SDAnimatedImageView that actually renders the image.
public required init(appContext: AppContext? = nil) {
  super.init(appContext: appContext)
  clipsToBounds = true
  sdImageView.contentMode = contentFit.toContentMode()
  // Track this view's size automatically as it resizes.
  sdImageView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
  sdImageView.layer.masksToBounds = false
  // Apply trilinear filtering to smooth out mis-sized images.
  sdImageView.layer.magnificationFilter = .trilinear
  sdImageView.layer.minificationFilter = .trilinear
  addSubview(sdImageView)
}
deinit {
  // Cancel pending requests when the view is deallocated.
  cancelPendingOperation()
}
// Recomputes content positioning when the light/dark appearance flips.
public override func traitCollectionDidChange(_ previousTraitCollection: UITraitCollection?) {
  super.traitCollectionDidChange(previousTraitCollection)
  if self.traitCollection.hasDifferentColorAppearance(comparedTo: previousTraitCollection) {
    // The mask layer we adjusted would be invaliated from `RCTViewComponentView.traitCollectionDidChange`.
    // After that we have to recalculate the mask layer in `applyContentPosition`.
    applyContentPosition(contentSize: imageIdealSize, containerSize: frame.size)
  }
}
// MARK: - Implementation
/**
 Loads and displays an image from the best-fitting source.
 - Parameter force: when `true`, bypasses `lockResource` (used by `reloadAsync`).
 */
func reload(force: Bool = false) {
  // A locked resource is only reloaded on explicit request.
  if lockResource && !force {
    return
  }
  if isViewEmpty {
    displayPlaceholderIfNecessary()
  }
  guard let source = bestSource else {
    displayPlaceholderIfNecessary()
    return
  }
  // Track if this is an SF Symbol source for animation handling
  isSFSymbolSource = source.isSFSymbol
  if sdImageView.image == nil {
    sdImageView.contentMode = contentFit.toContentMode()
  }
  var context = createBaseImageContext(source: source)
  // Cancel currently running load requests.
  cancelPendingOperation()
  if blurRadius > 0 {
    // Blur is applied through an SDWebImage transformer pipeline.
    context[.imageTransformer] = createTransformPipeline()
  }
  // It seems that `UIImageView` can't tint some vector graphics. If the `tintColor` prop is specified,
  // we tell the SVG coder to decode to a bitmap instead. This will become useless when we switch to SVGNative coder.
  let shouldEarlyResize = imageTintColor != nil || enforceEarlyResizing || source.isPhotoLibraryAsset
  if shouldEarlyResize {
    context[.imagePreserveAspectRatio] = true
    context[.imageThumbnailPixelSize] = CGSize(
      width: sdImageView.bounds.size.width * screenScale,
      height: sdImageView.bounds.size.height * screenScale
    )
  }
  // Some loaders (e.g. PhotoLibraryAssetLoader) may need to know the screen scale.
  context[ImageView.screenScaleKey] = screenScale
  context[ImageView.frameSizeKey] = frame.size
  context[ImageView.contentFitKey] = contentFit
  // Do it here so we don't waste resources trying to fetch from a remote URL
  if maybeRenderLocalAsset(from: source) {
    return
  }
  // Render SF Symbols directly without going through SDWebImage to preserve symbol properties
  if source.isSFSymbol {
    renderSFSymbol(from: source)
    return
  }
  onLoadStart([:])
  pendingOperation = imageManager.loadImage(
    with: source.uri,
    options: loadingOptions,
    context: context,
    progress: imageLoadProgress(_:_:_:),
    completed: imageLoadCompleted(_:_:_:_:_:_:)
  )
}
// MARK: - Loading
/**
 SDWebImage progress callback — emits the `onProgress` event to JS.
 */
private func imageLoadProgress(_ receivedSize: Int, _ expectedSize: Int, _ imageUrl: URL?) {
  // Don't send the event when the expected size is unknown (it's usually -1 or 0 when called for the first time).
  if expectedSize <= 0 {
    return
  }
  // Photos library requester emits the progress as a double `0...1` that we map to `0...100` int in `PhotosLoader`.
  // When that loader is used, we don't have any information about the sizes in bytes, so we only send the `progress` param.
  let isPhotoLibraryAsset = isPhotoLibraryAssetUrl(imageUrl)

  onProgress([
    "loaded": isPhotoLibraryAsset ? nil : receivedSize,
    "total": isPhotoLibraryAsset ? nil : expectedSize,
    "progress": Double(receivedSize) / Double(expectedSize)
  ])
}
/**
 SDWebImage completion callback — reports errors, emits `onLoad` and renders the loaded image.
 */
// swiftlint:disable:next function_parameter_count
private func imageLoadCompleted(
  _ image: UIImage?,
  _ data: Data?,
  _ error: Error?,
  _ cacheType: SDImageCacheType,
  _ finished: Bool,
  _ imageUrl: URL?
) {
  if let error = error {
    let code = (error as NSError).code
    // SDWebImage throws an error when loading operation is canceled (interrupted) by another load request.
    // We do want to ignore that one and wait for the new request to load.
    if code != SDWebImageError.cancelled.rawValue {
      onError(["error": error.localizedDescription])
    }
    return
  }
  guard finished else {
    log.debug("Loading the image has been canceled")
    return
  }
  if let image {
    // Notify JS that the image is available, together with its metadata.
    onLoad([
      "cacheType": cacheTypeToString(cacheType),
      "source": [
        "url": imageUrl?.absoluteString,
        "width": image.size.width,
        "height": image.size.height,
        "mediaType": imageFormatToMediaType(image.sd_imageFormat),
        "isAnimated": image.sd_isAnimated
      ]
    ])
    let scale = window?.screen.scale ?? UIScreen.main.scale

    // Compute the size the image should occupy for the current container and `contentFit`.
    imageIdealSize = idealSize(
      contentPixelSize: image.size * image.scale,
      containerSize: frame.size,
      scale: scale,
      contentFit: contentFit
    ).rounded(.up)

    // Downscale or install the animation transformer before rendering.
    let image = processImage(image, idealSize: imageIdealSize, scale: scale)

    applyContentPosition(contentSize: imageIdealSize, containerSize: frame.size)
    renderSourceImage(image)
  } else {
    displayPlaceholderIfNecessary()
  }
}
/**
 Renders an SF Symbol source directly, bypassing SDWebImage so symbol-specific
 properties (weight, size, effects) are preserved.
 */
private func renderSFSymbol(from source: ImageSource) {
  guard let uri = source.uri else {
    return
  }
  // Extract symbol name from URL path (e.g., sf:/star.fill)
  let symbolName = uri.pathComponents.count > 1 ? uri.pathComponents[1] : ""

  // Create symbol with configuration using the symbolWeight and symbolSize props
  let weight = parseSymbolWeight(symbolWeight)
  let pointSize = symbolSize ?? 100
  let configuration = UIImage.SymbolConfiguration(pointSize: pointSize, weight: weight)

  guard let image = UIImage(systemName: symbolName, withConfiguration: configuration) else {
    onError(["error": "Unable to create SF Symbol image for '\(symbolName)'"])
    return
  }
  // Symbols are generated locally, so the cache type is always "none".
  onLoad([
    "cacheType": "none",
    "source": [
      "url": uri.absoluteString,
      "width": image.size.width,
      "height": image.size.height,
      "mediaType": nil,
      "isAnimated": false
    ]
  ])
  let scale = window?.screen.scale ?? UIScreen.main.scale
  imageIdealSize = idealSize(
    contentPixelSize: image.size * image.scale,
    containerSize: frame.size,
    scale: scale,
    contentFit: contentFit
  ).rounded(.up)
  applyContentPosition(contentSize: imageIdealSize, containerSize: frame.size)
  renderSFSymbolImage(image)
}
/**
 Displays the given SF Symbol image, applying tint, replace transitions and symbol effects.
 */
private func renderSFSymbolImage(_ image: UIImage) {
  sourceImage = image
  sdImageView.contentMode = contentFit.toContentMode()

  // Template rendering lets `tintColor` recolor the symbol.
  let templateImage = image.withRenderingMode(.alwaysTemplate)

  if let imageTintColor {
    sdImageView.tintColor = imageTintColor
  }
  // Use replace content transition for sf:replace effects
  if #available(iOS 17.0, tvOS 17.0, *), let effect = transition?.effect, effect.isSFReplaceEffect {
    applyReplaceTransition(image: templateImage, effect: effect)
  } else {
    sdImageView.image = templateImage
  }
  // Apply symbol effect if autoplay is enabled
  if #available(iOS 17.0, tvOS 17.0, *), autoplay {
    applySymbolEffect()
  }
  onDisplay()
}
/**
 Renders the source directly when it refers to a bundled asset, skipping the SDWebImage pipeline.
 - Returns: `true` when a local asset was found and rendered, `false` otherwise.
 */
private func maybeRenderLocalAsset(from source: ImageSource) -> Bool {
  let path: String? = {
    // .path() on iOS 16 would remove the leading slash, but it doesn't on tvOS 16 🙃
    // It also crashes with EXC_BREAKPOINT when parsing data:image uris
    // manually drop the leading slash below iOS 16
    if let path = source.uri?.path {
      return String(path.dropFirst())
    }
    return nil
  }()
  if let path, !path.isEmpty, let local = UIImage(named: path) {
    renderSourceImage(local)
    return true
  }
  return false
}
// MARK: - Placeholder
/**
A list of sources that the placeholder can be loaded from.
*/
var placeholderSources: [ImageSource] = [] {
  didSet {
    // Start loading right away so the placeholder can show before the proper image.
    loadPlaceholderIfNecessary()
  }
}
/**
A placeholder image to use when the proper image is unset.
*/
var placeholderImage: UIImage? // Result of the most recent placeholder load (set in `loadPlaceholderIfNecessary`).
/**
Content fit for the placeholder. `scale-down` seems to be the best choice for spinners
and that the placeholders are usually smaller than the proper image, but it doesn't
apply to blurhash that by default could use the same fitting as the proper image.
*/
var placeholderContentFit: ContentFit = .scaleDown // Switched to `contentFit` for blurhash/thumbhash placeholders.
/**
Same as `bestSource`, but for placeholders.
*/
var bestPlaceholder: ImageSource? {
  // Fall back to the first source when none matches the current size and scale.
  return getBestSource(from: placeholderSources, forSize: bounds.size, scale: screenScale) ?? placeholderSources.first
}
/**
A bool value whether the placeholder can be displayed, i.e. nothing has been displayed yet or the sources are unset.
*/
var canDisplayPlaceholder: Bool {
  // Either nothing is rendered yet, or there is no source that could replace the placeholder.
  return isViewEmpty || (!hasAnySource && sourceImage == nil)
}
/**
Loads a placeholder from the best source provided in `placeholder` prop.
A placeholder should be a local asset to have more time to show before the proper image is loaded,
but remote assets are also supported for the bundler and to cache them on the disk to load faster next time.
- Note: Placeholders are not being resized nor transformed, so try to keep them small.
*/
func loadPlaceholderIfNecessary() {
  // Exit early if placeholder is not set or there is already an image attached to the view.
  // The placeholder is only used until the first image is loaded.
  guard canDisplayPlaceholder, let placeholder = bestPlaceholder else {
    return
  }
  // Cache placeholders on the disk. Should we let the user choose whether
  // to cache them or apply the same policy as with the proper image?
  // Basically they are also cached in memory as the `placeholderImage` property,
  // so just `disk` policy sounds like a good idea.
  let context = createBaseImageContext(source: placeholder, cachePolicy: .disk)
  let isPlaceholderHash = placeholder.isBlurhash || placeholder.isThumbhash

  imageManager.loadImage(with: placeholder.uri, context: context, progress: nil) { [weak self] placeholder, _, _, _, finished, _ in
    guard let self, let placeholder, finished else {
      return
    }
    self.placeholderImage = placeholder
    // Hash placeholders (blurhash/thumbhash) stand in for the proper image, so they reuse its content fit.
    self.placeholderContentFit = isPlaceholderHash ? self.contentFit : self.placeholderContentFit
    self.displayPlaceholderIfNecessary()
  }
}
/**
 Displays a placeholder if necessary — the placeholder can only be displayed when no image has been displayed yet or the sources are unset.
 */
private func displayPlaceholderIfNecessary() {
  // Only show the placeholder when it was loaded and nothing else has been displayed yet.
  guard canDisplayPlaceholder, let placeholder = placeholderImage else {
    return
  }
  setImage(placeholder, contentFit: placeholderContentFit, isPlaceholder: true)
}
// MARK: - Processing
/**
 Builds the decode-time transformer pipeline.
 Currently it contains a single blur transformer driven by the `blurRadius` prop.
 */
private func createTransformPipeline() -> SDImagePipelineTransformer? {
  let blurTransformer = SDImageBlurTransformer(radius: blurRadius)
  return SDImagePipelineTransformer(transformers: [blurTransformer])
}
/**
 Post-processes the loaded image: downscales static images to the ideal size when allowed,
 or installs a per-frame resizing transformer for animated images.
 - Returns: The processed image, or `nil` when the view has empty bounds.
 */
private func processImage(_ image: UIImage?, idealSize: CGSize, scale: Double) -> UIImage? {
  guard let image = image, !bounds.isEmpty else {
    return nil
  }
  sdImageView.animationTransformer = nil

  // Downscale the image only when necessary
  if allowDownscaling && shouldDownscale(image: image, toSize: idealSize, scale: scale) {
    if image.sd_isAnimated {
      // Animated images are resized frame-by-frame by the image view instead of upfront.
      let size = idealSize * scale
      sdImageView.animationTransformer = SDImageResizingTransformer(size: size, scaleMode: .fill)
      return image
    }
    return resize(image: image, toSize: idealSize, scale: scale)
  }
  return image
}
// MARK: - Rendering
/**
Moves the layer on which the image is rendered to respect the `contentPosition` prop.
*/
private func applyContentPosition(contentSize: CGSize, containerSize: CGSize) {
  let offset = contentPosition.offset(contentSize: contentSize, containerSize: containerSize)

  if sdImageView.layer.mask != nil {
    // In New Architecture mode, React Native adds a mask layer to image subviews.
    // When moving the layer frame, we must move the mask layer with a compensation value.
    // This prevents the layer from being cropped.
    // See https://github.com/expo/expo/issues/34201
    // and https://github.com/facebook/react-native/blob/c72d4c5ee97/packages/react-native/React/Fabric/Mounting/ComponentViews/View/RCTViewComponentView.mm#L1066-L1076
    // The transaction disables implicit actions so the frame change isn't animated.
    CATransaction.begin()
    CATransaction.setDisableActions(true)
    sdImageView.layer.frame.origin = offset
    sdImageView.layer.mask?.frame.origin = CGPoint(x: -offset.x, y: -offset.y)
    CATransaction.commit()
  } else {
    sdImageView.layer.frame.origin = offset
  }
}
/**
 Renders the given image as the proper source, animating with the `transition` prop when one is set.
 */
internal func renderSourceImage(_ image: UIImage?) {
  // Update the source image before it gets rendered or transitioned to.
  sourceImage = image

  // For SF Symbol replace effect, skip the UIView transition and let the native symbol animation handle it
  let isSFReplaceEffect = transition?.effect.isSFReplaceEffect == true && isSFSymbolSource

  if let transition = transition, transition.duration > 0, !isSFReplaceEffect {
    let options = transition.toAnimationOptions()
    // The `duration` prop is expressed in milliseconds; `UIView.transition` expects seconds.
    let seconds = transition.duration / 1000

    UIView.transition(with: sdImageView, duration: seconds, options: options) { [weak self] in
      if let self {
        self.setImage(image, contentFit: self.contentFit, isPlaceholder: false)
      }
    }
  } else {
    setImage(image, contentFit: contentFit, isPlaceholder: false)
  }
}
/**
 Sets the image on the underlying image view, handling tinting, SF Symbol replace
 transitions, symbol effects, Live Text analysis and the `onDisplay` event.
 */
private func setImage(_ image: UIImage?, contentFit: ContentFit, isPlaceholder: Bool) {
  sdImageView.contentMode = contentFit.toContentMode()

  if isPlaceholder {
    // Placeholders (e.g. spinners) always animate, regardless of the `autoplay` prop.
    sdImageView.autoPlayAnimatedImage = true
  } else {
    sdImageView.autoPlayAnimatedImage = autoplay
  }
  // Remove any existing symbol effects before setting new image
  if #available(iOS 17.0, tvOS 17.0, *) {
    sdImageView.removeAllSymbolEffects()
  }
  if let imageTintColor, !isPlaceholder {
    sdImageView.tintColor = imageTintColor
    // Template rendering lets `tintColor` recolor the image.
    let templateImage = image?.withRenderingMode(.alwaysTemplate)

    // Use replace content transition for SF Symbols when sf:replace effect is set
    if #available(iOS 17.0, tvOS 17.0, *), isSFSymbolSource, let effect = transition?.effect, effect.isSFReplaceEffect, let templateImage {
      // `transition.duration` is in milliseconds; the animation API expects seconds.
      let duration = (transition?.duration ?? 300) / 1000
      applyReplaceTransition(image: templateImage, effect: effect, duration: duration)
    } else {
      sdImageView.image = templateImage
    }
  } else {
    sdImageView.tintColor = nil

    // Use replace content transition for SF Symbols when sf:replace effect is set
    if #available(iOS 17.0, tvOS 17.0, *), isSFSymbolSource, let effect = transition?.effect, effect.isSFReplaceEffect, let image {
      let duration = (transition?.duration ?? 300) / 1000
      applyReplaceTransition(image: image, effect: effect, duration: duration)
    } else {
      sdImageView.image = image
    }
  }
  // Apply symbol effect if this is an SF Symbol and autoplay is enabled
  if #available(iOS 17.0, tvOS 17.0, *) {
    if !isPlaceholder && isSFSymbolSource && autoplay {
      applySymbolEffect()
    }
  }
  if !isPlaceholder {
    onDisplay()
  }
  #if !os(tvOS)
  if enableLiveTextInteraction {
    analyzeImage()
  }
  #endif
}
// MARK: - Symbol Effects
/**
 Clears any attached symbol effects and applies the effects from the `sfEffect` prop.
 */
@available(iOS 17.0, tvOS 17.0, *)
func applySymbolEffect() {
  // Always start from a clean slate so effects don't accumulate across calls.
  sdImageView.removeAllSymbolEffects()

  guard let effects = sfEffect, !effects.isEmpty else {
    return
  }
  effects.forEach(applySingleSymbolEffect)
}
/**
 Applies a single symbol effect from the `sfEffect` prop to the image view.
 Effects introduced after iOS 17 are delegated to the iOS 18/26 handlers.
 */
@available(iOS 17.0, tvOS 17.0, *)
private func applySingleSymbolEffect(_ sfEffectItem: SFSymbolEffect) {
  let repeatCount = sfEffectItem.repeatCount
  // -1 = infinite, 0 = play once, 1 = repeat once (play twice), etc.
  let options: SymbolEffectOptions = repeatCount < 0 ? .repeating : .repeat(repeatCount + 1)
  let scope = sfEffectItem.scope
  let effect = sfEffectItem.effect

  switch effect {
  case .bounce, .bounceUp, .bounceDown:
    let base: BounceSymbolEffect = effect == .bounceUp ? .bounce.up : effect == .bounceDown ? .bounce.down : .bounce
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(base.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(base.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(base, options: options)
    }
  case .pulse:
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(.pulse.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(.pulse.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(.pulse, options: options)
    }
  case .variableColor, .variableColorIterative, .variableColorCumulative:
    // Variable-color has no by-layer/whole-symbol variants, so `scope` is ignored here.
    let base: VariableColorSymbolEffect = effect == .variableColorIterative ? .variableColor.iterative :
    effect == .variableColorCumulative ? .variableColor.cumulative : .variableColor
    sdImageView.addSymbolEffect(base, options: options)
  case .scale, .scaleUp, .scaleDown:
    let base: ScaleSymbolEffect = effect == .scaleUp ? .scale.up : effect == .scaleDown ? .scale.down : .scale
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(base.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(base.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(base, options: options)
    }
  case .appear:
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(.appear.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(.appear.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(.appear, options: options)
    }
  case .disappear:
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(.disappear.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(.disappear.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(.disappear, options: options)
    }
  default:
    // Effects unknown to iOS 17 (wiggle, rotate, breathe, draw) require newer OS handlers.
    if #available(iOS 18.0, tvOS 18.0, *) {
      applySymbolEffectiOS18(effect: effect, scope: scope, options: options)
    }
  }
}
/**
 Handles symbol effects introduced in iOS 18 (wiggle, rotate, breathe);
 later additions fall through to the iOS 26 handler.
 */
@available(iOS 18.0, tvOS 18.0, *)
private func applySymbolEffectiOS18(effect: SFSymbolEffectType, scope: SFSymbolEffectScope?, options: SymbolEffectOptions) {
  switch effect {
  case .wiggle:
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(.wiggle.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(.wiggle.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(.wiggle, options: options)
    }
  case .rotate:
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(.rotate.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(.rotate.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(.rotate, options: options)
    }
  case .breathe:
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(.breathe.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(.breathe.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(.breathe, options: options)
    }
  default:
    if #available(iOS 26.0, tvOS 26.0, *) {
      applySymbolEffectiOS26(effect: effect, scope: scope, options: options)
    }
  }
}
/**
 Handles symbol effects introduced in iOS 26 (draw-on / draw-off).
 Unknown effects are silently ignored.
 */
@available(iOS 26.0, tvOS 26.0, *)
private func applySymbolEffectiOS26(effect: SFSymbolEffectType, scope: SFSymbolEffectScope?, options: SymbolEffectOptions) {
  switch effect {
  case .drawOn:
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(.drawOn.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(.drawOn.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(.drawOn, options: options)
    }
  case .drawOff:
    switch scope {
    case .byLayer: sdImageView.addSymbolEffect(.drawOff.byLayer, options: options)
    case .wholeSymbol: sdImageView.addSymbolEffect(.drawOff.wholeSymbol, options: options)
    case .none: sdImageView.addSymbolEffect(.drawOff, options: options)
    }
  default:
    break
  }
}
/**
 Starts (re-applies) the configured symbol effects. No-op below iOS/tvOS 17.
 */
func startSymbolAnimation() {
  if #available(iOS 17.0, tvOS 17.0, *) {
    applySymbolEffect()
  }
}
/**
 Removes all active symbol effects. No-op below iOS/tvOS 17.
 */
func stopSymbolAnimation() {
  if #available(iOS 17.0, tvOS 17.0, *) {
    sdImageView.removeAllSymbolEffects()
  }
}
/**
 Sets the symbol image using a native SF Symbol "replace" content transition.
 - Parameters:
   - image: The new symbol image to display.
   - effect: The replace variant to use.
   - duration: Animation duration in seconds; `0` applies the change without a UIView animation.
 */
@available(iOS 17.0, tvOS 17.0, *)
func applyReplaceTransition(image: UIImage, effect: ImageTransitionEffect, duration: Double = 0) {
  // Wraps the transition in a UIView animation only when a duration was requested.
  let animate: (@escaping () -> Void) -> Void = { block in
    if duration > 0 {
      UIView.animate(withDuration: duration, animations: block)
    } else {
      block()
    }
  }
  switch effect {
  case .sfDownUp:
    animate { self.sdImageView.setSymbolImage(image, contentTransition: .replace.downUp) }
  case .sfUpUp:
    animate { self.sdImageView.setSymbolImage(image, contentTransition: .replace.upUp) }
  case .sfOffUp:
    animate { self.sdImageView.setSymbolImage(image, contentTransition: .replace.offUp) }
  default:
    animate { self.sdImageView.setSymbolImage(image, contentTransition: .replace) }
  }
}
// MARK: - Helpers
/**
 Maps a CSS-like font weight string (e.g. "400", "bold") to a `UIImage.SymbolWeight`.
 Unknown or missing values fall back to `.regular`.
 */
private func parseSymbolWeight(_ fontWeight: String?) -> UIImage.SymbolWeight {
  let weights: [String: UIImage.SymbolWeight] = [
    "100": .ultraLight,
    "200": .thin,
    "300": .light,
    "400": .regular,
    "normal": .regular,
    "500": .medium,
    "600": .semibold,
    "700": .bold,
    "bold": .bold,
    "800": .heavy,
    "900": .black
  ]
  guard let fontWeight, let weight = weights[fontWeight] else {
    return .regular
  }
  return weight
}
/**
 Cancels the in-flight SDWebImage load operation, if any.
 */
func cancelPendingOperation() {
  pendingOperation?.cancel()
  pendingOperation = nil
}
/**
A scale of the screen where the view is presented,
or the main scale if the view is not mounted yet.
*/
var screenScale: Double {
  // `UIScreen.scale` is a `CGFloat` — CGFloat and Double are interchangeable (SE-0307),
  // so the previous `as? Double` conditional cast was redundant. This now matches how
  // the scale is read elsewhere in this file (e.g. `imageLoadCompleted`).
  return window?.screen.scale ?? UIScreen.main.scale
}
/**
The image source that fits best into the view bounds.
*/
var bestSource: ImageSource? {
  // Pick the source that best matches the current bounds and screen scale.
  return getBestSource(from: sources, forSize: bounds.size, scale: screenScale)
}
/**
A bool value whether the image view doesn't render any image.
*/
var isViewEmpty: Bool {
  // "Empty" means the underlying image view has nothing attached.
  return sdImageView.image == nil
}
/**
A bool value whether there is any source to load from.
*/
var hasAnySource: Bool {
  // `sources` may be nil; only a non-empty array counts as having a source.
  return sources?.isEmpty == false
}
/**
Creates a base SDWebImageContext for this view. It should include options that are shared by both placeholders and final images.
*/
/**
 Creates a base SDWebImageContext shared by placeholder and proper-image loads.
 - Parameter cachePolicy: Overrides the view's `cachePolicy` when provided (placeholders use `.disk`).
 */
private func createBaseImageContext(source: ImageSource, cachePolicy: ImageCachePolicy? = nil) -> SDWebImageContext {
  var context = createSDWebImageContext(
    forSource: source,
    cachePolicy: cachePolicy ?? self.cachePolicy,
    useAppleWebpCodec: useAppleWebpCodec
  )
  // Decode to HDR if the `preferHighDynamicRange` prop is on (in this case `preferredImageDynamicRange` is set to high).
  if #available(iOS 17.0, macCatalyst 17.0, tvOS 17.0, *) {
    context[.imageDecodeToHDR] = sdImageView.preferredImageDynamicRange == .constrainedHigh || sdImageView.preferredImageDynamicRange == .high
  }
  // Some loaders (e.g. PhotoLibraryAssetLoader) may need to know the screen scale.
  context[ImageView.screenScaleKey] = screenScale
  return context
}
// MARK: - Live Text Interaction
#if !os(tvOS)
// Shared Live Text analyzer; `nil` when image analysis is unsupported on this device.
@available(iOS 16.0, macCatalyst 17.0, *)
static let imageAnalyzer = ImageAnalyzer.isSupported ? ImageAnalyzer() : nil
/**
 Enables or disables the Live Text (image analysis) interaction on the image view.
 Toggling the value attaches or removes an `ImageAnalysisInteraction` accordingly.
 */
var enableLiveTextInteraction: Bool = false {
  didSet {
    // Ignore when the OS/device doesn't support analysis or the value didn't actually change.
    guard #available(iOS 16.0, macCatalyst 17.0, *), oldValue != enableLiveTextInteraction, ImageAnalyzer.isSupported else {
      return
    }
    if enableLiveTextInteraction {
      let imageAnalysisInteraction = ImageAnalysisInteraction()
      sdImageView.addInteraction(imageAnalysisInteraction)
    } else if let interaction = findImageAnalysisInteraction() {
      sdImageView.removeInteraction(interaction)
    }
  }
}
/**
 Runs Live Text analysis on the currently displayed image and attaches the
 results to the image analysis interaction.
 */
private func analyzeImage() {
  guard #available(iOS 16.0, macCatalyst 17.0, *), ImageAnalyzer.isSupported, let image = sdImageView.image else {
    return
  }
  Task {
    guard let imageAnalyzer = Self.imageAnalyzer, let imageAnalysisInteraction = findImageAnalysisInteraction() else {
      return
    }
    let configuration = ImageAnalyzer.Configuration([.text, .machineReadableCode])

    do {
      let imageAnalysis = try await imageAnalyzer.analyze(image, configuration: configuration)

      // Make sure the image hasn't changed in the meantime.
      if image == sdImageView.image {
        imageAnalysisInteraction.analysis = imageAnalysis
        imageAnalysisInteraction.preferredInteractionTypes = .automatic
      }
    } catch {
      log.error(error)
    }
  }
}
/**
 Returns the Live Text interaction attached to the image view, if any.
 */
@available(iOS 16.0, macCatalyst 17.0, *)
private func findImageAnalysisInteraction() -> ImageAnalysisInteraction? {
  // Only one ImageAnalysisInteraction is ever attached (see `enableLiveTextInteraction`).
  return sdImageView.interactions.lazy.compactMap { $0 as? ImageAnalysisInteraction }.first
}
#endif
}

View File

@@ -0,0 +1,56 @@
@preconcurrency import SDWebImage
import ExpoModulesCore
/**
 A custom loader that generates images from blurhash strings.
 It handles all urls with the `blurhash` scheme.
 */
class BlurhashLoader: NSObject, SDImageLoader {
  typealias ImageLoaderCompletedBlock = @Sendable (UIImage?, Data?, (any Error)?, Bool) -> Void

  // MARK: - SDImageLoader

  func canRequestImage(for url: URL?) -> Bool {
    return url?.scheme == "blurhash"
  }

  func requestImage(
    with url: URL?,
    options: SDWebImageOptions = [],
    context: [SDWebImageContextOption: Any]?,
    progress progressBlock: SDImageLoaderProgressBlock?,
    completed completedBlock: ImageLoaderCompletedBlock? = nil
  ) -> SDWebImageOperation? {
    guard let url else {
      let error = makeNSError(description: "URL provided to BlurhashLoader is missing")
      completedBlock?(nil, nil, error, false)
      return nil
    }
    guard let source = context?[ImageView.contextSourceKey] as? ImageSource else {
      let error = makeNSError(description: "Image source was not provided to the context")
      completedBlock?(nil, nil, error, false)
      return nil
    }
    // The URI looks like this: blurhash:/WgF}G?az0fs.x[jat7xFRjNHt6s.4;oe-:RkVtkCi^Nbo|xZRjWB
    // Which means that the `pathComponents[0]` is `/` and we need to skip it to get the hash.
    // Guard against a malformed url with no path component — `canRequestImage` only checks
    // the scheme, so indexing `pathComponents[1]` unguarded would crash on e.g. "blurhash:".
    guard url.pathComponents.count > 1 else {
      let error = makeNSError(description: "Blurhash is missing from the URL")
      completedBlock?(nil, nil, error, false)
      return nil
    }
    let blurhash = url.pathComponents[1]
    let size = CGSize(width: source.width, height: source.height)

    Task(priority: .high) {
      // Blurhash decoding is CPU-bound, so it runs off the main actor; only the completion hops back.
      let image = image(fromBlurhash: blurhash, size: size)
      await MainActor.run {
        if let image {
          completedBlock?(UIImage(cgImage: image), nil, nil, true)
        } else {
          let error = makeNSError(description: "Unable to generate an image from the given blurhash")
          completedBlock?(nil, nil, error, false)
        }
      }
    }
    return nil
  }

  func shouldBlockFailedURL(with url: URL, error: Error) -> Bool {
    // If the algorithm failed to generate an image from the url,
    // it's not possible that next time it will work :)
    return true
  }
}

View File

@@ -0,0 +1,153 @@
import Photos
import Dispatch
import SDWebImage
import ExpoModulesCore
/**
A custom loader for assets from the Photo Library. It handles all urls with the `ph` scheme.
*/
final class PhotoLibraryAssetLoader: NSObject, SDImageLoader {
  // MARK: - SDImageLoader

  func canRequestImage(for url: URL?) -> Bool {
    return isPhotoLibraryAssetUrl(url)
  }

  func requestImage(
    with url: URL?,
    options: SDWebImageOptions = [],
    context: SDWebImageContext?,
    progress progressBlock: SDImageLoaderProgressBlock?,
    completed completedBlock: SDImageLoaderCompletedBlock? = nil
  ) -> SDWebImageOperation? {
    // Bail out early when the app can't read the Photo Library.
    guard isPhotoLibraryStatusAuthorized() else {
      let error = makeNSError(description: "Unauthorized access to the Photo Library")
      completedBlock?(nil, nil, error, false)
      return nil
    }
    // The operation is created upfront so the caller can cancel the request
    // even before the asynchronous fetch below assigns `requestId`.
    let operation = PhotoLibraryAssetLoaderOperation()

    DispatchQueue.global(qos: .userInitiated).async {
      guard let url = url, let assetLocalIdentifier = assetLocalIdentifier(fromUrl: url) else {
        let error = makeNSError(description: "Unable to obtain the asset identifier from the url: '\(String(describing: url?.absoluteString))'")
        completedBlock?(nil, nil, error, false)
        return
      }
      guard let asset = PHAsset.fetchAssets(withLocalIdentifiers: [assetLocalIdentifier], options: .none).firstObject else {
        let error = makeNSError(description: "Asset with identifier '\(assetLocalIdentifier)' not found in the Photo Library")
        completedBlock?(nil, nil, error, false)
        return
      }
      operation.requestId = requestAsset(
        asset,
        url: url,
        context: context,
        progressBlock: progressBlock,
        completedBlock: completedBlock
      )
    }
    return operation
  }

  func shouldBlockFailedURL(with url: URL, error: Error) -> Bool {
    // The lack of permission is one of the reasons of failed request,
    // but in that single case we don't want to blacklist the url as
    // the permission might be granted later and then the retry should be possible.
    return isPhotoLibraryStatusAuthorized()
  }
}
/**
Returns a bool value whether the given url references the Photo Library asset.
*/
/**
 Returns whether the given url points to a Photo Library asset (`ph` scheme).
 */
internal func isPhotoLibraryAssetUrl(_ url: URL?) -> Bool {
  guard let scheme = url?.scheme else {
    return false
  }
  return scheme == "ph"
}
/**
Returns the local identifier of the asset from the given `ph://` url.
These urls have the form of "ph://26687849-33F9-4402-8EC0-A622CD011D70",
where the asset local identifier is used as the host part.
*/
private func assetLocalIdentifier(fromUrl url: URL) -> String? {
  // The identifier is encoded as the url's host: ph://<local-identifier>
  return url.host
}
/**
Checks whether the app is authorized to read the Photo Library.
*/
private func isPhotoLibraryStatusAuthorized() -> Bool {
  let status = PHPhotoLibrary.authorizationStatus(for: .readWrite)
  // `limited` access is enough — requests for assets outside the allowed set just fail to fetch.
  return status == .authorized || status == .limited
}
/**
Requests the image of the given asset object and returns the request identifier.
*/
private func requestAsset(
  _ asset: PHAsset,
  url: URL,
  context: SDWebImageContext?,
  progressBlock: SDImageLoaderProgressBlock?,
  completedBlock: SDImageLoaderCompletedBlock?
) -> PHImageRequestID {
  let options = PHImageRequestOptions()
  options.isSynchronous = false
  options.version = .current
  options.deliveryMode = .highQualityFormat
  options.resizeMode = .fast
  options.normalizedCropRect = .zero
  // Allow fetching the original from iCloud when it isn't stored locally.
  options.isNetworkAccessAllowed = true

  if let progressBlock = progressBlock {
    options.progressHandler = { progress, _, _, _ in
      // The `progress` is a double from 0.0 to 1.0, but the loader needs integers so we map it to 0...100 range
      let progressPercentage = Int((progress * 100.0).rounded())
      progressBlock(progressPercentage, 100, url)
    }
  }
  var targetSize = PHImageManagerMaximumSize

  // We compute the minimal size required to display the image to avoid having to downsample it later
  if let scale = context?[ImageView.screenScaleKey] as? Double,
    let containerSize = context?[ImageView.frameSizeKey] as? CGSize,
    let contentFit = context?[ImageView.contentFitKey] as? ContentFit {
    targetSize = idealSize(
      contentPixelSize: CGSize(width: asset.pixelWidth, height: asset.pixelHeight),
      containerSize: containerSize,
      scale: scale,
      contentFit: contentFit
    ).rounded(.up) * scale
  }
  return PHImageManager.default().requestImage(
    for: asset,
    targetSize: targetSize,
    contentMode: .aspectFit,
    options: options,
    resultHandler: { image, info in
      // This value can be `true` only when network access is allowed and the photo is stored in the iCloud.
      let isDegraded: Bool = info?[PHImageResultIsDegradedKey] as? Bool ?? false
      // Degraded (low-quality) results are intermediate; only the final delivery reports `finished = true`.
      completedBlock?(image, nil, nil, !isDegraded)
    }
  )
}
/**
Loader operation specialized for the Photo Library by keeping the request identifier.
*/
/**
 Loader operation specialized for the Photo Library — keeps the `PHImageRequestID`
 so an in-flight request can be cancelled through the SDWebImage operation API.
 */
private class PhotoLibraryAssetLoaderOperation: NSObject, SDWebImageOperation {
  var canceled: Bool = false
  var requestId: PHImageRequestID?

  // MARK: - SDWebImageOperation

  func cancel() {
    if let id = requestId {
      PHImageManager.default().cancelImageRequest(id)
    }
    canceled = true
  }
}

View File

@@ -0,0 +1,57 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import SDWebImage
import ExpoModulesCore
/**
 Loader that renders SF Symbols for urls with the `sf` scheme (e.g. `sf:/star.fill`).
 */
class SFSymbolLoader: NSObject, SDImageLoader {
  // MARK: - SDImageLoader

  func canRequestImage(for url: URL?) -> Bool {
    return url?.scheme == "sf"
  }

  func requestImage(
    with url: URL?,
    options: SDWebImageOptions = [],
    context: [SDWebImageContextOption: Any]?,
    progress progressBlock: SDImageLoaderProgressBlock?,
    completed completedBlock: SDImageLoaderCompletedBlock? = nil
  ) -> SDWebImageOperation? {
    // Reports a failure through the completion block and finishes the request.
    func fail(_ description: String) -> SDWebImageOperation? {
      completedBlock?(nil, nil, makeNSError(description: description), false)
      return nil
    }
    guard let url else {
      return fail("URL provided to SFSymbolLoader is missing")
    }
    // The URI looks like this: sf:/star.fill
    // pathComponents[0] is `/`, pathComponents[1] is the symbol name
    let components = url.pathComponents
    guard components.count > 1 else {
      return fail("SF Symbol name is missing from the URL")
    }
    let symbolName = components[1]

    // Use a large fixed point size for high quality, the image view will scale it down.
    // Note: For weight configuration, use the symbolWeight prop on Image component.
    // This loader is mainly used for prefetching where weight isn't critical.
    let configuration = UIImage.SymbolConfiguration(pointSize: 100, weight: .regular)

    guard let symbolImage = UIImage(systemName: symbolName, withConfiguration: configuration) else {
      return fail("Unable to create SF Symbol image for '\(symbolName)'")
    }
    // Return as template image so tintColor prop works correctly
    completedBlock?(symbolImage.withRenderingMode(.alwaysTemplate), nil, nil, true)
    return nil
  }

  func shouldBlockFailedURL(with url: URL, error: Error) -> Bool {
    // If the symbol doesn't exist, it won't exist on subsequent attempts
    return true
  }
}

View File

@@ -0,0 +1,55 @@
@preconcurrency import SDWebImage
import ExpoModulesCore
/**
 A custom loader that generates images from thumbhash strings.
 It handles all urls with the `thumbhash` scheme.
 */
class ThumbhashLoader: NSObject, SDImageLoader {
  typealias ImageLoaderCompletedBlock = @Sendable (UIImage?, Data?, (any Error)?, Bool) -> Void

  // MARK: - SDImageLoader

  func canRequestImage(for url: URL?) -> Bool {
    return url?.scheme == "thumbhash"
  }

  func requestImage(
    with url: URL?,
    options: SDWebImageOptions = [],
    context: [SDWebImageContextOption: Any]?,
    progress progressBlock: SDImageLoaderProgressBlock?,
    completed completedBlock: ImageLoaderCompletedBlock? = nil
  ) -> SDWebImageOperation? {
    guard let url else {
      let error = makeNSError(description: "URL provided to ThumbhashLoader is missing")
      completedBlock?(nil, nil, error, false)
      return nil
    }
    // The URI looks like this: thumbhash:/3OcRJYB4d3h\iIeHeEh3eIhw+j2w
    // Guard against a malformed url with no path component — `canRequestImage` only checks
    // the scheme, so indexing `pathComponents[1]` unguarded would crash on e.g. "thumbhash:".
    guard url.pathComponents.count > 1 else {
      let error = makeNSError(description: "URL provided to ThumbhashLoader is invalid")
      completedBlock?(nil, nil, error, false)
      return nil
    }
    // ThumbHash may include slashes which could break the structure of the URL, so we replace them
    // with backslashes on the JS side and revert them back to slashes here, before generating the image.
    var thumbhash = url.pathComponents[1].replacingOccurrences(of: "\\", with: "/")

    // Thumbhashes with transparency cause the conversion to data to fail, padding the thumbhash string to correct length fixes that
    let remainder = thumbhash.count % 4
    if remainder > 0 {
      thumbhash = thumbhash.padding(toLength: thumbhash.count + 4 - remainder, withPad: "=", startingAt: 0)
    }
    guard !thumbhash.isEmpty, let thumbhashData = Data(base64Encoded: thumbhash, options: .ignoreUnknownCharacters) else {
      let error = makeNSError(description: "URL provided to ThumbhashLoader is invalid")
      completedBlock?(nil, nil, error, false)
      return nil
    }
    Task(priority: .high) {
      // Thumbhash decoding is CPU-bound, so it runs off the main actor; only the completion hops back.
      let image = image(fromThumbhash: thumbhashData)
      await MainActor.run {
        completedBlock?(image, nil, nil, true)
      }
    }
    return nil
  }

  func shouldBlockFailedURL(with url: URL, error: Error) -> Bool {
    // A thumbhash that failed to decode once will never decode.
    return true
  }
}

41
node_modules/expo-image/ios/SFSymbolEffect.swift generated vendored Normal file
View File

@@ -0,0 +1,41 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import ExpoModulesCore
/**
 Symbol effect types supported by the `sfEffect` prop.
 Raw values are the identifiers received from JS (e.g. "bounce/up").
 */
enum SFSymbolEffectType: String, Enumerable {
  case bounce = "bounce"
  case bounceUp = "bounce/up"
  case bounceDown = "bounce/down"
  case pulse = "pulse"
  case variableColor = "variable-color"
  case variableColorIterative = "variable-color/iterative"
  case variableColorCumulative = "variable-color/cumulative"
  case scale = "scale"
  case scaleUp = "scale/up"
  case scaleDown = "scale/down"
  case appear = "appear"
  case disappear = "disappear"

  // iOS 18+
  case wiggle = "wiggle"
  case rotate = "rotate"
  case breathe = "breathe"

  // iOS 26+
  case drawOn = "draw/on"
  case drawOff = "draw/off"
}
/**
 Determines whether a symbol effect animates each layer of the symbol separately
 or the whole symbol at once.
 */
enum SFSymbolEffectScope: String, Enumerable {
  case byLayer = "by-layer"
  case wholeSymbol = "whole-symbol"
}
/**
 A record describing a single SF Symbol effect: which effect to play, how many times, and its scope.
 */
struct SFSymbolEffect: Record {
  // The effect to play. Defaults to `bounce`.
  @Field
  var effect: SFSymbolEffectType = .bounce

  // Number of repetitions. Keyed as "repeat" because `repeat` is a reserved word in Swift.
  @Field(.keyed("repeat"))
  var repeatCount: Int = 0

  // Optional animation scope (by-layer vs whole-symbol).
  @Field
  var scope: SFSymbolEffectScope?
}

326
node_modules/expo-image/ios/Utils/Blurhash.swift generated vendored Normal file
View File

@@ -0,0 +1,326 @@
// The blurhash algorithm was entirely created by Wolt Enterprises.
// This implementation was inspired by:
// - https://github.com/woltapp/blurhash/blob/master/Swift/BlurHashDecode.swift
// - https://github.com/woltapp/blurhash/blob/master/Swift/BlurHashEncode.swift
// See https://blurha.sh for more details about the blurhash.
import UIKit
// swiftlint:disable force_unwrapping
/// Decodes a blurhash string into a `CGImage` of the requested size.
/// - Parameters:
///   - blurhash: The blurhash string (must be at least 6 characters and match its encoded component count).
///   - size: The output bitmap size in pixels.
///   - punch: Contrast multiplier applied to the AC components (1.0 = as encoded).
/// - Returns: The decoded image, or `nil` when the hash is malformed or the bitmap cannot be created.
internal func image(fromBlurhash blurhash: String, size: CGSize, punch: Float = 1.0) -> CGImage? {
  guard blurhash.count >= 6 else {
    return nil
  }
  // The first character packs the number of vertical and horizontal DCT components.
  let sizeFlag = decode83(String(blurhash[0]))
  let numY = (sizeFlag / 9) + 1
  let numX = (sizeFlag % 9) + 1
  // The second character stores the quantised maximum AC value.
  let quantisedMaximumValue = decode83(String(blurhash[1]))
  let maximumValue = Float(quantisedMaximumValue + 1) / 166
  // 1 size char + 1 max char + 4 DC chars + 2 chars per AC term (DC slot included in the count).
  guard blurhash.count == 4 + 2 * numX * numY else {
    return nil
  }
  let colors: [(Float, Float, Float)] = (0 ..< numX * numY).map { i in
    if i == 0 {
      let value = decode83(String(blurhash[2 ..< 6]))
      return decodeDC(value)
    } else {
      let value = decode83(String(blurhash[4 + i * 2 ..< 4 + i * 2 + 2]))
      return decodeAC(value, maximumValue: maximumValue * punch)
    }
  }
  let width = Int(size.width)
  let height = Int(size.height)
  // 24-bit RGB, no alpha channel.
  let bytesPerRow = width * 3
  guard let data = CFDataCreateMutable(kCFAllocatorDefault, bytesPerRow * height) else {
    return nil
  }
  CFDataSetLength(data, bytesPerRow * height)
  guard let pixels = CFDataGetMutableBytePtr(data) else {
    return nil
  }
  // Evaluate the inverse DCT for every output pixel.
  for y in 0 ..< height {
    for x in 0 ..< width {
      var r: Float = 0
      var g: Float = 0
      var b: Float = 0
      for j in 0 ..< numY {
        for i in 0 ..< numX {
          let basis = cos(Float.pi * Float(x) * Float(i) / Float(width)) * cos(Float.pi * Float(y) * Float(j) / Float(height))
          let color = colors[i + j * numX]
          r += color.0 * basis
          g += color.1 * basis
          b += color.2 * basis
        }
      }
      let intR = UInt8(linearTosRGB(r))
      let intG = UInt8(linearTosRGB(g))
      let intB = UInt8(linearTosRGB(b))
      pixels[3 * x + 0 + y * bytesPerRow] = intR
      pixels[3 * x + 1 + y * bytesPerRow] = intG
      pixels[3 * x + 2 + y * bytesPerRow] = intB
    }
  }
  let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue)
  guard let provider = CGDataProvider(data: data), let cgImage = CGImage(
    width: width,
    height: height,
    bitsPerComponent: 8,
    bitsPerPixel: 24,
    bytesPerRow: bytesPerRow,
    space: CGColorSpaceCreateDeviceRGB(),
    bitmapInfo: bitmapInfo,
    provider: provider,
    decode: nil,
    shouldInterpolate: true,
    intent: .defaultIntent
  ) else {
    return nil
  }
  return cgImage
}
/// Encodes the given image into a blurhash string.
/// - Parameters:
///   - image: The image to encode.
///   - components: The number of horizontal and vertical DCT components.
/// - Returns: The blurhash string, or `nil` when the image cannot be drawn into a bitmap context
///   (e.g. a zero-sized image) — previously these paths force-unwrapped and could crash.
internal func blurhash(fromImage image: UIImage, numberOfComponents components: (Int, Int)) -> String? {
  let size = image.size
  let scale = image.scale
  let pixelWidth = Int(round(size.width * scale))
  let pixelHeight = Int(round(size.height * scale))
  // Fail gracefully instead of force-unwrapping — context creation is failable
  // and a zero-sized image would otherwise crash here.
  guard pixelWidth > 0,
    pixelHeight > 0,
    let colorSpace = CGColorSpace(name: CGColorSpace.sRGB),
    let context = CGContext(
      data: nil,
      width: pixelWidth,
      height: pixelHeight,
      bitsPerComponent: 8,
      bytesPerRow: pixelWidth * 4,
      space: colorSpace,
      bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue
    ) else {
    return nil
  }
  // Flip vertically so UIKit's top-left origin maps onto CoreGraphics' bottom-left origin.
  context.scaleBy(x: scale, y: -scale)
  context.translateBy(x: 0, y: -size.height)
  UIGraphicsPushContext(context)
  image.draw(at: .zero)
  UIGraphicsPopContext()
  guard let cgImage = context.makeImage(),
    let dataProvider = cgImage.dataProvider,
    let data = dataProvider.data,
    let pixels = CFDataGetBytePtr(data) else {
    assertionFailure("Unexpected error!")
    return nil
  }
  let width = cgImage.width
  let height = cgImage.height
  let bytesPerRow = cgImage.bytesPerRow
  // One DCT factor per (x, y) component pair. The DC term (0, 0) uses normalisation 1, AC terms use 2.
  var factors: [(Float, Float, Float)] = []
  for y in 0 ..< components.1 {
    for x in 0 ..< components.0 {
      let normalisation: Float = (x == 0 && y == 0) ? 1 : 2
      let factor = multiplyBasisFunction(
        pixels: pixels,
        width: width,
        height: height,
        bytesPerRow: bytesPerRow,
        bytesPerPixel: cgImage.bitsPerPixel / 8,
        pixelOffset: 0
      ) {
        normalisation * cos(Float.pi * Float(x) * $0 / Float(width)) as Float * cos(Float.pi * Float(y) * $1 / Float(height)) as Float
      }
      factors.append(factor)
    }
  }
  // The first factor is the DC (average color) term, the rest are AC terms.
  guard let dc = factors.first else {
    return nil
  }
  let ac = factors.dropFirst()
  var hash = ""
  // Pack the component counts into a single base-83 digit.
  let sizeFlag = (components.0 - 1) + (components.1 - 1) * 9
  hash += encode83(sizeFlag, length: 1)
  let maximumValue: Float
  if let actualMaximumValue = ac.map({ max(abs($0.0), abs($0.1), abs($0.2)) }).max() {
    let quantisedMaximumValue = Int(max(0, min(82, floor(actualMaximumValue * 166 - 0.5))))
    maximumValue = Float(quantisedMaximumValue + 1) / 166
    hash += encode83(quantisedMaximumValue, length: 1)
  } else {
    // No AC terms (1x1 components) — encode a zero maximum.
    maximumValue = 1
    hash += encode83(0, length: 1)
  }
  hash += encode83(encodeDC(dc), length: 4)
  for factor in ac {
    hash += encode83(encodeAC(factor, maximumValue: maximumValue), length: 2)
  }
  return hash
}
/// Returns `true` when every character of `str` belongs to the base-83 blurhash alphabet.
/// Note that an empty string trivially satisfies this.
internal func isBlurhash(_ str: String) -> Bool {
  return !str.contains { decodeCharacters[$0] == nil }
}
// MARK: - Encode
/// Accumulates `basisFunction(x, y)`-weighted, linearised RGB sums over every pixel and
/// returns the averaged (r, g, b) DCT factor.
/// - Parameters:
///   - pixels: Pointer to the first byte of the bitmap.
///   - width: Bitmap width in pixels.
///   - height: Bitmap height in pixels.
///   - bytesPerRow: Row stride in bytes.
///   - bytesPerPixel: Stride between pixels within a row.
///   - pixelOffset: Byte offset of the red component within a pixel (green/blue follow at +1/+2).
///   - basisFunction: Cosine basis evaluated at pixel coordinates (x, y), including normalisation.
// swiftlint:disable:next function_parameter_count
private func multiplyBasisFunction(
  pixels: UnsafePointer<UInt8>,
  width: Int,
  height: Int,
  bytesPerRow: Int,
  bytesPerPixel: Int,
  pixelOffset: Int,
  basisFunction: (Float, Float) -> Float
) -> (Float, Float, Float) {
  var r: Float = 0
  var g: Float = 0
  var b: Float = 0
  // Bounds-checked buffer view over the raw pixel data.
  let buffer = UnsafeBufferPointer(start: pixels, count: height * bytesPerRow)
  for x in 0 ..< width {
    for y in 0 ..< height {
      let basis = basisFunction(Float(x), Float(y))
      r += basis * sRGBToLinear(buffer[bytesPerPixel * x + pixelOffset + 0 + y * bytesPerRow])
      g += basis * sRGBToLinear(buffer[bytesPerPixel * x + pixelOffset + 1 + y * bytesPerRow])
      b += basis * sRGBToLinear(buffer[bytesPerPixel * x + pixelOffset + 2 + y * bytesPerRow])
    }
  }
  // Average over the pixel count.
  let scale = 1 / Float(width * height)
  return (r * scale, g * scale, b * scale)
}
/// The 83-character blurhash alphabet, indexed by digit value.
private let encodeCharacters: [Character] = Array("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz#$%*+,-.:;=?@[]^_{|}~")
/// Packs the DC (average color) term into a single integer — 8 bits per sRGB channel.
private func encodeDC(_ value: (Float, Float, Float)) -> Int {
  let (red, green, blue) = value
  return (linearTosRGB(red) << 16) + (linearTosRGB(green) << 8) + linearTosRGB(blue)
}
/// Quantises an AC term to one base-19 digit per channel and packs the digits into a single integer.
private func encodeAC(_ value: (Float, Float, Float), maximumValue: Float) -> Int {
  // Each channel maps to 0...18 through a signed square-root transfer curve centered at 9.
  func quantise(_ component: Float) -> Int {
    return Int(max(0, min(18, floor(signPow(component / maximumValue, 0.5) * 9 + 9.5))))
  }
  return quantise(value.0) * 19 * 19 + quantise(value.1) * 19 + quantise(value.2)
}
/// Encodes `value` as a fixed-length base-83 string using the blurhash alphabet,
/// emitting digits from the most significant position down to the least.
private func encode83(_ value: Int, length: Int) -> String {
  var encoded = ""
  for position in 1 ... length {
    let digit = (value / pow(83, length - position)) % 83
    encoded.append(encodeCharacters[Int(digit)])
  }
  return encoded
}
// MARK: - Decode
/// Unpacks the DC term (8 bits per channel) back into linear-space RGB.
private func decodeDC(_ value: Int) -> (Float, Float, Float) {
  let red = value >> 16
  let green = (value >> 8) & 255
  let blue = value & 255
  return (sRGBToLinear(red), sRGBToLinear(green), sRGBToLinear(blue))
}
/// Unpacks an AC term — three base-19 digits — mapping each back through the signed square curve.
private func decodeAC(_ value: Int, maximumValue: Float) -> (Float, Float, Float) {
  let digitR = value / (19 * 19)
  let digitG = (value / 19) % 19
  let digitB = value % 19
  // Map a 0...18 digit back to [-1, 1], square it (keeping the sign) and scale.
  func channel(_ digit: Int) -> Float {
    return signPow((Float(digit) - 9) / 9, 2) * maximumValue
  }
  return (channel(digitR), channel(digitG), channel(digitB))
}
/// Reverse lookup table mapping each base-83 character to its numeric value.
private let decodeCharacters: [Character: Int] = Dictionary(
  uniqueKeysWithValues: encodeCharacters.enumerated().map { ($0.element, $0.offset) }
)
/// Decodes a base-83 string into an integer, silently skipping characters outside the alphabet.
private func decode83(_ str: String) -> Int {
  return str.reduce(0) { accumulator, character in
    guard let digit = decodeCharacters[character] else {
      return accumulator
    }
    return accumulator * 83 + digit
  }
}
// MARK: - Helpers
/// Raises `abs(value)` to `exp` and restores the original sign (including signed zero).
private func signPow(_ value: Float, _ exp: Float) -> Float {
  let magnitude = pow(abs(value), exp)
  return copysign(magnitude, value)
}
/// Converts a linear-space component (clamped to [0, 1]) to an 8-bit sRGB value in 0...255.
private func linearTosRGB(_ value: Float) -> Int {
  let clamped = max(0, min(1, value))
  // Below the cutoff the sRGB transfer function is a straight line; above it, a gamma curve.
  if clamped <= 0.0031308 {
    return Int(clamped * 12.92 * 255 + 0.5)
  }
  return Int((1.055 * pow(clamped, 1 / 2.4) - 0.055) * 255 + 0.5)
}
/// Converts an 8-bit sRGB component (0...255, any `BinaryInteger`) to a linear-space value in [0, 1].
private func sRGBToLinear<Type: BinaryInteger>(_ value: Type) -> Float {
  let normalized = Float(Int64(value)) / 255
  // Inverse sRGB transfer function: linear segment below the cutoff, gamma curve above.
  if normalized <= 0.04045 {
    return normalized / 12.92
  }
  return pow((normalized + 0.055) / 1.055, 2.4)
}
/// Integer exponentiation: `base` raised to a non-negative `exponent` (returns 1 for exponent 0).
private func pow(_ base: Int, _ exponent: Int) -> Int {
  var result = 1
  for _ in 0 ..< exponent {
    result *= base
  }
  return result
}
private extension String {
  /// Character at the given integer offset from the start of the string.
  subscript (offset: Int) -> Character {
    return self[index(startIndex, offsetBy: offset)]
  }

  /// Substring covering the given closed (inclusive) integer range.
  subscript (bounds: CountableClosedRange<Int>) -> Substring {
    let lower = index(startIndex, offsetBy: bounds.lowerBound)
    let upper = index(startIndex, offsetBy: bounds.upperBound)
    return self[lower...upper]
  }

  /// Substring covering the given half-open integer range.
  subscript (bounds: CountableRange<Int>) -> Substring {
    let lower = index(startIndex, offsetBy: bounds.lowerBound)
    let upper = index(startIndex, offsetBy: bounds.upperBound)
    return self[lower..<upper]
  }
}

275
node_modules/expo-image/ios/Utils/ImageUtils.swift generated vendored Normal file
View File

@@ -0,0 +1,275 @@
// Copyright 2022-present 650 Industries. All rights reserved.
import SDWebImage
import ExpoModulesCore
/**
 An exception to throw when it is not possible to generate a blurhash for a given URL.
 */
public final class BlurhashGenerationException: Exception {
  // Human-readable reason surfaced to the caller as the failure message.
  override public var reason: String {
    "Unable to generate blurhash, make sure the image exists at the given URL"
  }
}
/**
 Maps an `SDImageCacheType` to its string representation: `"none"`, `"disk"` or `"memory"`.
 */
func cacheTypeToString(_ cacheType: SDImageCacheType) -> String {
  switch cacheType {
  case .none:
    return "none"
  case .disk:
    return "disk"
  case .memory, .all:
    // `all` doesn't make much sense, so we treat it as `memory`.
    return "memory"
  @unknown default:
    // Future-proofing: surface unhandled cases loudly instead of crashing.
    log.error("Unhandled `SDImageCacheType` value: \(cacheType), returning `none` as fallback. Add the missing case as soon as possible.")
    return "none"
  }
}
/**
 Returns the MIME (media) type for the given `SDImageFormat`,
 or `nil` when the format is undefined or unrecognized.
 */
func imageFormatToMediaType(_ format: SDImageFormat) -> String? {
  switch format {
  case .undefined:
    return nil
  case .JPEG:
    return "image/jpeg"
  case .PNG:
    return "image/png"
  case .GIF:
    return "image/gif"
  case .TIFF:
    return "image/tiff"
  case .webP:
    return "image/webp"
  case .HEIC:
    return "image/heic"
  case .HEIF:
    return "image/heif"
  case .PDF:
    return "application/pdf"
  case .SVG:
    return "image/svg+xml"
  default:
    // On one hand we could remove this clause and always ensure that we have handled
    // all supported formats (by erroring compilation otherwise).
    // On the other hand, we do support overriding SDWebImage version, so we shouldn't
    // fail to compile on SDWebImage versions with a different set of formats.
    return nil
  }
}
/**
 Calculates the ideal size that fills in the container size while maintaining the source aspect ratio.
 - Parameters:
   - contentPixelSize: The size of the content, in pixels.
   - containerSize: The size of the container, in points.
   - scale: The screen scale, used to convert the content size to points in the non-scaling modes.
   - contentFit: How the content should be fitted into the container.
 */
func idealSize(contentPixelSize: CGSize, containerSize: CGSize, scale: Double = 1.0, contentFit: ContentFit) -> CGSize {
  // Uniformly scales the content so it fits entirely inside the container.
  // Shared by `contain` and the shrinking branch of `scaleDown` (previously duplicated).
  func sizeThatContains() -> CGSize {
    let aspectRatio = min(containerSize.width / contentPixelSize.width, containerSize.height / contentPixelSize.height)
    return contentPixelSize * aspectRatio
  }
  switch contentFit {
  case .contain:
    return sizeThatContains()
  case .cover:
    // Scale so the content covers the entire container, letting the longer axis overflow.
    let aspectRatio = max(containerSize.width / contentPixelSize.width, containerSize.height / contentPixelSize.height)
    return contentPixelSize * aspectRatio
  case .fill:
    // Stretch to the container, ignoring the aspect ratio.
    return containerSize
  case .scaleDown:
    if containerSize.width < contentPixelSize.width / scale || containerSize.height < contentPixelSize.height / scale {
      // The container is smaller than the image — scale it down and behave like `contain`.
      return sizeThatContains()
    }
    // The container is bigger than the image — don't scale it and behave like `none`.
    return contentPixelSize / scale
  case .none:
    return contentPixelSize / scale
  }
}
/**
 Returns a bool whether the image should be downscaled to the given size.
 Downscaling happens only when the image exceeds the target in *both* dimensions.
 */
func shouldDownscale(image: UIImage, toSize size: CGSize, scale: Double) -> Bool {
  if size.width <= 0 || size.height <= 0 {
    // View is invisible, so no reason to keep the image in memory.
    // This already ensures that we won't be dividing by zero in ratio calculations.
    return true
  }
  if size.width.isInfinite || size.height.isInfinite {
    // Keep the image unscaled for infinite sizes.
    return false
  }
  // Compare the image's pixel size against the target's pixel size (points * scale).
  let imageSize = image.size * image.scale
  return imageSize.width > (size.width * scale) && imageSize.height > (size.height * scale)
}
/**
 Resizes a static image to fit in the given size and scale.
 - Parameters:
   - image: The image to redraw.
   - size: The target size in points.
   - scale: The screen scale (pixels per point) of the resulting image.
 - Returns: The redrawn image — always re-rendered, even when the target equals the source size.
 */
func resize(image: UIImage, toSize size: CGSize, scale: Double) -> UIImage {
  let format = UIGraphicsImageRendererFormat()
  format.scale = scale
  return UIGraphicsImageRenderer(size: size, format: format).image { _ in
    image.draw(in: CGRect(origin: .zero, size: size))
  }
}
/**
 The image source that fits best into the given size, that is the one with the closest number of pixels.
 May be `nil` if there are no sources available or the size is zero.
 */
func getBestSource(from sources: [ImageSource]?, forSize size: CGSize, scale: Double = 1.0) -> ImageSource? {
  guard let sources, !sources.isEmpty, size.width > 0, size.height > 0 else {
    return nil
  }
  // Fast path — nothing to compare.
  if sources.count == 1 {
    return sources.first
  }
  // Pick the source whose pixel count is proportionally closest to the target pixel count.
  // `min(by:)` keeps the first of equally-fitting sources, like the original loop did.
  let targetPixelCount = size.width * size.height * scale * scale
  return sources.min { lhs, rhs in
    abs(1 - (lhs.pixelCount / targetPixelCount)) < abs(1 - (rhs.pixelCount / targetPixelCount))
  }
}
/**
 Creates the cache key filter that returns the specific string.
 Returns `nil` when no custom key is given, letting SDWebImage fall back to its default keying.
 */
func createCacheKeyFilter(_ cacheKey: String?) -> SDWebImageCacheKeyFilter? {
  return cacheKey.map { key in
    SDWebImageCacheKeyFilter { _ in key }
  }
}
/**
 Creates a default image context based on the source and the cache policy.
 */
func createSDWebImageContext(forSource source: ImageSource, cachePolicy: ImageCachePolicy = .disk, useAppleWebpCodec: Bool = true) -> SDWebImageContext {
  var context = SDWebImageContext()

  // Modify URL request to add headers.
  if let headers = source.headers {
    context[.downloadRequestModifier] = SDWebImageDownloaderRequestModifier(headers: headers)
  }
  // Allow for custom cache key. If not specified in the source, its uri is used as the key.
  context[.cacheKeyFilter] = createCacheKeyFilter(source.cacheKey)

  // Tell SDWebImage to use our own class for animated formats, which has better
  // compatibility with the UIImage and fixes issues with the image duration.
  context[.animatedImageClass] = AnimatedImage.self

  // Pass `useAppleWebpCodec` down to the WebP coder.
  context[.imageDecodeOptions] = [
    imageCoderOptionUseAppleWebpCodec: useAppleWebpCodec
  ]

  // Assets from the bundler have `scale` prop which needs to be passed to the context,
  // otherwise they would be saved in cache with scale = 1.0 which may result in
  // incorrectly rendered images for resize modes that don't scale (`center` and `repeat`).
  context[.imageScaleFactor] = source.scale

  let cacheType = cachePolicy.toSdCacheType().rawValue
  context[.queryCacheType] = cacheType
  context[.storeCacheType] = cacheType

  // Cache the original (undecoded) image only when the source opts in.
  let originalCacheType = source.cacheOriginalImage ? cacheType : SDImageCacheType.none.rawValue
  context[.originalQueryCacheType] = originalCacheType
  context[.originalStoreCacheType] = originalCacheType

  // Some loaders (e.g. blurhash) may need access to the source.
  context[ImageView.contextSourceKey] = source
  return context
}
extension CGSize {
  /**
   Multiplies a size with a scalar.
   */
  static func * (lhs: CGSize, rhs: Double) -> CGSize {
    return CGSize(width: lhs.width * rhs, height: lhs.height * rhs)
  }

  /**
   Divides a size with a scalar.
   */
  static func / (lhs: CGSize, rhs: Double) -> CGSize {
    return CGSize(width: lhs.width / rhs, height: lhs.height / rhs)
  }

  /**
   Returns a new CGSize with width and height rounded to an integral value using the specified rounding rule.
   */
  func rounded(_ rule: FloatingPointRoundingRule) -> CGSize {
    return CGSize(width: width.rounded(rule), height: height.rounded(rule))
  }
}
/**
 Creates an `NSError` in the `expo.modules.image` domain with the given localized description.
 */
func makeNSError(description: String) -> NSError {
  return NSError(
    domain: "expo.modules.image",
    code: 0,
    userInfo: [NSLocalizedDescriptionKey: description]
  )
}
// MARK: - Async helpers
// TODO: Add helpers like these to the modules core eventually
/**
 Asynchronously maps the given sequence (sequentially).
 Each transform is awaited before the next one starts — unlike `concurrentMap`.
 */
func asyncMap<ItemsType: Sequence, ResultType>(
  _ items: ItemsType,
  _ transform: (ItemsType.Element) async throws -> ResultType
) async rethrows -> [ResultType] {
  var results = [ResultType]()
  for element in items {
    let transformed = try await transform(element)
    results.append(transformed)
  }
  return results
}
/**
 Concurrently maps the given sequence, preserving the input order in the output.
 - Parameters:
   - items: The sequence to transform.
   - transform: An async transform run concurrently for every element.
 - Returns: The transformed elements in their original order.
 */
func concurrentMap<ItemsType: Sequence, ResultType: Sendable>(
  _ items: ItemsType,
  _ transform: @Sendable @escaping (ItemsType.Element) async throws -> ResultType
) async rethrows -> [ResultType] where ItemsType.Element: Sendable {
  // Materialize the sequence exactly once — the previous implementation built
  // `Array(items)` twice (once for the count, once for enumeration).
  let elements = Array(items)
  return try await withThrowingTaskGroup(of: (Int, ResultType).self) { group in
    var results = [ResultType?](repeating: nil, count: elements.count)
    // Enumerate items to preserve the original order in the output.
    for (index, item) in elements.enumerated() {
      group.addTask { [item] in
        let value = try await transform(item)
        return (index, value)
      }
    }
    // Collect results as tasks finish (in arbitrary order) into their original slots.
    while let (index, value) = try await group.next() {
      results[index] = value
    }
    // Compact map to unwrap optionals, all positions should be filled.
    return results.compactMap { $0 }
  }
}

550
node_modules/expo-image/ios/Utils/Thumbhash.swift generated vendored Normal file
View File

@@ -0,0 +1,550 @@
import Foundation
// Blurhash implementation thanks to @evanw work
// https://github.com/evanw/thumbhash
// NOTE: Swift has an exponential-time type checker and compiling very simple
// expressions can easily take many seconds, especially when expressions involve
// numeric type constructors.
//
// This file deliberately breaks compound expressions up into separate variables
// to improve compile time even though this comes at the expense of readability.
// This is a known workaround for this deficiency in the Swift compiler.
//
// The following command is helpful when debugging Swift compile time issues:
//
// swiftc ThumbHash.swift -Xfrontend -debug-time-function-bodies
//
// These optimizations brought the compile time for this file from around 2.5
// seconds to around 250ms (10x faster).
// NOTE: Swift's debug-build performance of for-in loops over numeric ranges is
// really awful. Debug builds compile a very generic indexing iterator thing
// that makes many nested calls for every iteration, which makes debug-build
// performance crawl.
//
// This file deliberately avoids for-in loops that loop for more than a few
// times to improve debug-build run time even though this comes at the expense
// of readability. Similarly unsafe pointers are used instead of array getters
// to avoid unnecessary bounds checks, which have extra overhead in debug builds.
//
// These optimizations brought the run time to encode and decode 10 ThumbHashes
// in debug mode from 700ms to 70ms (10x faster).
// swiftlint:disable all
/// Encodes a `w`×`h` RGBA8 pixel buffer into a ThumbHash.
/// - Parameters:
///   - w: Width in pixels; must be at most 100.
///   - h: Height in pixels; must be at most 100.
///   - rgba: Tightly packed RGBA bytes; `count` must equal `w * h * 4`.
/// - Returns: The encoded ThumbHash bytes (headers followed by 4-bit quantised DCT coefficients).
/// - Note: Expression splitting and manual `while` loops are deliberate — see the compile-time
///   and debug-performance notes in this file's header comment.
func rgbaToThumbHash(w: Int, h: Int, rgba: Data) -> Data {
  // Encoding an image larger than 100x100 is slow with no benefit
  assert(w <= 100 && h <= 100)
  assert(rgba.count == w * h * 4)
  // Determine the average color
  var avg_r: Float32 = 0
  var avg_g: Float32 = 0
  var avg_b: Float32 = 0
  var avg_a: Float32 = 0
  rgba.withUnsafeBytes { rgba in
    var rgba = rgba.baseAddress!.bindMemory(to: UInt8.self, capacity: rgba.count)
    let n = w * h
    var i = 0
    while i < n {
      // Alpha-weighted average: fully transparent pixels contribute no color.
      let alpha = Float32(rgba[3]) / 255
      avg_r += alpha / 255 * Float32(rgba[0])
      avg_g += alpha / 255 * Float32(rgba[1])
      avg_b += alpha / 255 * Float32(rgba[2])
      avg_a += alpha
      rgba = rgba.advanced(by: 4)
      i += 1
    }
  }
  if avg_a > 0 {
    avg_r /= avg_a
    avg_g /= avg_a
    avg_b /= avg_a
  }
  let hasAlpha = avg_a < Float32(w * h)
  let l_limit = hasAlpha ? 5 : 7 // Use fewer luminance bits if there's alpha
  // Derive the luminance component counts (lx, ly), proportional to the aspect ratio.
  let imax_wh = max(w, h)
  let iwl_limit = l_limit * w
  let ihl_limit = l_limit * h
  let fmax_wh = Float32(imax_wh)
  let fwl_limit = Float32(iwl_limit)
  let fhl_limit = Float32(ihl_limit)
  let flx = round(fwl_limit / fmax_wh)
  let fly = round(fhl_limit / fmax_wh)
  var lx = Int(flx)
  var ly = Int(fly)
  lx = max(1, lx)
  ly = max(1, ly)
  var lpqa = [Float32](repeating: 0, count: w * h * 4)
  // Convert the image from RGBA to LPQA (composite atop the average color)
  rgba.withUnsafeBytes { rgba in
    lpqa.withUnsafeMutableBytes { lpqa in
      var rgba = rgba.baseAddress!.bindMemory(to: UInt8.self, capacity: rgba.count)
      var lpqa = lpqa.baseAddress!.bindMemory(to: Float32.self, capacity: lpqa.count)
      let n = w * h
      var i = 0
      while i < n {
        let alpha = Float32(rgba[3]) / 255
        let r = avg_r * (1 - alpha) + alpha / 255 * Float32(rgba[0])
        let g = avg_g * (1 - alpha) + alpha / 255 * Float32(rgba[1])
        let b = avg_b * (1 - alpha) + alpha / 255 * Float32(rgba[2])
        // L = luminance, P/Q = two color axes, A = alpha.
        lpqa[0] = (r + g + b) / 3
        lpqa[1] = (r + g) / 2 - b
        lpqa[2] = r - g
        lpqa[3] = alpha
        rgba = rgba.advanced(by: 4)
        lpqa = lpqa.advanced(by: 4)
        i += 1
      }
    }
  }
  // Encode using the DCT into DC (constant) and normalized AC (varying) terms
  let encodeChannel = { (channel: UnsafePointer<Float32>, nx: Int, ny: Int) -> (Float32, [Float32], Float32) in
    var dc: Float32 = 0
    var ac: [Float32] = []
    var scale: Float32 = 0
    var fx = [Float32](repeating: 0, count: w)
    fx.withUnsafeMutableBytes { fx in
      let fx = fx.baseAddress!.bindMemory(to: Float32.self, capacity: fx.count)
      var cy = 0
      while cy < ny {
        var cx = 0
        // Only coefficients in the triangular region cx/nx + cy/ny < 1 are kept.
        while cx * ny < nx * (ny - cy) {
          var ptr = channel
          var f: Float32 = 0
          var x = 0
          while x < w {
            let fw = Float32(w)
            let fxx = Float32(x)
            let fcx = Float32(cx)
            fx[x] = cos(Float32.pi / fw * fcx * (fxx + 0.5))
            x += 1
          }
          var y = 0
          while y < h {
            let fh = Float32(h)
            let fyy = Float32(y)
            let fcy = Float32(cy)
            let fy = cos(Float32.pi / fh * fcy * (fyy + 0.5))
            var x = 0
            while x < w {
              // The channel data is interleaved LPQA, hence the stride of 4 floats.
              f += ptr.pointee * fx[x] * fy
              x += 1
              ptr = ptr.advanced(by: 4)
            }
            y += 1
          }
          f /= Float32(w * h)
          if cx > 0 || cy > 0 {
            ac.append(f)
            scale = max(scale, abs(f))
          } else {
            dc = f
          }
          cx += 1
        }
        cy += 1
      }
    }
    // Normalize the AC terms into [0, 1] centered at 0.5.
    if scale > 0 {
      let n = ac.count
      var i = 0
      while i < n {
        ac[i] = 0.5 + 0.5 / scale * ac[i]
        i += 1
      }
    }
    return (dc, ac, scale)
  }
  let (
    (l_dc, l_ac, l_scale),
    (p_dc, p_ac, p_scale),
    (q_dc, q_ac, q_scale),
    (a_dc, a_ac, a_scale)
  ) = lpqa.withUnsafeBytes { lpqa in
    let lpqa = lpqa.baseAddress!.bindMemory(to: Float32.self, capacity: lpqa.count)
    return (
      encodeChannel(lpqa, max(3, lx), max(3, ly)),
      encodeChannel(lpqa.advanced(by: 1), 3, 3),
      encodeChannel(lpqa.advanced(by: 2), 3, 3),
      hasAlpha ? encodeChannel(lpqa.advanced(by: 3), 5, 5) : (1, [], 1)
    )
  }
  // Write the constants
  let isLandscape = w > h
  let fl_dc = round(63.0 * l_dc)
  let fp_dc = round(31.5 + 31.5 * p_dc)
  let fq_dc = round(31.5 + 31.5 * q_dc)
  let fl_scale = round(31.0 * l_scale)
  let il_dc = UInt32(fl_dc)
  let ip_dc = UInt32(fp_dc)
  let iq_dc = UInt32(fq_dc)
  let il_scale = UInt32(fl_scale)
  let ihasAlpha = UInt32(hasAlpha ? 1 : 0)
  // 24-bit header: L dc (6 bits) | P dc (6) | Q dc (6) | L scale (5) | alpha flag (1).
  let header24 = il_dc | (ip_dc << 6) | (iq_dc << 12) | (il_scale << 18) | (ihasAlpha << 23)
  let fp_scale = round(63.0 * p_scale)
  let fq_scale = round(63.0 * q_scale)
  let ilxy = UInt16(isLandscape ? ly : lx)
  let ip_scale = UInt16(fp_scale)
  let iq_scale = UInt16(fq_scale)
  let iisLandscape = UInt16(isLandscape ? 1 : 0)
  // 16-bit header: shorter-axis L count (3 bits) | P scale (6) | Q scale (6) | landscape flag (1).
  let header16 = ilxy | (ip_scale << 3) | (iq_scale << 9) | (iisLandscape << 15)
  var hash = Data(capacity: 25)
  hash.append(UInt8(header24 & 255))
  hash.append(UInt8((header24 >> 8) & 255))
  hash.append(UInt8(header24 >> 16))
  hash.append(UInt8(header16 & 255))
  hash.append(UInt8(header16 >> 8))
  var isOdd = false
  if hasAlpha {
    // Alpha DC and scale share one extra header byte (4 bits each).
    let fa_dc = round(15.0 * a_dc)
    let fa_scale = round(15.0 * a_scale)
    let ia_dc = UInt8(fa_dc)
    let ia_scale = UInt8(fa_scale)
    hash.append(ia_dc | (ia_scale << 4))
  }
  // Write the varying factors
  // Each AC coefficient is quantised to 4 bits; two consecutive coefficients share one byte.
  for ac in [l_ac, p_ac, q_ac] {
    for f in ac {
      let f15 = round(15.0 * f)
      let i15 = UInt8(f15)
      if isOdd {
        hash[hash.count - 1] |= i15 << 4
      } else {
        hash.append(i15)
      }
      isOdd = !isOdd
    }
  }
  if hasAlpha {
    for f in a_ac {
      let f15 = round(15.0 * f)
      let i15 = UInt8(f15)
      if isOdd {
        hash[hash.count - 1] |= i15 << 4
      } else {
        hash.append(i15)
      }
      isOdd = !isOdd
    }
  }
  return hash
}
/// Decodes a ThumbHash into a small RGBA8 bitmap.
/// - Parameter hash: The ThumbHash bytes (at least 5, 6 when an alpha channel is encoded).
/// - Returns: A `(width, height, rgba)` tuple; the longer output side is 32 pixels.
/// - Note: Manual `while` loops and split expressions are deliberate — see the file header comment.
func thumbHashToRGBA(hash: Data) -> (Int, Int, Data) {
  // Read the constants
  let h0 = UInt32(hash[0])
  let h1 = UInt32(hash[1])
  let h2 = UInt32(hash[2])
  let h3 = UInt16(hash[3])
  let h4 = UInt16(hash[4])
  // Little-endian 24-bit and 16-bit headers (see `rgbaToThumbHash` for the bit layout).
  let header24 = h0 | (h1 << 8) | (h2 << 16)
  let header16 = h3 | (h4 << 8)
  let il_dc = header24 & 63
  let ip_dc = (header24 >> 6) & 63
  let iq_dc = (header24 >> 12) & 63
  var l_dc = Float32(il_dc)
  var p_dc = Float32(ip_dc)
  var q_dc = Float32(iq_dc)
  // L is in [0, 1]; P and Q are in [-1, 1].
  l_dc = l_dc / 63
  p_dc = p_dc / 31.5 - 1
  q_dc = q_dc / 31.5 - 1
  let il_scale = (header24 >> 18) & 31
  var l_scale = Float32(il_scale)
  l_scale = l_scale / 31
  let hasAlpha = (header24 >> 23) != 0
  let ip_scale = (header16 >> 3) & 63
  let iq_scale = (header16 >> 9) & 63
  var p_scale = Float32(ip_scale)
  var q_scale = Float32(iq_scale)
  p_scale = p_scale / 63
  q_scale = q_scale / 63
  let isLandscape = (header16 >> 15) != 0
  // Only the shorter axis' luminance count is stored; the longer one is implied (7, or 5 with alpha).
  let lx16 = max(3, isLandscape ? hasAlpha ? 5 : 7 : header16 & 7)
  let ly16 = max(3, isLandscape ? header16 & 7 : hasAlpha ? 5 : 7)
  let lx = Int(lx16)
  let ly = Int(ly16)
  var a_dc = Float32(1)
  var a_scale = Float32(1)
  if hasAlpha {
    // Alpha DC and scale share byte 5 (4 bits each).
    let ia_dc = hash[5] & 15
    let ia_scale = hash[5] >> 4
    a_dc = Float32(ia_dc)
    a_scale = Float32(ia_scale)
    a_dc /= 15
    a_scale /= 15
  }
  // Read the varying factors (boost saturation by 1.25x to compensate for quantization)
  let ac_start = hasAlpha ? 6 : 5
  var ac_index = 0
  let decodeChannel = { (nx: Int, ny: Int, scale: Float32) -> [Float32] in
    var ac: [Float32] = []
    for cy in 0 ..< ny {
      var cx = cy > 0 ? 0 : 1
      while cx * ny < nx * (ny - cy) {
        // Two 4-bit coefficients are packed per byte; `ac_index` selects the nibble.
        let iac = (hash[ac_start + (ac_index >> 1)] >> ((ac_index & 1) << 2)) & 15
        var fac = Float32(iac)
        // Map a 0...15 nibble back to [-1, 1] and apply the channel's scale.
        fac = (fac / 7.5 - 1) * scale
        ac.append(fac)
        ac_index += 1
        cx += 1
      }
    }
    return ac
  }
  let l_ac = decodeChannel(lx, ly, l_scale)
  let p_ac = decodeChannel(3, 3, p_scale * 1.25)
  let q_ac = decodeChannel(3, 3, q_scale * 1.25)
  let a_ac = hasAlpha ? decodeChannel(5, 5, a_scale) : []
  // Decode using the DCT into RGB
  let ratio = thumbHashToApproximateAspectRatio(hash: hash)
  // The longer side of the output bitmap is 32 pixels.
  let fw = round(ratio > 1 ? 32 : 32 * ratio)
  let fh = round(ratio > 1 ? 32 / ratio : 32)
  let w = Int(fw)
  let h = Int(fh)
  var rgba = Data(count: w * h * 4)
  let cx_stop = max(lx, hasAlpha ? 5 : 3)
  let cy_stop = max(ly, hasAlpha ? 5 : 3)
  var fx = [Float32](repeating: 0, count: cx_stop)
  var fy = [Float32](repeating: 0, count: cy_stop)
  fx.withUnsafeMutableBytes { fx in
    let fx = fx.baseAddress!.bindMemory(to: Float32.self, capacity: fx.count)
    fy.withUnsafeMutableBytes { fy in
      let fy = fy.baseAddress!.bindMemory(to: Float32.self, capacity: fy.count)
      rgba.withUnsafeMutableBytes { rgba in
        var rgba = rgba.baseAddress!.bindMemory(to: UInt8.self, capacity: rgba.count)
        var y = 0
        while y < h {
          var x = 0
          while x < w {
            var l = l_dc
            var p = p_dc
            var q = q_dc
            var a = a_dc
            // Precompute the coefficients
            var cx = 0
            while cx < cx_stop {
              let fw = Float32(w)
              let fxx = Float32(x)
              let fcx = Float32(cx)
              fx[cx] = cos(Float32.pi / fw * (fxx + 0.5) * fcx)
              cx += 1
            }
            var cy = 0
            while cy < cy_stop {
              let fh = Float32(h)
              let fyy = Float32(y)
              let fcy = Float32(cy)
              fy[cy] = cos(Float32.pi / fh * (fyy + 0.5) * fcy)
              cy += 1
            }
            // Decode L
            var j = 0
            cy = 0
            while cy < ly {
              var cx = cy > 0 ? 0 : 1
              let fy2 = fy[cy] * 2
              while cx * ly < lx * (ly - cy) {
                l += l_ac[j] * fx[cx] * fy2
                j += 1
                cx += 1
              }
              cy += 1
            }
            // Decode P and Q
            j = 0
            cy = 0
            while cy < 3 {
              var cx = cy > 0 ? 0 : 1
              let fy2 = fy[cy] * 2
              while cx < 3 - cy {
                let f = fx[cx] * fy2
                p += p_ac[j] * f
                q += q_ac[j] * f
                j += 1
                cx += 1
              }
              cy += 1
            }
            // Decode A
            if hasAlpha {
              j = 0
              cy = 0
              while cy < 5 {
                var cx = cy > 0 ? 0 : 1
                let fy2 = fy[cy] * 2
                while cx < 5 - cy {
                  a += a_ac[j] * fx[cx] * fy2
                  j += 1
                  cx += 1
                }
                cy += 1
              }
            }
            // Convert to RGB
            var b = l - 2 / 3 * p
            var r = (3 * l - b + q) / 2
            var g = r - q
            // Clamp to [0, 255] while scaling up.
            r = max(0, 255 * min(1, r))
            g = max(0, 255 * min(1, g))
            b = max(0, 255 * min(1, b))
            a = max(0, 255 * min(1, a))
            rgba[0] = UInt8(r)
            rgba[1] = UInt8(g)
            rgba[2] = UInt8(b)
            rgba[3] = UInt8(a)
            rgba = rgba.advanced(by: 4)
            x += 1
          }
          y += 1
        }
      }
    }
  }
  return (w, h, rgba)
}
/// Extracts the average color stored in a ThumbHash header without decoding the full image.
/// - Parameter hash: The ThumbHash bytes (at least 3; byte 5 is read only when alpha is encoded).
/// - Returns: The average color as (r, g, b, a) components clamped to [0, 1].
/// - Note: Expressions are deliberately split into separate statements — see the file header comment.
func thumbHashToAverageRGBA(hash: Data) -> (Float32, Float32, Float32, Float32) {
  // Reassemble the little-endian 24-bit header from the first three bytes.
  let byte0 = UInt32(hash[0])
  let byte1 = UInt32(hash[1])
  let byte2 = UInt32(hash[2])
  let header = byte0 | (byte1 << 8) | (byte2 << 16)
  // Unpack the quantised L (6 bits), P (6 bits) and Q (6 bits) DC terms.
  let rawL = header & 63
  let rawP = (header >> 6) & 63
  let rawQ = (header >> 12) & 63
  var l = Float32(rawL)
  var p = Float32(rawP)
  var q = Float32(rawQ)
  // L maps to [0, 1]; P and Q map to [-1, 1].
  l = l / 63
  p = p / 31.5 - 1
  q = q / 31.5 - 1
  // Bit 23 flags the presence of an alpha channel, whose DC lives in the low nibble of byte 5.
  let hasAlpha = (header >> 23) != 0
  var a = Float32(1)
  if hasAlpha {
    let rawA = hash[5] & 15
    a = Float32(rawA)
    a = a / 15
  }
  // Convert LPQ back to RGB and clamp each channel to the unit range.
  let b = l - 2 / 3 * p
  let r = (3 * l - b + q) / 2
  let g = r - q
  return (
    max(0, min(1, r)),
    max(0, min(1, g)),
    max(0, min(1, b)),
    a
  )
}
/// Derives the approximate aspect ratio (width / height) encoded in a ThumbHash header.
/// Only the shorter axis' luminance count is stored; the longer one is implied (7, or 5 with alpha).
func thumbHashToApproximateAspectRatio(hash: Data) -> Float32 {
  let sizeByte = hash[3]
  // Bit 7 of byte 2 is the alpha flag; bit 7 of byte 4 is the landscape flag.
  let hasAlpha = (hash[2] & 0x80) != 0
  let isLandscape = (hash[4] & 0x80) != 0
  let lx = isLandscape ? hasAlpha ? 5 : 7 : sizeByte & 7
  let ly = isLandscape ? sizeByte & 7 : hasAlpha ? 5 : 7
  return Float32(lx) / Float32(ly)
}
#if os(iOS) || os(tvOS)
import UIKit
/// Renders the image down so its longer side is 100 pixels and encodes the result as a ThumbHash.
/// If the bitmap context cannot be created, the (zero-initialized) buffer is encoded as-is.
func thumbHash(fromImage: UIImage) -> Data {
  let size = fromImage.size
  // Scale so the longer side becomes 100 pixels, preserving the aspect ratio.
  let w = Int(round(100 * size.width / max(size.width, size.height)))
  let h = Int(round(100 * size.height / max(size.width, size.height)))
  var rgba = Data(count: w * h * 4)
  rgba.withUnsafeMutableBytes { rgba in
    if
      let space = fromImage.cgImage?.colorSpace,
      let context = CGContext(
        data: rgba.baseAddress,
        width: w,
        height: h,
        bitsPerComponent: 8,
        bytesPerRow: w * 4,
        space: space,
        bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue
      )
    {
      // EXIF orientation only works if you draw the UIImage, not the CGImage
      // Flip vertically to map UIKit's coordinate system onto CoreGraphics'.
      context.concatenate(CGAffineTransform(1, 0, 0, -1, 0, CGFloat(h)))
      UIGraphicsPushContext(context)
      fromImage.draw(in: CGRect(x: 0, y: 0, width: w, height: h))
      UIGraphicsPopContext()
      // Convert from premultiplied alpha to unpremultiplied alpha
      var rgba = rgba.baseAddress!.bindMemory(to: UInt8.self, capacity: rgba.count)
      let n = w * h
      var i = 0
      while i < n {
        let a = UInt16(rgba[3])
        // Fully transparent and fully opaque pixels need no adjustment.
        if a > 0 && a < 255 {
          var r = UInt16(rgba[0])
          var g = UInt16(rgba[1])
          var b = UInt16(rgba[2])
          r = min(255, r * 255 / a)
          g = min(255, g * 255 / a)
          b = min(255, b * 255 / a)
          rgba[0] = UInt8(r)
          rgba[1] = UInt8(g)
          rgba[2] = UInt8(b)
        }
        rgba = rgba.advanced(by: 4)
        i += 1
      }
    }
  }
  return rgbaToThumbHash(w: w, h: h, rgba: rgba)
}
/// Decodes a ThumbHash into a `UIImage` whose longer side is 32 pixels.
/// - Note(review): the trailing force-unwraps assume `CGDataProvider`/`CGImage` creation cannot
///   fail for buffers produced by `thumbHashToRGBA` — confirm before relying on arbitrary input.
func image(fromThumbhash: Data) -> UIImage {
  var (w, h, rgba) = thumbHashToRGBA(hash: fromThumbhash)
  rgba.withUnsafeMutableBytes { rgba in
    // Convert from unpremultiplied alpha to premultiplied alpha
    var rgba = rgba.baseAddress!.bindMemory(to: UInt8.self, capacity: rgba.count)
    let n = w * h
    var i = 0
    while i < n {
      let a = UInt16(rgba[3])
      // Fully opaque pixels need no adjustment.
      if a < 255 {
        var r = UInt16(rgba[0])
        var g = UInt16(rgba[1])
        var b = UInt16(rgba[2])
        r = min(255, r * a / 255)
        g = min(255, g * a / 255)
        b = min(255, b * a / 255)
        rgba[0] = UInt8(r)
        rgba[1] = UInt8(g)
        rgba[2] = UInt8(b)
      }
      rgba = rgba.advanced(by: 4)
      i += 1
    }
  }
  let image = CGImage(
    width: w,
    height: h,
    bitsPerComponent: 8,
    bitsPerPixel: 32,
    bytesPerRow: w * 4,
    space: CGColorSpaceCreateDeviceRGB(),
    bitmapInfo: CGBitmapInfo(rawValue: CGBitmapInfo.byteOrder32Big.rawValue | CGImageAlphaInfo.premultipliedLast.rawValue),
    provider: CGDataProvider(data: rgba as CFData)!,
    decode: nil,
    shouldInterpolate: true,
    intent: .perceptual
  )
  return UIImage(cgImage: image!)
}
#endif