From be7bdc551afe4e60fb0b3ac044743e82d91f6139 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roman=20Kr=C4=8Dek?= Date: Mon, 11 Aug 2025 16:42:55 +0200 Subject: [PATCH] Performance optimization --- src/lib/components/wizard/StepGallery.svelte | 99 +++++++++++++++---- .../wizard/subcomponents/PhotoCard.svelte | 6 +- src/types/heic-convert-browser.d.ts | 4 + 3 files changed, 86 insertions(+), 23 deletions(-) create mode 100644 src/types/heic-convert-browser.d.ts diff --git a/src/lib/components/wizard/StepGallery.svelte b/src/lib/components/wizard/StepGallery.svelte index 9ccc0f9..5e2ae12 100644 --- a/src/lib/components/wizard/StepGallery.svelte +++ b/src/lib/components/wizard/StepGallery.svelte @@ -39,9 +39,43 @@ console.log('BlazeFace model loaded'); })(); } + return detectorPromise; } +// Create a downscaled JPEG preview to reduce memory usage for UI rendering +async function createPreviewBlob(original: Blob, maxSide = 1200, quality = 0.85): Promise { + try { + const bitmap = await createImageBitmap(original); + let { width, height } = bitmap; + const maxDim = Math.max(width, height); + // If image is already at or below the threshold, keep it as-is + if (maxDim <= maxSide) { + bitmap.close(); + return original; + } + const scale = Math.min(1, maxSide / maxDim); + const targetW = Math.max(1, Math.round(width * scale)); + const targetH = Math.max(1, Math.round(height * scale)); + + const canvas = document.createElement('canvas'); + canvas.width = targetW; + canvas.height = targetH; + const ctx = canvas.getContext('2d'); + if (!ctx) throw new Error('Canvas 2D context unavailable'); + ctx.drawImage(bitmap, 0, 0, targetW, targetH); + bitmap.close(); + + const blob = await new Promise((resolve, reject) => + canvas.toBlob((b) => (b ? 
resolve(b) : reject(new Error('toBlob failed'))), 'image/jpeg', quality) + ); + return blob; + } catch (e) { + // Fallback to original if downscale fails + return original; + } +} + // Force memory cleanup async function forceMemoryCleanup() { await tf.nextFrame(); // Wait for any pending GPU operations @@ -185,7 +219,10 @@ quality: 0.9 }); - const convertedBlob = new Blob([outputBuffer], { type: 'image/jpeg' }); + const buffer = outputBuffer instanceof Uint8Array + ? outputBuffer.buffer.slice(outputBuffer.byteOffset, outputBuffer.byteOffset + outputBuffer.byteLength) + : outputBuffer; + const convertedBlob = new Blob([buffer as ArrayBuffer], { type: 'image/jpeg' }); // Now that it's converted, process it like any other image await processLoadedBlob(index, convertedBlob); @@ -200,26 +237,30 @@ async function processLoadedBlob(index: number, blob: Blob) { const photo = photos[index]; try { - const objectUrl = createImageObjectUrl(blob); + // Downsize once and use this for storage, preview, and detection + const resizedBlob = await createPreviewBlob(blob, 1600, 0.85); + await set(photo.url, resizedBlob); + const objectUrl = createImageObjectUrl(resizedBlob); - // Test if image loads properly + // Test if downsized image loads properly await new Promise((resolve, reject) => { const img = new Image(); img.onload = () => resolve(); img.onerror = (error) => { - console.error(`Failed to load image for ${photo.name}:`, error); + console.error(`Failed to load downsized image for ${photo.name}:`, error); reject(new Error('Failed to load image')); }; img.src = objectUrl; }); + // Revoke any previous preview URL to avoid leaks + if (photo.objectUrl && photo.objectUrl.startsWith('blob:') && photo.objectUrl !== objectUrl) { + URL.revokeObjectURL(photo.objectUrl); + } + photo.objectUrl = objectUrl; photo.status = 'success'; - // Save blob to IndexedDB instead of the store - await set(photo.url, blob); - - // Save to pictures store, but without the blob to save memory 
pictures.update((pics) => ({ ...pics, [photo.url]: { @@ -231,8 +272,8 @@ } })); - // Add face detection to its queue - faceDetectionQueue.add(() => detectFaceForPhoto(index)); + // Add face detection to its queue using the downsized image shown in UI + faceDetectionQueue.add(() => detectFaceForPhoto(index)); } catch (error) { console.error(`Failed to process blob for ${photo.name}:`, error); photo.status = 'error'; @@ -255,31 +296,44 @@ photo.faceDetectionStatus = 'processing'; const img = new Image(); img.crossOrigin = 'anonymous'; - img.src = photo.objectUrl!; + // Use the downsized UI image to keep coordinates aligned + img.src = photo.objectUrl!; await new Promise((r, e) => { img.onload = r; img.onerror = e; }); - // Create tensor and manually dispose it after use - imageTensor = tf.browser.fromPixels(img); - const predictions = await detector.estimateFaces(imageTensor, false); + // Create tensor; run estimation (avoid tf.tidy here to not dispose returned tensors prematurely) + imageTensor = tf.browser.fromPixels(img); + const predictions: any[] = await detector.estimateFaces(imageTensor, false); if (predictions.length > 0) { - const getProbability = (p: number | tf.Tensor) => - typeof p === 'number' ? p : p.dataSync()[0]; + const tensorToNumArray = (v: any): number[] => { + if (Array.isArray(v)) return v as number[]; + if (typeof v === 'number') return [v]; + if (v && typeof v.dataSync === 'function') { + const arr = Array.from(v.dataSync() as Float32Array); + if (typeof v.dispose === 'function') v.dispose(); + return arr as number[]; + } + return []; + }; + const getProbability = (p: any) => tensorToNumArray(p)[0] ?? 0; const face = predictions.sort( (a, b) => getProbability(b.probability!) - getProbability(a.probability!) 
)[0]; - const topLeft = face.topLeft as [number, number]; - const bottomRight = face.bottomRight as [number, number]; + const topLeftArr = tensorToNumArray(face.topLeft); + const bottomRightArr = tensorToNumArray(face.bottomRight); + const topLeft = [topLeftArr[0], topLeftArr[1]] as [number, number]; + const bottomRight = [bottomRightArr[0], bottomRightArr[1]] as [number, number]; let [x1, y1] = topLeft; let [x2, y2] = bottomRight; - const scaleX = img.naturalWidth / img.width; - const scaleY = img.naturalHeight / img.height; + // Use natural sizes; detection ran on original if provided + const scaleX = 1; + const scaleY = 1; const faceWidth = (x2 - x1) * scaleX; const faceHeight = (y2 - y1) * scaleY; const faceCenterX = (x1 + (x2 - x1) / 2) * scaleX; @@ -411,6 +465,11 @@ faceDetectionQueue.clear(); } cleanupObjectUrls(); + // Dispose the detector model if possible to release GPU/CPU memory + if (detector && typeof (detector as any).dispose === 'function') { + (detector as any).dispose(); + } + detector = undefined; }; }); diff --git a/src/lib/components/wizard/subcomponents/PhotoCard.svelte b/src/lib/components/wizard/subcomponents/PhotoCard.svelte index 78e54b3..7ffca1e 100644 --- a/src/lib/components/wizard/subcomponents/PhotoCard.svelte +++ b/src/lib/components/wizard/subcomponents/PhotoCard.svelte @@ -108,8 +108,8 @@ {/if} -
-
+
+

{photo.name}

{#if photo.faceDetectionStatus === 'completed'} Face detected @@ -125,7 +125,7 @@