Performance optimization
@@ -39,9 +39,43 @@
       console.log('BlazeFace model loaded');
     })();
   }

   return detectorPromise;
 }

+// Create a downscaled JPEG preview to reduce memory usage for UI rendering
+async function createPreviewBlob(original: Blob, maxSide = 1200, quality = 0.85): Promise<Blob> {
+  try {
+    const bitmap = await createImageBitmap(original);
+    let { width, height } = bitmap;
+    const maxDim = Math.max(width, height);
+    // If image is already at or below the threshold, keep it as-is
+    if (maxDim <= maxSide) {
+      bitmap.close();
+      return original;
+    }
+    const scale = Math.min(1, maxSide / maxDim);
+    const targetW = Math.max(1, Math.round(width * scale));
+    const targetH = Math.max(1, Math.round(height * scale));
+
+    const canvas = document.createElement('canvas');
+    canvas.width = targetW;
+    canvas.height = targetH;
+    const ctx = canvas.getContext('2d');
+    if (!ctx) throw new Error('Canvas 2D context unavailable');
+    ctx.drawImage(bitmap, 0, 0, targetW, targetH);
+    bitmap.close();
+
+    const blob = await new Promise<Blob>((resolve, reject) =>
+      canvas.toBlob((b) => (b ? resolve(b) : reject(new Error('toBlob failed'))), 'image/jpeg', quality)
+    );
+    return blob;
+  } catch (e) {
+    // Fallback to original if downscale fails
+    return original;
+  }
+}
+
 // Force memory cleanup
 async function forceMemoryCleanup() {
+  await tf.nextFrame(); // Wait for any pending GPU operations
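
A quick way to sanity-check the new helper in isolation; the file input below is illustrative, not part of this commit:

```ts
// Hypothetical standalone check of createPreviewBlob (added above): pick a
// file, downscale it to at most 1200px on the longest side, and compare
// sizes. The helper falls back to the original blob when the image is
// already small enough or decoding fails.
const input = document.querySelector<HTMLInputElement>('input[type="file"]');
input?.addEventListener('change', async () => {
  const file = input.files?.[0];
  if (!file) return;
  const preview = await createPreviewBlob(file); // defaults: maxSide=1200, quality=0.85
  console.log(`original: ${file.size} B, preview: ${preview.size} B`);
});
```
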
@@ -185,7 +219,10 @@
     quality: 0.9
   });

-  const convertedBlob = new Blob([outputBuffer], { type: 'image/jpeg' });
+  const buffer = outputBuffer instanceof Uint8Array
+    ? outputBuffer.buffer.slice(outputBuffer.byteOffset, outputBuffer.byteOffset + outputBuffer.byteLength)
+    : outputBuffer;
+  const convertedBlob = new Blob([buffer as ArrayBuffer], { type: 'image/jpeg' });

   // Now that it's converted, process it like any other image
   await processLoadedBlob(index, convertedBlob);
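
The `byteOffset`/`byteLength` slice is the substance of this fix: a `Uint8Array` returned by a library may be a view into a larger backing buffer, and passing `view.buffer` straight to `Blob` would include bytes outside the view. A standalone illustration, independent of heic-convert:

```ts
// A Uint8Array view does not have to cover its whole backing buffer.
const backing = new ArrayBuffer(16);
const view = new Uint8Array(backing, 4, 8); // 8-byte window starting at offset 4

// view.buffer is the full 16-byte buffer, not just the window...
console.log(view.buffer.byteLength); // 16

// ...so slice out exactly the bytes the view covers before wrapping in a Blob.
const exact = view.buffer.slice(view.byteOffset, view.byteOffset + view.byteLength);
console.log(exact.byteLength); // 8
```
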
@@ -200,26 +237,30 @@
 async function processLoadedBlob(index: number, blob: Blob) {
   const photo = photos[index];
   try {
-    const objectUrl = createImageObjectUrl(blob);
+    // Downsize once and use this for storage, preview, and detection
+    const resizedBlob = await createPreviewBlob(blob, 1600, 0.85);
+    await set(photo.url, resizedBlob);
+    const objectUrl = createImageObjectUrl(resizedBlob);

-    // Test if image loads properly
+    // Test if downsized image loads properly
     await new Promise<void>((resolve, reject) => {
       const img = new Image();
       img.onload = () => resolve();
       img.onerror = (error) => {
-        console.error(`Failed to load image for ${photo.name}:`, error);
+        console.error(`Failed to load downsized image for ${photo.name}:`, error);
         reject(new Error('Failed to load image'));
       };
       img.src = objectUrl;
     });

+    // Revoke any previous preview URL to avoid leaks
+    if (photo.objectUrl && photo.objectUrl.startsWith('blob:') && photo.objectUrl !== objectUrl) {
+      URL.revokeObjectURL(photo.objectUrl);
+    }
+
     photo.objectUrl = objectUrl;
     photo.status = 'success';

-    // Save blob to IndexedDB instead of the store
-    await set(photo.url, blob);
-
     // Save to pictures store, but without the blob to save memory
     pictures.update((pics) => ({
       ...pics,
       [photo.url]: {
@@ -231,8 +272,8 @@
       }
     }));

-    // Add face detection to its queue
-    faceDetectionQueue.add(() => detectFaceForPhoto(index));
+    // Add face detection to its queue using the downsized image shown in UI
+    faceDetectionQueue.add(() => detectFaceForPhoto(index));
   } catch (error) {
     console.error(`Failed to process blob for ${photo.name}:`, error);
     photo.status = 'error';
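
The bare `set(photo.url, resizedBlob)` call reads like idb-keyval's API. Assuming that, the matching read path for restoring a stored preview would look roughly like this (`loadStoredPreview` is a hypothetical name, not part of the commit):

```ts
import { get } from 'idb-keyval';

// Hypothetical read path mirroring the write above: fetch the downsized
// JPEG saved under the photo's URL and turn it into a previewable object URL.
async function loadStoredPreview(url: string): Promise<string | undefined> {
  const blob = await get<Blob>(url);
  return blob ? URL.createObjectURL(blob) : undefined; // caller must revoke it later
}
```
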
@@ -255,31 +296,44 @@
     photo.faceDetectionStatus = 'processing';
     const img = new Image();
     img.crossOrigin = 'anonymous';
-    img.src = photo.objectUrl!;
+    // Use the downsized UI image to keep coordinates aligned
+    img.src = photo.objectUrl!;
     await new Promise((r, e) => {
       img.onload = r;
       img.onerror = e;
     });

-    // Create tensor and manually dispose it after use
-    imageTensor = tf.browser.fromPixels(img);
-    const predictions = await detector.estimateFaces(imageTensor, false);
+    // Create tensor; run estimation (avoid tf.tidy here to not dispose returned tensors prematurely)
+    imageTensor = tf.browser.fromPixels(img);
+    const predictions: any[] = await detector.estimateFaces(imageTensor, false);

     if (predictions.length > 0) {
-      const getProbability = (p: number | tf.Tensor) =>
-        typeof p === 'number' ? p : p.dataSync()[0];
+      const tensorToNumArray = (v: any): number[] => {
+        if (Array.isArray(v)) return v as number[];
+        if (typeof v === 'number') return [v];
+        if (v && typeof v.dataSync === 'function') {
+          const arr = Array.from(v.dataSync() as Float32Array);
+          if (typeof v.dispose === 'function') v.dispose();
+          return arr as number[];
+        }
+        return [];
+      };
+      const getProbability = (p: any) => tensorToNumArray(p)[0] ?? 0;

       const face = predictions.sort(
         (a, b) => getProbability(b.probability!) - getProbability(a.probability!)
       )[0];

-      const topLeft = face.topLeft as [number, number];
-      const bottomRight = face.bottomRight as [number, number];
+      const topLeftArr = tensorToNumArray(face.topLeft);
+      const bottomRightArr = tensorToNumArray(face.bottomRight);
+      const topLeft = [topLeftArr[0], topLeftArr[1]] as [number, number];
+      const bottomRight = [bottomRightArr[0], bottomRightArr[1]] as [number, number];

       let [x1, y1] = topLeft;
       let [x2, y2] = bottomRight;
-      const scaleX = img.naturalWidth / img.width;
-      const scaleY = img.naturalHeight / img.height;
+      // No rescaling needed: detection ran on the same downsized image shown in the UI
+      const scaleX = 1;
+      const scaleY = 1;
       const faceWidth = (x2 - x1) * scaleX;
       const faceHeight = (y2 - y1) * scaleY;
       const faceCenterX = (x1 + (x2 - x1) / 2) * scaleX;
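
Two notes on the detection changes. `tf.tidy` only accepts synchronous functions, so it cannot wrap the awaited `estimateFaces` call; the input tensor has to be disposed manually. And `tensorToNumArray` is defensive: BlazeFace can return `topLeft`, `bottomRight`, and `probability` as tensors when its `returnTensors` flag is true, although with `false` passed here they should already be plain values. A sketch of the manual-disposal shape, assuming the same `tf`, `detector`, and `img` as above:

```ts
// Manual tensor lifecycle around an async model call: tf.tidy() would
// dispose intermediates before the awaited promise resolves, so the
// input tensor is disposed explicitly once estimation has finished.
let imageTensor: tf.Tensor3D | undefined;
try {
  imageTensor = tf.browser.fromPixels(img);
  const predictions = await detector.estimateFaces(imageTensor, false);
  // ... consume predictions ...
} finally {
  imageTensor?.dispose();
}
```
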
@@ -411,6 +465,11 @@
       faceDetectionQueue.clear();
     }
     cleanupObjectUrls();
+    // Dispose the detector model if possible to release GPU/CPU memory
+    if (detector && typeof (detector as any).dispose === 'function') {
+      (detector as any).dispose();
+    }
+    detector = undefined;
   };
 });
</script>

@@ -108,8 +108,8 @@
       {/if}
     </div>

-    <div class="p-3 flex items-center justify-between">
-      <div>
+    <div class="esnSection p-3 flex items-center justify-between gap-2">
+      <div class="min-w-0 flex-1">
         <h4 class="font-medium text-sm text-gray-900 truncate">{photo.name}</h4>
         {#if photo.faceDetectionStatus === 'completed'}
           <span class="text-xs text-green-600">Face detected</span>
@@ -125,7 +125,7 @@
       </div>
       <button
         onclick={() => (showCropper = true)}
-        class="p-1 text-gray-500 hover:text-blue-600"
+        class="p-1 text-gray-500 hover:text-blue-600 shrink-0"
         title="Edit Crop"
         aria-label="Edit Crop"
       >
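
These class tweaks follow the standard flexbox truncation recipe: flex items default to `min-width: auto`, so the `truncate` on the `<h4>` can only take effect once `min-w-0` allows the text column to shrink below its content width, while `flex-1` lets it absorb the remaining space and `shrink-0` keeps the edit button from being squeezed.
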

src/types/heic-convert-browser.d.ts (new file, vendored, +4)
@@ -0,0 +1,4 @@
+declare module 'heic-convert/browser' {
+  const convert: (options: { buffer: Uint8Array; format: 'JPEG' | 'PNG'; quality?: number }) => Promise<ArrayBuffer | Uint8Array>;
+  export default convert;
+}
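
Tying the declaration back to the conversion hunk above, an end-to-end HEIC-to-JPEG helper would look roughly like this; `heicToJpegBlob` is an illustrative name, and the body mirrors the commit's own call:

```ts
import convert from 'heic-convert/browser';

// Sketch: convert a HEIC file to a JPEG blob, normalizing the
// ArrayBuffer | Uint8Array return type the declaration allows.
async function heicToJpegBlob(file: File): Promise<Blob> {
  const output = await convert({
    buffer: new Uint8Array(await file.arrayBuffer()),
    format: 'JPEG',
    quality: 0.9
  });
  const bytes = output instanceof Uint8Array
    ? output.buffer.slice(output.byteOffset, output.byteOffset + output.byteLength)
    : output;
  return new Blob([bytes as ArrayBuffer], { type: 'image/jpeg' });
}
```
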