Crop works nicely

Roman Krček
2025-07-17 21:12:26 +02:00
parent 4f119dc121
commit c695664784
2 changed files with 113 additions and 409 deletions

View File

@@ -1,259 +1,21 @@
<script lang="ts">
import { onMount } from 'svelte';
import { createEventDispatcher } from 'svelte';
import PhotoCrop from './PhotoCrop.svelte';
import * as tf from '@tensorflow/tfjs';
import * as blazeface from '@tensorflow-models/blazeface';
export let imageUrl: string;
export let personName: string;
export let isProcessing = false;
export let cropData: { x: number; y: number; width: number; height: number } | null = null;
const dispatch = createEventDispatcher<{
cropUpdated: { x: number; y: number; width: number; height: number };
faceDetectionStarted: void;
faceDetectionCompleted: { success: boolean; hasAutoDetectedCrop: boolean };
}>();
let showCropEditor = false;
let autoDetectedCrop: { x: number; y: number; width: number; height: number } | null = null;
let currentCrop: { x: number; y: number; width: number; height: number } | null = null;
let isDetectingFace = false;
let faceDetectionError = false;
let detector: any = null;
let isModelLoading = false;
let isDownloadingModel = false;
let currentCrop = cropData;
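// photoElement: presumably bound (bind:this) to the <img> below; it is both the BlazeFace input and the source of natural vs. displayed sizes for coordinate scaling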
let photoElement: HTMLImageElement;
onMount(async () => {
console.log('PhotoCard mounted, initializing face detection...');
await initializeFaceDetection();
});
async function initializeFaceDetection() {
try {
isDownloadingModel = true;
console.log('Downloading BlazeFace model...');
// Initialize TensorFlow.js with WebGL backend for better performance
await tf.setBackend('webgl');
await tf.ready();
console.log('TensorFlow.js WebGL backend initialized');
isDownloadingModel = false;
isModelLoading = true;
console.log('Loading BlazeFace model...');
// Load the BlazeFace model
detector = await blazeface.load();
isModelLoading = false;
console.log('BlazeFace model loaded successfully with WebGL backend');
} catch (error) {
console.error('Failed to initialize BlazeFace with WebGL:', error);
console.log('Falling back to CPU backend...');
try {
// Fallback to CPU if WebGL fails
await tf.setBackend('cpu');
await tf.ready();
console.log('TensorFlow.js CPU backend initialized as fallback');
detector = await blazeface.load();
isModelLoading = false;
console.log('BlazeFace model loaded successfully with CPU backend');
} catch (fallbackError) {
console.error('Failed to initialize BlazeFace with CPU fallback:', fallbackError);
isDownloadingModel = false;
isModelLoading = false;
faceDetectionError = true;
}
}
}
// Simple face detection using BlazeFace
async function detectFaceWithMediaPipe() {
if (!photoElement || isDetectingFace || !detector) return;
dispatch('faceDetectionStarted');
isDetectingFace = true;
faceDetectionError = false;
try {
console.log('Detecting faces with BlazeFace...');
// Detect faces in the image
const predictions = await detector.estimateFaces(photoElement, false);
console.log(`BlazeFace found ${predictions.length} faces`);
if (predictions.length > 0) {
// Find the face with the highest probability
let bestFace = predictions[0];
let highestProbability = predictions[0].probability ? predictions[0].probability[0] : 0;
for (let i = 1; i < predictions.length; i++) {
const face = predictions[i];
const probability = face.probability ? face.probability[0] : 0;
if (probability > highestProbability) {
bestFace = face;
highestProbability = probability;
}
}
console.log(`Selected face with probability: ${highestProbability}`);
// Use the best detected face
const face = bestFace;
// BlazeFace returns topLeft and bottomRight coordinates
// These coordinates are relative to the DISPLAYED image size, not natural size
let [x1, y1] = face.topLeft;
let [x2, y2] = face.bottomRight;
console.log('BlazeFace detection (displayed coordinates):', { x1, y1, x2, y2 });
console.log('Image dimensions:', {
natural: { width: photoElement.naturalWidth, height: photoElement.naturalHeight },
displayed: { width: photoElement.clientWidth, height: photoElement.clientHeight }
});
// Calculate scale factors to convert from displayed to natural coordinates
const scaleX = photoElement.naturalWidth / photoElement.clientWidth;
const scaleY = photoElement.naturalHeight / photoElement.clientHeight;
console.log('Scale factors:', { scaleX, scaleY });
// Scale coordinates to natural image size
x1 = x1 * scaleX;
y1 = y1 * scaleY;
x2 = x2 * scaleX;
y2 = y2 * scaleY;
let faceWidth = x2 - x1;
let faceHeight = y2 - y1;
console.log('Scaled coordinates (natural size):', { x1, y1, x2, y2, faceWidth, faceHeight });
// BlazeFace coordinates are relative to the input image size
// Verify coordinates are within bounds and reasonable
if (x1 < 0 || y1 < 0 || x2 > photoElement.naturalWidth || y2 > photoElement.naturalHeight) {
console.warn('BlazeFace coordinates out of bounds, clamping:', { x1, y1, x2, y2 });
// Clamp coordinates to image bounds
x1 = Math.max(0, x1);
y1 = Math.max(0, y1);
x2 = Math.min(photoElement.naturalWidth, x2);
y2 = Math.min(photoElement.naturalHeight, y2);
// Recalculate dimensions
faceWidth = x2 - x1;
faceHeight = y2 - y1;
console.log('Clamped coordinates:', { x1, y1, x2, y2, faceWidth, faceHeight });
}
// Final validation - ensure we have a reasonable face size
if (faceWidth <= 0 || faceHeight <= 0) {
console.error('Invalid face dimensions after clamping');
throw new Error('Invalid face dimensions');
}
// Recalculate face dimensions after any clamping
const finalFaceWidth = x2 - x1;
const finalFaceHeight = y2 - y1;
// // Validate face size - reject if too small
// const faceArea = finalFaceWidth * finalFaceHeight;
// const imageArea = photoElement.naturalWidth * photoElement.naturalHeight;
// const faceRatio = faceArea / imageArea;
// console.log('Face area ratio:', faceRatio);
// // Only reject if smaller than 0.5% of image (very small noise)
// if (faceRatio < 0.005) {
// console.log('Face rejected: too small');
// throw new Error('Face too small');
// }
// Create crop area with environment-based configuration
const cropRatio = parseFloat(import.meta.env.VITE_CROP_RATIO || '1.0');
const faceOffsetX = parseFloat(import.meta.env.VITE_FACE_OFFSET_X || '0.0');
const faceOffsetY = parseFloat(import.meta.env.VITE_FACE_OFFSET_Y || '-0.1');
const cropScale = parseFloat(import.meta.env.VITE_CROP_SCALE || '2.5');
console.log('Crop configuration:', { cropRatio, faceOffsetX, faceOffsetY, cropScale });
// Calculate face center
const faceCenterX = x1 + finalFaceWidth / 2;
const faceCenterY = y1 + finalFaceHeight / 2;
// Calculate crop dimensions based on face width and scale
const cropWidth = finalFaceWidth * cropScale;
const cropHeight = cropWidth / cropRatio; // Maintain aspect ratio
// Apply face offset to crop center (offset is percentage of crop dimensions)
const cropCenterX = faceCenterX + (cropWidth * faceOffsetX);
const cropCenterY = faceCenterY + (cropHeight * faceOffsetY);
// Ensure crop fits within image bounds while maintaining aspect ratio
let finalCropWidth = cropWidth;
let finalCropHeight = cropHeight;
// Check if crop exceeds image bounds and scale down proportionally if needed
const maxWidth = photoElement.naturalWidth;
const maxHeight = photoElement.naturalHeight;
if (finalCropWidth > maxWidth || finalCropHeight > maxHeight) {
// Scale down to fit within bounds while maintaining ratio
const scaleToFitWidth = maxWidth / finalCropWidth;
const scaleToFitHeight = maxHeight / finalCropHeight;
const scaleToFit = Math.min(scaleToFitWidth, scaleToFitHeight);
finalCropWidth = finalCropWidth * scaleToFit;
finalCropHeight = finalCropHeight * scaleToFit;
console.log('Scaled crop to fit bounds:', { scaleToFit, finalCropWidth, finalCropHeight });
}
// Calculate crop position (top-left corner) with properly sized crop
const cropCenterXAdjusted = faceCenterX + (finalCropWidth * faceOffsetX);
const cropCenterYAdjusted = faceCenterY + (finalCropHeight * faceOffsetY);
const cropX = Math.max(0, Math.min(cropCenterXAdjusted - finalCropWidth / 2, photoElement.naturalWidth - finalCropWidth));
const cropY = Math.max(0, Math.min(cropCenterYAdjusted - finalCropHeight / 2, photoElement.naturalHeight - finalCropHeight));
console.log('Crop calculation:', {
faceCenter: { x: faceCenterX, y: faceCenterY },
cropDimensions: { width: cropWidth, height: cropHeight },
cropCenter: { x: cropCenterX, y: cropCenterY },
finalCrop: { x: cropX, y: cropY, width: finalCropWidth, height: finalCropHeight },
aspectRatio: finalCropWidth / finalCropHeight
});
autoDetectedCrop = {
x: Math.round(cropX),
y: Math.round(cropY),
width: Math.round(finalCropWidth),
height: Math.round(finalCropHeight)
};
currentCrop = { ...autoDetectedCrop };
dispatch('cropUpdated', currentCrop);
dispatch('faceDetectionCompleted', { success: true, hasAutoDetectedCrop: true });
console.log('BlazeFace detection successful!', autoDetectedCrop);
return;
}
// No faces detected
throw new Error('No faces detected by BlazeFace');
} catch (error) {
console.error('BlazeFace detection failed:', error);
faceDetectionError = true;
dispatch('faceDetectionCompleted', { success: false, hasAutoDetectedCrop: false });
// Don't fall back to anything - just leave it as an error state
} finally {
isDetectingFace = false;
}
}
function openCropEditor() {
showCropEditor = true;
}
@@ -261,17 +23,14 @@
function handleCropSave(e: CustomEvent<{ x: number; y: number; width: number; height: number }>) {
currentCrop = e.detail;
showCropEditor = false;
dispatch('cropUpdated', currentCrop!);
}
function handleCropCancel() {
showCropEditor = false;
}
// Try face detection when image and detector are ready
$: if (imageUrl && photoElement && detector && !isDetectingFace && !autoDetectedCrop) {
detectFaceWithMediaPipe();
}
$: if (cropData) currentCrop = cropData;
</script>
<div class="relative group">
@@ -281,50 +40,8 @@
src={imageUrl}
alt={personName}
class="w-full h-full object-cover"
on:load={detectFaceWithMediaPipe}
/>
<!-- Small notification bars for all states -->
{#if isDownloadingModel}
<div class="absolute top-2 left-2 right-2 bg-blue-500/95 text-white px-3 py-2 rounded text-xs font-medium flex items-center space-x-2 shadow-lg">
<svg class="w-3 h-3 animate-spin" viewBox="0 0 24 24">
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4" fill="none"/>
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"/>
</svg>
<span>Downloading AI Model...</span>
<div class="flex-1 bg-white/20 rounded-full h-1 ml-2">
<div class="bg-white h-1 rounded-full animate-pulse" style="width: 30%"></div>
</div>
</div>
{:else if isModelLoading}
<div class="absolute top-2 left-2 right-2 bg-purple-500/95 text-white px-3 py-2 rounded text-xs font-medium flex items-center space-x-2 shadow-lg">
<svg class="w-3 h-3 animate-spin" viewBox="0 0 24 24">
<circle class="opacity-25" cx="12" cy="12" r="10" stroke="currentColor" stroke-width="4" fill="none"/>
<path class="opacity-75" fill="currentColor" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"/>
</svg>
<span>Loading AI Model...</span>
<div class="flex-1 bg-white/20 rounded-full h-1 ml-2">
<div class="bg-white h-1 rounded-full animate-pulse" style="width: 60%"></div>
</div>
</div>
{:else if isDetectingFace}
<div class="absolute top-2 left-2 right-2 bg-green-500/95 text-white px-3 py-2 rounded text-xs font-medium flex items-center space-x-2 shadow-lg">
<svg class="w-3 h-3 animate-pulse" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M16 7a4 4 0 11-8 0 4 4 0 018 0zM12 14a7 7 0 00-7 7h14a7 7 0 00-7-7z"/>
</svg>
<span>Detecting Face
<span class="inline-flex ml-1">
<span class="animate-pulse">.</span>
<span class="animate-pulse" style="animation-delay: 0.2s">.</span>
<span class="animate-pulse" style="animation-delay: 0.4s">.</span>
</span>
</span>
<div class="flex-1 bg-white/20 rounded-full h-1 ml-2">
<div class="bg-white h-1 rounded-full animate-pulse" style="width: 80%"></div>
</div>
</div>
{/if}
{#if currentCrop}
<!-- Show crop preview overlay with proper masking -->
<div class="absolute inset-0 pointer-events-none">
@@ -352,33 +69,12 @@
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M15.232 5.232l3.536 3.536m-2.036-5.036a2.5 2.5 0 113.536 3.536L6.5 21.036H3v-3.572L16.732 3.732z"/>
</svg>
</button>
<!-- Status indicators -->
<div class="absolute bottom-2 left-2 flex space-x-1">
{#if faceDetectionError}
<div class="bg-yellow-500 text-white px-2 py-1 rounded text-xs font-medium">
Manual crop
</div>
{:else if currentCrop && autoDetectedCrop && JSON.stringify(currentCrop) !== JSON.stringify(autoDetectedCrop)}
<div class="bg-blue-500 text-white px-2 py-1 rounded text-xs font-medium">
Custom crop
</div>
{:else if autoDetectedCrop}
<div class="bg-green-500 text-white px-2 py-1 rounded text-xs font-medium">
Auto-cropped
</div>
{/if}
</div>
</div>
<div class="mt-2">
<p class="text-sm font-medium text-gray-900 truncate">{personName}</p>
{#if isProcessing}
<p class="text-xs text-gray-500">Processing...</p>
{:else if faceDetectionError}
<p class="text-xs text-yellow-600">Using center crop</p>
{:else if autoDetectedCrop}
<p class="text-xs text-green-600">Face detected</p>
{/if}
</div>
</div>

View File

@@ -3,7 +3,15 @@
import { columnMapping, filteredSheetData, currentStep } from '$lib/stores';
import { downloadDriveImage, isGoogleDriveUrl, createImageObjectUrl } from '$lib/google';
import PhotoCard from '../PhotoCard.svelte';
import * as tf from '@tensorflow/tfjs';
import * as blazeface from '@tensorflow-models/blazeface';
let photos: PhotoInfo[] = [];
let isProcessing = false;
let processedCount = 0;
let totalCount = 0;
let detector: blazeface.BlazeFaceModel;
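// Single BlazeFace instance, loaded once in onMount and reused for every photo in the gallery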
interface PhotoInfo {
name: string;
url: string;
@@ -13,46 +21,40 @@
cropData?: { x: number; y: number; width: number; height: number };
faceDetectionStatus?: 'pending' | 'processing' | 'completed' | 'failed';
}
let faceDetectionInProgress = false;
let faceDetectionCount = { started: 0, completed: 0 };
// Initialize detector and process photos
onMount(async () => {
console.log('StepGallery mounted, initializing face detector...');
await tf.setBackend('webgl');
await tf.ready();
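// WebGL backend only; unlike the old PhotoCard path, there is no CPU fallback here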
detector = await blazeface.load();
console.log('BlazeFace model loaded');
if ($filteredSheetData.length > 0 && $columnMapping.pictureUrl !== undefined) {
console.log('Processing photos for gallery step');
processPhotos();
} else {
console.log('No data to process:', { dataLength: $filteredSheetData.length, pictureUrlMapping: $columnMapping.pictureUrl });
}
});
async function processPhotos() {
if (isProcessing) return;
console.log('Starting processPhotos...');
isProcessing = true;
processedCount = 0;
// Get valid and included rows from filteredSheetData
const validRows = $filteredSheetData.filter(row => row._isValid);
console.log(`Found ${validRows.length} valid rows`);
// Get unique photos to process
const photoUrls = new Set<string>();
const photoMap = new Map<string, any[]>(); // url -> row data
validRows.forEach((row: any) => {
const photoUrl = row.pictureUrl;
if (photoUrl && photoUrl.trim()) {
photoUrls.add(photoUrl.trim());
if (!photoMap.has(photoUrl.trim())) {
@@ -61,10 +63,10 @@
photoMap.get(photoUrl.trim())!.push(row);
}
});
console.log(`Found ${photoUrls.size} unique photo URLs`);
totalCount = photoUrls.size;
// Initialize photos array
photos = Array.from(photoUrls).map(url => ({
name: photoMap.get(url)![0].name + ' ' + photoMap.get(url)![0].surname, // Use first person's name for display
@@ -73,27 +75,27 @@
retryCount: 0,
faceDetectionStatus: 'pending' as const
}));
// Process each photo
for (let i = 0; i < photos.length; i++) {
await loadPhoto(i);
await detectFaceForPhoto(i);
processedCount++;
}
isProcessing = false;
}
async function loadPhoto(index: number, isRetry = false) {
const photo = photos[index];
if (!isRetry) {
photo.status = 'loading';
photos = [...photos]; // Trigger reactivity
}
try {
let objectUrl: string;
if (isGoogleDriveUrl(photo.url)) {
// Download from Google Drive
console.log(`Downloading from Google Drive: ${photo.name}`);
@@ -103,7 +105,7 @@
// Use direct URL
objectUrl = photo.url;
}
// Test if image loads properly
await new Promise<void>((resolve, reject) => {
const img = new Image();
@@ -114,66 +116,96 @@
};
img.src = objectUrl;
});
photo.objectUrl = objectUrl;
photo.status = 'success';
console.log(`Photo loaded successfully: ${photo.name}`);
// Automatically run face detection to generate crop
await detectFaceForPhoto(index);
} catch (error) {
console.error(`Failed to load photo for ${photo.name}:`, error);
photo.status = 'error';
}
photos = [...photos]; // Trigger reactivity
}
async function detectFaceForPhoto(index: number) {
try {
photos[index].faceDetectionStatus = 'processing';
const img = new Image();
img.crossOrigin = 'anonymous';
img.src = photos[index].objectUrl!;
await new Promise((r, e) => { img.onload = r; img.onerror = e; });
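// estimateFaces(img, false) returns plain JS objects (returnTensors = false); an empty array means no face was detected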
const predictions = await detector.estimateFaces(img, false);
if (predictions.length > 0) {
const face = predictions.sort((a,b) => (b.probability?.[0]||0) - (a.probability?.[0]||0))[0];
// Coordinates in displayed image space
let [x1,y1] = face.topLeft;
let [x2,y2] = face.bottomRight;
// Scale to natural image size
const scaleX = img.naturalWidth / img.width;
const scaleY = img.naturalHeight / img.height;
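// For an off-screen Image created above, width === naturalWidth, so these factors are effectively 1; kept as a safeguard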
const faceWidth = (x2 - x1) * scaleX;
const faceHeight = (y2 - y1) * scaleY;
const faceCenterX = (x1 + (x2 - x1)/2) * scaleX;
const faceCenterY = (y1 + (y2 - y1)/2) * scaleY;
// Load crop config from env
const cropRatio = parseFloat(import.meta.env.VITE_CROP_RATIO || '1.0');
const offsetX = parseFloat(import.meta.env.VITE_FACE_OFFSET_X || '0.0');
const offsetY = parseFloat(import.meta.env.VITE_FACE_OFFSET_Y || '0.0');
const cropScale = parseFloat(import.meta.env.VITE_CROP_SCALE || '2.5');
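// cropScale = crop width as a multiple of face width; cropRatio = width/height; the offsets shift the crop center as a fraction of crop size (negative Y moves it up)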
// Compute crop size and center
let cropWidth = faceWidth * cropScale;
let cropHeight = cropWidth / cropRatio;
let centerX = faceCenterX + cropWidth * offsetX;
let centerY = faceCenterY + cropHeight * offsetY;
// Clamp center to ensure crop fits
centerX = Math.max(cropWidth/2, Math.min(centerX, img.naturalWidth - cropWidth/2));
centerY = Math.max(cropHeight/2, Math.min(centerY, img.naturalHeight - cropHeight/2));
const cropX = Math.round(centerX - cropWidth/2);
const cropY = Math.round(centerY - cropHeight/2);
const crop = {
x: Math.max(0, Math.min(cropX, img.naturalWidth - cropWidth)),
y: Math.max(0, Math.min(cropY, img.naturalHeight - cropHeight)),
width: Math.round(Math.min(cropWidth, img.naturalWidth)),
height: Math.round(Math.min(cropHeight, img.naturalHeight))
};
photos[index].cropData = crop;
photos[index].faceDetectionStatus = 'completed';
} else {
photos[index].faceDetectionStatus = 'failed';
}
} catch {
photos[index].faceDetectionStatus = 'failed';
}
photos = [...photos];
}
async function retryPhoto(index: number) {
const photo = photos[index];
if (photo.retryCount >= 3) {
return; // Max retries reached
}
photo.retryCount++;
await loadPhoto(index, true);
}
function handleCropUpdate(index: number, cropData: { x: number; y: number; width: number; height: number }) {
photos[index].cropData = cropData;
photos = [...photos]; // Trigger reactivity
}
function handleFaceDetectionStarted(index: number) {
photos[index].faceDetectionStatus = 'processing';
faceDetectionCount.started++;
faceDetectionInProgress = true;
photos = [...photos]; // Trigger reactivity
console.log(`Face detection started for photo ${index + 1}, total started: ${faceDetectionCount.started}`);
}
function handleFaceDetectionCompleted(index: number, detail: { success: boolean; hasAutoDetectedCrop: boolean }) {
photos[index].faceDetectionStatus = detail.success ? 'completed' : 'failed';
faceDetectionCount.completed++;
console.log(`Face detection completed for photo ${index + 1}, total completed: ${faceDetectionCount.completed}`);
// Check if all face detections are complete
if (faceDetectionCount.completed >= faceDetectionCount.started) {
faceDetectionInProgress = false;
console.log('All face detections completed');
}
photos = [...photos]; // Trigger reactivity
}
function canProceed() {
const hasPhotos = photos.length > 0;
const allLoaded = photos.every(photo => photo.status === 'success');
const allCropped = photos.every(photo => photo.cropData);
const faceDetectionComplete = !faceDetectionInProgress;
return hasPhotos && allLoaded && allCropped;
}
// Cleanup object URLs when component is destroyed
function cleanupObjectUrls() {
photos.forEach(photo => {
@@ -182,7 +214,7 @@
}
});
}
// Cleanup on unmount or when photos change
$: {
// This will run when photos array changes
@@ -228,33 +260,10 @@
</div>
{/if}
</div>
{:else if faceDetectionInProgress}
<div class="bg-green-50 border border-green-200 rounded-lg p-4 mb-6">
<div class="flex items-center justify-between">
<div class="flex items-center">
<div class="w-5 h-5 border-2 border-green-600 border-t-transparent rounded-full animate-spin mr-3"></div>
<span class="text-sm text-green-800">
Detecting faces and auto-cropping...
</span>
</div>
<span class="text-sm text-green-600">
{faceDetectionCount.completed} / {faceDetectionCount.started}
</span>
</div>
{#if faceDetectionCount.started > 0}
<div class="mt-3 w-full bg-green-200 rounded-full h-2">
<div
class="bg-green-600 h-2 rounded-full transition-all duration-300"
style="width: {(faceDetectionCount.completed / faceDetectionCount.started) * 100}%"
></div>
</div>
{/if}
</div>
{/if}
<!-- Summary Stats -->
{#if !isProcessing && photos.length > 0}
<div class="bg-gray-50 border border-gray-200 rounded-lg p-4 mb-6">
<h3 class="text-sm font-medium text-gray-700 mb-3">Processing Summary</h3>
@@ -344,10 +353,9 @@
<PhotoCard
imageUrl={photo.objectUrl}
personName={photo.name}
isProcessing={photo.faceDetectionStatus === 'processing'}
cropData={photo.cropData}
on:cropUpdated={(e) => handleCropUpdate(index, e.detail)}
on:faceDetectionStarted={() => handleFaceDetectionStarted(index)}
on:faceDetectionCompleted={(e) => handleFaceDetectionCompleted(index, e.detail)}
/>
{:else if photo.status === 'error'}
<div class="border border-gray-200 rounded-lg overflow-hidden bg-white shadow-sm">
@@ -359,7 +367,7 @@
<span class="text-xs text-red-600 mb-2">Failed to load</span>
<button
class="text-xs text-blue-600 hover:text-blue-800 underline"
onclick={() => retryPhoto(index)}
disabled={photo.retryCount >= 3}
>
{photo.retryCount >= 3 ? 'Max retries' : 'Retry'}
@@ -381,14 +389,14 @@
<!-- Navigation -->
<div class="flex justify-between">
<button
onclick={() => currentStep.set(4)}
class="px-4 py-2 bg-gray-200 text-gray-700 rounded-lg font-medium hover:bg-gray-300"
>
← Back to Row Filter
</button>
<button
onclick={() => currentStep.set(5)}
disabled={!canProceed()}
class="px-4 py-2 bg-blue-600 text-white rounded-lg font-medium hover:bg-blue-700 disabled:bg-gray-400 disabled:cursor-not-allowed"
>