Compare commits

...

2 Commits

Author SHA1 Message Date
Roman Krček
8e41c6d78f Restructuring and navigator
All checks were successful
Build Docker image / build (push) Successful in 2m0s
Build Docker image / deploy (push) Successful in 3s
Build Docker image / verify (push) Successful in 27s
2025-07-18 13:59:28 +02:00
Roman Krček
1a8ce546d4 Dependency updates 2025-07-18 13:45:55 +02:00
11 changed files with 584 additions and 524 deletions

22
package-lock.json generated
View File

@@ -12,8 +12,8 @@
"@tensorflow/tfjs": "^4.22.0",
"@tensorflow/tfjs-backend-webgl": "^4.22.0",
"@types/gapi": "^0.0.47",
"@types/gapi.client.drive": "^3.0.15",
"@types/gapi.client.sheets": "^4.0.20201031",
"@types/gapi.client.drive-v3": "^0.0.5",
"@types/gapi.client.sheets-v4": "^0.0.4",
"@types/google.accounts": "^0.0.17",
"@types/uuid": "^10.0.0",
"fontkit": "^2.0.4",
@@ -1519,21 +1519,19 @@
"@maxim_mazurok/gapi.client.discovery-v1": "latest"
}
},
"node_modules/@types/gapi.client.drive": {
"version": "3.0.15",
"resolved": "https://registry.npmjs.org/@types/gapi.client.drive/-/gapi.client.drive-3.0.15.tgz",
"integrity": "sha512-qEfI0LxUBadOLmym4FkaNGpI4ibBCBPJHiUFWKIv0GIp7yKT2d+wztJYKr9giIRecErUCF+jGSDw1fzTZ6hPVQ==",
"deprecated": "use @types/gapi.client.drive-v3 instead; see https://github.com/Maxim-Mazurok/google-api-typings-generator/issues/652 for details",
"node_modules/@types/gapi.client.drive-v3": {
"version": "0.0.5",
"resolved": "https://registry.npmjs.org/@types/gapi.client.drive-v3/-/gapi.client.drive-v3-0.0.5.tgz",
"integrity": "sha512-yYBxiqMqJVBg4bns4Q28+f2XdJnd3tVA9dxQX1lXMVmzT2B+pZdyCi1u9HLwGveVlookSsAXuqfLfS9KO6MF6w==",
"license": "MIT",
"dependencies": {
"@maxim_mazurok/gapi.client.drive-v3": "latest"
}
},
"node_modules/@types/gapi.client.sheets": {
"version": "4.0.20201031",
"resolved": "https://registry.npmjs.org/@types/gapi.client.sheets/-/gapi.client.sheets-4.0.20201031.tgz",
"integrity": "sha512-1Aiu11rNNoyPDHW6v8TVcSmlDN+MkxSuafwiawaK5YqZ+uYA+O63vjUvkK+3qNduSLh7D9qBJc/8GGwgN6gsTw==",
"deprecated": "use @types/gapi.client.sheets-v4 instead; see https://github.com/Maxim-Mazurok/google-api-typings-generator/issues/652 for details",
"node_modules/@types/gapi.client.sheets-v4": {
"version": "0.0.4",
"resolved": "https://registry.npmjs.org/@types/gapi.client.sheets-v4/-/gapi.client.sheets-v4-0.0.4.tgz",
"integrity": "sha512-6kTJ7aDMAElfdQV1XzVJmZWjgbibpa84DMuKuaN8Cwqci/dkglPyHXKvsGrRugmuYvgFYr35AQqwz6j3q8R0dw==",
"license": "MIT",
"dependencies": {
"@maxim_mazurok/gapi.client.sheets-v4": "latest"

View File

@@ -33,8 +33,8 @@
"@tensorflow/tfjs": "^4.22.0",
"@tensorflow/tfjs-backend-webgl": "^4.22.0",
"@types/gapi": "^0.0.47",
"@types/gapi.client.drive": "^3.0.15",
"@types/gapi.client.sheets": "^4.0.20201031",
"@types/gapi.client.drive-v3": "^0.0.5",
"@types/gapi.client.sheets-v4": "^0.0.4",
"@types/google.accounts": "^0.0.17",
"@types/uuid": "^10.0.0",
"fontkit": "^2.0.4",

View File

@@ -2,6 +2,7 @@
import { selectedSheet, columnMapping, rawSheetData, currentStep } from '$lib/stores';
import { getSheetNames, getSheetData } from '$lib/google';
import { onMount } from 'svelte';
import Navigator from './subcomponents/Navigator.svelte';
// Type definitions for better TypeScript support
interface ColumnMappingType {
@@ -444,8 +445,6 @@
} catch (err) {
console.error('Failed to save column mapping to localStorage:', err);
}
currentStep.set(4); // Move to next step
}
async function handleShowEditor() {
@@ -754,20 +753,12 @@
{/if}
<!-- Navigation -->
<div class="flex justify-between">
<button
onclick={() => currentStep.set(2)}
class="rounded-lg bg-gray-200 px-4 py-2 font-medium text-gray-700 hover:bg-gray-300"
>
← Back to Sheet Selection
</button>
<button
onclick={handleContinue}
disabled={!mappingComplete}
class="rounded-lg bg-blue-600 px-4 py-2 font-medium text-white hover:bg-blue-700 disabled:cursor-not-allowed disabled:bg-gray-400"
>
{mappingComplete ? 'Continue →' : 'Select a column mapping'}
</button>
</div>
<Navigator
canProceed={mappingComplete}
{currentStep}
textBack="Back to Sheet Selection"
textForwardDisabled="Select a column mapping"
textForwardEnabled="Continue"
onForward={handleContinue}
/>
</div>

View File

@@ -1,506 +1,510 @@
<script lang="ts">
import { onMount } from 'svelte';
import { env } from '$env/dynamic/public';
import { columnMapping, filteredSheetData, currentStep, pictures, cropRects } from '$lib/stores';
import { downloadDriveImage, isGoogleDriveUrl, createImageObjectUrl } from '$lib/google';
import PhotoCard from '../PhotoCard.svelte';
import * as tf from '@tensorflow/tfjs';
import * as blazeface from '@tensorflow-models/blazeface';
import { onMount } from 'svelte';
import { env } from '$env/dynamic/public';
import { columnMapping, filteredSheetData, currentStep, pictures, cropRects } from '$lib/stores';
import { downloadDriveImage, isGoogleDriveUrl, createImageObjectUrl } from '$lib/google';
import Navigator from './subcomponents/Navigator.svelte';
import PhotoCard from './subcomponents/PhotoCard.svelte';
import * as tf from '@tensorflow/tfjs';
import * as blazeface from '@tensorflow-models/blazeface';
let photos = $state<PhotoInfo[]>([]);
let isProcessing = $state(false);
let processedCount = $state(0);
let totalCount = $state(0);
let detector: blazeface.BlazeFaceModel | undefined;
let detectorPromise: Promise<void> | undefined;
let photos = $state<PhotoInfo[]>([]);
let isProcessing = $state(false);
let processedCount = $state(0);
let totalCount = $state(0);
let detector: blazeface.BlazeFaceModel | undefined;
let detectorPromise: Promise<void> | undefined;
interface PhotoInfo {
name: string;
url: string;
status: 'loading' | 'success' | 'error';
objectUrl?: string;
retryCount: number;
cropData?: { x: number; y: number; width: number; height: number };
faceDetectionStatus?: 'pending' | 'processing' | 'completed' | 'failed' | 'manual';
}
interface PhotoInfo {
name: string;
url: string;
status: 'loading' | 'success' | 'error';
objectUrl?: string;
retryCount: number;
cropData?: { x: number; y: number; width: number; height: number };
faceDetectionStatus?: 'pending' | 'processing' | 'completed' | 'failed' | 'manual';
}
function initializeDetector() {
if (!detectorPromise) {
detectorPromise = (async () => {
console.log('Initializing face detector...');
await tf.setBackend('webgl');
await tf.ready();
detector = await blazeface.load();
console.log('BlazeFace model loaded');
})();
}
return detectorPromise;
}
function initializeDetector() {
if (!detectorPromise) {
detectorPromise = (async () => {
console.log('Initializing face detector...');
await tf.setBackend('webgl');
await tf.ready();
detector = await blazeface.load();
console.log('BlazeFace model loaded');
})();
}
return detectorPromise;
}
async function processPhotosInParallel() {
if (isProcessing) return;
async function processPhotosInParallel() {
if (isProcessing) return;
console.log('Starting processPhotos in parallel...');
isProcessing = true;
processedCount = 0;
console.log('Starting processPhotos in parallel...');
isProcessing = true;
processedCount = 0;
const validRows = $filteredSheetData.filter((row) => row._isValid);
const photoUrls = new Set<string>();
const photoMap = new Map<string, any[]>();
const validRows = $filteredSheetData.filter((row) => row._isValid);
const photoUrls = new Set<string>();
const photoMap = new Map<string, any[]>();
validRows.forEach((row: any) => {
const photoUrl = row.pictureUrl;
if (photoUrl && photoUrl.trim()) {
const trimmedUrl = photoUrl.trim();
photoUrls.add(trimmedUrl);
if (!photoMap.has(trimmedUrl)) {
photoMap.set(trimmedUrl, []);
}
photoMap.get(trimmedUrl)!.push(row);
}
});
validRows.forEach((row: any) => {
const photoUrl = row.pictureUrl;
if (photoUrl && photoUrl.trim()) {
const trimmedUrl = photoUrl.trim();
photoUrls.add(trimmedUrl);
if (!photoMap.has(trimmedUrl)) {
photoMap.set(trimmedUrl, []);
}
photoMap.get(trimmedUrl)!.push(row);
}
});
totalCount = photoUrls.size;
console.log(`Found ${totalCount} unique photo URLs`);
totalCount = photoUrls.size;
console.log(`Found ${totalCount} unique photo URLs`);
photos = Array.from(photoUrls).map((url) => ({
name: photoMap.get(url)![0].name + ' ' + photoMap.get(url)![0].surname,
url,
status: 'loading' as const,
retryCount: 0,
faceDetectionStatus: 'pending' as const
}));
photos = Array.from(photoUrls).map((url) => ({
name: photoMap.get(url)![0].name + ' ' + photoMap.get(url)![0].surname,
url,
status: 'loading' as const,
retryCount: 0,
faceDetectionStatus: 'pending' as const
}));
const concurrencyLimit = 5;
const promises = [];
const concurrencyLimit = 5;
const promises = [];
for (let i = 0; i < photos.length; i++) {
const promise = (async () => {
await loadPhoto(i);
processedCount++;
})();
promises.push(promise);
for (let i = 0; i < photos.length; i++) {
const promise = (async () => {
await loadPhoto(i);
processedCount++;
})();
promises.push(promise);
if (promises.length >= concurrencyLimit) {
await Promise.all(promises);
promises.length = 0;
}
}
if (promises.length >= concurrencyLimit) {
await Promise.all(promises);
promises.length = 0;
}
}
await Promise.all(promises);
await Promise.all(promises);
isProcessing = false;
console.log('All photos processed.');
}
isProcessing = false;
console.log('All photos processed.');
}
// Initialize detector and process photos
onMount(() => {
console.log('StepGallery mounted');
initializeDetector(); // Start loading model
if ($filteredSheetData.length > 0 && $columnMapping.pictureUrl !== undefined) {
console.log('Processing photos for gallery step');
processPhotosInParallel();
} else {
console.log('No data to process:', {
dataLength: $filteredSheetData.length,
pictureUrlMapping: $columnMapping.pictureUrl
});
}
});
// Initialize detector and process photos
onMount(() => {
console.log('StepGallery mounted');
initializeDetector(); // Start loading model
if ($filteredSheetData.length > 0 && $columnMapping.pictureUrl !== undefined) {
console.log('Processing photos for gallery step');
processPhotosInParallel();
} else {
console.log('No data to process:', {
dataLength: $filteredSheetData.length,
pictureUrlMapping: $columnMapping.pictureUrl
});
}
});
async function loadPhoto(index: number, isRetry = false) {
const photo = photos[index];
async function loadPhoto(index: number, isRetry = false) {
const photo = photos[index];
if (!isRetry) {
photo.status = 'loading';
// No need to reassign photos array with $state reactivity
}
if (!isRetry) {
photo.status = 'loading';
// No need to reassign photos array with $state reactivity
}
try {
let blob: Blob;
try {
let blob: Blob;
if (isGoogleDriveUrl(photo.url)) {
// Download from Google Drive
console.log(`Downloading from Google Drive: ${photo.name}`);
blob = await downloadDriveImage(photo.url);
} else {
// For direct URLs, convert to blob
const response = await fetch(photo.url);
blob = await response.blob();
}
if (isGoogleDriveUrl(photo.url)) {
// Download from Google Drive
console.log(`Downloading from Google Drive: ${photo.name}`);
blob = await downloadDriveImage(photo.url);
} else {
// For direct URLs, convert to blob
const response = await fetch(photo.url);
blob = await response.blob();
}
// Check for HEIC/HEIF format. If so, start conversion but don't block.
if (
// Check for HEIC/HEIF format. If so, start conversion but don't block.
if (
blob.type === 'image/heic' ||
blob.type === 'image/heif' ||
photo.url.toLowerCase().endsWith('.heic')
) {
console.log(`HEIC detected for ${photo.name}. Starting conversion in background.`);
photo.status = 'loading'; // Visually indicate something is happening
// Don't await this, let it run in the background
convertHeicPhoto(index, blob);
return; // End loadPhoto here for HEIC, conversion will handle the rest
photo.status = 'loading'; // Visually indicate something is happening
// Don't await this, let it run in the background
convertHeicPhoto(index, blob);
return; // End loadPhoto here for HEIC, conversion will handle the rest
}
// For non-HEIC images, proceed as normal
await processLoadedBlob(index, blob);
// For non-HEIC images, proceed as normal
await processLoadedBlob(index, blob);
} catch (error) {
console.error(`Failed to load photo for ${photo.name}:`, error);
photo.status = 'error';
}
}
} catch (error) {
console.error(`Failed to load photo for ${photo.name}:`, error);
photo.status = 'error';
}
}
async function convertHeicPhoto(index: number, blob: Blob) {
const photo = photos[index];
try {
console.log(`Converting HEIC with heic-convert for ${photo.name}...`);
async function convertHeicPhoto(index: number, blob: Blob) {
const photo = photos[index];
try {
console.log(`Converting HEIC with heic-convert for ${photo.name}...`);
// Dynamically import the browser-specific version of the library
const { default: convert } = await import('heic-convert/browser');
// Dynamically import the browser-specific version of the library
const { default: convert } = await import('heic-convert/browser');
const inputBuffer = await blob.arrayBuffer();
const outputBuffer = await convert({
buffer: new Uint8Array(inputBuffer), // heic-convert expects a Uint8Array
format: 'JPEG',
quality: 0.9
});
const inputBuffer = await blob.arrayBuffer();
const outputBuffer = await convert({
buffer: new Uint8Array(inputBuffer), // heic-convert expects a Uint8Array
format: 'JPEG',
quality: 0.9
});
const convertedBlob = new Blob([outputBuffer], { type: 'image/jpeg' });
const convertedBlob = new Blob([outputBuffer], { type: 'image/jpeg' });
console.log(`Successfully converted HEIC for ${photo.name}`);
// Now that it's converted, process it like any other image
await processLoadedBlob(index, convertedBlob);
console.log(`Successfully converted HEIC for ${photo.name}`);
} catch (e) {
console.error(`Failed to convert HEIC image for ${photo.name}:`, e);
photo.status = 'error';
}
}
// Now that it's converted, process it like any other image
await processLoadedBlob(index, convertedBlob);
} catch (e) {
console.error(`Failed to convert HEIC image for ${photo.name}:`, e);
photo.status = 'error';
}
}
async function processLoadedBlob(index: number, blob: Blob) {
const photo = photos[index];
try {
const objectUrl = createImageObjectUrl(blob);
async function processLoadedBlob(index: number, blob: Blob) {
const photo = photos[index];
try {
const objectUrl = createImageObjectUrl(blob);
// Test if image loads properly
await new Promise<void>((resolve, reject) => {
const img = new Image();
img.onload = () => resolve();
img.onerror = (error) => {
console.error(`Failed to load image for ${photo.name}:`, error);
reject(new Error('Failed to load image'));
};
img.src = objectUrl;
});
// Test if image loads properly
await new Promise<void>((resolve, reject) => {
const img = new Image();
img.onload = () => resolve();
img.onerror = (error) => {
console.error(`Failed to load image for ${photo.name}:`, error);
reject(new Error('Failed to load image'));
};
img.src = objectUrl;
});
photo.objectUrl = objectUrl;
photo.status = 'success';
console.log(`Photo loaded successfully: ${photo.name}`);
// Save to pictures store
pictures.update(pics => ({
...pics,
[photo.url]: {
id: photo.url,
blob: blob,
url: objectUrl,
downloaded: true,
faceDetected: false,
faceCount: 0
}
}));
// Automatically run face detection to generate crop
await detectFaceForPhoto(index);
} catch (error) {
console.error(`Failed to process blob for ${photo.name}:`, error);
photo.status = 'error';
}
}
photo.objectUrl = objectUrl;
photo.status = 'success';
console.log(`Photo loaded successfully: ${photo.name}`);
async function detectFaceForPhoto(index: number) {
try {
await initializeDetector(); // Ensure detector is loaded
if (!detector) {
photos[index].faceDetectionStatus = 'failed';
console.error('Face detector not available.');
return;
}
photos[index].faceDetectionStatus = 'processing';
const img = new Image();
img.crossOrigin = 'anonymous';
img.src = photos[index].objectUrl!;
await new Promise((r, e) => { img.onload = r; img.onerror = e; });
const predictions = await detector.estimateFaces(img, false);
// Save to pictures store
pictures.update((pics) => ({
...pics,
[photo.url]: {
id: photo.url,
blob: blob,
url: objectUrl,
downloaded: true,
faceDetected: false,
faceCount: 0
}
}));
if (predictions.length > 0) {
const getProbability = (p: number | tf.Tensor) => (typeof p === 'number' ? p : p.dataSync()[0]);
const face = predictions.sort((a,b) => getProbability(b.probability!) - getProbability(a.probability!))[0];
// Coordinates in displayed image space
let [x1,y1] = face.topLeft as [number, number];
let [x2,y2] = face.bottomRight as [number, number];
// Scale to natural image size
const scaleX = img.naturalWidth / img.width;
const scaleY = img.naturalHeight / img.height;
const faceWidth = (x2 - x1) * scaleX;
const faceHeight = (y2 - y1) * scaleY;
const faceCenterX = (x1 + (x2 - x1)/2) * scaleX;
const faceCenterY = (y1 + (y2 - y1) / 2) * scaleY;
// Load crop config from env
const cropRatio = parseFloat(env.PUBLIC_CROP_RATIO || '1.0');
const offsetX = parseFloat(env.PUBLIC_FACE_OFFSET_X || '0.0');
const offsetY = parseFloat(env.PUBLIC_FACE_OFFSET_Y || '0.0');
const cropScale = parseFloat(env.PUBLIC_CROP_SCALE || '2.5');
// Compute crop size and center
let cropWidth = faceWidth * cropScale;
let cropHeight = cropWidth / cropRatio;
// Automatically run face detection to generate crop
await detectFaceForPhoto(index);
} catch (error) {
console.error(`Failed to process blob for ${photo.name}:`, error);
photo.status = 'error';
}
}
// If crop is larger than image, scale it down while maintaining aspect ratio
if (cropWidth > img.naturalWidth || cropHeight > img.naturalHeight) {
const widthRatio = img.naturalWidth / cropWidth;
const heightRatio = img.naturalHeight / cropHeight;
const scale = Math.min(widthRatio, heightRatio);
cropWidth *= scale;
cropHeight *= scale;
}
async function detectFaceForPhoto(index: number) {
try {
await initializeDetector(); // Ensure detector is loaded
if (!detector) {
photos[index].faceDetectionStatus = 'failed';
console.error('Face detector not available.');
return;
}
let centerX = faceCenterX + cropWidth * offsetX;
let centerY = faceCenterY + cropHeight * offsetY;
// Clamp center to ensure crop fits
centerX = Math.max(cropWidth/2, Math.min(centerX, img.naturalWidth - cropWidth/2));
centerY = Math.max(cropHeight/2, Math.min(centerY, img.naturalHeight - cropHeight/2));
const cropX = centerX - cropWidth/2;
const cropY = centerY - cropHeight/2;
photos[index].faceDetectionStatus = 'processing';
const img = new Image();
img.crossOrigin = 'anonymous';
img.src = photos[index].objectUrl!;
await new Promise((r, e) => {
img.onload = r;
img.onerror = e;
});
const predictions = await detector.estimateFaces(img, false);
const crop = {
x: Math.round(Math.max(0, cropX)),
y: Math.round(Math.max(0, cropY)),
width: Math.round(cropWidth),
height: Math.round(cropHeight)
};
photos[index].cropData = crop;
photos[index].faceDetectionStatus = 'completed';
// Save crop data to store
cropRects.update(crops => ({
...crops,
[photos[index].url]: crop
}));
// Update pictures store with face detection info
pictures.update(pics => ({
...pics,
[photos[index].url]: {
...pics[photos[index].url],
faceDetected: true,
faceCount: predictions.length
}
}));
} else {
photos[index].faceDetectionStatus = 'failed';
}
} catch (error) {
console.error(`Face detection failed for ${photos[index].name}:`, error);
photos[index].faceDetectionStatus = 'failed';
}
// No need to reassign photos array with $state reactivity
}
if (predictions.length > 0) {
const getProbability = (p: number | tf.Tensor) =>
typeof p === 'number' ? p : p.dataSync()[0];
async function retryPhoto(index: number) {
const photo = photos[index];
const face = predictions.sort(
(a, b) => getProbability(b.probability!) - getProbability(a.probability!)
)[0];
// Coordinates in displayed image space
let [x1, y1] = face.topLeft as [number, number];
let [x2, y2] = face.bottomRight as [number, number];
// Scale to natural image size
const scaleX = img.naturalWidth / img.width;
const scaleY = img.naturalHeight / img.height;
const faceWidth = (x2 - x1) * scaleX;
const faceHeight = (y2 - y1) * scaleY;
const faceCenterX = (x1 + (x2 - x1) / 2) * scaleX;
const faceCenterY = (y1 + (y2 - y1) / 2) * scaleY;
// Load crop config from env
const cropRatio = parseFloat(env.PUBLIC_CROP_RATIO || '1.0');
const offsetX = parseFloat(env.PUBLIC_FACE_OFFSET_X || '0.0');
const offsetY = parseFloat(env.PUBLIC_FACE_OFFSET_Y || '0.0');
const cropScale = parseFloat(env.PUBLIC_CROP_SCALE || '2.5');
// Compute crop size and center
let cropWidth = faceWidth * cropScale;
let cropHeight = cropWidth / cropRatio;
if (photo.retryCount >= 3) {
return; // Max retries reached
}
// If crop is larger than image, scale it down while maintaining aspect ratio
if (cropWidth > img.naturalWidth || cropHeight > img.naturalHeight) {
const widthRatio = img.naturalWidth / cropWidth;
const heightRatio = img.naturalHeight / cropHeight;
const scale = Math.min(widthRatio, heightRatio);
cropWidth *= scale;
cropHeight *= scale;
}
photo.retryCount++;
await loadPhoto(index, true);
}
let centerX = faceCenterX + cropWidth * offsetX;
let centerY = faceCenterY + cropHeight * offsetY;
function handleCropUpdate(index: number, detail: { cropData: { x: number; y: number; width: number; height: number } }) {
photos[index].cropData = detail.cropData;
photos[index].faceDetectionStatus = 'manual';
// Save updated crop data to store
cropRects.update(crops => ({
...crops,
[photos[index].url]: detail.cropData
}));
// No need to reassign photos array with $state reactivity
}
// Clamp center to ensure crop fits
centerX = Math.max(cropWidth / 2, Math.min(centerX, img.naturalWidth - cropWidth / 2));
centerY = Math.max(cropHeight / 2, Math.min(centerY, img.naturalHeight - cropHeight / 2));
const canProceed = $derived(() => {
const hasPhotos = photos.length > 0;
const allLoaded = photos.every(photo => photo.status === 'success');
const allCropped = photos.every(photo => photo.cropData);
const cropX = centerX - cropWidth / 2;
const cropY = centerY - cropHeight / 2;
return hasPhotos && allLoaded && allCropped;
});
const crop = {
x: Math.round(Math.max(0, cropX)),
y: Math.round(Math.max(0, cropY)),
width: Math.round(cropWidth),
height: Math.round(cropHeight)
};
photos[index].cropData = crop;
photos[index].faceDetectionStatus = 'completed';
// Cleanup object URLs when component is destroyed
function cleanupObjectUrls() {
photos.forEach(photo => {
if (photo.objectUrl && photo.objectUrl.startsWith('blob:')) {
URL.revokeObjectURL(photo.objectUrl);
}
});
}
// Save crop data to store
cropRects.update((crops) => ({
...crops,
[photos[index].url]: crop
}));
// Cleanup on unmount using $effect
$effect(() => {
return () => {
cleanupObjectUrls();
};
});
// Update pictures store with face detection info
pictures.update((pics) => ({
...pics,
[photos[index].url]: {
...pics[photos[index].url],
faceDetected: true,
faceCount: predictions.length
}
}));
} else {
photos[index].faceDetectionStatus = 'failed';
}
} catch (error) {
console.error(`Face detection failed for ${photos[index].name}:`, error);
photos[index].faceDetectionStatus = 'failed';
}
// No need to reassign photos array with $state reactivity
}
async function retryPhoto(index: number) {
const photo = photos[index];
if (photo.retryCount >= 3) {
return; // Max retries reached
}
photo.retryCount++;
await loadPhoto(index, true);
}
function handleCropUpdate(
index: number,
detail: { cropData: { x: number; y: number; width: number; height: number } }
) {
photos[index].cropData = detail.cropData;
photos[index].faceDetectionStatus = 'manual';
// Save updated crop data to store
cropRects.update((crops) => ({
...crops,
[photos[index].url]: detail.cropData
}));
// No need to reassign photos array with $state reactivity
}
// Cleanup object URLs when component is destroyed
function cleanupObjectUrls() {
photos.forEach((photo) => {
if (photo.objectUrl && photo.objectUrl.startsWith('blob:')) {
URL.revokeObjectURL(photo.objectUrl);
}
});
}
const canProceed = $derived(() => {
const hasPhotos = photos.length > 0;
const allLoaded = photos.every((photo) => photo.status === 'success');
const allCropped = photos.every((photo) => photo.cropData);
return hasPhotos && allLoaded && allCropped;
});
// Cleanup on unmount using $effect
$effect(() => {
return () => {
cleanupObjectUrls();
};
});
</script>
<div class="p-6">
<div class="max-w-6xl mx-auto">
<div class="mb-6">
<h2 class="text-xl font-semibold text-gray-900 mb-2">
Review & Crop Photos
</h2>
<p class="text-sm text-gray-700 mb-4">
Photos are automatically cropped using face detection. Click the pen icon to manually adjust the crop area.
</p>
</div>
<div class="mb-6">
<h2 class="mb-2 text-xl font-semibold text-gray-900">Review & Crop Photos</h2>
<!-- Processing Status -->
{#if isProcessing}
<div class="bg-blue-50 border border-blue-200 rounded-lg p-4 mb-6">
<div class="flex items-center justify-between">
<div class="flex items-center">
<div class="w-5 h-5 border-2 border-blue-600 border-t-transparent rounded-full animate-spin mr-3"></div>
<span class="text-sm text-blue-800">
Processing photos...
</span>
</div>
<span class="text-sm text-blue-600">
{processedCount} / {totalCount}
</span>
</div>
{#if totalCount > 0}
<div class="mt-3 w-full bg-blue-200 rounded-full h-2">
<div
class="bg-blue-600 h-2 rounded-full transition-all duration-300"
style="width: {(processedCount / totalCount) * 100}%"
></div>
</div>
{/if}
</div>
{/if}
<p class="mb-4 text-sm text-gray-700">
Photos are automatically cropped using face detection. Click the pen icon to manually adjust
the crop area.
</p>
</div>
<!-- Summary Stats -->
{#if !isProcessing && photos.length > 0}
<div class="bg-gray-50 border border-gray-200 rounded-lg p-4 mb-6">
<h3 class="text-sm font-medium text-gray-700 mb-3">Processing Summary</h3>
<div class="grid grid-cols-2 md:grid-cols-5 gap-4 text-sm">
<div class="text-center">
<div class="text-2xl font-bold text-gray-900">{photos.length}</div>
<div class="text-gray-600">Total Photos</div>
</div>
<div class="text-center">
<div class="text-2xl font-bold text-green-600">
{photos.filter(p => p.status === 'success').length}
</div>
<div class="text-gray-600">Loaded</div>
</div>
<div class="text-center">
<div class="text-2xl font-bold text-blue-600">
{photos.filter(p => p.faceDetectionStatus === 'completed').length}
</div>
<div class="text-gray-600">Auto-cropped</div>
</div>
<div class="text-center">
<div class="text-2xl font-bold text-purple-600">
{photos.filter(p => p.cropData).length}
</div>
<div class="text-gray-600">Ready</div>
</div>
<div class="text-center">
<div class="text-2xl font-bold text-red-600">
{photos.filter(p => p.status === 'error').length}
</div>
<div class="text-gray-600">Failed</div>
</div>
</div>
<!-- Processing Status -->
{#if isProcessing}
<div class="mb-6 rounded-lg border border-blue-200 bg-blue-50 p-4">
<div class="flex items-center justify-between">
<div class="flex items-center">
<div
class="mr-3 h-5 w-5 animate-spin rounded-full border-2 border-blue-600 border-t-transparent"
></div>
<span class="text-sm text-blue-800"> Processing photos... </span>
</div>
<span class="text-sm text-blue-600">
{processedCount} / {totalCount}
</span>
</div>
{#if photos.filter(p => p.status === 'error').length > 0}
<div class="mt-4 p-3 bg-yellow-50 border border-yellow-200 rounded">
<p class="text-sm text-yellow-800">
<strong>Note:</strong> Cards will only be generated for photos that load successfully.
</p>
</div>
{/if}
{#if !canProceed() && photos.filter(p => p.status === 'success').length > 0}
<div class="mt-4 p-3 bg-blue-50 border border-blue-200 rounded">
<p class="text-sm text-blue-800">
<strong>Tip:</strong> All photos need to be cropped before proceeding. Face detection runs automatically.
</p>
</div>
{/if}
</div>
{/if}
{#if totalCount > 0}
<div class="mt-3 h-2 w-full rounded-full bg-blue-200">
<div
class="h-2 rounded-full bg-blue-600 transition-all duration-300"
style="width: {(processedCount / totalCount) * 100}%"
></div>
</div>
{/if}
</div>
{/if}
<!-- Photo Grid -->
<div class="bg-white border border-gray-200 rounded-lg overflow-hidden mb-6">
{#if photos.length === 0 && !isProcessing}
<div class="text-center py-12">
<svg class="mx-auto h-12 w-12 text-gray-400" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M4 16l4.586-4.586a2 2 0 012.828 0L16 16m-2-2l1.586-1.586a2 2 0 012.828 0L20 14m-6-6h.01M6 20h12a2 2 0 002-2V6a2 2 0 00-2-2H6a2 2 0 002 2z"/>
</svg>
<h3 class="mt-2 text-sm font-medium text-gray-900">No photos found</h3>
<p class="mt-1 text-sm text-gray-500">
Go back to check your column mapping and selected rows.
</p>
</div>
{:else}
<div class="p-6 grid grid-cols-1 md:grid-cols-2 lg:grid-cols-2 xl:grid-cols-3 gap-6">
{#each photos as photo, index}
<PhotoCard
{photo}
onCropUpdated={(e) => handleCropUpdate(index, e)}
onRetry={() => retryPhoto(index)}
/>
{/each}
</div>
{/if}
</div>
<!-- Summary Stats -->
{#if !isProcessing && photos.length > 0}
<div class="mb-6 rounded-lg border border-gray-200 bg-gray-50 p-4">
<h3 class="mb-3 text-sm font-medium text-gray-700">Processing Summary</h3>
<!-- Navigation -->
<div class="flex justify-between">
<button
onclick={() => currentStep.set(4)}
class="px-4 py-2 bg-gray-200 text-gray-700 rounded-lg font-medium hover:bg-gray-300"
>
← Back to Row Filter
</button>
<button
onclick={() => currentStep.set(6)}
disabled={!canProceed()}
class="px-4 py-2 bg-blue-600 text-white rounded-lg font-medium hover:bg-blue-700 disabled:bg-gray-400 disabled:cursor-not-allowed"
>
{canProceed()
? `Generate ${photos.filter(p => p.status === 'success' && p.cropData).length} Cards `
: 'Waiting for photos to load and crop'}
</button>
</div>
</div>
<div class="grid grid-cols-2 gap-4 text-sm md:grid-cols-5">
<div class="text-center">
<div class="text-2xl font-bold text-gray-900">{photos.length}</div>
<div class="text-gray-600">Total Photos</div>
</div>
<div class="text-center">
<div class="text-2xl font-bold text-green-600">
{photos.filter((p) => p.status === 'success').length}
</div>
<div class="text-gray-600">Loaded</div>
</div>
<div class="text-center">
<div class="text-2xl font-bold text-blue-600">
{photos.filter((p) => p.faceDetectionStatus === 'completed').length}
</div>
<div class="text-gray-600">Auto-cropped</div>
</div>
<div class="text-center">
<div class="text-2xl font-bold text-purple-600">
{photos.filter((p) => p.cropData).length}
</div>
<div class="text-gray-600">Ready</div>
</div>
<div class="text-center">
<div class="text-2xl font-bold text-red-600">
{photos.filter((p) => p.status === 'error').length}
</div>
<div class="text-gray-600">Failed</div>
</div>
</div>
{#if photos.filter((p) => p.status === 'error').length > 0}
<div class="mt-4 rounded border border-yellow-200 bg-yellow-50 p-3">
<p class="text-sm text-yellow-800">
<strong>Note:</strong> Cards will only be generated for photos that load successfully.
</p>
</div>
{/if}
{#if !canProceed() && photos.filter((p) => p.status === 'success').length > 0}
<div class="mt-4 rounded border border-blue-200 bg-blue-50 p-3">
<p class="text-sm text-blue-800">
<strong>Tip:</strong> All photos need to be cropped before proceeding. Face detection runs
automatically.
</p>
</div>
{/if}
</div>
{/if}
<!-- Photo Grid -->
<div class="mb-6 overflow-hidden rounded-lg bg-white">
{#if photos.length === 0 && !isProcessing}
<div class="py-12 text-center">
<svg
class="mx-auto h-12 w-12 text-gray-400"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
>
<path
stroke-linecap="round"
stroke-linejoin="round"
stroke-width="2"
d="M4 16l4.586-4.586a2 2 0 012.828 0L16 16m-2-2l1.586-1.586a2 2 0 012.828 0L20 14m-6-6h.01M6 20h12a2 2 0 002-2V6a2 2 0 00-2-2H6a2 2 0 002 2z"
/>
</svg>
<h3 class="mt-2 text-sm font-medium text-gray-900">No photos found</h3>
<p class="mt-1 text-sm text-gray-500">
Go back to check your column mapping and selected rows.
</p>
</div>
{:else}
<div class="grid grid-cols-1 gap-6 md:grid-cols-2 lg:grid-cols-2 xl:grid-cols-3">
{#each photos as photo, index}
<PhotoCard
{photo}
onCropUpdated={(e) => handleCropUpdate(index, e)}
onRetry={() => retryPhoto(index)}
/>
{/each}
</div>
{/if}
</div>
<!-- Navigation -->
<Navigator
canProceed={canProceed()}
{currentStep}
textBack="Back to Row Filter"
textForwardDisabled="Waiting from photos"
textForwardEnabled={`Generate ${photos.filter((p) => p.status === 'success' && p.cropData).length} Cards`}
/>
</div>

View File

@@ -0,0 +1,34 @@
<script lang="ts">
	/**
	 * Navigation bar for the photo-review step: back to the row filter
	 * (step 4) or forward to card generation (step 6). The forward button
	 * is disabled until `canProceed()` reports that every photo is ready.
	 */
	let { canProceed, photos, currentStep } = $props<{
		// Callback deciding whether the forward button is enabled.
		canProceed: () => boolean;
		// Photo entries; only `status` and `cropData` are read here.
		photos: Array<{ status: string; cropData?: unknown }>;
		// Wizard step store; only `.set` is used here
		// (presumably a Writable<number> from $lib/stores — TODO confirm).
		currentStep: { set: (step: number) => void };
	}>();

	// Return to the row-filter step.
	function handleBack() {
		currentStep.set(4);
	}

	// Advance to the card-generation step.
	function handleNext() {
		currentStep.set(6);
	}
</script>

<div class="flex justify-between">
	<button
		onclick={handleBack}
		class="px-4 py-2 bg-gray-200 text-gray-700 rounded-lg font-medium hover:bg-gray-300"
	>
		← Back to Row Filter
	</button>
	<button
		onclick={handleNext}
		disabled={!canProceed()}
		class="px-4 py-2 bg-blue-600 text-white rounded-lg font-medium hover:bg-blue-700 disabled:bg-gray-400 disabled:cursor-not-allowed"
	>
		{canProceed()
			? `Generate ${photos.filter(p => p.status === 'success' && p.cropData).length} Cards `
			: 'Waiting for photos to load and crop'}
	</button>
</div>

View File

@@ -7,7 +7,7 @@
currentStep,
sheetData
} from '$lib/stores';
import type { RowData } from '$lib/stores';
import Navigator from './subcomponents/Navigator.svelte';
import { onMount } from 'svelte';
import { getSheetNames, getSheetData } from '$lib/google';
@@ -213,9 +213,6 @@
// Store the filtered data
filteredSheetData.set(selectedData);
// Move to next step
currentStep.set(5);
}
$: selectedValidCount = Array.from(selectedRows).filter((rowIndex) => {
@@ -496,22 +493,13 @@
</div>
{/if}
<!-- Navigation -->
<div class="flex justify-between">
<button
onclick={() => currentStep.set(3)}
class="rounded-lg bg-gray-200 px-4 py-2 font-medium text-gray-700 hover:bg-gray-300"
>
← Back to Colum Selection
</button>
<button
onclick={handleContinue}
disabled={!canProceed}
class="rounded-lg bg-blue-600 px-4 py-2 font-medium text-white hover:bg-blue-700 disabled:cursor-not-allowed disabled:bg-gray-400"
>
{canProceed
? `Continue with ${selectedValidCount} ${selectedValidCount === 1 ? 'row' : 'rows'} `
: 'Select rows to continue'}
</button>
</div>
<!-- Navigation -->
<Navigator
canProceed={canProceed}
currentStep={currentStep}
	textBack="Back to Column Selection"
textForwardDisabled="Select rows to continue"
textForwardEnabled={`Continue with ${selectedValidCount} ${selectedValidCount === 1 ? 'row' : 'rows'} →`}
onForward={handleContinue}
/>
</div>

View File

@@ -2,6 +2,7 @@
import { availableSheets, selectedSheet, currentStep } from '$lib/stores';
import { searchSheets } from '$lib/google';
import { onMount } from 'svelte';
import Navigator from './subcomponents/Navigator.svelte';
let searchQuery = $state('');
let isLoading = $state(false);
@@ -66,11 +67,6 @@
}
let canProceed = $derived($selectedSheet !== null);
function handleContinue() {
if (!canProceed) return;
currentStep.set(3); // Move to the column mapping step
}
</script>
<div class="p-6">
@@ -262,20 +258,11 @@
{/if}
<!-- Navigation -->
<div class="flex justify-between">
<button
onclick={() => currentStep.set(1)}
class="rounded-lg bg-gray-200 px-4 py-2 font-medium text-gray-700 hover:bg-gray-300"
>
← Back to Auth
</button>
<button
onclick={handleContinue}
disabled={!canProceed}
class="rounded-lg bg-blue-600 px-4 py-2 font-medium text-white hover:bg-blue-700 disabled:cursor-not-allowed disabled:bg-gray-400"
>
{canProceed ? 'Continue →' : 'Select a sheet to continue'}
</button>
</div>
<Navigator
{canProceed}
{currentStep}
textBack="Back to Auth"
textForwardDisabled="Select a sheet"
textForwardEnabled="Continue"
/>
</div>

View File

@@ -0,0 +1,60 @@
<script lang="ts">
	import type { Writable } from 'svelte/store';

	/**
	 * Shared wizard navigation bar: a back button that moves one step back
	 * and a forward button that moves one step ahead. The forward button is
	 * disabled while `canProceed` is false. Optional `onBack`/`onForward`
	 * hooks are awaited before the step store is changed, allowing callers
	 * to run custom logic (e.g. persisting state) before navigating.
	 */
	let {
		canProceed,
		currentStep,
		textBack,
		textForwardDisabled,
		textForwardEnabled,
		onBack,
		onForward
	} = $props<{
		// Whether the forward button is enabled.
		canProceed: boolean;
		// Wizard step store; read via `$currentStep`, written via `.set`.
		currentStep: Writable<number>;
		textBack: string;
		textForwardDisabled: string;
		textForwardEnabled: string;
		// Optional hooks run (and awaited) before the step changes.
		onBack?: () => void | Promise<void>;
		onForward?: () => void | Promise<void>;
	}>();

	// Run the optional back hook, then step back.
	async function handleBack() {
		if (onBack) {
			await onBack();
		}
		currentStep.set($currentStep - 1);
	}

	// Run the optional forward hook, then step forward.
	async function handleForward() {
		if (onForward) {
			await onForward();
		}
		currentStep.set($currentStep + 1);
	}
</script>

<div class="flex flex-col gap-3 sm:flex-row sm:justify-between">
	<button
		onclick={handleBack}
		class="flex w-full items-center justify-center gap-2 rounded-lg bg-gray-200 px-4 py-2 font-medium text-gray-700 hover:bg-gray-300 sm:w-auto"
	>
		<svg class="h-4 w-4" fill="none" viewBox="0 0 24 24" stroke="currentColor">
			<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M15 19l-7-7 7-7" />
		</svg>
		<span>{textBack}</span>
	</button>
	<button
		onclick={handleForward}
		disabled={!canProceed}
		class="flex w-full items-center justify-center gap-2 rounded-lg bg-blue-600 px-4 py-2 font-medium text-white hover:bg-blue-700 disabled:cursor-not-allowed disabled:bg-gray-400 sm:w-auto"
	>
		<span>{canProceed ? textForwardEnabled : textForwardDisabled}</span>
		<svg class="h-4 w-4" fill="none" viewBox="0 0 24 24" stroke="currentColor">
			<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 5l7 7-7 7" />
		</svg>
	</button>
</div>

View File

@@ -13,8 +13,6 @@ export function initGoogleClient(callback: () => void) {
script.onload = () => {
gapi.load('client', async () => {
await gapi.client.init({
// NOTE: API KEY IS NOT REQUIRED FOR THIS IMPLEMENTATION
// apiKey: 'YOUR_API_KEY',
discoveryDocs: [
'https://www.googleapis.com/discovery/v1/apis/drive/v3/rest',
'https://www.googleapis.com/discovery/v1/apis/sheets/v4/rest',