Supabase local development setup

Roman Krček
2025-07-01 16:55:51 +02:00
parent 095936dcfd
commit 3d58500997
6 changed files with 1307 additions and 7 deletions

package-lock.json (generated, 122 lines changed)

@@ -26,6 +26,7 @@
"prettier": "^3.4.2",
"prettier-plugin-svelte": "^3.3.3",
"prettier-plugin-tailwindcss": "^0.6.11",
"supabase": "^2.30.4",
"svelte": "^5.0.0",
"svelte-check": "^4.0.0",
"tailwindcss": "^4.0.0",
@@ -1515,6 +1516,23 @@
"node": "*"
}
},
"node_modules/bin-links": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/bin-links/-/bin-links-5.0.0.tgz",
"integrity": "sha512-sdleLVfCjBtgO5cNjA2HVRvWBJAHs4zwenaCPMNJAJU0yNxpzj80IpjOIimkpkr+mhlA+how5poQtt53PygbHA==",
"dev": true,
"license": "ISC",
"dependencies": {
"cmd-shim": "^7.0.0",
"npm-normalize-package-bin": "^4.0.0",
"proc-log": "^5.0.0",
"read-cmd-shim": "^5.0.0",
"write-file-atomic": "^6.0.0"
},
"engines": {
"node": "^18.17.0 || >=20.5.0"
}
},
"node_modules/buffer-equal-constant-time": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz",
@@ -1605,6 +1623,16 @@
"node": ">=6"
}
},
"node_modules/cmd-shim": {
"version": "7.0.0",
"resolved": "https://registry.npmjs.org/cmd-shim/-/cmd-shim-7.0.0.tgz",
"integrity": "sha512-rtpaCbr164TPPh+zFdkWpCyZuKkjpAzODfaZCf/SVJZzJN+4bHQb/LP3Jzq5/+84um3XXY8r548XiWKSborwVw==",
"dev": true,
"license": "ISC",
"engines": {
"node": "^18.17.0 || >=20.5.0"
}
},
"node_modules/color-convert": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
@@ -2149,6 +2177,16 @@
"node": ">= 14"
}
},
"node_modules/imurmurhash": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
"integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
"dev": true,
"license": "MIT",
"engines": {
"node": ">=0.8.19"
}
},
"node_modules/is-core-module": {
"version": "2.16.1",
"resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz",
@@ -2643,6 +2681,16 @@
"url": "https://opencollective.com/node-fetch"
}
},
"node_modules/npm-normalize-package-bin": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-4.0.0.tgz",
"integrity": "sha512-TZKxPvItzai9kN9H/TkmCtx/ZN/hvr3vUycjlfmH0ootY9yFBzNOpiXAdIn1Iteqsvk4lQn6B5PTrt+n6h8k/w==",
"dev": true,
"license": "ISC",
"engines": {
"node": "^18.17.0 || >=20.5.0"
}
},
"node_modules/object-inspect": {
"version": "1.13.4",
"resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz",
@@ -2888,6 +2936,16 @@
}
}
},
"node_modules/proc-log": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/proc-log/-/proc-log-5.0.0.tgz",
"integrity": "sha512-Azwzvl90HaF0aCz1JrDdXQykFakSSNPaPoiZ9fm5qJIMHioDZEi7OAdRwSm6rSoPtY3Qutnm3L7ogmg3dc+wbQ==",
"dev": true,
"license": "ISC",
"engines": {
"node": "^18.17.0 || >=20.5.0"
}
},
"node_modules/qrcode": {
"version": "1.5.4",
"resolved": "https://registry.npmjs.org/qrcode/-/qrcode-1.5.4.tgz",
@@ -2932,6 +2990,16 @@
"quoted-printable": "bin/quoted-printable"
}
},
"node_modules/read-cmd-shim": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/read-cmd-shim/-/read-cmd-shim-5.0.0.tgz",
"integrity": "sha512-SEbJV7tohp3DAAILbEMPXavBjAnMN0tVnh4+9G8ihV4Pq3HYF9h8QNez9zkJ1ILkv9G2BjdzwctznGZXgu/HGw==",
"dev": true,
"license": "ISC",
"engines": {
"node": "^18.17.0 || >=20.5.0"
}
},
"node_modules/readdirp": {
"version": "4.1.2",
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz",
@@ -3136,6 +3204,19 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/signal-exit": {
"version": "4.1.0",
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz",
"integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==",
"dev": true,
"license": "ISC",
"engines": {
"node": ">=14"
},
"funding": {
"url": "https://github.com/sponsors/isaacs"
}
},
"node_modules/simple-icons": {
"version": "15.3.0",
"resolved": "https://registry.npmjs.org/simple-icons/-/simple-icons-15.3.0.tgz",
@@ -3204,6 +3285,26 @@
"node": ">=8"
}
},
"node_modules/supabase": {
"version": "2.30.4",
"resolved": "https://registry.npmjs.org/supabase/-/supabase-2.30.4.tgz",
"integrity": "sha512-AOCyd2vmBBMTXbnahiCU0reRNxKS4n5CrPciUF2tcTrQ8dLzl1HwcLfe5DrG8E0QRcKHPDdzprmh/2+y4Ta5MA==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"dependencies": {
"bin-links": "^5.0.0",
"https-proxy-agent": "^7.0.2",
"node-fetch": "^3.3.2",
"tar": "7.4.3"
},
"bin": {
"supabase": "bin/supabase"
},
"engines": {
"npm": ">=8"
}
},
"node_modules/supports-preserve-symlinks-flag": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
@@ -3265,13 +3366,6 @@
"typescript": ">=5.0.0"
}
},
"node_modules/svelte-kit": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/svelte-kit/-/svelte-kit-1.2.0.tgz",
"integrity": "sha512-RRaOHBhpDv4g2v9tcq8iNw055Pt0MlLps6JVA7/40f4KAbtztXSI4T6MZYbHRirO708urfAAMx6Qow+tQfCHug==",
"hasInstallScript": true,
"license": "MIT"
},
"node_modules/tailwindcss": {
"version": "4.1.7",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.7.tgz",
@@ -3551,6 +3645,20 @@
"node": ">=8"
}
},
"node_modules/write-file-atomic": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-6.0.0.tgz",
"integrity": "sha512-GmqrO8WJ1NuzJ2DrziEI2o57jKAVIQNf8a18W3nCYU3H7PNWqCCVTeH6/NQE93CIllIgQS98rrmVkYgTX9fFJQ==",
"dev": true,
"license": "ISC",
"dependencies": {
"imurmurhash": "^0.1.4",
"signal-exit": "^4.0.1"
},
"engines": {
"node": "^18.17.0 || >=20.5.0"
}
},
"node_modules/ws": {
"version": "8.18.2",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.18.2.tgz",

package.json

@@ -22,6 +22,7 @@
"prettier": "^3.4.2",
"prettier-plugin-svelte": "^3.3.3",
"prettier-plugin-tailwindcss": "^0.6.11",
"supabase": "^2.30.4",
"svelte": "^5.0.0",
"svelte-check": "^4.0.0",
"tailwindcss": "^4.0.0",

supabase/.gitignore (new file, 8 lines)

@@ -0,0 +1,8 @@
# Supabase
.branches
.temp

# dotenvx
.env.keys
.env.local
.env.*.local

supabase/config.toml (new file, 322 lines)

@@ -0,0 +1,322 @@
# For detailed configuration reference documentation, visit:
# https://supabase.com/docs/guides/local-development/cli/config
# A string used to distinguish different Supabase projects on the same host. Defaults to the
# working directory name when running `supabase init`.
project_id = "scan-wave"
[api]
enabled = true
# Port to use for the API URL.
port = 54321
# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
# endpoints. `public` and `graphql_public` schemas are included by default.
schemas = ["public", "graphql_public"]
# Extra schemas to add to the search_path of every request.
extra_search_path = ["public", "extensions"]
# The maximum number of rows returned from a view, table, or stored procedure. Limits payload size
# for accidental or malicious requests.
max_rows = 1000
[api.tls]
# Enable HTTPS endpoints locally using a self-signed certificate.
enabled = false
[db]
# Port to use for the local database URL.
port = 54322
# Port used by db diff command to initialize the shadow database.
shadow_port = 54320
# The database major version to use. This has to be the same as your remote database's. Run `SHOW
# server_version;` on the remote database to check.
major_version = 17
[db.pooler]
enabled = false
# Port to use for the local connection pooler.
port = 54329
# Specifies when a server connection can be reused by other clients.
# Configure one of the supported pooler modes: `transaction`, `session`.
pool_mode = "transaction"
# How many server connections to allow per user/database pair.
default_pool_size = 20
# Maximum number of client connections allowed.
max_client_conn = 100
# [db.vault]
# secret_key = "env(SECRET_VALUE)"
[db.migrations]
# If disabled, migrations will be skipped during a db push or reset.
enabled = true
# Specifies an ordered list of schema files that describe your database.
# Supports glob patterns relative to supabase directory: "./schemas/*.sql"
schema_paths = []
[db.seed]
# If enabled, seeds the database after migrations during a db reset.
enabled = true
# Specifies an ordered list of seed files to load during db reset.
# Supports glob patterns relative to supabase directory: "./seeds/*.sql"
sql_paths = ["./seed.sql"]
[realtime]
enabled = true
# Bind realtime via either IPv4 or IPv6. (default: IPv4)
# ip_version = "IPv6"
# The maximum length in bytes of HTTP request headers. (default: 4096)
# max_header_length = 4096
[studio]
enabled = true
# Port to use for Supabase Studio.
port = 54323
# External URL of the API server that the frontend connects to.
api_url = "http://127.0.0.1"
# OpenAI API Key to use for Supabase AI in the Supabase Studio.
openai_api_key = "env(OPENAI_API_KEY)"
# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
# are monitored, and you can view the emails that would have been sent from the web interface.
[inbucket]
enabled = true
# Port to use for the email testing server web interface.
port = 54324
# Uncomment to expose additional ports for testing user applications that send emails.
# smtp_port = 54325
# pop3_port = 54326
# admin_email = "admin@email.com"
# sender_name = "Admin"
[storage]
enabled = true
# The maximum file size allowed (e.g. "5MB", "500KB").
file_size_limit = "50MiB"
# Image transformation API is available on the Supabase Pro plan.
# [storage.image_transformation]
# enabled = true
# Uncomment to configure local storage buckets
# [storage.buckets.images]
# public = false
# file_size_limit = "50MiB"
# allowed_mime_types = ["image/png", "image/jpeg"]
# objects_path = "./images"
[auth]
enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
# in emails.
site_url = "http://127.0.0.1:3000"
# A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
additional_redirect_urls = ["https://127.0.0.1:3000"]
# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
jwt_expiry = 3600
# If disabled, the refresh token will never expire.
enable_refresh_token_rotation = true
# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds.
# Requires enable_refresh_token_rotation = true.
refresh_token_reuse_interval = 10
# Allow/disallow new user signups to your project.
enable_signup = true
# Allow/disallow anonymous sign-ins to your project.
enable_anonymous_sign_ins = false
# Allow/disallow testing manual linking of accounts
enable_manual_linking = false
# Passwords shorter than this value will be rejected as weak. Minimum 6, recommended 8 or more.
minimum_password_length = 6
# Passwords that do not meet the following requirements will be rejected as weak. Supported values
# are: `letters_digits`, `lower_upper_letters_digits`, `lower_upper_letters_digits_symbols`
password_requirements = ""
[auth.rate_limit]
# Number of emails that can be sent per hour. Requires auth.email.smtp to be enabled.
email_sent = 2
# Number of SMS messages that can be sent per hour. Requires auth.sms to be enabled.
sms_sent = 30
# Number of anonymous sign-ins that can be made per hour per IP address. Requires enable_anonymous_sign_ins = true.
anonymous_users = 30
# Number of sessions that can be refreshed in a 5 minute interval per IP address.
token_refresh = 150
# Number of sign up and sign-in requests that can be made in a 5 minute interval per IP address (excludes anonymous users).
sign_in_sign_ups = 30
# Number of OTP / Magic link verifications that can be made in a 5 minute interval per IP address.
token_verifications = 30
# Number of Web3 logins that can be made in a 5 minute interval per IP address.
web3 = 30
# Configure one of the supported captcha providers: `hcaptcha`, `turnstile`.
# [auth.captcha]
# enabled = true
# provider = "hcaptcha"
# secret = ""
[auth.email]
# Allow/disallow new user signups via email to your project.
enable_signup = true
# If enabled, a user will be required to confirm any email change on both the old, and new email
# addresses. If disabled, only the new email is required to confirm.
double_confirm_changes = true
# If enabled, users need to confirm their email address before signing in.
enable_confirmations = false
# If enabled, users will need to reauthenticate or have logged in recently to change their password.
secure_password_change = false
# Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email.
max_frequency = "1s"
# Number of characters used in the email OTP.
otp_length = 6
# Number of seconds before the email OTP expires (defaults to 1 hour).
otp_expiry = 3600
# Use a production-ready SMTP server
# [auth.email.smtp]
# enabled = true
# host = "smtp.sendgrid.net"
# port = 587
# user = "apikey"
# pass = "env(SENDGRID_API_KEY)"
# admin_email = "admin@email.com"
# sender_name = "Admin"
# Uncomment to customize email template
# [auth.email.template.invite]
# subject = "You have been invited"
# content_path = "./supabase/templates/invite.html"
[auth.sms]
# Allow/disallow new user signups via SMS to your project.
enable_signup = false
# If enabled, users need to confirm their phone number before signing in.
enable_confirmations = false
# Template for sending OTP to users
template = "Your code is {{ .Code }}"
# Controls the minimum amount of time that must pass before sending another sms otp.
max_frequency = "5s"
# Use pre-defined map of phone number to OTP for testing.
# [auth.sms.test_otp]
# 4152127777 = "123456"
# Configure logged in session timeouts.
# [auth.sessions]
# Force log out after the specified duration.
# timebox = "24h"
# Force log out if the user has been inactive longer than the specified duration.
# inactivity_timeout = "8h"
# This hook runs before a new user is created and allows developers to reject the request based on the incoming user object.
# [auth.hook.before_user_created]
# enabled = true
# uri = "pg-functions://postgres/auth/before-user-created-hook"
# This hook runs before a token is issued and allows you to add additional claims based on the authentication method used.
# [auth.hook.custom_access_token]
# enabled = true
# uri = "pg-functions://<database>/<schema>/<hook_name>"
# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`.
[auth.sms.twilio]
enabled = false
account_sid = ""
message_service_sid = ""
# DO NOT commit your Twilio auth token to git. Use environment variable substitution instead:
auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)"
# Multi-factor authentication is available on the Supabase Pro plan.
[auth.mfa]
# Control how many MFA factors can be enrolled at once per user.
max_enrolled_factors = 10
# Control MFA via App Authenticator (TOTP)
[auth.mfa.totp]
enroll_enabled = false
verify_enabled = false
# Configure MFA via Phone Messaging
[auth.mfa.phone]
enroll_enabled = false
verify_enabled = false
otp_length = 6
template = "Your code is {{ .Code }}"
max_frequency = "5s"
# Configure MFA via WebAuthn
# [auth.mfa.web_authn]
# enroll_enabled = true
# verify_enabled = true
# Use an external OAuth provider. The full list of providers is: `apple`, `azure`, `bitbucket`,
# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`,
# `twitter`, `slack`, `spotify`, `workos`, `zoom`.
[auth.external.apple]
enabled = false
client_id = ""
# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead:
secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)"
# Overrides the default auth redirectUrl.
redirect_uri = ""
# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure,
# or any other third-party OIDC providers.
url = ""
# If enabled, the nonce check will be skipped. Required for local sign in with Google auth.
skip_nonce_check = false
# Allow Solana wallet holders to sign in to your project via the Sign in with Solana (SIWS, EIP-4361) standard.
# You can configure "web3" rate limit in the [auth.rate_limit] section and set up [auth.captcha] if self-hosting.
[auth.web3.solana]
enabled = false
# Use Firebase Auth as a third-party provider alongside Supabase Auth.
[auth.third_party.firebase]
enabled = false
# project_id = "my-firebase-project"
# Use Auth0 as a third-party provider alongside Supabase Auth.
[auth.third_party.auth0]
enabled = false
# tenant = "my-auth0-tenant"
# tenant_region = "us"
# Use AWS Cognito (Amplify) as a third-party provider alongside Supabase Auth.
[auth.third_party.aws_cognito]
enabled = false
# user_pool_id = "my-user-pool-id"
# user_pool_region = "us-east-1"
# Use Clerk as a third-party provider alongside Supabase Auth.
[auth.third_party.clerk]
enabled = false
# Obtain from https://clerk.com/setup/supabase
# domain = "example.clerk.accounts.dev"
[edge_runtime]
enabled = true
# Configure one of the supported request policies: `oneshot`, `per_worker`.
# Use `oneshot` for hot reload, or `per_worker` for load testing.
policy = "oneshot"
# Port to attach the Chrome inspector for debugging edge functions.
inspector_port = 8083
# The Deno major version to use.
deno_version = 1
# [edge_runtime.secrets]
# secret_key = "env(SECRET_VALUE)"
[analytics]
enabled = true
port = 54327
# Configure one of the supported backends: `postgres`, `bigquery`.
backend = "postgres"
# Experimental features may be deprecated any time
[experimental]
# Configures Postgres storage engine to use OrioleDB (S3)
orioledb_version = ""
# Configures S3 bucket URL, e.g. <bucket_name>.s3-<region>.amazonaws.com
s3_host = "env(S3_HOST)"
# Configures S3 bucket region, e.g. us-east-1
s3_region = "env(S3_REGION)"
# Configures AWS_ACCESS_KEY_ID for S3 bucket
s3_access_key = "env(S3_ACCESS_KEY)"
# Configures AWS_SECRET_ACCESS_KEY for S3 bucket
s3_secret_key = "env(S3_SECRET_KEY)"


@@ -0,0 +1,827 @@
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET xmloption = content;
SET client_min_messages = warning;
SET row_security = off;
CREATE EXTENSION IF NOT EXISTS "pg_cron" WITH SCHEMA "pg_catalog";
COMMENT ON SCHEMA "public" IS 'standard public schema';
CREATE EXTENSION IF NOT EXISTS "pg_graphql" WITH SCHEMA "graphql";
CREATE EXTENSION IF NOT EXISTS "pg_stat_statements" WITH SCHEMA "extensions";
CREATE EXTENSION IF NOT EXISTS "pgcrypto" WITH SCHEMA "extensions";
CREATE EXTENSION IF NOT EXISTS "pgjwt" WITH SCHEMA "extensions";
CREATE EXTENSION IF NOT EXISTS "supabase_vault" WITH SCHEMA "vault";
CREATE EXTENSION IF NOT EXISTS "uuid-ossp" WITH SCHEMA "extensions";
CREATE TYPE "public"."section_posititon" AS ENUM (
'events_manager',
'member'
);
ALTER TYPE "public"."section_posititon" OWNER TO "postgres";
CREATE OR REPLACE FUNCTION "public"."archive_event"("_event_id" "uuid") RETURNS "void"
LANGUAGE "plpgsql" SECURITY DEFINER
AS $$
DECLARE
v_total bigint;
v_scanned bigint;
v_evt public.events%ROWTYPE;
BEGIN
-------------------------------------------------------------------------
-- A. Fetch the event
-------------------------------------------------------------------------
SELECT * INTO v_evt
FROM public.events
WHERE id = _event_id;
IF NOT FOUND THEN
RAISE EXCEPTION 'archive_event(): event % does not exist', _event_id;
END IF;
-------------------------------------------------------------------------
-- B. Count participants
-------------------------------------------------------------------------
SELECT COUNT(*) AS total,
COUNT(*) FILTER (WHERE scanned) AS scanned
INTO v_total, v_scanned
FROM public.participants
WHERE event = _event_id;
-------------------------------------------------------------------------
-- C. Upsert into events_archived (now with section_id)
-------------------------------------------------------------------------
INSERT INTO public.events_archived (
id, created_at, date, name,
section_id, total_participants, scanned_participants )
VALUES ( v_evt.id, clock_timestamp(), v_evt.date, v_evt.name,
v_evt.section_id, v_total, v_scanned )
ON CONFLICT (id) DO UPDATE
SET created_at = EXCLUDED.created_at,
date = EXCLUDED.date,
name = EXCLUDED.name,
section_id = EXCLUDED.section_id,
total_participants = EXCLUDED.total_participants,
scanned_participants= EXCLUDED.scanned_participants;
-------------------------------------------------------------------------
-- D. Delete original event row (participants cascade away)
-------------------------------------------------------------------------
DELETE FROM public.events
WHERE id = _event_id;
END;
$$;
ALTER FUNCTION "public"."archive_event"("_event_id" "uuid") OWNER TO "postgres";
CREATE OR REPLACE FUNCTION "public"."auto_archive_events"("_age_days" integer DEFAULT 7) RETURNS integer
LANGUAGE "plpgsql" SECURITY DEFINER
AS $$
DECLARE
v_cnt int := 0;
v_event_id uuid;
BEGIN
FOR v_event_id IN
SELECT id
FROM public.events
WHERE date IS NOT NULL
AND date <= CURRENT_DATE - _age_days
LOOP
BEGIN
PERFORM public.archive_event(v_event_id);
v_cnt := v_cnt + 1;
EXCEPTION
WHEN others THEN
-- Optionally record the failure somewhere and continue
RAISE WARNING 'Failed to archive event %, %', v_event_id, SQLERRM;
END;
END LOOP;
RETURN v_cnt;
END;
$$;
ALTER FUNCTION "public"."auto_archive_events"("_age_days" integer) OWNER TO "postgres";
SET default_tablespace = '';
SET default_table_access_method = "heap";
CREATE TABLE IF NOT EXISTS "public"."events" (
"id" "uuid" DEFAULT "gen_random_uuid"() NOT NULL,
"created_at" timestamp with time zone DEFAULT "now"() NOT NULL,
"created_by" "uuid" DEFAULT "auth"."uid"(),
"name" "text",
"date" "date",
"section_id" "uuid"
);
ALTER TABLE "public"."events" OWNER TO "postgres";
COMMENT ON TABLE "public"."events" IS 'Table of all events created';
CREATE OR REPLACE FUNCTION "public"."create_event"("p_name" "text", "p_date" "date") RETURNS "public"."events"
LANGUAGE "plpgsql" SECURITY DEFINER
SET "search_path" TO 'public'
AS $$
declare
v_user uuid := auth.uid(); -- current user
v_section uuid; -- their section_id
v_evt public.events%rowtype; -- the inserted event
begin
-- 1) lookup the user's section
select section_id
into v_section
from public.profiles
where id = v_user;
if v_section is null then
raise exception 'no profile/section found for user %', v_user;
end if;
-- 2) insert into events, filling created_by and section_id
insert into public.events (
name,
date,
created_by,
section_id
)
values (
p_name,
p_date,
v_user,
v_section
)
returning * into v_evt;
-- 3) return the full row
return v_evt;
end;
$$;
ALTER FUNCTION "public"."create_event"("p_name" "text", "p_date" "date") OWNER TO "postgres";
CREATE TABLE IF NOT EXISTS "public"."participants" (
"id" "uuid" DEFAULT "gen_random_uuid"() NOT NULL,
"created_at" timestamp with time zone DEFAULT "now"() NOT NULL,
"created_by" "uuid" DEFAULT "auth"."uid"(),
"event" "uuid",
"name" "text",
"surname" "text",
"email" "text",
"scanned" boolean DEFAULT false,
"scanned_at" timestamp with time zone,
"scanned_by" "uuid",
"section_id" "uuid"
);
ALTER TABLE "public"."participants" OWNER TO "postgres";
COMMENT ON TABLE "public"."participants" IS 'Table of all qrcodes issued';
CREATE OR REPLACE FUNCTION "public"."create_qrcodes_bulk"("p_section_id" "uuid", "p_event_id" "uuid", "p_names" "text"[], "p_surnames" "text"[], "p_emails" "text"[]) RETURNS SETOF "public"."participants"
LANGUAGE "plpgsql" SECURITY DEFINER
SET "search_path" TO 'public', 'pg_temp'
AS $$BEGIN
-----------------------------------------------------------------
-- 1) keep the array-length check exactly as before
-----------------------------------------------------------------
IF array_length(p_names, 1) IS DISTINCT FROM
array_length(p_surnames,1) OR
array_length(p_names, 1) IS DISTINCT FROM
array_length(p_emails, 1) THEN
RAISE EXCEPTION
'Names, surnames and emails arrays must all be the same length';
END IF;
RETURN QUERY
INSERT INTO public.participants (section_id, event, name, surname, email)
SELECT p_section_id,
p_event_id,
n, s, e
FROM unnest(p_names, p_surnames, p_emails) AS u(n, s, e)
RETURNING *;
END;$$;
ALTER FUNCTION "public"."create_qrcodes_bulk"("p_section_id" "uuid", "p_event_id" "uuid", "p_names" "text"[], "p_surnames" "text"[], "p_emails" "text"[]) OWNER TO "postgres";
CREATE OR REPLACE FUNCTION "public"."handle_new_user"() RETURNS "trigger"
LANGUAGE "plpgsql" SECURITY DEFINER
SET "search_path" TO 'public', 'auth'
AS $$begin
insert into public.profiles(id, display_name, created_at, updated_at)
values (new.id,
coalesce(new.raw_user_meta_data ->> 'display_name', -- meta-data name if present
split_part(new.email, '@', 1)), -- fallback: part of the email
now(), now());
return new;
end;$$;
ALTER FUNCTION "public"."handle_new_user"() OWNER TO "postgres";
CREATE OR REPLACE FUNCTION "public"."scan_ticket"("_ticket_id" "uuid") RETURNS "void"
LANGUAGE "plpgsql" SECURITY DEFINER
SET "search_path" TO 'public'
AS $$BEGIN
UPDATE participants
SET scanned = true,
scanned_at = NOW(),
scanned_by = auth.uid()
WHERE id = _ticket_id;
-- optionally, make sure exactly one row was updated
IF NOT FOUND THEN
RAISE EXCEPTION 'Ticket % not found or already scanned', _ticket_id;
END IF;
END;$$;
ALTER FUNCTION "public"."scan_ticket"("_ticket_id" "uuid") OWNER TO "postgres";
CREATE TABLE IF NOT EXISTS "public"."events_archived" (
"id" "uuid" DEFAULT "gen_random_uuid"() NOT NULL,
"created_at" timestamp with time zone DEFAULT "now"() NOT NULL,
"date" "date",
"name" "text" NOT NULL,
"total_participants" numeric,
"scanned_participants" numeric,
"section_id" "uuid"
);
ALTER TABLE "public"."events_archived" OWNER TO "postgres";
CREATE TABLE IF NOT EXISTS "public"."profiles" (
"id" "uuid" NOT NULL,
"display_name" "text",
"created_at" timestamp with time zone DEFAULT "now"(),
"updated_at" timestamp with time zone DEFAULT "now"(),
"section_id" "uuid",
"section_position" "public"."section_posititon" DEFAULT 'member'::"public"."section_posititon" NOT NULL
);
ALTER TABLE "public"."profiles" OWNER TO "postgres";
CREATE TABLE IF NOT EXISTS "public"."sections" (
"id" "uuid" DEFAULT "gen_random_uuid"() NOT NULL,
"created_at" timestamp with time zone DEFAULT "now"() NOT NULL,
"name" "text" NOT NULL
);
ALTER TABLE "public"."sections" OWNER TO "postgres";
COMMENT ON TABLE "public"."sections" IS 'List of ESN sections using the app';
ALTER TABLE ONLY "public"."events_archived"
ADD CONSTRAINT "events_archived_pkey" PRIMARY KEY ("id");
ALTER TABLE ONLY "public"."events"
ADD CONSTRAINT "events_pkey" PRIMARY KEY ("id");
ALTER TABLE ONLY "public"."profiles"
ADD CONSTRAINT "profiles_pkey" PRIMARY KEY ("id");
ALTER TABLE ONLY "public"."participants"
ADD CONSTRAINT "qrcodes_pkey" PRIMARY KEY ("id");
ALTER TABLE ONLY "public"."sections"
ADD CONSTRAINT "sections_name_key" UNIQUE ("name");
ALTER TABLE ONLY "public"."sections"
ADD CONSTRAINT "sections_pkey" PRIMARY KEY ("id");
ALTER TABLE ONLY "public"."events_archived"
ADD CONSTRAINT "events_archived_section_id_fkey" FOREIGN KEY ("section_id") REFERENCES "public"."sections"("id") ON DELETE CASCADE;
ALTER TABLE ONLY "public"."events"
ADD CONSTRAINT "events_created_by_fkey" FOREIGN KEY ("created_by") REFERENCES "auth"."users"("id");
ALTER TABLE ONLY "public"."events"
ADD CONSTRAINT "events_section_id_fkey" FOREIGN KEY ("section_id") REFERENCES "public"."sections"("id") ON DELETE CASCADE;
ALTER TABLE ONLY "public"."participants"
ADD CONSTRAINT "participants_created_by_fkey" FOREIGN KEY ("created_by") REFERENCES "auth"."users"("id") ON DELETE CASCADE;
ALTER TABLE ONLY "public"."participants"
ADD CONSTRAINT "participants_event_fkey" FOREIGN KEY ("event") REFERENCES "public"."events"("id") ON DELETE CASCADE;
ALTER TABLE ONLY "public"."participants"
ADD CONSTRAINT "participants_scanned_by_fkey" FOREIGN KEY ("scanned_by") REFERENCES "public"."profiles"("id") ON DELETE CASCADE;
ALTER TABLE ONLY "public"."profiles"
ADD CONSTRAINT "profiles_id_fkey" FOREIGN KEY ("id") REFERENCES "auth"."users"("id") ON DELETE CASCADE;
ALTER TABLE ONLY "public"."profiles"
ADD CONSTRAINT "profiles_section_id_fkey" FOREIGN KEY ("section_id") REFERENCES "public"."sections"("id") ON DELETE CASCADE;
ALTER TABLE ONLY "public"."participants"
ADD CONSTRAINT "qrcodes_scanned_by_fkey" FOREIGN KEY ("scanned_by") REFERENCES "auth"."users"("id");
ALTER TABLE ONLY "public"."participants"
ADD CONSTRAINT "qrcodes_section_id_fkey" FOREIGN KEY ("section_id") REFERENCES "public"."sections"("id") ON DELETE CASCADE;
CREATE POLICY "Access only to section resources" ON "public"."events_archived" FOR SELECT TO "authenticated" USING ((EXISTS ( SELECT 1
FROM "public"."profiles" "p"
WHERE ("p"."section_id" = "events_archived"."section_id"))));
CREATE POLICY "Enable select for authenticated users only" ON "public"."profiles" FOR SELECT TO "authenticated" USING (true);
CREATE POLICY "Enable select for authenticated users only" ON "public"."sections" FOR SELECT TO "authenticated" USING (true);
CREATE POLICY "Only display section resources" ON "public"."events" FOR SELECT TO "authenticated" USING ((EXISTS ( SELECT 1
FROM "public"."profiles" "p"
WHERE ("p"."section_id" = "events"."section_id"))));
CREATE POLICY "Only display section resources" ON "public"."participants" FOR SELECT TO "authenticated" USING ((EXISTS ( SELECT 1
FROM "public"."profiles" "p"
WHERE ("p"."section_id" = "participants"."section_id"))));
ALTER TABLE "public"."events" ENABLE ROW LEVEL SECURITY;
ALTER TABLE "public"."events_archived" ENABLE ROW LEVEL SECURITY;
ALTER TABLE "public"."participants" ENABLE ROW LEVEL SECURITY;
ALTER TABLE "public"."profiles" ENABLE ROW LEVEL SECURITY;
ALTER TABLE "public"."sections" ENABLE ROW LEVEL SECURITY;
ALTER PUBLICATION "supabase_realtime" OWNER TO "postgres";
GRANT USAGE ON SCHEMA "public" TO "postgres";
GRANT USAGE ON SCHEMA "public" TO "anon";
GRANT USAGE ON SCHEMA "public" TO "authenticated";
GRANT USAGE ON SCHEMA "public" TO "service_role";
GRANT ALL ON FUNCTION "public"."archive_event"("_event_id" "uuid") TO "anon";
GRANT ALL ON FUNCTION "public"."archive_event"("_event_id" "uuid") TO "authenticated";
GRANT ALL ON FUNCTION "public"."archive_event"("_event_id" "uuid") TO "service_role";
GRANT ALL ON FUNCTION "public"."auto_archive_events"("_age_days" integer) TO "anon";
GRANT ALL ON FUNCTION "public"."auto_archive_events"("_age_days" integer) TO "authenticated";
GRANT ALL ON FUNCTION "public"."auto_archive_events"("_age_days" integer) TO "service_role";
GRANT ALL ON TABLE "public"."events" TO "anon";
GRANT ALL ON TABLE "public"."events" TO "authenticated";
GRANT ALL ON TABLE "public"."events" TO "service_role";
GRANT ALL ON FUNCTION "public"."create_event"("p_name" "text", "p_date" "date") TO "anon";
GRANT ALL ON FUNCTION "public"."create_event"("p_name" "text", "p_date" "date") TO "authenticated";
GRANT ALL ON FUNCTION "public"."create_event"("p_name" "text", "p_date" "date") TO "service_role";
GRANT ALL ON TABLE "public"."participants" TO "anon";
GRANT ALL ON TABLE "public"."participants" TO "authenticated";
GRANT ALL ON TABLE "public"."participants" TO "service_role";
GRANT ALL ON FUNCTION "public"."create_qrcodes_bulk"("p_section_id" "uuid", "p_event_id" "uuid", "p_names" "text"[], "p_surnames" "text"[], "p_emails" "text"[]) TO "anon";
GRANT ALL ON FUNCTION "public"."create_qrcodes_bulk"("p_section_id" "uuid", "p_event_id" "uuid", "p_names" "text"[], "p_surnames" "text"[], "p_emails" "text"[]) TO "authenticated";
GRANT ALL ON FUNCTION "public"."create_qrcodes_bulk"("p_section_id" "uuid", "p_event_id" "uuid", "p_names" "text"[], "p_surnames" "text"[], "p_emails" "text"[]) TO "service_role";
GRANT ALL ON FUNCTION "public"."handle_new_user"() TO "anon";
GRANT ALL ON FUNCTION "public"."handle_new_user"() TO "authenticated";
GRANT ALL ON FUNCTION "public"."handle_new_user"() TO "service_role";
GRANT ALL ON FUNCTION "public"."scan_ticket"("_ticket_id" "uuid") TO "anon";
GRANT ALL ON FUNCTION "public"."scan_ticket"("_ticket_id" "uuid") TO "authenticated";
GRANT ALL ON FUNCTION "public"."scan_ticket"("_ticket_id" "uuid") TO "service_role";
GRANT ALL ON TABLE "public"."events_archived" TO "anon";
GRANT ALL ON TABLE "public"."events_archived" TO "authenticated";
GRANT ALL ON TABLE "public"."events_archived" TO "service_role";
GRANT ALL ON TABLE "public"."profiles" TO "anon";
GRANT ALL ON TABLE "public"."profiles" TO "authenticated";
GRANT ALL ON TABLE "public"."profiles" TO "service_role";
GRANT ALL ON TABLE "public"."sections" TO "anon";
GRANT ALL ON TABLE "public"."sections" TO "authenticated";
GRANT ALL ON TABLE "public"."sections" TO "service_role";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON SEQUENCES TO "postgres";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON SEQUENCES TO "anon";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON SEQUENCES TO "authenticated";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON SEQUENCES TO "service_role";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON FUNCTIONS TO "postgres";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON FUNCTIONS TO "anon";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON FUNCTIONS TO "authenticated";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON FUNCTIONS TO "service_role";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON TABLES TO "postgres";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON TABLES TO "anon";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON TABLES TO "authenticated";
ALTER DEFAULT PRIVILEGES FOR ROLE "postgres" IN SCHEMA "public" GRANT ALL ON TABLES TO "service_role";
RESET ALL;


@@ -0,0 +1,34 @@
revoke select on table "auth"."schema_migrations" from "postgres";
CREATE TRIGGER on_auth_users_created AFTER INSERT ON auth.users FOR EACH ROW EXECUTE FUNCTION handle_new_user();
grant delete on table "storage"."s3_multipart_uploads" to "postgres";
grant insert on table "storage"."s3_multipart_uploads" to "postgres";
grant references on table "storage"."s3_multipart_uploads" to "postgres";
grant select on table "storage"."s3_multipart_uploads" to "postgres";
grant trigger on table "storage"."s3_multipart_uploads" to "postgres";
grant truncate on table "storage"."s3_multipart_uploads" to "postgres";
grant update on table "storage"."s3_multipart_uploads" to "postgres";
grant delete on table "storage"."s3_multipart_uploads_parts" to "postgres";
grant insert on table "storage"."s3_multipart_uploads_parts" to "postgres";
grant references on table "storage"."s3_multipart_uploads_parts" to "postgres";
grant select on table "storage"."s3_multipart_uploads_parts" to "postgres";
grant trigger on table "storage"."s3_multipart_uploads_parts" to "postgres";
grant truncate on table "storage"."s3_multipart_uploads_parts" to "postgres";
grant update on table "storage"."s3_multipart_uploads_parts" to "postgres";