I am trying to host Supabase with a separate PostgreSQL DB. According to their docs, Supabase uses pgjwt for auth. However, AWS RDS and Azure PostgreSQL do not support pgjwt.
Is there an alternative to pgjwt for PostgreSQL?
Thanks in advance
You can create the JWT functions manually as recommended here.
Just copy and run the SQL code from postgres-jwt-prototype.
Create JWT Utils:
CREATE SCHEMA jwt;
CREATE EXTENSION pgcrypto;
CREATE OR REPLACE FUNCTION jwt.url_encode(data BYTEA)
RETURNS TEXT LANGUAGE SQL AS $$
SELECT translate(encode(data, 'base64'), E'+/=\n', '-_');
$$;
CREATE OR REPLACE FUNCTION jwt.url_decode(data TEXT)
RETURNS BYTEA LANGUAGE SQL AS $$
WITH t AS (SELECT translate(data, '-_', '+/')),
     rem AS (SELECT length((SELECT * FROM t)) % 4) -- compute padding size
SELECT decode(
    (SELECT * FROM t) ||
    CASE WHEN (SELECT * FROM rem) > 0
         THEN repeat('=', (4 - (SELECT * FROM rem)))
         ELSE '' END,
    'base64');
$$;
CREATE OR REPLACE FUNCTION jwt.algorithm_sign(signables TEXT, secret TEXT, algorithm TEXT)
RETURNS TEXT LANGUAGE SQL AS $$
WITH alg AS (
    SELECT CASE
        WHEN algorithm = 'HS256' THEN 'sha256'
        WHEN algorithm = 'HS384' THEN 'sha384'
        WHEN algorithm = 'HS512' THEN 'sha512'
        ELSE '' END) -- hmac throws an error on an empty algorithm name
SELECT jwt.url_encode(public.hmac(signables, secret, (SELECT * FROM alg)));
$$;
CREATE OR REPLACE FUNCTION jwt.sign(payload JSON, secret TEXT, algorithm TEXT DEFAULT 'HS256')
RETURNS TEXT LANGUAGE SQL AS $$
WITH
header AS (
    SELECT jwt.url_encode(convert_to('{"alg":"' || algorithm || '","typ":"JWT"}', 'utf8'))
),
payload AS (
    SELECT jwt.url_encode(convert_to(payload::TEXT, 'utf8'))
),
signables AS (
    SELECT (SELECT * FROM header) || '.' || (SELECT * FROM payload)
)
SELECT (SELECT * FROM signables)
    || '.' ||
    jwt.algorithm_sign((SELECT * FROM signables), secret, algorithm);
$$;
CREATE OR REPLACE FUNCTION jwt.verify(token TEXT, secret TEXT, algorithm TEXT DEFAULT 'HS256')
RETURNS TABLE(header JSON, payload JSON, valid BOOLEAN) LANGUAGE SQL AS $$
SELECT
    convert_from(jwt.url_decode(r[1]), 'utf8')::JSON AS header,
    convert_from(jwt.url_decode(r[2]), 'utf8')::JSON AS payload,
    r[3] = jwt.algorithm_sign(r[1] || '.' || r[2], secret, algorithm) AS valid
FROM regexp_split_to_array(token, '\.') r;
$$;
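A quick round-trip check of the utilities above (the payload and secret here are arbitrary example values):
SELECT jwt.sign('{"sub":"1234567890","name":"John Doe"}'::json, 'my-secret');
-- verify() should report valid = true for a token signed with the same secret:
SELECT *
FROM jwt.verify(jwt.sign('{"sub":"1234567890"}'::json, 'my-secret'), 'my-secret');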
Create the sensitive schema:
CREATE SCHEMA sensitive;
CREATE TABLE sensitive.data (
org_no TEXT PRIMARY KEY,
secret TEXT NOT NULL
);
CREATE TABLE sensitive.token_secret (
shared_secret TEXT NOT NULL
);
CREATE OR REPLACE FUNCTION sensitive.get_data(token TEXT, OUT secret TEXT)
RETURNS TEXT
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
    _shared_secret TEXT := (SELECT shared_secret
                            FROM sensitive.token_secret
                            LIMIT 1);
BEGIN
    WITH verified_token AS (SELECT * FROM jwt.verify(token, _shared_secret)),
         verified_org_no AS (SELECT payload ->> 'orgNo'
                             FROM verified_token
                             WHERE valid IS TRUE)
    SELECT d.secret
    FROM sensitive.data d
    WHERE org_no = (SELECT * FROM verified_org_no)
      AND org_no IS NOT NULL
    INTO secret;
END;
$$;
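To exercise it end to end (the org number and secrets below are made-up example values), seed the shared secret and a data row, then call get_data with a token signed using that secret:
INSERT INTO sensitive.token_secret (shared_secret) VALUES ('shared-secret');
INSERT INTO sensitive.data (org_no, secret) VALUES ('999888777', 'the org secret');
SELECT sensitive.get_data(jwt.sign('{"orgNo":"999888777"}'::json, 'shared-secret'));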
I'm trying to handle empty ('') values passed to a function argument.
What is the correct way to return nothing when calling the function with an empty value, like below?
SELECT *
FROM metadata.fn_get_id('mea', 'sau', '');
DROP FUNCTION IF EXISTS metadata.fn_get_id(VARCHAR, VARCHAR, NUMERIC);
CREATE OR REPLACE FUNCTION metadata.fn_get_id(a1 CHARACTER VARYING, b2 CHARACTER VARYING,
c3 NUMERIC DEFAULT 0
) RETURNS INT
LANGUAGE plpgsql
AS
$$
DECLARE
linked_id INT;
BEGIN
EXECUTE
'SELECT linked_id::INT FROM ' || $1 || '_region
WHERE 1=1 AND iso=upper(' || QUOTE_LITERAL($2) || ') AND id = '|| $3 ||' limit 1;'
INTO linked_id;
RETURN linked_id;
END
$$;
-- TEST LINK_ID 1213506417 (PASS)
SELECT *
FROM metadata.fn_get_id('mea', 'sau', 414803422);
-- TEST Null (PASS)
SELECT *
FROM metadata.fn_get_id('mea', 'sau');
-- TEST empty (FAILS ... HOW to Handle)
SELECT *
FROM metadata.fn_get_id('mea', 'sau', '');
Make the c3 function argument type text with default null, and check for an empty string first thing in the function body.
create or replace function metadata.fn_get_id(a1 text, b2 text, c3 text default null)
returns integer language plpgsql as
$$
declare
    -- your declarations
begin
    if nullif(c3, '') is null then
        return null;
    end if;
    -- your function body
end;
$$;
Call:
SELECT *
FROM metadata.fn_get_id('mea', 'sau', 414803422::text);
By the way, the function in the example is prone to SQL injection, since $1 and $3 are concatenated into the query unescaped; a safer variant using format() is sketched after this answer.
Simply call the function like this:
metadata.fn_get_id('mea', 'sau', nullif('', ''))
So that an empty string is replaced with NULL.
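For completeness, here is a sketch of the same function with the empty-string guard built in and with format() doing the quoting (%I escapes the identifier, %L the literals), which also removes the injection problem:
CREATE OR REPLACE FUNCTION metadata.fn_get_id(a1 text, b2 text, c3 text DEFAULT NULL)
RETURNS int
LANGUAGE plpgsql AS
$$
DECLARE
    linked_id int;
BEGIN
    IF nullif(c3, '') IS NULL THEN -- treat '' (and NULL) as "no id given"
        RETURN NULL;
    END IF;
    EXECUTE format(
        'SELECT linked_id::int FROM %I WHERE iso = upper(%L) AND id = %L::numeric LIMIT 1',
        a1 || '_region', b2, c3)
    INTO linked_id;
    RETURN linked_id;
END
$$;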
I have a query that I use in different parts of a system. Now I want to make a function that accepts a text to be used as a query. I'm not even sure this is possible, but I want to know if there's a workaround.
What I want to achieve is a function that returns a table and accepts a text/varchar that could be used as a query.
Here's what I have: a query that is kind of a "base query" that can have different data based on the given CTE, which is named data_table.
Refer to the function and its usage below; this is mostly abstract, but it should be enough to illustrate the idea.
CREATE OR REPLACE FUNCTION func(
data_query TEXT
)
RETURNS TABLE
(
id BIGINT,
random_text varchar
)
LANGUAGE plpgsql
AS $function$
BEGIN
RETURN QUERY
with data_table AS (
data_query
), another_data_table AS (
SELECT
*
FROM my_data
)
SELECT
d.id,
ad.random_text
FROM data_table d
INNER JOIN another_data_table ad
ON ad.ref_id = d.id;
END; $function$;
usage:
SELECT * FROM func('SELECT * FROM my_data_table');
SELECT * FROM func('SELECT * FROM my_second_data_table');
Instead of passing the query, you may pass the table name and access it dynamically in your query using EXECUTE with format():
CREATE OR REPLACE FUNCTION func(
table_name_in TEXT
)
RETURNS TABLE
(
id BIGINT,
random_text varchar
)
LANGUAGE plpgsql
AS $function$
BEGIN
RETURN QUERY EXECUTE format (
'SELECT
d.id :: bigint,
ad.random_text :: varchar
FROM %I d
INNER JOIN my_data ad
ON ad.ref_id = d.id', table_name_in );
END
$function$;
usage:
SELECT * FROM func('my_data_table');
SELECT * FROM func('my_second_data_table');
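If you truly need to accept an arbitrary query string rather than a table name, a variation on the above is to splice the string into the statement and run the whole thing with EXECUTE. A sketch (the name func_from_query is made up here; interpolating a raw query string is wide open to SQL injection, so only do this with trusted input):
CREATE OR REPLACE FUNCTION func_from_query(
    data_query TEXT
)
RETURNS TABLE
(
    id BIGINT,
    random_text varchar
)
LANGUAGE plpgsql
AS $function$
BEGIN
    RETURN QUERY EXECUTE
        'WITH data_table AS (' || data_query || ')
         SELECT d.id::bigint, ad.random_text::varchar
         FROM data_table d
         INNER JOIN my_data ad ON ad.ref_id = d.id';
END
$function$;
usage:
SELECT * FROM func_from_query('SELECT * FROM my_data_table');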
Now I have the following function:
CREATE OR REPLACE FUNCTION find_city_by_name(match varchar)
RETURNS TABLE(city_name varchar)
LANGUAGE plpgsql AS $$
BEGIN
    RETURN QUERY WITH r AS (
        SELECT short_name FROM geo_cities WHERE short_name ILIKE CONCAT(match, '%')
    )
    SELECT r.short_name FROM r;
END;
$$;
I want to return all fields (*), not only short_name. What do I need to change in my function?
Here is a simplified version (without WITH, and using LANGUAGE sql) that I mentioned in my comment on the adjacent answer:
create or replace function find_city_by_name(text)
returns table(city_name varchar, long_name varchar)
as $$
select * from geo_cities where short_name ilike $1 || '%';
$$ language sql;
Also, you might find it more convenient to refer to the geo_cities table itself when defining the function's signature, using SETOF geo_cities:
create or replace function find_city_by_name(text)
returns setof geo_cities
as $$
select * from geo_cities where short_name ilike $1 || '%';
$$ language sql;
-- this will allow you to change the structure of the geo_cities table without having to change the function's definition.
If you want a real row you must explicitly declare all fields in the RETURNS clause:
create table geo_cities (
short_name varchar,
long_name varchar
);
insert into geo_cities values ('BERLIN', 'BERLIN'), ('BERLIN 2','BERLIN TWO');
CREATE OR REPLACE FUNCTION find_city_by_name(match varchar)
RETURNS TABLE(city_name varchar, long_name varchar)
LANGUAGE plpgsql
AS
$$
BEGIN
RETURN QUERY WITH r AS (
SELECT * FROM geo_cities WHERE short_name ILIKE CONCAT(match, '%')
)
SELECT * FROM r;
END;
$$;
select * from find_city_by_name('BERLIN');
See the example running at: http://rextester.com/IKTT52978
I am trying to create crosstab queries in PostgreSQL such that it automatically generates the crosstab columns instead of hardcoding it. I have written a function that dynamically generates the column list that I need for my crosstab query. The idea is to substitute the result of this function in the crosstab query using dynamic sql.
I know how to do this easily in SQL Server, but my limited knowledge of PostgreSQL is hindering my progress here. I was thinking of storing the result of function that generates the dynamic list of columns into a variable and use that to dynamically build the sql query. It would be great if someone could guide me regarding the same.
-- Table which is to be pivoted
CREATE TABLE test_db
(
kernel_id int,
key int,
value int
);
INSERT INTO test_db VALUES
(1,1,99),
(1,2,78),
(2,1,66),
(3,1,44),
(3,2,55),
(3,3,89);
-- This function dynamically returns the list of columns for crosstab
CREATE FUNCTION test() RETURNS TEXT AS '
DECLARE
key_id int;
text_op TEXT = '' kernel_id int, '';
BEGIN
FOR key_id IN SELECT DISTINCT key FROM test_db ORDER BY key LOOP
text_op := text_op || key_id || '' int , '' ;
END LOOP;
text_op := text_op || '' DUMMY text'';
RETURN text_op;
END;
' LANGUAGE 'plpgsql';
-- This query works. I just need the static list of
-- crosstab columns to be generated dynamically.
SELECT * FROM
crosstab
(
'SELECT kernel_id, key, value FROM test_db ORDER BY 1,2',
'SELECT DISTINCT key FROM test_db ORDER BY 1'
)
AS x (kernel_id int, key1 int, key2 int, key3 int);
-- How can I replace this static list with a dynamically generated list of columns?
You can use the provided C function crosstab_hash for this.
The manual is not very clear in this respect. It's mentioned at the end of the chapter on crosstab() with two parameters:
You can create predefined functions to avoid having to write out the
result column names and types in each query. See the examples in the
previous section. The underlying C function for this form of crosstab
is named crosstab_hash.
For your example:
CREATE OR REPLACE FUNCTION f_cross_test_db(text, text)
RETURNS TABLE (kernel_id int, key1 int, key2 int, key3 int)
AS '$libdir/tablefunc','crosstab_hash' LANGUAGE C STABLE STRICT;
Call:
SELECT * FROM f_cross_test_db(
'SELECT kernel_id, key, value FROM test_db ORDER BY 1,2'
,'SELECT DISTINCT key FROM test_db ORDER BY 1');
Note that you need to create a distinct crosstab_hash function for every crosstab function with a different return type.
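For instance, if another pivot of the same source returned text values instead of int, it would need its own wrapper; a hypothetical second declaration might look like:
CREATE OR REPLACE FUNCTION f_cross_test_db_text(text, text)
RETURNS TABLE (kernel_id int, key1 text, key2 text, key3 text)
AS '$libdir/tablefunc','crosstab_hash' LANGUAGE C STABLE STRICT;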
Related:
PostgreSQL row to columns
Your function to generate the column list is rather convoluted; it can be replaced with this SQL query:
SELECT 'kernel_id int, '
|| string_agg(DISTINCT key::text, ' int, ' ORDER BY key::text)
|| ' int, DUMMY text'
FROM test_db;
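With the sample data from the question (distinct keys 1, 2, 3), this query returns the column list:
kernel_id int, 1 int, 2 int, 3 int, DUMMY text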
And it cannot be used dynamically anyway.
@erwin-brandstetter: The return type of the function isn't an issue if you're always returning a JSON type with the converted results.
Here is the function I came up with:
CREATE OR REPLACE FUNCTION report.test(
i_start_date TIMESTAMPTZ,
i_end_date TIMESTAMPTZ,
i_interval INT
) RETURNS TABLE (
tab JSON
) AS $ab$
DECLARE
_key_id TEXT;
_text_op TEXT = '';
_ret JSON;
BEGIN
-- SELECT DISTINCT for query results
FOR _key_id IN
SELECT DISTINCT at_name
FROM report.company_data_date cd
JOIN report.company_data_amount cda ON cd.id = cda.company_data_date_id
JOIN report.amount_types at ON cda.amount_type_id = at.id
WHERE date_start BETWEEN i_start_date AND i_end_date
AND interval_type_id = i_interval
LOOP
-- build function_call with datatype of column
IF char_length(_text_op) > 1 THEN
_text_op := _text_op || ', ' || _key_id || ' NUMERIC(20,2)';
ELSE
_text_op := _text_op || _key_id || ' NUMERIC(20,2)';
END IF;
END LOOP;
-- build query with parameter filters
RETURN QUERY
EXECUTE '
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT * FROM crosstab(''SELECT date_start, at.at_name, cda.amount ct
FROM report.company_data_date cd
JOIN report.company_data_amount cda ON cd.id = cda.company_data_date_id
JOIN report.amount_types at ON cda.amount_type_id = at.id
WHERE date_start between $$' || i_start_date::TEXT || '$$ AND $$' || i_end_date::TEXT || '$$
AND interval_type_id = ' || i_interval::TEXT || ' ORDER BY date_start'')
AS ct (date_start timestamptz, ' || _text_op || ')
) t;';
END;
$ab$ LANGUAGE 'plpgsql';
So, when you run it, you get the dynamic results in JSON, and you don't need to know how many values were pivoted:
select * from report.test(now()- '1 week'::interval, now(), 1);
tab
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
[{"date_start":"2015-07-27T08:40:01.277556-04:00","burn_rate":0.00,"monthly_revenue":5800.00,"cash_balance":0.00},{"date_start":"2015-07-27T08:50:02.458868-04:00","burn_rate":34000.00,"monthly_revenue":15800.00,"cash_balance":24000.00}]
(1 row)
Edit: If you have mixed data types in your crosstab, you can add logic to look up the type of each column with something like this:
SELECT a.attname AS column_name, format_type(a.atttypid, a.atttypmod) AS data_type
FROM pg_attribute a
JOIN pg_class b ON b.oid = a.attrelid
JOIN pg_catalog.pg_namespace n ON n.oid = b.relnamespace
WHERE n.nspname = $$schema_name$$ AND b.relname = $$table_name$$ AND a.attstattarget = -1;
I realise this is an older post, but I struggled for a little while on the same issue.
My problem statement:
I had a table with multiple values in a field and wanted to create a crosstab query with 40+ column headings per row.
My solution was to create a function which looped through the table column to grab the values that I wanted to use as column headings within the crosstab query.
Within this function I could then create the crosstab query. In my use case I added the crosstab result into a separate table.
E.g.
CREATE OR REPLACE FUNCTION field_values_ct ()
RETURNS VOID AS $$
DECLARE
    rec RECORD;
    str text;
BEGIN
    str := '"Issue ID" text,';
    -- loop to build the column-heading string
    FOR rec IN SELECT DISTINCT field_name
               FROM issue_fields
               ORDER BY field_name
    LOOP
        str := str || '"' || rec.field_name || '" text' || ',';
    END LOOP;
    str := substring(str, 0, length(str)); -- drop the trailing comma
    EXECUTE 'CREATE EXTENSION IF NOT EXISTS tablefunc;
        DROP TABLE IF EXISTS temp_issue_fields;
        CREATE TABLE temp_issue_fields AS
        SELECT *
        FROM crosstab(''select issue_id, field_name, field_value from issue_fields order by 1'',
                      ''SELECT DISTINCT field_name FROM issue_fields ORDER BY 1'')
        AS final_result (' || str || ')';
END;
$$ LANGUAGE plpgsql;
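Calling the function then builds (or rebuilds) the result table, which can be queried directly:
SELECT field_values_ct();
SELECT * FROM temp_issue_fields;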
The approach described here worked well for me. Instead of retrieving the pivot table directly, the easier approach is to let the function generate a SQL query string, then dynamically execute the resulting string on demand.
Background
In a PostgreSQL 9.0 database, there are various tables that have many-to-many relationships. The number of those relationships must be restricted. A couple of example tables include:
CREATE TABLE authentication (
id bigserial NOT NULL, -- Primary key
cookie character varying(64) NOT NULL, -- Authenticates the user with a cookie
ip_address character varying(40) NOT NULL -- Device IP address (IPv6-friendly)
)
CREATE TABLE tag_comment (
id bigserial NOT NULL, -- Primary key
comment_id bigint, -- Foreign key to the comment table
tag_name_id bigint -- Foreign key to the tag name table
)
Different relationships, however, have different limitations. For example, in the authentication table, a given ip_address is allowed 1024 cookie values; whereas, in the tag_comment table, each comment_id can have 10 associated tag_name_ids.
Problem
Currently, a number of functions have these restrictions hard-coded, scattering the limitations throughout the database and preventing them from being changed dynamically.
Question
How would you impose a maximum many-to-many relationship limit on tables in a generic fashion?
Idea
Create a table to track the limits:
CREATE TABLE imposed_maximums (
id serial NOT NULL,
table_name character varying(128) NOT NULL,
column_group character varying(128) NOT NULL,
column_count character varying(128) NOT NULL,
max_size INTEGER
)
Establish the restrictions:
INSERT INTO imposed_maximums
(table_name, column_group, column_count, max_size) VALUES
('authentication', 'ip_address', 'cookie', 1024);
INSERT INTO imposed_maximums
(table_name, column_group, column_count, max_size) VALUES
('tag_comment', 'comment_id', 'tag_id', 10);
Create a trigger function:
CREATE OR REPLACE FUNCTION impose_maximum()
RETURNS trigger AS
$BODY$
BEGIN
-- Join this up with imposed_maximums somehow?
select
count(1)
from
-- the table name
where
-- the group column = NEW value to INSERT;
RETURN NEW;
END;
$BODY$ LANGUAGE plpgsql;
Attach the trigger to every table:
CREATE TRIGGER trigger_authentication_impose_maximum
BEFORE INSERT
ON authentication
FOR EACH ROW
EXECUTE PROCEDURE impose_maximum();
Obviously it won't work as written... is there a way to make it work, or otherwise enforce the restrictions such that they are:
in a single location; and
not hard-coded?
Thank you!
I've been writing similar generic triggers.
The trickiest part is to get the value of the entry in the NEW record based on the column name.
I'm doing it the following way:
convert the NEW data into an array;
find the attnum of the column and use it as an index into the array.
This approach works as long as there are no commas in the data :( I don't know of another way to convert the NEW or OLD variables into an array of values.
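For illustration, the NEW-to-array trick looks like this on an ad-hoc row value (the row here is just an example):
SELECT string_to_array(translate(trim(ROW(1, 'abc', '1.2.3.4')::text), '()', ''), ',');
-- yields {1,abc,1.2.3.4}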
The following function might help:
CREATE OR REPLACE FUNCTION impose_maximum() RETURNS trigger AS $impose_maximum$
DECLARE
_sql text;
_cnt int8;
_vals text[];
_anum int4;
_im record;
BEGIN
_vals := string_to_array(translate(trim(NEW::text), '()', ''), ',');
FOR _im IN SELECT * FROM imposed_maximums WHERE table_name = TG_TABLE_NAME LOOP
SELECT attnum INTO _anum FROM pg_catalog.pg_attribute a
JOIN pg_catalog.pg_class t ON t.oid = a.attrelid
WHERE t.relkind = 'r' AND t.relname = TG_TABLE_NAME
AND NOT a.attisdropped AND a.attname = _im.column_group;
_sql := 'SELECT count('||quote_ident(_im.column_count)||')'||
' FROM '||quote_ident(_im.table_name)||
' WHERE '||quote_ident(_im.column_group)||' = $1';
EXECUTE _sql INTO _cnt USING _vals[_anum];
IF _cnt > CAST(_im.max_size AS int8) THEN
RAISE EXCEPTION 'Maximum of % hit for column % in table %(%=%)',
_im.max_size, _im.column_count,
_im.table_name, _im.column_group, _vals[_anum];
END IF;
END LOOP;
RETURN NEW;
END; $impose_maximum$ LANGUAGE plpgsql;
This function will check for all conditions defined for a given table.
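Because the function reads its limits from imposed_maximums keyed by TG_TABLE_NAME, the same trigger function can be attached to each restricted table, e.g. for tag_comment (note that the sample row inserted earlier uses column_count = 'tag_id', which would have to match the table's actual column name, tag_name_id):
CREATE TRIGGER trigger_tag_comment_impose_maximum
BEFORE INSERT
ON tag_comment
FOR EACH ROW
EXECUTE PROCEDURE impose_maximum();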
Yes, there is a way to make it work.
In my personal opinion your idea is the way to go. It just needs one level of "meta". So, the table imposed_maximums should have trigger(s) which are fired after insert, update and delete; their code should in turn create, modify or remove triggers and functions.
Take a look at the EXECUTE statement of PL/pgSQL, which essentially allows you to execute any string. Needless to say, this string may contain definitions of triggers, functions, etc. Obviously, you have access to OLD and NEW in the triggers, so you can fill in the placeholders in the string and you are done.
I believe you should be able to accomplish what you want with this answer. Please note that this is my personal view on the topic and it might not be an optimal solution - I would like to see a different, maybe also more efficient, approach.
Edit - Below is a sample from one of my old projects. It is located inside a function that is triggered before update (though now that I think of it, maybe it should have been called after ;). And yes, the code is messy, as it does not use the nice $escape$ syntax; I was really, really young then. Nonetheless, the snippet demonstrates that it is possible to achieve what you want.
query:=''CREATE FUNCTION '' || NEW.function_name || ''('';
IF NEW.parameter=''t'' THEN
query:=query || ''integer'';
END IF;
query:=query || '') RETURNS setof '' || type_name || '' AS'' || chr(39);
query:=query || '' DECLARE list '' || type_name || ''; '';
query:=query || ''BEGIN '';
query:=query || '' FOR list IN EXECUTE '' || chr(39) || chr(39);
query:=query || temp_s || '' FROM '' || NEW.table_name;
IF NEW.parameter=''t'' THEN
query:=query || '' WHERE id='' || chr(39) || chr(39) || ''||'' || chr(36) || ''1'';
ELSE
query:=query || '';'' || chr(39) || chr(39);
END IF;
query:=query || '' LOOP RETURN NEXT list; '';
query:=query || ''END LOOP; RETURN; END; '' || chr(39);
query:=query || ''LANGUAGE '' || chr(39) || ''plpgsql'' || chr(39) || '';'';
EXECUTE query;
This function + trigger could be used as a template. If you combine them with @Sorrow's technique of dynamically generating the functions + triggers, this could solve the OP's problem.
Please note that, instead of recalculating the count for every affected row (by calling the COUNT() aggregate function), I maintain an 'incremental' count. This should be cheaper.
DROP SCHEMA tmp CASCADE;
CREATE SCHEMA tmp ;
SET search_path='tmp';
CREATE TABLE authentication
( id bigserial NOT NULL -- Primary key
, cookie varchar(64) NOT NULL -- Authenticates the user with a cookie
, ip_address varchar(40) NOT NULL -- Device IP address (IPv6-friendly)
, PRIMARY KEY (ip_address, cookie)
);
CREATE TABLE authentication_ip_count (
ip_address character varying(40) NOT NULL
PRIMARY KEY -- REFERENCES authentication(ip_address)
, refcnt INTEGER NOT NULL DEFAULT 0
--
-- This is much easier:
-- keep the max value inside the table
-- + use a table constraint
-- , maxcnt INTEGER NOT NULL DEFAULT 2 -- actually 100
-- , CONSTRAINT no_more_cookies CHECK (refcnt <= maxcnt)
);
CREATE TABLE imposed_maxima
( id serial NOT NULL
, table_name varchar NOT NULL
, column_group varchar NOT NULL
, column_count varchar NOT NULL
, max_size INTEGER NOT NULL
, PRIMARY KEY (table_name,column_group,column_count)
);
INSERT INTO imposed_maxima(table_name,column_group,column_count,max_size)
VALUES('authentication','ip_address','cookie', 2);
CREATE OR REPLACE FUNCTION authentication_impose_maximum()
RETURNS trigger AS
$BODY$
DECLARE
dummy INTEGER;
BEGIN
IF (TG_OP = 'INSERT') THEN
INSERT INTO authentication_ip_count (ip_address)
SELECT sq.*
FROM ( SELECT NEW.ip_address) sq
WHERE NOT EXISTS (
SELECT *
FROM authentication_ip_count nx
WHERE nx.ip_address = sq.ip_address
);
UPDATE authentication_ip_count
SET refcnt = refcnt + 1
WHERE ip_address = NEW.ip_address
;
SELECT COUNT(*) into dummy -- ac.refcnt, mx.max_size
FROM authentication_ip_count ac
JOIN imposed_maxima mx ON (1=1) -- outer join
WHERE ac.ip_address = NEW.ip_address
AND mx.table_name = 'authentication'
AND mx.column_group = 'ip_address'
AND mx.column_count = 'cookie'
AND ac.refcnt > mx.max_size
;
IF FOUND AND dummy > 0 THEN
RAISE EXCEPTION 'Cookie monster detected';
END IF;
ELSIF (TG_OP = 'DELETE') THEN
UPDATE authentication_ip_count
SET refcnt = refcnt - 1
WHERE ip_address = OLD.ip_address
;
DELETE FROM authentication_ip_count ac
WHERE ac.ip_address = OLD.ip_address
AND ac.refcnt <= 0
;
-- ELSIF (TG_OP = 'UPDATE') THEN
-- (Only needed if we allow updates of ip-address)
-- otherwise the count stays the same.
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql;
CREATE TRIGGER trigger_authentication_impose_maximum
BEFORE INSERT OR UPDATE OR DELETE
ON authentication
FOR EACH ROW
EXECUTE PROCEDURE authentication_impose_maximum();
-- Test it ...
INSERT INTO authentication(ip_address, cookie) VALUES ('1.2.3.4', 'Some koekje' );
INSERT INTO authentication(ip_address, cookie) VALUES ('1.2.3.4', 'kaakje' );
INSERT INTO authentication(ip_address, cookie) VALUES ('1.2.3.4', 'Yet another cookie' );
RESULTS:
INSERT 0 1
CREATE FUNCTION
CREATE TRIGGER
INSERT 0 1
INSERT 0 1
ERROR: Cookie monster detected