PL/SQL code to split email and mobile number stored in a single column

I need code to split email addresses and mobile numbers that are stored in a single column and load them into two collection variables, in order to send SMS and email. Note that the email addresses and mobile numbers are combined in a single column/variable, separated by commas.

Updated code to display both the email addresses and the mobile numbers:
DECLARE
    l_str           VARCHAR2(150) := 'adsf@qewr.com, uiohljkh@uiyhjn.com, +7(800)800-80-80, +7(900)900-90-90';
    -- simple email pattern: local part, @, domain, 2-4 letter TLD
    l_pattern_email VARCHAR2(500) := '[a-zA-Z0-9._%-]+@[a-zA-Z0-9._%-]+\.[a-zA-Z]{2,4}';
    -- phone pattern: +C(NNN)NNN-NN-NN; the leading + is escaped so it is matched literally
    l_pattern_phone VARCHAR2(500) := '\+[0-9]\([0-9]{3}\)[0-9]{3}-[0-9]{2}-[0-9]{2}';
    TYPE tp_emails_col_typ IS TABLE OF VARCHAR2(100);
    TYPE tp_phone_col_typ  IS TABLE OF VARCHAR2(100);
    l_emails_col tp_emails_col_typ;
    l_phone_col  tp_phone_col_typ;
BEGIN
    -- extract the LEVEL-th email match on each CONNECT BY iteration
    SELECT REGEXP_SUBSTR(l_str, l_pattern_email, 1, LEVEL)
      BULK COLLECT INTO l_emails_col
      FROM dual
    CONNECT BY LEVEL <= REGEXP_COUNT(l_str, l_pattern_email);

    IF l_emails_col.COUNT > 0 THEN
        FOR idx IN l_emails_col.FIRST .. l_emails_col.LAST LOOP
            dbms_output.put_line(l_emails_col(idx));
        END LOOP;
    END IF;

    -- same approach for the phone numbers
    SELECT REGEXP_SUBSTR(l_str, l_pattern_phone, 1, LEVEL)
      BULK COLLECT INTO l_phone_col
      FROM dual
    CONNECT BY LEVEL <= REGEXP_COUNT(l_str, l_pattern_phone);

    IF l_phone_col.COUNT > 0 THEN
        FOR idx IN l_phone_col.FIRST .. l_phone_col.LAST LOOP
            dbms_output.put_line(l_phone_col(idx));
        END LOOP;
    END IF;
END;
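To see the output when running the block in SQL*Plus or SQLcl, enable server output first; with the sample string above, the block prints the two addresses followed by the two numbers:
SET SERVEROUTPUT ON
-- expected output:
-- adsf@qewr.com
-- uiohljkh@uiyhjn.com
-- +7(800)800-80-80
-- +7(900)900-90-90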

Postgres: generate SQL statement with dynamic fields

I'm building a package to generate HTML code that will later be sent by email.
The data query has to be dynamic, as it will have a column for every day of the month.
In the end the query should be something like:
select day_20230101, day_20230102, ..., day_20230131, avg_cap, rank1, ... from v_html_report;
and then be executed in a FOR loop to generate the HTML. The problem I'm facing is that the list of columns is being treated as a single string: when executing the loop, result_rec.day_20230101 doesn't exist, but result_rec.column_list does.
Is there a way to execute the query from a string in the FOR loop so that the query sees these as separate fields and not a single field?
Below is my procedure:
create or replace procedure proc_html_gen()
language plpgsql
as $$
declare
    days_month  int;
    column_list text;
    result_rec  record;
    html_header text;
    html_result text;
    html_days   text;
    html_sql    text;
begin
    -- number of days in the report month
    select date_part(
               'days',
               (to_date((select value from parameters rtp where id = 'html_report_month' and id2 = 'execute_month') || '01', 'YYYYMMDD') + interval '1 month - 1 day')
           )
      into days_month;

    update html_gen rtp
       set aux1 = days_month
     where html_type = 'html_report';

    -- (this value is immediately overwritten by the aggregate below)
    select to_char(generate_series(01, days_month), '00') into column_list;

    -- build the comma-separated list of day columns
    select string_agg(concat('days_', days_list), ',')
      into column_list
      from (
          select concat(
                     (select value from parameters rtp where id = 'html_report' and id2 = 'execute_month'),
                     trim(to_char(generate_series(01, days_month), '00'))) as days_list
      ) a;

    update html_gen rtp
       set aux2 = column_list
     where html_type = 'html_report';

    html_sql := 'select ' || column_list || ', avg_cap, rank1, rank2, rank3, rank4, rank5, rank6, rank7, rank8, rank9, rank10 from v_monthly_polygon_peak_cap_table';

    for result_rec in execute html_sql
    loop
        select string_agg(days_list, '')
          into html_days
          from (
              select concat('<td>', concat(1, trim(to_char(generate_series(01, 31), '00'))), '</td>') as days_list
          ) a;

        html_result := concat(html_result, '<tr>', html_days, '<td>' || result_rec.avg_cap || '</td></tr>');
    end loop;

    commit;
end;
$$;
I updated the query with the solution. Now I'm facing a new problem: how do I get the field name dynamically?
With the line below I'm getting an error: days_ doesn't exist. I need to append the number of the day to the end of the variable name.
html_result := result_rec.days_ || trim(to_char(generate_series(01,31),'00'))
Best Regards,
Macieira
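One possible approach (a sketch, not from the original thread): PL/pgSQL cannot resolve result_rec.<name> when the name is only known at runtime, but the record can be converted to JSON and its fields read by a text key (PostgreSQL 9.3+):
do $$
declare
    rec      record;
    col_name text;
    col_val  text;
begin
    -- stand-in for the dynamic report query
    for rec in execute 'select 1 as day_20230101, 2 as day_20230102' loop
        col_name := 'day_2023010' || 2;            -- field name computed at runtime
        col_val  := to_json(rec) ->> col_name;     -- look the field up by its name
        raise notice '% = %', col_name, col_val;   -- prints: day_20230102 = 2
    end loop;
end $$;
In the procedure above, the same trick would let the day cells be built from to_json(result_rec) ->> ('days_' || ...) instead of a hard-coded field reference.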

Cannot Get Dynamic Exec of SP to Return INOUT Param

Using PostgreSQL 13.2, wherein a stored procedure (the Requestor) is given the name of a list of stored procedures to run (the job group). All sp's executed this way are coded to write a log record as their last task. I have chosen to pull that 'append log' code out of all of the sp's and instead send back the log record (always a single record) using an INOUT rowtype param, but have run into trouble. In my example below, the requestor sp will load the records returned from the sp's it calls into a temp table shaped like the permanent log table.
That permanent table looks like this:
create table public.job_log (
log_id integer,
event_id integer,
job_id integer,
rows_affected integer);
Any one of the jobs that is executed by the requestor sp might look like this one:
CREATE OR REPLACE procedure public.get_log_rcd(
inout p_log_rcd public.job_log)
LANGUAGE 'plpgsql'
as
$BODY$
declare
v_log_id integer = 40;
v_event_id integer = 698;
v_job_id integer = 45;
v_rows_affected integer = 60;
begin
select
v_log_id
, v_event_id
, v_job_id
, v_rows_affected
into
p_log_rcd.log_id,
p_log_rcd.event_id,
p_log_rcd.job_id,
p_log_rcd.rows_affected;
end;
$BODY$
This sample sp doesn't do anything; its purpose here is only to simulate initializing the log parameters to return to the caller.
Again, the requestor sp that's going to run jobs like the one above creates a temp table with the same structure as the permanent log:
drop table if exists tmp_log_cache;
create temp table tmp_log_cache as table public.job_log with no data;
If the requestor sp didn't have to do dynamic SQL, it would look something like this block here:
do
$$
declare
big_local public.job_log;
begin
call public.get_log_rcd( big_local );
insert into tmp_log_cache (
log_id
, event_id
, job_id
, rows_affected )
values (
big_local.log_id
, big_local.event_id
, big_local.job_id
, big_local.rows_affected);
end;
$$;
Doing a
select * from tmp_log_cache;
returns a row containing the 4 column values expected; all is well. But dynamic execution is required. And, as I'm sure most folks here know, the following dog don't hunt:
do
$$
declare
big_local public.job_log;
v_query_text varchar;
v_job_name varchar = 'public.get_log_rcd';
begin
select 'call ' || v_job_name || '( $1 );'
into v_query_text;
execute v_query_text using big_local::public.job_log;
insert into tmp_log_cache (
log_id
, event_id
, job_id
, rows_affected )
values (
big_local.log_id
, big_local.event_id
, big_local.job_id
, big_local.rows_affected);
end;
$$;
The above dynamic statement executes without error, but the insert statement only has NULL values to work with: a row is inserted, all nulls. Any suggestions warmly welcomed. The sp's that comprise the various job groups could probably have been implemented as functions, although in all cases their primary tasks are to massage, normalize, and cleanse telemetry data, not to spit anything out, per se.
Hmm, the documentation states that "parameter symbols (...) only work in SELECT, INSERT, UPDATE, and DELETE commands", so this probably isn't possible using parameters.
But as a workaround you can build a dynamic DO and include a variable to get the values and the INSERT in there.
DO
$o$
DECLARE
v_query_text varchar;
v_job_name varchar := format('%I.%I',
'public',
'get_log_rcd');
BEGIN
v_query_text := concat('DO ',
'$i$ ',
'DECLARE ',
' big_local public.job_log; ',
'BEGIN ',
' CALL ', v_job_name, '(big_local); ',
' INSERT INTO tmp_log_cache ',
' (log_id, ',
' event_id, ',
' job_id, ',
' rows_affected) ',
' VALUES (big_local.log_id, ',
' big_local.event_id, ',
' big_local.job_id, ',
' big_local.rows_affected); ',
'END; ',
'$i$; ');
EXECUTE v_query_text;
END;
$o$;
db<>fiddle
Thanks--I would not have considered the ability to execute a 'do' using execute. It just would not have occurred to me. Well, here's my solution: flip to functions.
With the understanding that my 'Requestor' is only given sp's to run because that's what we had to do with SQL Server and it was reflex, I did the 1-line change needed to flip my example sp above to a function:
CREATE OR REPLACE function public.get_log_rcdf(
inout p_log_rcd public.job_log)
returns public.job_log
LANGUAGE 'plpgsql'
as
$BODY$
declare
v_log_id integer = 40;
v_event_id integer = 698;
v_job_id integer = 45;
v_rows_affected integer = 60;
begin
select
v_log_id
, v_event_id
, v_job_id
, v_rows_affected
into
p_log_rcd.log_id,
p_log_rcd.event_id,
p_log_rcd.job_id,
p_log_rcd.rows_affected;
end;
$BODY$
In fact, the change to a function required the addition of a RETURNS line. Done. Then, the dynamic call was tweaked to a SELECT and the execute modified with an INTO:
do
$$
declare
big_local public.job_log;
v_query_text varchar;
v_job_name varchar = 'public.get_log_rcdf';
begin
select 'select * from ' || v_job_name || '( $1 );'
into v_query_text;
raise info 'SQL text is: %', v_query_text;
execute v_query_text into big_local using big_local;
insert into tmp_log_cache (
log_id
, event_id
, job_id
, rows_affected )
values (
big_local.log_id
, big_local.event_id
, big_local.job_id
, big_local.rows_affected);
end;
$$;
and the process now works exactly as desired. I'll tidy up my handling of the dynamic function name as illustrated in the first answer, and I think we're done here.
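Given the values hard-coded in get_log_rcdf above, a quick check of the temp table confirms the row came through:
select * from tmp_log_cache;
-- log_id | event_id | job_id | rows_affected
--     40 |      698 |     45 |            60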

Best way to remove ordered sequential duplicates in a comma separated list with PostgreSQL

I have a column with data that looks like this in a single field:
"a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b"
Using some sort of regex or SQL function I would like to make it look like this:
"a,b,c,a,b,a,c,a,b"
Essentially I am trying to get rid of repeated values that appear in order but keep the unique changes from one value to another.
My knowledge of regular expressions pretty much ends at removing duplicates. Any help is greatly appreciated!
Use a regexp:
SELECT regexp_replace('a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b', '(\w)(,\1)+', '\1', 'g')
(\w)(,\1)+ matches (any word character) followed by (a comma and that same character) one or more times, so each run of repeats collapses to a single character; the query returns 'a,b,c,a,b,a,c,a,b'.
Fiddle example
RegExr example
You can convert the elements into rows, check if the previous row is different to the current and then keep only those where something changed. This can then be aggregated back into a comma separated list:
select string_agg(ch, ',' order by idx)
from (
select u.ch, u.idx,
coalesce(u.ch <> lag(u.ch) over (order by u.idx), true) as is_change
from unnest(string_to_array('a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b', ',')) with ordinality as u(ch, idx)
) t
where is_change
The with ordinality returns the original array index, so that we can sort the elements correctly when aggregating them.
This can also be put into a function:
create or replace function cleanup(p_input text)
returns text
as
$$
select string_agg(ch, ',' order by idx)
from (
select u.ch, u.idx,
coalesce(u.ch <> lag(u.ch) over (order by u.idx), true) as is_change
from unnest(string_to_array(p_input, ',')) with ordinality as u(ch, idx)
) t
where is_change;
$$
language sql;
Online example
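Calling the function shows the expected result:
select cleanup('a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b');
-- returns: a,b,c,a,b,a,c,a,b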
My understanding is:
if the character is the same as the previous character, you want to remove it from the string.
So I will use a while loop and an if statement in this case:
--CREATE TABLE TEST (ID VARCHAR(100));
--INSERT INTO TEST VALUES ('a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b');
DO $$
DECLARE
V_NEWSTRING VARCHAR(100) := '';
V_I INTEGER := 1;
V_LENGTH INTEGER := 0;
V_CURRENT VARCHAR(10) := '';
V_LAST VARCHAR(10) := '';
BEGIN
SELECT LENGTH(ID) FROM TEST INTO V_LENGTH;
WHILE V_I <= V_LENGTH LOOP
SELECT SUBSTRING(ID,V_I,1) from TEST INTO V_CURRENT;
IF V_CURRENT <> V_LAST THEN
V_NEWSTRING = V_NEWSTRING || V_CURRENT || ',';
END IF;
V_LAST = V_CURRENT;
V_I = V_I + 2;
END LOOP;
raise notice 'Value: %', V_NEWSTRING;
END $$;
Test result (PostgreSQL 9.4): the notice prints Value: a,b,c,a,b,a,c,a,b, (note the trailing comma this approach leaves behind).

Extract integer value from string column with additional text

I'm converting a BDE query (Paradox) to Firebird (2.5, not 3.x), and I have a very convenient conversion in it:
select TRIM(' 1') as order1, CAST(' 1' AS INTEGER) AS order2 --> 1
select TRIM(' 1 bis') as order1, CAST(' 1 bis' AS INTEGER) AS order2 --> 1
Then ordering by the cast value and then by the trimmed value (ORDER BY order2, order1) provides the result I need:
1
1 bis
2 ter
100
101 bis
However, in Firebird casting an invalid integer raises an exception, and I did not find any way around it that provides the same result. I think I can tell whether a number is present with something like the pattern below, but I couldn't find a way to extract it.
TRIM(' 1 bis') similar to '[ [:ALPHA:]]*[[:DIGIT:]]+[ [:ALPHA:]]*'
[EDIT]
I had to handle cases where text appears before the number, so using @Arioch'The's trigger, I got this running great:
SET TERM ^ ;
CREATE TRIGGER SET_MYTABLE_INTVALUE FOR MYTABLE ACTIVE
BEFORE UPDATE OR INSERT POSITION 0
AS
    DECLARE I INTEGER;
    DECLARE S VARCHAR(13);
    DECLARE C VARCHAR(1);
    DECLARE R VARCHAR(13);
BEGIN
    IF (NEW.INTVALUE IS NOT NULL) THEN EXIT;
    S = TRIM( NEW.VALUE );
    R = NULL;
    I = 1;
    -- skip leading non-digit characters
    WHILE (I <= CHAR_LENGTH(S)) DO
    BEGIN
        C = SUBSTRING( S FROM I FOR 1 );
        IF ((C >= '0') AND (C <= '9')) THEN LEAVE;
        I = I + 1;
    END
    -- copy consecutive digits into R
    WHILE (I <= CHAR_LENGTH(S)) DO
    BEGIN
        C = SUBSTRING( S FROM I FOR 1 );
        IF (C < '0') THEN LEAVE;
        IF (C > '9') THEN LEAVE;
        IF (C IS NULL) THEN LEAVE;
        IF (R IS NULL) THEN R = C; ELSE R = R || C;
        I = I + 1;
    END
    NEW.INTVALUE = CAST(R AS INTEGER);
END^
SET TERM ; ^
When converting such a table, you have to add a special indexed integer column to hold the extracted integer data.
Note that this query, while using a "very convenient conversion", is actually rather bad: you should use indexed columns to sort (order) large amounts of data, otherwise you get slow execution and waste a lot of memory/disk on temporary sorting tables.
So you have to add an extra indexed integer column and use it in the query.
The next question is how to populate that column.
It would be best to do it once, when you move your entire database and application from BDE to Firebird, and from that point on have your application fill BOTH the varchar and integer columns properly when entering new data rows.
The one-time conversion can then be done by your converter application.
Alternatively, you can use a selectable stored procedure that returns the table with such a column added, or an EXECUTE BLOCK that iterates through the table and updates its rows, calculating the said integer value.
How to SELECT a PROCEDURE in Firebird 2.5
If you need to keep legacy applications that only insert the text column but not the integer column, then I think you have to use BEFORE UPDATE OR INSERT triggers in Firebird that parse the text column value letter by letter and extract the integer from it, and then make sure your application never changes that integer column directly.
See a trigger example at Trigger on Update Firebird
PSQL language documentation: https://www.firebirdsql.org/file/documentation/reference_manuals/fblangref25-en/html/fblangref25-psql.html
Whether you write a procedure or a trigger to populate the added indexed integer column, you have to make a simple loop over the characters, copying the string from the first digit until the first non-digit.
https://www.firebirdsql.org/file/documentation/reference_manuals/fblangref25-en/html/fblangref25-functions-scalarfuncs.html#fblangref25-functions-string
https://www.firebirdsql.org/file/documentation/reference_manuals/fblangref25-en/html/fblangref25-psql-coding.html#fblangref25-psql-declare-variable
Something like this:
CREATE TRIGGER my_trigger FOR my_table
BEFORE UPDATE OR INSERT
AS
    DECLARE I INTEGER;
    DECLARE S VARCHAR(100);
    DECLARE C VARCHAR(100);
    DECLARE R VARCHAR(100);
BEGIN
    S = TRIM( NEW.MY_TXT_COLUMN );
    R = NULL;
    I = 1;
    WHILE (I <= CHAR_LENGTH(S)) DO
    BEGIN
        C = SUBSTRING( S FROM I FOR 1 );
        IF (C < '0') THEN LEAVE;
        IF (C > '9') THEN LEAVE;
        IF (C IS NULL) THEN LEAVE;
        IF (R IS NULL) THEN R = C; ELSE R = R || C;
        I = I + 1;
    END
    NEW.MY_INT_COLUMN = CAST(R AS INTEGER);
END;
In this example your ORDER order2, order1 would become
SELECT ..... FROM my_table ORDER BY MY_INT_COLUMN, MY_TXT_COLUMN
Additionally, it seems your column actually contains compound data: an integer index and an optional textual postfix. If so, then the data is not normalized and the table had better be restructured.
CREATE TABLE my_table (
ORDER_Int INTEGER NOT NULL,
ORDER_PostFix VARCHAR(24) CHECK( ORDER_PostFix = TRIM(ORDER_PostFix) ),
......
ORDER_TXT COMPUTED BY (ORDER_INT || COALESCE( ' ' || ORDER_PostFix, '' )),
PRIMARY KEY (ORDER_Int, ORDER_PostFix )
);
When you would move your data from Paradox to Firebird - make your convertor application check and split those values like "1 bis" into two new columns.
And your query then would be like
SELECT ORDER_TXT, ... FROM my_table ORDER BY ORDER_Int, ORDER_PostFix
If you're using FB 2.5 you can use the following:
execute block (txt varchar(100) = :txt)
returns (res integer)
as
    declare i integer;
begin
    i = 1;
    while (i <= char_length(:txt)) do begin
        if (substring(:txt from i for 1) not similar to '[[:DIGIT:]]')
        then txt = replace(:txt, substring(:txt from i for 1), '');
        else i = i + 1;
    end
    res = :txt;
    suspend;
end
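For example, with txt = ' 1 bis' the loop deletes every non-digit character (each replace() removes all occurrences of that character), leaving '1', so the block returns res = 1; '101 bis' yields 101.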
In FB 3.0 you have a more convenient way to do the same:
select
cast(substring(:txt||'#' similar '%#"[[:DIGIT:]]+#"%' escape '#') as integer)
from rdb$database
-- assuming that the field is varchar(15)
select cast(field as integer) from table;
This worked in Firebird version 2.5.

Generic trigger to restrict insertions based on count

Background
In a PostgreSQL 9.0 database, there are various tables that have many-to-many relationships. The number of those relationships must be restricted. A couple of example tables include:
CREATE TABLE authentication (
id bigserial NOT NULL, -- Primary key
cookie character varying(64) NOT NULL, -- Authenticates the user with a cookie
ip_address character varying(40) NOT NULL -- Device IP address (IPv6-friendly)
)
CREATE TABLE tag_comment (
id bigserial NOT NULL, -- Primary key
comment_id bigint, -- Foreign key to the comment table
tag_name_id bigint -- Foreign key to the tag name table
)
Different relationships, however, have different limitations. For example, in the authentication table, a given ip_address is allowed 1024 cookie values; whereas, in the tag_comment table, each comment_id can have 10 associated tag_name_ids.
Problem
Currently, a number of functions have these restrictions hard-coded, scattering the limitations throughout the database and preventing them from being changed dynamically.
Question
How would you impose a maximum many-to-many relationship limit on tables in a generic fashion?
Idea
Create a table to track the limits:
CREATE TABLE imposed_maximums (
id serial NOT NULL,
table_name character varying(128) NOT NULL,
column_group character varying(128) NOT NULL,
column_count character varying(128) NOT NULL,
max_size INTEGER
)
Establish the restrictions:
INSERT INTO imposed_maximums
(table_name, column_group, column_count, max_size) VALUES
('authentication', 'ip_address', 'cookie', 1024);
INSERT INTO imposed_maximums
(table_name, column_group, column_count, max_size) VALUES
('tag_comment', 'comment_id', 'tag_id', 10);
Create a trigger function:
CREATE OR REPLACE FUNCTION impose_maximum()
RETURNS trigger AS
$BODY$
BEGIN
-- Join this up with imposed_maximums somehow?
select
count(1)
from
-- the table name
where
-- the group column = NEW value to INSERT;
RETURN NEW;
END;
Attach the trigger to every table:
CREATE TRIGGER trigger_authentication_impose_maximum
BEFORE INSERT
ON authentication
FOR EACH ROW
EXECUTE PROCEDURE impose_maximum();
Obviously it won't work as written... is there a way to make it work, or otherwise enforce the restrictions such that they are:
in a single location; and
not hard-coded?
Thank you!
I've been writing similar kinds of generic triggers.
The trickiest part is getting the value of the NEW record based on the column name, and I'm doing it the following way:
convert the NEW data into an array;
find the attnum of the column and use it as an index into the array.
This approach works as long as there are no commas in the data :( I don't know of any other way to convert the NEW or OLD variables into an array of values.
The following function might help:
CREATE OR REPLACE FUNCTION impose_maximum() RETURNS trigger AS $impose_maximum$
DECLARE
_sql text;
_cnt int8;
_vals text[];
_anum int4;
_im record;
BEGIN
_vals := string_to_array(translate(trim(NEW::text), '()', ''), ',');
FOR _im IN SELECT * FROM imposed_maximums WHERE table_name = TG_TABLE_NAME LOOP
SELECT attnum INTO _anum FROM pg_catalog.pg_attribute a
JOIN pg_catalog.pg_class t ON t.oid = a.attrelid
WHERE t.relkind = 'r' AND t.relname = TG_TABLE_NAME
AND NOT a.attisdropped AND a.attname = _im.column_group;
_sql := 'SELECT count('||quote_ident(_im.column_count)||')'||
' FROM '||quote_ident(_im.table_name)||
' WHERE '||quote_ident(_im.column_group)||' = $1';
EXECUTE _sql INTO _cnt USING _vals[_anum];
IF _cnt > CAST(_im.max_size AS int8) THEN
RAISE EXCEPTION 'Maximum of % hit for column % in table %(%=%)',
_im.max_size, _im.column_count,
_im.table_name, _im.column_group, _vals[_anum];
END IF;
END LOOP;
RETURN NEW;
END; $impose_maximum$ LANGUAGE plpgsql;
This function will check for all conditions defined for a given table.
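A sketch of an alternative (not from the original answer): if the hstore extension is available, hstore(NEW) maps column names to values directly, avoiding both the comma-splitting fragility and the attnum lookup:
CREATE EXTENSION IF NOT EXISTS hstore;

CREATE OR REPLACE FUNCTION impose_maximum_hstore() RETURNS trigger AS $impose_maximum_hstore$
DECLARE
  _cnt int8;
  _val text;
  _im  record;
BEGIN
  FOR _im IN SELECT * FROM imposed_maximums WHERE table_name = TG_TABLE_NAME LOOP
    _val := hstore(NEW) -> _im.column_group;   -- value looked up by column name
    -- compare the column as text so any column type works with the text parameter
    EXECUTE 'SELECT count('||quote_ident(_im.column_count)||')'||
            ' FROM '||quote_ident(_im.table_name)||
            ' WHERE '||quote_ident(_im.column_group)||'::text = $1'
       INTO _cnt USING _val;
    IF _cnt > _im.max_size THEN
      RAISE EXCEPTION 'Maximum of % hit for column % in table %(%=%)',
        _im.max_size, _im.column_count,
        _im.table_name, _im.column_group, _val;
    END IF;
  END LOOP;
  RETURN NEW;
END; $impose_maximum_hstore$ LANGUAGE plpgsql;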
Yes, there is a way to make it work.
In my personal opinion your idea is the way to go. It just needs one level of "meta". So, the table imposed_restrictions should have trigger(s), which is (are) fired after insert, update and delete. The code should then in turn create, modify or remove triggers and functions.
Take a look at execute statement of PL/pgSQL, which - essentially - allows you to execute any string. Needless to say, this string may contain definitions of triggers, functions, etc. Obviously, you have the access to OLD and NEW in the triggers, so you can fill in the placeholders in the string and you are done.
I believe you should be able to accomplish what you want with this answer. Please note that this is my personal view on the topic and it might not be an optimal solution - I would like to see a different, maybe also more efficient, approach.
Edit - Below is a sample from one of my old projects. It is located inside the function that is triggered before update (though now that I think of it, maybe it should have been called after ;) And yes, the code is messy, as it does not use the nice $escape$ syntax. I was really, really young then. Nonetheless, the snippet demonstrates that it is possible to achieve what you want.
query:=''CREATE FUNCTION '' || NEW.function_name || ''('';
IF NEW.parameter=''t'' THEN
query:=query || ''integer'';
END IF;
query:=query || '') RETURNS setof '' || type_name || '' AS'' || chr(39);
query:=query || '' DECLARE list '' || type_name || ''; '';
query:=query || ''BEGIN '';
query:=query || '' FOR list IN EXECUTE '' || chr(39) || chr(39);
query:=query || temp_s || '' FROM '' || NEW.table_name;
IF NEW.parameter=''t'' THEN
query:=query || '' WHERE id='' || chr(39) || chr(39) || ''||'' || chr(36) || ''1'';
ELSE
query:=query || '';'' || chr(39) || chr(39);
END IF;
query:=query || '' LOOP RETURN NEXT list; '';
query:=query || ''END LOOP; RETURN; END; '' || chr(39);
query:=query || ''LANGUAGE '' || chr(39) || ''plpgsql'' || chr(39) || '';'';
EXECUTE query;
This function + trigger could be used as a template. If you combine them with @Sorrow's technique of dynamically generating the functions + triggers, this could solve the OP's problem.
Please note that, instead of recalculating the count for every affected row (by calling the COUNT() aggregate function), I maintain an 'incremental' count. This should be cheaper.
DROP SCHEMA tmp CASCADE;
CREATE SCHEMA tmp ;
SET search_path='tmp';
CREATE TABLE authentication
( id bigserial NOT NULL -- Primary key
, cookie varchar(64) NOT NULL -- Authenticates the user with a cookie
, ip_address varchar(40) NOT NULL -- Device IP address (IPv6-friendly)
, PRIMARY KEY (ip_address, cookie)
);
CREATE TABLE authentication_ip_count (
ip_address character varying(40) NOT NULL
PRIMARY KEY -- REFERENCES authentication(ip_address)
, refcnt INTEGER NOT NULL DEFAULT 0
--
-- This is much easier:
-- keep the max value inside the table
-- + use a table constraint
-- , maxcnt INTEGER NOT NULL DEFAULT 2 -- actually 100
-- , CONSTRAINT no_more_cookies CHECK (refcnt <= maxcnt)
);
CREATE TABLE imposed_maxima
( id serial NOT NULL
, table_name varchar NOT NULL
, column_group varchar NOT NULL
, column_count varchar NOT NULL
, max_size INTEGER NOT NULL
, PRIMARY KEY (table_name,column_group,column_count)
);
INSERT INTO imposed_maxima(table_name,column_group,column_count,max_size)
VALUES('authentication','ip_address','cookie', 2);
CREATE OR REPLACE FUNCTION authentication_impose_maximum()
RETURNS trigger AS
$BODY$
DECLARE
dummy INTEGER;
BEGIN
IF (TG_OP = 'INSERT') THEN
INSERT INTO authentication_ip_count (ip_address)
SELECT sq.*
FROM ( SELECT NEW.ip_address) sq
WHERE NOT EXISTS (
SELECT *
FROM authentication_ip_count nx
WHERE nx.ip_address = sq.ip_address
);
UPDATE authentication_ip_count
SET refcnt = refcnt + 1
WHERE ip_address = NEW.ip_address
;
SELECT COUNT(*) into dummy -- ac.refcnt, mx.max_size
FROM authentication_ip_count ac
JOIN imposed_maxima mx ON (1=1) -- outer join
WHERE ac.ip_address = NEW.ip_address
AND mx.table_name = 'authentication'
AND mx.column_group = 'ip_address'
AND mx.column_count = 'cookie'
AND ac.refcnt > mx.max_size
;
IF FOUND AND dummy > 0 THEN
RAISE EXCEPTION 'Cookie monster detected';
END IF;
ELSIF (TG_OP = 'DELETE') THEN
UPDATE authentication_ip_count
SET refcnt = refcnt - 1
WHERE ip_address = OLD.ip_address
;
DELETE FROM authentication_ip_count ac
WHERE ac.ip_address = OLD.ip_address
AND ac.refcnt <= 0
;
-- ELSIF (TG_OP = 'UPDATE') THEN
-- (Only needed if we allow updates of ip-address)
-- otherwise the count stays the same.
END IF;
-- a BEFORE DELETE trigger must return OLD; returning NEW (which is NULL
-- on DELETE) would silently cancel the delete
IF (TG_OP = 'DELETE') THEN
RETURN OLD;
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql;
CREATE TRIGGER trigger_authentication_impose_maximum
BEFORE INSERT OR UPDATE OR DELETE
ON authentication
FOR EACH ROW
EXECUTE PROCEDURE authentication_impose_maximum();
-- Test it ...
INSERT INTO authentication(ip_address, cookie) VALUES ('1.2.3.4', 'Some koekje' );
INSERT INTO authentication(ip_address, cookie) VALUES ('1.2.3.4', 'kaakje' );
INSERT INTO authentication(ip_address, cookie) VALUES ('1.2.3.4', 'Yet another cookie' );
RESULTS:
INSERT 0 1
CREATE FUNCTION
CREATE TRIGGER
INSERT 0 1
INSERT 0 1
ERROR: Cookie monster detected