Trigger is working but no result - PostgreSQL

CREATE or replace FUNCTION billtesting() RETURNS trigger AS
$$
BEGIN
if (destination_number ~ '^(?:[0-7] ?){6,14}[0-9]$' and destination_number !~ '15487498')
then
insert into isp_cdr(destination_number,caller_id,duration,billsec)
values (destination_number,caller_id,duration,billsec);
UPDATE isp_cdr
SET nibble_total_billed = billsec * user_rate;
end if;
RETURN NEW;
END
$$
LANGUAGE plpgsql;
CREATE TRIGGER bill_testing_update
BEFORE UPDATE
ON isp_cdr
FOR EACH ROW
EXECUTE PROCEDURE billtesting();
This is the query for testing
insert into isp_cdr(destination_number,caller_id,duration,billsec)
values ('012687512123','123125641','43','35');
This is the sample lcr table information
digits user_rate
1 0.02
23 0.07
652 0.12
1123 0.28
87521 0.15
123161 0.54
9641231 1.20
65491641 0.89
This is the sample isp_cdr table information
destination_number caller_id duration billsec nibble_total_billed
123561231 1315142 67 58 0
With this trigger procedure I want to match digits against destination_number and use the corresponding user_rate for the calculation:
user_rate * billsec = nibble_total_billed
But after running the test insert, the calculation does not work; nothing shows up in nibble_total_billed. What I think is that my pattern-matching part has some mistake; I need to match digits against the destination_number.
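For what it's worth, here is a hedged sketch (not the original poster's code) of one way to do what is described: look up the longest matching prefix from the rate table and set nibble_total_billed on the row being inserted, in a BEFORE INSERT trigger, instead of running an unfiltered UPDATE afterwards. The rate table name lcr and the column types are assumptions based on the samples above.
CREATE OR REPLACE FUNCTION billtesting() RETURNS trigger AS
$$
DECLARE
    v_rate numeric;
BEGIN
    -- Longest-prefix match of lcr.digits against the new destination_number
    -- (assumes digits is text or can be cast to text).
    SELECT user_rate
    INTO   v_rate
    FROM   lcr
    WHERE  NEW.destination_number LIKE digits::text || '%'
    ORDER  BY length(digits::text) DESC
    LIMIT  1;

    IF v_rate IS NOT NULL AND NEW.destination_number !~ '15487498' THEN
        -- assumes billsec is a numeric type
        NEW.nibble_total_billed := NEW.billsec * v_rate;
    END IF;

    RETURN NEW;
END
$$
LANGUAGE plpgsql;

-- Fire on INSERT, so the test INSERT below is covered:
CREATE TRIGGER bill_testing_insert
BEFORE INSERT ON isp_cdr
FOR EACH ROW
EXECUTE PROCEDURE billtesting();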


Other way to cast varbit to int? And bigint?

This function is a workaround ... is there nothing with better performance?
CREATE or REPLACE FUNCTION varbit_to_int(v varbit) RETURNS int AS $f$
SELECT CASE bit_length(v)
WHEN 1 THEN v::bit(1)::int
WHEN 2 THEN v::bit(2)::int
WHEN 3 THEN v::bit(3)::int
...
WHEN 30 THEN v::bit(30)::int
WHEN 31 THEN v::bit(31)::int
WHEN 32 THEN v::bit(32)::int
ELSE NULL::int
END
$f$ LANGUAGE SQL IMMUTABLE;
Same problem for bigint:
CREATE or replace FUNCTION varbit_to_bigint(p varbit) RETURNS bigint AS $f$
SELECT CASE bit_length($1)
WHEN 1 THEN $1::bit(1)::bigint
WHEN 2 THEN $1::bit(2)::bigint
WHEN 3 THEN $1::bit(3)::bigint
...
WHEN 62 THEN $1::bit(62)::bigint
WHEN 63 THEN $1::bit(63)::bigint
WHEN 64 THEN $1::bit(64)::bigint
ELSE NULL::bigint
END
$f$ LANGUAGE SQL IMMUTABLE STRICT;
Using it many times in loops seems CPU-wasteful, only to avoid the "cannot cast type bit varying to integer" error. Maybe an external C-language library does this and other useful casts?
Note: select b'101'::bit(64)::bigint != b'101'::bigint;
I tested a couple of variants (for bigint only) with built-in functionality and this variant with OVERLAY() turned out fastest in my local tests on Postgres 11:
CREATE OR REPLACE FUNCTION varbit2bigint2(b varbit)
RETURNS bigint AS
$func$
SELECT OVERLAY(bit(64) '0' PLACING b FROM 65 - bit_length(b))::bigint
$func$ LANGUAGE SQL IMMUTABLE;
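For illustration (my own quick check, not from the original answer), the expected behaviour:
SELECT varbit2bigint2(B'101'::varbit);      -- 5
SELECT varbit2bigint2(B'11111111'::varbit); -- 255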
Other candidates:
Note the different conversion of empty bitstrings ('') to 0 vs. NULL. Adapt to your needs!
Your function:
CREATE OR REPLACE FUNCTION varbit2bigint1(b varbit)
RETURNS bigint AS
$func$
SELECT CASE bit_length($1)
WHEN 1 THEN $1::bit(1)::bigint
WHEN 2 THEN $1::bit(2)::bigint
WHEN 3 THEN $1::bit(3)::bigint
WHEN 4 THEN $1::bit(4)::bigint
WHEN 5 THEN $1::bit(5)::bigint
WHEN 6 THEN $1::bit(6)::bigint
WHEN 7 THEN $1::bit(7)::bigint
WHEN 8 THEN $1::bit(8)::bigint
WHEN 9 THEN $1::bit(9)::bigint
WHEN 10 THEN $1::bit(10)::bigint
WHEN 11 THEN $1::bit(11)::bigint
WHEN 12 THEN $1::bit(12)::bigint
WHEN 13 THEN $1::bit(13)::bigint
WHEN 14 THEN $1::bit(14)::bigint
WHEN 15 THEN $1::bit(15)::bigint
WHEN 16 THEN $1::bit(16)::bigint
WHEN 17 THEN $1::bit(17)::bigint
WHEN 18 THEN $1::bit(18)::bigint
WHEN 19 THEN $1::bit(19)::bigint
WHEN 20 THEN $1::bit(20)::bigint
WHEN 21 THEN $1::bit(21)::bigint
WHEN 22 THEN $1::bit(22)::bigint
WHEN 23 THEN $1::bit(23)::bigint
WHEN 24 THEN $1::bit(24)::bigint
WHEN 25 THEN $1::bit(25)::bigint
WHEN 26 THEN $1::bit(26)::bigint
WHEN 27 THEN $1::bit(27)::bigint
WHEN 28 THEN $1::bit(28)::bigint
WHEN 29 THEN $1::bit(29)::bigint
WHEN 30 THEN $1::bit(30)::bigint
WHEN 31 THEN $1::bit(31)::bigint
WHEN 32 THEN $1::bit(32)::bigint
WHEN 33 THEN $1::bit(33)::bigint
WHEN 34 THEN $1::bit(34)::bigint
WHEN 35 THEN $1::bit(35)::bigint
WHEN 36 THEN $1::bit(36)::bigint
WHEN 37 THEN $1::bit(37)::bigint
WHEN 38 THEN $1::bit(38)::bigint
WHEN 39 THEN $1::bit(39)::bigint
WHEN 40 THEN $1::bit(40)::bigint
WHEN 41 THEN $1::bit(41)::bigint
WHEN 42 THEN $1::bit(42)::bigint
WHEN 43 THEN $1::bit(43)::bigint
WHEN 44 THEN $1::bit(44)::bigint
WHEN 45 THEN $1::bit(45)::bigint
WHEN 46 THEN $1::bit(46)::bigint
WHEN 47 THEN $1::bit(47)::bigint
WHEN 48 THEN $1::bit(48)::bigint
WHEN 49 THEN $1::bit(49)::bigint
WHEN 50 THEN $1::bit(50)::bigint
WHEN 51 THEN $1::bit(51)::bigint
WHEN 52 THEN $1::bit(52)::bigint
WHEN 53 THEN $1::bit(53)::bigint
WHEN 54 THEN $1::bit(54)::bigint
WHEN 55 THEN $1::bit(55)::bigint
WHEN 56 THEN $1::bit(56)::bigint
WHEN 57 THEN $1::bit(57)::bigint
WHEN 58 THEN $1::bit(58)::bigint
WHEN 59 THEN $1::bit(59)::bigint
WHEN 60 THEN $1::bit(60)::bigint
WHEN 61 THEN $1::bit(61)::bigint
WHEN 62 THEN $1::bit(62)::bigint
WHEN 63 THEN $1::bit(63)::bigint
WHEN 64 THEN $1::bit(64)::bigint
ELSE NULL::bigint
END
$func$ LANGUAGE SQL IMMUTABLE; -- no STRICT modifier
Left-padding the text representation with '0':
CREATE OR REPLACE FUNCTION pg_temp.varbit2bigint3(b varbit)
RETURNS bigint AS
$func$
SELECT lpad(b::text, 64, '0')::bit(64)::bigint
$func$ LANGUAGE SQL IMMUTABLE;
Bit-shifting before the cast:
CREATE OR REPLACE FUNCTION varbit2bigint4(b varbit)
RETURNS bigint AS
$func$
SELECT (bit(64) '0' || b << bit_length(b))::bit(64)::bigint
$func$ LANGUAGE SQL IMMUTABLE;
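To see why the shift trick works, a small illustration (mine, not from the original answer), using bit(8) instead of bit(64) so the values stay readable:
SELECT B'0'::bit(8) || B'101'                     AS concatenated  -- 00000000101
     , (B'0'::bit(8) || B'101' << 3)::bit(8)      AS shifted       -- 00000101
     , (B'0'::bit(8) || B'101' << 3)::bit(8)::int AS value;        -- 5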
db<>fiddle here
Related:
Postgresql Convert bit varying to integer
Your feedback
It is not worse, it is faster!
EXPLAIN ANALYZE select
varbit_to_bigint(osm_id::bit(64)::varbit)
from planet_osm_point limit 10000 ;
-- Planning time: 0.697 ms
-- Execution time: 1133.571 ms
EXPLAIN ANALYZE select
lpad(osm_id::bit(64)::varbit::text, 32, '0')::bit(64)::bigint
from planet_osm_point limit 10000;
-- Planning time: 0.105 ms
-- Execution time: 26.429 ms
You show a STRICT modifier with the bigint variant of the function in the question (not sure why it differs from the integer variant). If that represents the function you actually tested, I expect most of the observed performance difference is due to that added STRICT modifier preventing function inlining. Quoting the Postgres Wiki:
if the function is declared STRICT, then the planner must be able to
prove that the body expression necessarily returns NULL if any
parameter is null. At present, this condition is only satisfied if:
every parameter is referenced at least once, and all functions,
operators and other constructs used in the body are themselves STRICT.
That seems to hurt your function badly - while my winner seems unaffected, and the other two variants are even ~ 10 % faster. Same fiddle with STRICT functions:
db<>fiddle here
Related:
Function executes faster without STRICT modifier?
I suggest you re-test with and without STRICT modifier to see for yourself.
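For example, a sketch of such a re-test (assuming you create a second copy of your CASE-based function, here called varbit_to_bigint_nostrict, identical except for the missing STRICT modifier), reusing the query from your feedback:
EXPLAIN ANALYZE
SELECT varbit_to_bigint(osm_id::bit(64)::varbit)           -- declared STRICT
FROM   planet_osm_point LIMIT 10000;

EXPLAIN ANALYZE
SELECT varbit_to_bigint_nostrict(osm_id::bit(64)::varbit)  -- same body, no STRICT
FROM   planet_osm_point LIMIT 10000;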

How can I write an iterative function that bases the current row output off of the prior row output?

I need to determine whether my current row value is positive or negative, which is a function of a starting value, scheduled increases, and a daily decrement (which differs depending on whether the prior day's output value was positive or negative).
I only know my starting number for day 1, my schedule of increases, and my decrement values for the positive and negative cases.
If "Prior Day Output" + "Today's scheduled increase" is positive, then the result is "Prior Day Output" + "Today's scheduled increase" - 2 (the decrement value).
If "Prior Day Output" + "Today's scheduled increase" is negative, then the result is "Prior Day Output" + "Today's scheduled increase" - 1 (the decrement value).
I haven't tried anything, as I can't think of an algebraic way to perform this. I'm new to iterative functions and loops.
Here is the data I have to start with:
Here is what I want to end with:
I believe I have a solution for you.
Note: You have a stipulation saying that if start_val is positive (>= 0) the decrement should be set to 2, but on Day 11 of your example output you have the decrement set to 1 where start_val + increase = 0.
This solution will match your example output which considers 0 to be negative. That is easily changeable in the segment that sets new_dec. Just move the = to the appropriate location.
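For reference, the function below works against a table I will call test; its columns are taken from the code, but the exact types are my assumption, since the original data is not included here:
-- Assumed shape of the test table (types are guesses):
CREATE TABLE test (
    id        integer PRIMARY KEY,
    start_val integer,   -- known only for day 1, NULL afterwards
    increase  integer,   -- scheduled increase for that day
    decrement integer,   -- to be filled in (1 or 2)
    end_val   integer    -- to be filled in
);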
CREATE OR REPLACE FUNCTION update_vals()
RETURNS SETOF test
AS
$$
DECLARE
new_dec integer;
new_end integer;
new_start integer;
rec record;
BEGIN
FOR rec IN
SELECT * FROM test
LOOP
new_start := NULL::integer;
IF rec.start_val IS NULL
THEN
SELECT end_val
INTO new_start
FROM
(
SELECT MAX(id) last_id FROM test WHERE id < rec.id
) a
JOIN test ON id = a.last_id
;
END IF;
IF COALESCE(rec.start_val, new_start) + rec.increase > 0
THEN
new_dec := 2;
ELSIF COALESCE(rec.start_val, new_start) + rec.increase <= 0
THEN
new_dec := 1;
END IF;
new_end := COALESCE(rec.start_val, new_start) + rec.increase - new_dec;
IF new_start IS NOT NULL
THEN
RETURN QUERY
UPDATE test
SET (start_val, decrement, end_val) = (new_start, new_dec, new_end)
WHERE id = rec.id
RETURNING *
;
ELSE
RETURN QUERY
UPDATE test
SET (decrement, end_val) = (new_dec, new_end)
WHERE id = rec.id
RETURNING *
;
END IF;
END LOOP;
END;
$$ LANGUAGE PLPGSQL;
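Calling it is then just (the function both updates test and returns the updated rows):
SELECT * FROM update_vals();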
Here is a db-fiddle to show a working example.

SP/View for daily aggregation of 3-hour values

I'm trying to group 3-hour forecast values into a daily table. The problem is that I need to apply non-standard group operations to the values. I attach an example (provided by OpenWeather).
time temp press desc w_sp w_dir
"2017-12-20 00:00:00" -4.49 1023.42 "clear" 1.21 198.501
"2017-12-20 03:00:00" -2.51 1023.63 "clouds" 1.22 180.501
"2017-12-20 06:00:00" -0.07 1024.43 "clouds" 1.53 169.503
"2017-12-20 09:00:00" 0.57 1024.83 "snow" 1.77 138.502
"2017-12-20 12:00:00" 0.95 1024.41 "snow" 1.61 271.001
"2017-12-20 15:00:00" -0.47 1024.17 "snow" 0.61 27.5019
"2017-12-20 18:00:00" -2.52 1024.52 "clear" 1.16 13.0007
"2017-12-20 21:00:00" -2.63 1024.73 "clear" 1.07 131.504
In my case I have to derive the overall daily weather description from a mix of the top 2 most frequent labels, and for wind direction I cannot simply AVG the 8 values; I have to apply a specific formula.
I'm familiar with SQL but not so much with Postgres stored procedures. I think I need something like a cursor, but I'm a bit lost here. I'm sure this can be achieved in many ways; I'm just asking you to point me down the path. So far I have a draft of a stored procedure, but I'm a bit clueless:
CREATE FUNCTION meteo_forecast_daily ()
RETURNS TABLE (
forecasting_date DATE,
temperature NUMERIC,
pressure NUMERIC,
description VARCHAR(20),
w_speed NUMERIC,
w_dir NUMERIC
)
AS $$
DECLARE
clouds INTEGER;
snow INTEGER;
clear INTEGER;
rain INTEGER;
thunderstorm INTEGER;
BEGIN
RETURN QUERY SELECT
m.forecasting_time::date as forecasting_date,
avg(m.temperature) as temperature
avg(m.pressure) as pressure
description???
avg(m.w_sp) as w_speed
w_dir????
FROM
meteo_forecast_last_update m
WHERE
forecasting_time > now()
group by forecasting_date;
END; $$
LANGUAGE 'plpgsql';
Thus my question is: how can I retrieve the 8 elements for each date and process them somehow separately?
Desired result:
time temp press desc w_sp w_dir
"2017-12-20" -4.49 1023.42 "clear,clouds,rain,..." 1.21 (198.501, 212.23..)
"2017-12-21" -4.49 1023.42 "rain,snow,rain,..." 1.45 (211.501, 112.26..)
"2017-12-22" -4.49 1023.42 "clear,clouds,rain,..." 1.89 (156.501, 312.53..)
Thanks in advance and happy new year :)
You should be able to achieve this with:
SELECT m.forecasting_time::date AS forecasting_date,
AVG(m.temperature) as temperature,
AVG(m.pressure) as pressure,
STRING_AGG(DISTINCT m.description, ',') AS description,
AVG(m.w_sp) as w_speed,
ARRAY_AGG(m.w_dir) AS w_dir
FROM meteo_forecast_last_update m
WHERE m.forecasting_time > now()
GROUP BY 1 ORDER BY 1;
You can use DISTINCT inside an aggregate function; it applies the aggregate only to distinct values.
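The STRING_AGG(DISTINCT ...) above concatenates every distinct label per day. If you really want only the top 2 labels by occurrence, one possible sketch (mine, not part of the original answer) is to rank the labels per day first:
SELECT forecasting_date,
       STRING_AGG(description, ',' ORDER BY cnt DESC) AS description
FROM (
    SELECT m.forecasting_time::date AS forecasting_date,
           m.description,
           COUNT(*) AS cnt,
           ROW_NUMBER() OVER (PARTITION BY m.forecasting_time::date
                              ORDER BY COUNT(*) DESC) AS rn
    FROM   meteo_forecast_last_update m
    WHERE  m.forecasting_time > now()
    GROUP  BY 1, 2
) ranked
WHERE rn <= 2
GROUP  BY forecasting_date;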

Postgresql, maintaining hierarchical data with triggers

I have adjacency list table account, with columns id, code, name, and parent_id.
To make sorting and displaying easier I added two more columns: depth and path (a materialized path). I know PostgreSQL has a dedicated data type for materialized paths, but I'd like to use a more generic approach that is not specific to PostgreSQL. I also applied several rules to my design:
1) code can be up to 10 characters long
2) Max depth is 9, so a root account can have sub-accounts at most 8 levels deep.
3) Once set, parent_id is never changed, so there's no need to move a branch of tree to another part of the tree.
4) path is the account's materialized path, up to 90 characters long; it is built by concatenating account codes, each right-padded to 10 characters; for example '10000______10001______'.
So, to automatically maintain depth and path columns, I created a trigger and a trigger function for the account table:
CREATE FUNCTION public.fn_account_set_hierarchy()
RETURNS TRIGGER AS $$
DECLARE d INTEGER; p CHARACTER VARYING;
BEGIN
IF TG_OP = 'INSERT' THEN
IF NEW.parent_id IS NULL THEN
NEW.depth := 1;
NEW.path := rpad(NEW.code, 10);
ELSE
BEGIN
SELECT depth, path INTO d, p
FROM public.account
WHERE id = NEW.parent_id;
NEW.depth := d + 1;
NEW.path := p || rpad(NEW.code, 10);
END;
END IF;
ELSE
IF NEW.code IS DISTINCT FROM OLD.code THEN
UPDATE public.account
SET path = OVERLAY(path PLACING rpad(NEW.code, 10)
FROM (OLD.depth - 1) * 10 + 1 FOR 10)
WHERE SUBSTRING(path FROM (OLD.depth - 1) * 10 + 1 FOR 10) =
rpad(OLD.code, 10);
END IF;
END IF;
RETURN NEW;
END$$
LANGUAGE plpgsql;
CREATE TRIGGER tg_account_set_hierarchy
BEFORE INSERT OR UPDATE ON public.account
FOR EACH ROW
EXECUTE PROCEDURE public.fn_account_set_hierarchy();
The above seems to work for INSERTs. But for UPDATEs, an error is thrown: "UPDATE statement on table 'account' expected to update 1 row(s); 0 were matched." I suspect the "UPDATE public.account ..." part. Can someone help me correct the above trigger?
Well, in the above code the UPDATE part updates all matching records, including the record on which the trigger was fired (a concurrency exception?). That does not work, so I had to issue two different statements:
UPDATE {0}.{1} SET path = OVERLAY(path PLACING rpad(NEW.code, 10)
FROM (OLD.depth - 1) * 10 + 1 FOR 10)
WHERE SUBSTRING(path FROM (OLD.depth - 1) * 10 + 1 FOR 10) = rpad(OLD.code, 10)
AND id <> NEW.id;
NEW.path = OVERLAY(OLD.path PLACING rpad(NEW.code, 10)
FROM (OLD.depth - 1) * 10 + 1 FOR 10);

Calling a user-defined scalar function from a SQL program

I am still really new to SQL functions. I am trying to figure out how to use them in a SQL program properly. I want to test the scalar UDFs I have created to see that they return the data correctly and can return a large quantity of data in order. I am not sure what is wrong with my syntax for using the function, as this is my first attempt. Can someone steer me in the right direction?
Here is an example.
Function code:
SET ANSI_NULLS_ON
GO
GET QUOTED_IDENTIFIER ON
GO
ALTER FUNCTION dbo.FN_LTV_Ranges
(
#LTV_RANGE decimal(4,3)
)
Returns variable (160
as
Begin
declare #Return varchar(16)
select #Return =
Case
When #LTV_Range is NULL then 'Missing'
When #LTV_Range is 00.00 then 'Missing'
When #LTV_Range <= 0.75 then '<=0.75'
When #LTV_Range between 0.75 and 0.80 then '75-80'
When #LTV_Range between 0.80 and 0.90 then '80-90'
When #LTV_Range between 0.90 and 1.00 then '90-100'
When #LTV_Range >= 100 then '100+'
else null end
Return &Return
END
Here is the SQL program to call and test the above function:
declare #LTV_Range decimal(4,3)
Select top 600 s.LNumber
from OPENQUERY (SvrLink, '
Select Lnumber, dbo.FN_LTV_Range(#LTV_Range)
from some_table s
where s.LNumber > '0'
group by #LTV_Range
Order by #LTV_Range
for Fetch only with UR')
Here is the error returned when attempting to run the SQL program:
OLE DB provider "MSDASQL" for linked server "SvrLink" returned message "(IBM)(CLI Driver) (DB2/LINUXX8641) SQL0306N "#LTV_RANGE" is not valid in context where it is used. SQLSTATE=42703".
Msg 7350, Level 16, State 2, Line 5
Cannot get the column information from OLE DB provider "MSDASQL" for linked server "SrvLinnk"
Well, the function should read at least like this if it's for SQL Server; what you have above is wrong:
ALTER FUNCTION dbo.FN_LTV_Ranges
(
#LTV_RANGE decimal(4,3)
)
Returns varchar(16)
as
Begin
declare #Return varchar(16)
select #Return =
Case
When #LTV_Range is NULL then 'Missing'
When #LTV_Range = 0 then 'Missing'
When #LTV_Range <= 0.75 then '<=0.75'
When #LTV_Range between 0.75 and 0.80 then '75-80'
When #LTV_Range between 0.80 and 0.90 then '80-90'
When #LTV_Range between 0.90 and 1.00 then '90-100'
When #LTV_Range >= 100 then '100+'
else null end
Return #Return
END
For decimal(4,3) your min/max is +/- 9.999 so why this "When #LTV_Range >= 100 then '100+'"?
Next, why have OPENQUERY submitting a SQL call to a DB2 instance that includes a SQL Server function?
I assume you want the function call + grouping + ordering outside. And where do you set #LTV_Range?
Finally, grouping + ordering on #LTV_Range is pointless: it's a single value, so I assume you mean to group/order on the result of the function call:
declare #LTV_Range decimal(4,3)
Select top 600
s.LNumber, dbo.FN_LTV_Range(#LTV_Range)
from
OPENQUERY (SvrLink, '
Select Lnumber
from some_table s
where s.LNumber > '0'
for Fetch only with UR')
group by dbo.FN_LTV_Range(#LTV_Range)
Order by dbo.FN_LTV_Range(#LTV_Range)
The question as it stands makes no sense I'm sorry to say...
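Still, for completeness, a hedged sketch of what the call might look like if the remote query returns the LTV value itself (the column name LTV is hypothetical) and the local scalar UDF is applied on the SQL Server side:
SELECT TOP 600
       q.LNumber,
       dbo.FN_LTV_Ranges(q.LTV) AS ltv_range
FROM OPENQUERY(SvrLink, '
        SELECT LNumber, LTV
        FROM   some_table
        WHERE  LNumber > ''0''
        FOR FETCH ONLY WITH UR') AS q
ORDER BY dbo.FN_LTV_Ranges(q.LTV);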