I have this piece of code:
DO
$$
DECLARE v_RowCountInt Int;
BEGIN
execute ('insert into vcc.toto select * from vcc.segment s limit 50
on conflict (id_segment,pds) do update
set id_segment = excluded.id_segment
,pds = excluded.pds');
GET DIAGNOSTICS v_RowCountInt = ROW_COUNT;
RAISE NOTICE 'Returned % rows', v_RowCountInt;
END;
$$
The RAISE NOTICE returns 50 rows, which is what I want.
But if I add this to my code:
DO
$$
DECLARE v_RowCountInt Int;
DECLARE query_plan varchar;
BEGIN
execute ('explain (analyze, COSTS, VERBOSE, BUFFERS, format json)
insert into vcc.toto select * from vcc.segment s limit 50
on conflict (id_segment,pds) do update
set id_segment = excluded.id_segment
,pds = excluded.pds')
into query_plan;
GET DIAGNOSTICS v_RowCountInt = ROW_COUNT;
RAISE NOTICE 'Returned % rows', v_RowCountInt;
END;
$$
This time the RAISE NOTICE reports 1 row only (because of the INTO query_plan, I think).
How can I get 50 rows instead while keeping the INTO query_plan variable?
Thanks
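One possible workaround, sketched here as an untested idea rather than a definitive answer: GET DIAGNOSTICS reports the row count of the most recent command, and an EXPLAIN in JSON format returns exactly one row, hence the 1. The number of rows the INSERT actually processed is still recorded inside the JSON plan, on the node that feeds the Insert, so you could read it back out of query_plan instead (assuming that child node's "Actual Rows" is the figure you want):
DO
$$
DECLARE v_RowCountInt Int;
DECLARE query_plan varchar;
BEGIN
execute ('explain (analyze, COSTS, VERBOSE, BUFFERS, format json)
insert into vcc.toto select * from vcc.segment s limit 50
on conflict (id_segment,pds) do update
set id_segment = excluded.id_segment
,pds = excluded.pds')
into query_plan;
-- assumption: the first child of the top-level Insert node reports the rows it produced
v_RowCountInt := (query_plan::jsonb #> '{0,Plan,Plans,0}' ->> 'Actual Rows')::int;
RAISE NOTICE 'Returned % rows', v_RowCountInt;
END;
$$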
I am trying to assign the result of a query to a variable in a Postgres stored procedure.
Here is what I am trying to run:
CREATE OR Replace PROCEDURE schema.MyProcedure()
AS $$
DECLARE
RowCount int = 100;
BEGIN
select cnt into RowCount
from (
Select count(*) as cnt
From schema.MyTable
) ;
RAISE NOTICE 'RowCount: %', RowCount;
END;
$$
LANGUAGE plpgsql;
schema.MyTable is just an arbitrary table name, but the script does not display anything, not even the initial value (100) I assigned to RowCount.
What am I doing wrong?
Thanks
You need an alias for the subquery, for example as sub:
CREATE OR Replace PROCEDURE schema.MyProcedure()
AS $$
DECLARE
RowCount int = 100;
BEGIN
select cnt into RowCount
from (
Select count(*) as cnt
From schema.MyTable
) as sub ;
RAISE NOTICE 'RowCount: %', RowCount;
END;
$$
LANGUAGE plpgsql;
You can also assign a variable directly from a query result in parentheses:
CREATE OR REPLACE PROCEDURE schema.my_procedure()
AS
$$
DECLARE
row_count BIGINT;
BEGIN
row_count = (SELECT COUNT(*) FROM schema.my_table);
RAISE NOTICE 'RowCount: %', row_count;
END;
$$ LANGUAGE plpgsql;
You should use BIGINT instead of INT, since count(*) returns a bigint.
And it is better to write your code and table definitions in snake_case style where possible.
I have a stored procedure in Redshift in plpgsql.
When I run call test3() I get this exception:
Amazon Invalid operation: Row Count: 1;
1 statement failed.
CREATE OR REPLACE PROCEDURE test3()
LANGUAGE plpgsql
AS $$
DECLARE
found_record RECORD;
integer_var integer;
BEGIN
SELECT * INTO found_record FROM tbl WHERE id='ABC';
IF FOUND THEN
GET DIAGNOSTICS integer_var = ROW_COUNT;
RAISE EXCEPTION 'Row Count: %', integer_var;
END IF;
END;
$$
But when I run call test4() I get
0 rows affected
CALL executed successfully
CREATE OR REPLACE PROCEDURE test4()
LANGUAGE plpgsql
AS $$
DECLARE
found_record RECORD;
integer_var integer;
BEGIN
EXECUTE 'SELECT * FROM tbl WHERE id=\'ABC\'' into found_record ;
IF FOUND THEN
GET DIAGNOSTICS integer_var = ROW_COUNT;
RAISE EXCEPTION 'Row Count: %', integer_var;
END IF;
END;
$$
How do I access FOUND when executing a dynamic query?
Dynamic SQL does not set the FOUND variable. But you can use a GET DIAGNOSTICS statement, as you already do in your example:
CREATE OR REPLACE PROCEDURE test4()
LANGUAGE plpgsql
AS $$
DECLARE
found_record RECORD;
integer_var integer;
BEGIN
EXECUTE 'SELECT * FROM tbl WHERE id=\'ABC\'' into found_record ;
GET DIAGNOSTICS integer_var = ROW_COUNT;
RAISE EXCEPTION 'Row Count: %', integer_var;
END;
$$
You can use the GET DIAGNOSTICS statement anywhere, not only when FOUND is true.
You can't; however, you can check that found_record is not null.
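For example, a minimal sketch of that check (test5 is a made-up name and tbl is the same hypothetical table as above; EXECUTE ... INTO leaves the record as NULLs when the query returns no row):
CREATE OR REPLACE PROCEDURE test5()
LANGUAGE plpgsql
AS $$
DECLARE
found_record RECORD;
BEGIN
EXECUTE 'SELECT * FROM tbl WHERE id = ''ABC''' INTO found_record;
-- the record is NULL when the dynamic query returned no row
IF found_record IS NOT NULL THEN
RAISE NOTICE 'A matching row was found';
ELSE
RAISE NOTICE 'No matching row';
END IF;
END;
$$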
I have the following code. It maps rows from the demoimage table to the fitsheader table. I can run the code without complaints if I comment out the commit; line, but the demoimage table doesn't change, which has kept me confused for hours. I have searched a lot for this problem on Google but found nothing.
CREATE OR REPLACE FUNCTION public.linkjpeg(
)
RETURNS TABLE(matchtext text)
LANGUAGE 'plpgsql'
COST 100
VOLATILE
ROWS 1000
AS $BODY$
DECLARE
v_jpeghref demoImage.href%type;
v_fitsHREF fitsheader."HREF"%type;
v_uid varchar(500);
v_count bigint;
c_jpeg cursor for SELECT href from demoImage;
c_fitsHREF cursor for select t."HREF" from fitsheader t;
array_matches text[];
array_fitsHREF text[];
array_imgHref text[];
v_arrayDim bigint;
fileBone varchar(500);
fileBoneFits char(500);
c_checkUID cursor for select v_uid=any(array_matches);
c_checkfitsHREF cursor for select v_fitsHREF = any(array_fitsHREF);
c_matchrow cursor for select t.id_fitsheader from fitsheader t where t."HREF" like '%'||fileBoneFits||'%';
v_idfits bigint;
v_matchedFitsHREF fitsheader."HREF"%type;
i bigint;
_sql text;
BEGIN
i := 0;
open c_jpeg;
loop
fetch c_jpeg into v_jpegHref;
exit when not found; -- stop once the cursor is exhausted
v_uid := substring(v_jpegHref from '/member.uid.*');
if(v_uid is not null) then
array_imgHref := string_to_array(v_uid,'.');
v_arrayDim := cardinality(array_imgHref);
fileBone := array_to_string(array_imgHref[1:v_arrayDim-3],'.');
-- replace xxxxx-x-xxxxxS. with null
fileBoneFits := regexp_replace(filebone,'\d\d\d\d.\d.\d\d\d\d\d.S-','');
open c_matchrow;
begin
fetch c_matchrow into v_idfits;
exception
when others then
raise notice '%','not found v_idfits';
end;
raise notice '%','v_idfits'||v_idfits;
begin
execute 'update demoImage set id_fitsheader='|| v_idfits ||' where href ='||quote_literal(v_jpegHref);
commit;
exception
when others then
raise notice '%','commit failed'||fileBoneFits;
end;
i := i+1;
raise notice '%', i;
close c_matchrow;
end if;
end loop;
close c_jpeg;
END;
$BODY$;
The problem is that I can never get the commit to execute successfully. I can run the SQL above the commit successfully in a separate psql window. Can anyone help me figure out where I went wrong? Thanks in advance!
You cannot have transaction statements like COMMIT and ROLLBACK in a PL/pgSQL function; transaction control only becomes available in v11, and there only in procedures invoked with CALL, not in functions.
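For illustration, a minimal sketch of what v11 allows (the table demo here is made up): COMMIT and ROLLBACK work inside a procedure invoked with CALL:
CREATE TABLE demo (t text);

CREATE OR REPLACE PROCEDURE demo_commit()
LANGUAGE plpgsql
AS $$
BEGIN
    INSERT INTO demo (t) VALUES ('first');
    COMMIT;    -- allowed in a procedure, not in a function
    INSERT INTO demo (t) VALUES ('second');
    ROLLBACK;  -- the second row is discarded, the first one stays committed
END;
$$;

CALL demo_commit();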
I am trying to write a plpgsql function that loops through a table. On each loop, it pulls a row from the table, stores it in a record, then uses that record in the join clause of a query. Here is my code:
CREATE OR REPLACE FUNCTION "testfncjh2" () RETURNS int
IMMUTABLE
SECURITY DEFINER
AS $dbvis$
DECLARE
counter int;
tablesize int;
rec1 record;
tablename text;
rec2 record;
BEGIN
counter = 0;
for rec1 in SELECT * FROM poilocations_sridconv loop
raise notice 'here';
execute $$ select count(*) from $$||rec1||$$ $$ into tablesize;
while counter < tablesize loop
counter = counter + 1;
raise notice 'hi';
execute $$ select count(*) from cities_sridconv $$ into tablesize;
end loop;
end loop;
return counter;
END;
$dbvis$ LANGUAGE plpgsql;
Each time I run this, I get the following error:
ERROR: could not find array type for data type record
Is there a way to use the row as a table in the query within the nested loops?
My end goal is to build a function that loops through a table, pulling a row from that table on each loop. In each loop, a number COUNTER is computed using the row, then a query is executed depending on the row and COUNTER. Knowing that this code is currently very flawed, I am posting it below to give an idea of what I am trying to do:
CREATE OR REPLACE FUNCTION "testfncjh" () RETURNS void
IMMUTABLE
SECURITY DEFINER
AS $dbvis$
DECLARE
counter int;
tablesize int;
rec1 record;
tablename text;
rec2 record;
BEGIN
for rec1 in SELECT * FROM poilocations_sridconv loop
counter = 0;
execute $$ select count(*)
from $$||rec1||$$ a
join
cities_srid_conv b
on right(a.geom_wgs_pois,$$||counter||$$) = right(b.geom_wgs_pois,$$||counter||$$) $$ into tablesize;
raise notice 'got through first execute';
while tablesize = 0 loop
counter = counter + 1;
execute $$ select count(*)
from '||rec1||' a
join
cities_srid_conv b
on right(a.geom_wgs_pois,'||counter||') = right(b.geom_wgs_pois,'||counter||') $$ into tablesize;
raise notice 'hi';
end loop;
EXECUTE
'select
poiname,
name as cityname,
postgis.ST_Distance(postgis.ST_GeomFromText(''POINT(poilat poilong)''),
postgis.ST_GeomFromText(''POINT(citylat citylong)'')
) as distance
from (select a.poiname,
a.latitude::text as poilat,
a.longitude::text as poilong,
b.geonameid,
b.name,
b.latitude as citylat,
b.longitude as citylong
from '||rec1||' a
join cities_srid_conv b
on right(a.geom_wgs_pois,'||counter||') = right(b.geom_wgs_pois,'||counter||'))
) x
order by distance
limit 1'
poi_cities_match (poiname, cityname, distance); ------SQL STATEMENT TO INSERT CLOSEST CITY TO TABLE POI_CITIES_MATCH
end loop;
END;
$dbvis$ LANGUAGE plpgsql;
I am running on a PostgreSQL 8.2.15 database.
Also, sorry for reposting. I had to remove some data from the original.
I think you should be able to use composite types for what you want. I simplified your top example and used composite types in the following way.
CREATE OR REPLACE FUNCTION "testfncjh2" () RETURNS int
IMMUTABLE
SECURITY DEFINER
AS $dbvis$
DECLARE
counter int;
tablesize int;
rec1 poilocations_sridconv;
tablename text;
rec2 record;
BEGIN
counter = 0;
for rec1 in SELECT * FROM poilocations_sridconv loop
raise notice 'here';
select count(*) FROM (select (rec1).*) theRecord into counter;
end loop;
return counter;
END;
$dbvis$ LANGUAGE plpgsql;
The main changes are the rec1 poilocations_sridconv; declaration and the use of (select (rec1).*).
Hope it helps.
EDIT: I should note that this function does not do the same thing as the one in the question above. It is just an example of how you could use a record as a table in a query.
You have a few issues with your code (apart, perhaps, from your logic).
Foremost, you should not use a record as a table source in a JOIN. Instead, filter the second table for rows that match some field from the record.
Second, you should use the format() function instead of assembling strings with the || operator. But you can't because you are using the before-prehistoric version 8.2. This is from the cave-painting era (yes, it's that bad). UPGRADE!
Thirdly, don't over-complicate your queries. The sub-query is not necessary here.
Put together, the second dynamic query from your real code would reduce to this:
EXECUTE format(
'SELECT b.name,
postgis.ST_Distance(postgis.ST_SetSRID(postgis.ST_MakePoint(%1$I.longitude, %1$I.latitude), 4326),
postgis.ST_SetSRID(postgis.ST_MakePoint(b.longitude, b.latitude), 4326))
FROM cities_srid_conv b
WHERE right(%1$I.geom_wgs_pois, %2$L) = right(b.geom_wgs_pois, %2$L)
ORDER BY distance
LIMIT 1', rec1, counter) INTO cityname, distance;
poi_cities_match (rec1.poiname, cityname, distance); ------SQL STATEMENT TO INSERT CLOSEST CITY TO TABLE POI_CITIES_MATCH
Here %1$I refers to the first parameter after the string, which is an identifier: rec1; %2$L is the second parameter, a literal value: counter. I leave it to you to re-work this into pre-8.4 string concatenation. The results from the query are stored in a few additional variables, which you can then use in the following function call.
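As a tiny standalone illustration of those two format specifiers (table and value names made up here):
SELECT format('SELECT * FROM %1$I WHERE name = %2$L', 'my_table', 'O''Reilly');
-- yields: SELECT * FROM my_table WHERE name = 'O''Reilly'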
Lastly, you had longitude and latitude reversed. In PostGIS longitude always comes first.
How can I iterate over integer[] if I have:
operators_ids = string_to_array(operators_ids_g,',')::integer[];
I want to iterate over operators_ids.
I can't do it this way:
FOR oid IN operators_ids LOOP
and this:
FOR oid IN SELECT operators_ids LOOP
oid is an integer.
You can iterate over an array like this:
DO
$body$
DECLARE your_array integer[] := '{1, 2, 3}'::integer[];
BEGIN
FOR i IN array_lower(your_array, 1) .. array_upper(your_array, 1)
LOOP
-- do something with your value
raise notice '%', your_array[i];
END LOOP;
END;
$body$
LANGUAGE plpgsql;
But the main question, in my view, is: why do you need to do this at all? There is a good chance you can solve your problem in a better way, for example:
DO
$body$
DECLARE i record;
BEGIN
FOR i IN (SELECT operators_id FROM your_table)
LOOP
-- do something with your value
raise notice '%', i.operators_id;
END LOOP;
END;
$body$
LANGUAGE plpgsql;
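For completeness: on PostgreSQL 9.1 and later there is also FOREACH ... IN ARRAY, which iterates the array elements directly without index arithmetic (a minimal sketch):
DO
$body$
DECLARE
your_array integer[] := '{1, 2, 3}'::integer[];
elem integer;
BEGIN
FOREACH elem IN ARRAY your_array
LOOP
-- do something with your value
raise notice '%', elem;
END LOOP;
END;
$body$
LANGUAGE plpgsql;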
I think Dezso is right. You do not need to loop over the array using an index.
If you make a select statement grouping by person_id in combination with limit 1, you have the result set you wanted:
create or replace function statement_example(p_data text[]) returns int as $$
declare
rw event_log%rowtype;
begin
for rw in select * from "PRD".events_log where (event_type_id = 100 or event_type_id = 101) and person_id = any(operators_id::int[]) and plc_time < begin_date_g order by plc_time desc group by person_id limit 1 loop
raise notice 'interesting log: %', rw.field;
end loop;
return 1;
end;
$$ language plpgsql volatile;
That should perform much better.
If you still prefer looping over an integer array and there are a lot of person_ids to look after, then you might consider using the flyweight design pattern:
create or replace function flyweight_example(p_data text[]) returns int as $$
declare
i_id int;
i_min int;
i_max int;
begin
i_min := array_lower(p_data,1);
i_max := array_upper(p_data,1);
for i_id in i_min .. i_max loop
raise notice 'interesting log: %',p_data[i_id];
end loop;
return 1;
end;
$$ language plpgsql volatile;
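A hypothetical call of this sketch would be:
select flyweight_example(array['10', '20', '30']);
-- raises one notice per array element and returns 1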