PostgreSQL bulk collect

I'm trying to migrate an Oracle app to PostgreSQL.
In one of the Oracle functions I have the following code:
V_Step := 3;
command := 'declare
    type tab_data is table of ' || tab_name || '%ROWTYPE;
    CURSOR raw_data is SELECT * FROM ' || tab_name || '_vw;
    mydata tab_data;
    V_COUNTER integer := 0;
BEGIN
    open raw_data;
    LOOP
        V_COUNTER := V_COUNTER + 1;
        FETCH raw_data BULK COLLECT INTO mydata LIMIT ' ||
            to_char(current_setting('Gaps.BATCH_SIZE')::bigint) || ';
        FORALL i IN 1..mydata.COUNT
            INSERT INTO ' || tab_name || ' VALUES mydata(i);
        EXIT WHEN raw_data%NOTFOUND;
    END LOOP;
    RAISE NOTICE ''V_COUNTER = %'', V_COUNTER;
    --commit;
    close raw_data;
END;';
V_Step := 4;
RAISE NOTICE '%', command;
V_Step := 5;
EXECUTE command;
We are trying to load a huge amount of data into a local PostgreSQL table from a remote Oracle table (I have a database link). In Oracle, using BULK COLLECT makes this faster than a plain insert into x select * from y. Is there anything in PostgreSQL that is similar to Oracle's BULK COLLECT?
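PL/pgSQL has no equivalent of BULK COLLECT / FORALL; a single set-based INSERT ... SELECT is already executed in bulk by the server and is normally at least as fast as a batched loop. As a minimal, untested sketch of what the dynamic load could collapse to, assuming the _vw view naming convention from the code above (copy_from_view is a hypothetical helper):
create or replace function copy_from_view(tab_name text)
returns bigint as $$
declare
    v_rows bigint;
begin
    -- one set-based statement replaces the whole BULK COLLECT / FORALL loop
    execute format('insert into %I select * from %I', tab_name, tab_name || '_vw');
    get diagnostics v_rows = row_count;  -- rows inserted by the last statement
    raise notice 'rows inserted = %', v_rows;
    return v_rows;
end;
$$ language plpgsql;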

Related

Update Null columns to Zero dynamically in Redshift

Here is the code in SAS. It finds the numeric columns with missing values and replaces them with 0s:
DATA dummy_table;
SET dummy_table;
ARRAY DUMMY _NUMERIC_;
DO OVER DUMMY;
IF DUMMY=. THEN DUMMY=0;
END;
RUN;
I am trying to replicate this in Redshift; here is what I tried:
create or replace procedure sp_replace_null_to_zero(IN tbl_nm varchar) as $$
Begin
    Execute 'declare ' ||
        'tot_cnt int := (select count(*) from information_schema.columns where table_name = ' || tbl_nm || ');' ||
        'init_loop int := 0; ' ||
        'cn_nm varchar; '
    Begin
        While init_loop <= tot_cnt
        Loop
            Raise info 'init_loop = %', Init_loop;
            Raise info 'tot_cnt = %', tot_cnt;
            Execute 'Select column_name into cn_nm from information_schema.columns ' ||
                'where table_name =' || tbl_nm || ' and ordinal_position = init_loop ' ||
                'and data_type not in (''character varying'',''date'',''text''); '
            Raise info 'cn_nm = %', cn_nm;
            if cn_nm is not null then
                Execute 'Update ' || tbl_nm ||
                    'Set ' || cn_nm = 0 ||
                    'Where ' || cn_nm is null or cn_nm =' ';
            end if;
            init_loop = init_loop + 1;
        end loop;
    End;
End;
$$ language plpgsql;
Issues I am facing:
When I pass the input parameter here, I am getting a 0 count:
tot_cnt int := (select count(*) from information_schema.columns where table_name = ' || tbl_nm || ');'
For testing purposes I tried hardcoding the table name inside the proc, and I get the error: amazon invalid operation: value for domain information_schema.cardinal_number violates check constraint "cardinal_number_domain_check"
Is this even possible in Redshift? How can I implement this logic, or is there any other workaround?
Need expert advice here!!
You can simply run an UPDATE over the table(s) using the NVL(cn_nm,0) function
UPDATE tbl_raw
SET col2 = NVL(col2,0);
However, UPDATE is a fairly expensive operation. Consider just using a view over your table that wraps the columns in NVL(cn_nm,0):
CREATE VIEW tbl_clean
AS
SELECT col1
, NVL(col2,0) col2
FROM tbl_raw;
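If the table has many columns, the NVL-wrapped select list for such a view can be generated from the catalog instead of typed by hand. A rough, untested sketch, reusing tbl_raw and the data types excluded in the question:
select case
         when data_type in ('character varying', 'date', 'text') then column_name
         else 'NVL(' || column_name || ', 0) AS ' || column_name
       end || ','
from information_schema.columns
where table_name = 'tbl_raw'
order by ordinal_position;
-- paste the generated lines into CREATE VIEW tbl_clean AS SELECT ..., dropping the trailing comma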

Postgres colpivot function returns nothing

I am trying to follow this tutorial: http://www.anhuiyouxi.com/transposing-an-sql-result-so-that-one-column-goes-onto-multiple-columns/. I ran the code below:
CREATE EXTENSION IF NOT EXISTS tablefunc;
create or replace function colpivot(
out_table varchar, in_query varchar,
key_cols varchar[], class_cols varchar[],
value_e varchar, col_order varchar
) returns void as $$
declare
in_table varchar;
col varchar;
ali varchar;
on_e varchar;
i integer;
rec record;
query varchar;
-- This is actually an array of arrays but postgres does not support an array of arrays type so we flatten it.
-- We could theoretically use the matrix feature but it's extremely cancerogenous and we would have to involve
-- custom aggregates. For most intents and purposes postgres does not have a multi-dimensional array type.
clsc_cols text[] := array[]::text[];
n_clsc_cols integer;
n_class_cols integer;
begin
in_table := quote_ident('__' || out_table || '_in');
execute ('create temp table ' || in_table || ' on commit drop as ' || in_query);
-- get ordered unique columns (column combinations)
query := 'select array[';
i := 0;
foreach col in array class_cols loop
if i > 0 then
query := query || ', ';
end if;
query := query || 'quote_literal(' || quote_ident(col) || ')';
i := i + 1;
end loop;
query := query || '] x from ' || in_table;
for j in 1..2 loop
if j = 1 then
query := query || ' group by ';
else
query := query || ' order by ';
if col_order is not null then
query := query || col_order || ' ';
exit;
end if;
end if;
i := 0;
foreach col in array class_cols loop
if i > 0 then
query := query || ', ';
end if;
query := query || quote_ident(col);
i := i + 1;
end loop;
end loop;
-- raise notice '%', query;
for rec in
execute query
loop
clsc_cols := array_cat(clsc_cols, rec.x);
end loop;
n_class_cols := array_length(class_cols, 1);
n_clsc_cols := array_length(clsc_cols, 1) / n_class_cols;
-- build target query
query := 'select ';
i := 0;
foreach col in array key_cols loop
if i > 0 then
query := query || ', ';
end if;
query := query || '_key.' || quote_ident(col) || ' ';
i := i + 1;
end loop;
for j in 1..n_clsc_cols loop
query := query || ', ';
col := '';
for k in 1..n_class_cols loop
if k > 1 then
col := col || ', ';
end if;
col := col || clsc_cols[(j - 1) * n_class_cols + k];
end loop;
ali := '_clsc_' || j::text;
query := query || '(' || replace(value_e, '#', ali) || ')' || ' as ' || quote_ident(col) || ' ';
end loop;
query := query || ' from (select distinct ';
i := 0;
foreach col in array key_cols loop
if i > 0 then
query := query || ', ';
end if;
query := query || quote_ident(col) || ' ';
i := i + 1;
end loop;
query := query || ' from ' || in_table || ') _key ';
for j in 1..n_clsc_cols loop
ali := '_clsc_' || j::text;
on_e := '';
i := 0;
foreach col in array key_cols loop
if i > 0 then
on_e := on_e || ' and ';
end if;
on_e := on_e || ali || '.' || quote_ident(col) || ' = _key.' || quote_ident(col) || ' ';
i := i + 1;
end loop;
for k in 1..n_class_cols loop
on_e := on_e || ' and ';
on_e := on_e || ali || '.' || quote_ident(class_cols[k]) || ' = ' || clsc_cols[(j - 1) * n_class_cols + k];
end loop;
query := query || 'left join ' || in_table || ' as ' || ali || ' on ' || on_e || ' ';
end loop;
-- raise notice '%', query;
execute ('create temp table ' || quote_ident(out_table) || ' on commit drop as ' || query);
-- cleanup temporary in_table before we return
execute ('drop table ' || in_table);
return;
end;
$$ language plpgsql volatile;
begin;
DROP TABLE IF EXISTS qa;
create temp table qa (id int, usr int, question_id int, answer_id int);
insert into qa values
(1,1,1,1)
,(2,1,2,9)
,(3,1,3,15)
,(4,2,1,2)
,(5,2,2,12)
,(6,2,3,20);
--select * from qa;
select colpivot('_output', $$
select usr, ('q' || question_id::text) question_id, answer_id from qa
$$, array['usr'], array['question_id'], '#.answer_id', null);
select * from _output;
rollback;
After running through to the last line of the code, I get nothing.
Am I doing it wrong?
Please advise.
I've changed the function a little bit; you can do a diff to see exactly what. It seems the function is dropping the result table at the end (I'm assuming a commit happens, which drops the ON COMMIT DROP table). I've added a drop of the temp table in case it already exists. I also removed the quote_ident() calls around the table names, as they were causing a problem when passing a long table name value for out_table (on ver 9.6).
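A minimal illustration of why an ON COMMIT DROP table can vanish before you ever get to select from it (run in autocommit mode, i.e. outside an explicit transaction):
create temp table t (x int) on commit drop;
-- in autocommit mode the CREATE commits immediately, which drops the table,
-- so the very next statement fails:
select * from t; -- ERROR: relation "t" does not exist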
-- Copyright © 2015, Hannes Landeholm <hannes@jumpstarter.io>
-- This Source Code Form is subject to the terms of the Mozilla Public
-- License, v. 2.0. If a copy of the MPL was not distributed with this
-- file, You can obtain one at http://mozilla.org/MPL/2.0/.
-- See the README.md file distributed with this project for documentation.
create or replace function colpivot(
out_table varchar, in_query varchar,
key_cols varchar[], class_cols varchar[],
value_e varchar, col_order varchar
) returns void as $$
declare
in_table varchar;
col varchar;
ali varchar;
on_e varchar;
i integer;
rec record;
query varchar;
-- This is actually an array of arrays but postgres does not support an array of arrays type so we flatten it.
-- We could theoretically use the matrix feature but it's extremely cancerogenous and we would have to involve
-- custom aggregates. For most intents and purposes postgres does not have a multi-dimensional array type.
clsc_cols text[] := array[]::text[];
n_clsc_cols integer;
n_class_cols integer;
begin
in_table := ('__' || out_table || '_in');
-- if the temp table already exists, drop
execute ( 'drop TABLE IF EXISTS ' || in_table );
execute ('create temp table ' || in_table || ' on commit drop as ' || in_query);
-- get ordered unique columns (column combinations)
query := 'select array[';
i := 0;
foreach col in array class_cols loop
if i > 0 then
query := query || ', ';
end if;
query := query || 'quote_literal(' || quote_ident(col) || ')';
i := i + 1;
end loop;
query := query || '] x from ' || in_table;
for j in 1..2 loop
if j = 1 then
query := query || ' group by ';
else
query := query || ' order by ';
if col_order is not null then
query := query || col_order || ' ';
exit;
end if;
end if;
i := 0;
foreach col in array class_cols loop
if i > 0 then
query := query || ', ';
end if;
query := query || quote_ident(col);
i := i + 1;
end loop;
end loop;
-- raise notice '%', query;
for rec in
execute query
loop
clsc_cols := array_cat(clsc_cols, rec.x);
end loop;
n_class_cols := array_length(class_cols, 1);
n_clsc_cols := array_length(clsc_cols, 1) / n_class_cols;
-- build target query
query := 'select ';
i := 0;
foreach col in array key_cols loop
if i > 0 then
query := query || ', ';
end if;
query := query || '_key.' || quote_ident(col) || ' ';
i := i + 1;
end loop;
for j in 1..n_clsc_cols loop
query := query || ', ';
col := '';
for k in 1..n_class_cols loop
if k > 1 then
col := col || ', ';
end if;
col := col || clsc_cols[(j - 1) * n_class_cols + k];
end loop;
ali := '_clsc_' || j::text;
query := query || '(' || replace(value_e, '#', ali) || ')' || ' as ' || quote_ident(col) || ' ';
end loop;
query := query || ' from (select distinct ';
i := 0;
foreach col in array key_cols loop
if i > 0 then
query := query || ', ';
end if;
query := query || quote_ident(col) || ' ';
i := i + 1;
end loop;
query := query || ' from ' || in_table || ') _key ';
for j in 1..n_clsc_cols loop
ali := '_clsc_' || j::text;
on_e := '';
i := 0;
foreach col in array key_cols loop
if i > 0 then
on_e := on_e || ' and ';
end if;
on_e := on_e || ali || '.' || quote_ident(col) || ' = _key.' || quote_ident(col) || ' ';
i := i + 1;
end loop;
for k in 1..n_class_cols loop
on_e := on_e || ' and ';
on_e := on_e || ali || '.' || quote_ident(class_cols[k]) || ' = ' || clsc_cols[(j - 1) * n_class_cols + k];
end loop;
query := query || 'left join ' || in_table || ' as ' || ali || ' on ' || on_e || ' ';
end loop;
-- raise notice '%', query;
execute ('create temp table ' || out_table || ' as ' || query);
-- cleanup temporary in_table before we return
execute ('drop table ' || in_table);
return;
end;
$$ language plpgsql volatile;
Now you can run it like so:
DROP TABLE IF EXISTS qa;
create temp table qa (id int, usr int, question_id int, answer_id int);
insert into qa values
(1,1,1,1)
,(2,1,2,9)
,(3,1,3,15)
,(4,2,1,2)
,(5,2,2,12)
,(6,2,3,20);
--select * from qa;
select colpivot('_output', $$
select usr, ('q' || question_id::text) question_id, answer_id from qa
$$, array['usr'], array['question_id'], '#.answer_id', null);
-- then run the select to get the result
select * from _output;
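For reference, with the sample data above the final select should return the pivoted result:
 usr | q1 | q2 | q3
-----+----+----+----
   1 |  1 |  9 | 15
   2 |  2 | 12 | 20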

How to replace tablename with a variable in a DB2 cursor in an anonymous block

I want to replace the table name with a variable that is derived from another cursor, but no matter what logic I tried I could not get it right. I have seen a couple of examples for Oracle and SQL Server, but I failed to translate that code into DB2 SQL. Please help.
Declare
v_user VarCHAR(100);
v_schema VARCHAR(1000);
V_Studio_svr VARCHAR(1000);
v_db2_schema VARCHAR(1000);
v_oracle_string varchar(5000) ;
v_db2_string varchar(5000) := '(' ;
v_sys_columns varchar(2000);
v_sys_values varchar(2000);
V_UID INTEGER := 41;
begin
-- Main Table to Get Table Name From
FOR v In ( Select app_id,Upper(alias) ALIAS
From FREEDOM.FORMS where app_id = '5e988af8-ef0f-48c7-9794-9bc4f1134c80' ) Loop
v_schema := 'S__'||V.app_ID||'_1';
v_schema := replace(v_schema,'-','_');
v_studio_svr := 'PTU'||SUBSTR(v.alias,2,LENGTH(v.alias));
v_db2_schema := 'TF'||SUBSTR(v.alias,2,LENGTH(v.alias));
-- This is where I want to use Table Name as Variable Coming From Cursor V
For P in
(Select * from studio_svr||'.'||v_studio_svr) loop
-- Table to get Data Type Mappings
For i in
(Select * From FREEDOM.DB2_DT_MAPPING
Where Table_Name = v.alias ) Loop
IF I.DB2_DATATYPE LIKE 'DECIMAL%' THEN
v_ORACLE_STRING := Nvl(v_ORACLE_STRING,'')||'CAST('||'INTEGER('||I.STUDIO_SVR_COLUMN||') AS DECIMAL(22,6)),';
ELSE
v_ORACLE_STRING := Nvl(v_ORACLE_STRING,'')||I.STUDIO_SVR_COLUMN||',';
END IF;
v_DB2_STRING := v_DB2_STRING||I.DB2_COLUMN||',';
End Loop;
v_DB2_STRING := SUBSTR(v_DB2_STRING,1,LENGTH(v_DB2_STRING)-1)||')';
execute immediate 'Insert Into ' || v_schema || '.' || v_db2_schema || ' ' || v_db2_string || ' SELECT ' || v_oracle_string || ' FROM Studio_svr.' || v_studio_svr || ' where S__recordid = ' || p.s__recordid;
v_db2_string := '(';
v_oracle_string := '';
v_uid := v_uid + 1;
commit;
End loop;
END LOOP;
END;
Obviously, you need to use dynamic SQL for that cursor, like so:
Declare
v_user VarCHAR(100);
...
V_UID INTEGER := 41;
--->
v_cursor_studio SYS_REFCURSOR;
begin
-- Main Table to Get Table Name From
FOR v In ( Select app_id,Upper(alias) ALIAS
From FREEDOM.FORMS where app_id = '5e988af8-ef0f-48c7-9794-9bc4f1134c80' ) Loop
v_schema := 'S__'||V.app_ID||'_1';
v_schema := replace(v_schema,'-','_');
v_studio_svr := 'PTU'||SUBSTR(v.alias,2,LENGTH(v.alias));
v_db2_schema := 'TF'||SUBSTR(v.alias,2,LENGTH(v.alias));
-- This is where I want to use Table Name as Variable Coming From Cursor V
--->
OPEN v_cursor_studio for 'Select * from ' || studio_svr||'.'||v_studio_svr;
For P in v_cursor_studio
...
The code is not tested, but I hope you get the idea.
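One caveat to watch for: neither Oracle PL/SQL nor DB2's PL/SQL compatibility layer accepts FOR P IN <ref cursor variable>, so the inner loop would need an explicit FETCH. A rough, untested sketch in the same dialect (v_recordid is a hypothetical variable; since the loop body only uses p.s__recordid, the dynamic cursor can select just that column instead of *):
OPEN v_cursor_studio FOR 'Select s__recordid from ' || studio_svr || '.' || v_studio_svr;
LOOP
    FETCH v_cursor_studio INTO v_recordid;
    EXIT WHEN v_cursor_studio%NOTFOUND;
    -- build and EXECUTE IMMEDIATE the INSERT here, using v_recordid
END LOOP;
CLOSE v_cursor_studio;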

Postgres creating a local temp table (on commit drop) from a dynamic sql string

I have a query string generated in a Postgres UDF. I'd like to put its result in a temp table to perform joins against (I'm using LIMIT and OFFSET, and I don't want to join against other tables only to end up chopping the data off at the end, i.e. the LIMIT operator in the query plan). I attempt to create the temp table with the following statement:
CREATE LOCAL TEMP TABLE query_result ON COMMIT DROP AS EXECUTE query_string_;
But I get the following error:
********** Error **********
ERROR: prepared statement "query_string_" does not exist
SQL state: 26000
Context: SQL statement "CREATE LOCAL TEMP TABLE query_result ON COMMIT DROP AS EXECUTE query_string_"
PL/pgSQL function "search_posts_unjoined" line 48 at SQL statement
Additionally, I tried preparing the statement, but I couldn't get the syntax right either.
The UDF in question is :
CREATE OR REPLACE FUNCTION search_posts_unjoined(
forum_id_ INTEGER,
query_ CHARACTER VARYING,
offset_ INTEGER DEFAULT NULL,
limit_ INTEGER DEFAULT NULL,
from_date_ TIMESTAMP WITHOUT TIME ZONE DEFAULT NULL,
to_date_ TIMESTAMP WITHOUT TIME ZONE DEFAULT NULL,
in_categories_ INTEGER[] DEFAULT '{}'
)
RETURNS SETOF forum_posts AS $$
DECLARE
join_string CHARACTER VARYING := ' ';
from_where_date CHARACTER VARYING := ' ';
to_where_date CHARACTER VARYING := ' ';
query_string_ CHARACTER VARYING := ' ';
offset_str_ CHARACTER VARYING := ' ';
limit_str_ CHARACTER VARYING := ' ';
BEGIN
IF NOT from_date_ IS NULL THEN
from_where_date := ' AND fp.posted_at > ''' || from_date_ || '''';
END IF;
IF NOT to_date_ IS NULL THEN
to_where_date := ' AND fp.posted_at < ''' || to_date_ || '''';
END IF;
IF NOT offset_ IS NULL THEN
offset_str_ := ' OFFSET ' || offset_;
END IF;
IF NOT limit_ IS NULL THEN
limit_str_ := ' LIMIT ' || limit_;
END IF;
IF NOT limit_ IS NULL THEN
END IF;
CREATE LOCAL TEMP TABLE un_cat(id) ON COMMIT DROP AS (select * from unnest(in_categories_)) ;
if in_categories_ != '{}' THEN
join_string := ' INNER JOIN un_cat uc ON uc.id = fp.category_id ' ;
END IF;
query_string_ := '
SELECT fp.*
FROM forum_posts fp' ||
join_string
||
'WHERE fp.forum_id = ' || forum_id_ || ' AND
to_tsvector(''english'',fp.post_text) @@ to_tsquery(''english'','''|| query_||''')' ||
from_where_date ||
to_where_date ||
offset_str_ ||
limit_str_
|| ';';
CREATE LOCAL TEMP TABLE query_result ON COMMIT DROP AS EXECUTE query_string_;
RAISE NOTICE '%', query_string_;
RETURN QUERY
EXECUTE query_string_;
END;
$$ LANGUAGE plpgsql;
And it works when the statement in question is removed.
Use instead:
EXECUTE '
CREATE TEMP TABLE query_result ON COMMIT DROP AS '|| query_string_;
You have to EXECUTE the whole statement.
Inside PL/pgSQL, the form CREATE TABLE foo AS EXECUTE <query_string> isn't valid; the SQL-level EXECUTE expects the name of a prepared statement, not a string variable (hence the "prepared statement does not exist" error).
LOCAL is just a noise word and is ignored in this context.
More details in the manual.
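Applied to the function in the question, the fix would look roughly like this (a sketch; the final select also goes through EXECUTE because the temp table is re-created on every call, so a cached plan would otherwise point at a dropped table):
-- instead of: CREATE LOCAL TEMP TABLE query_result ON COMMIT DROP AS EXECUTE query_string_;
EXECUTE 'CREATE TEMP TABLE query_result ON COMMIT DROP AS ' || query_string_;
RETURN QUERY EXECUTE 'SELECT * FROM query_result';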

PostgreSQL backend process high memory usage issue

We are evaluating using PostgreSQL to implement a multitenant database.
Currently we are running some tests on a single-database-multiple-schema model
(basically, all tenants have the same set of database objects under their own schema within the same database).
The application will maintain a connection pool that will be shared among all tenants/schemas.
e.g. if the database has 500 tenants/schemas and each tenant has 200 tables/views,
the total number of tables/views will be 500 * 200 = 100,000.
Since the connection pool will be used by all tenants, eventually each connection will hit all the tables/views.
In our tests, when a connection hits more views, we found that the memory usage of the backend process increases quite fast, and most of it is private memory.
That memory is held until the connection is closed.
We have a test case where one backend process uses more than 30GB of memory and eventually gets an out-of-memory error.
To help understand the issue, I wrote code to create a simplified test case:
- MTDB_destroy: used to clear tenant schemas
- MTDB_Initialize: used to create a multitenant DB
- MTDB_RunTests: simplified test case; basically, select from all tenant views one by one
The tests I've done were on PostgreSQL 9.0.3 on CentOS 5.4.
To make sure I had a clean environment, I re-created the database cluster and left most configuration settings at their defaults
(the only thing I HAD to change was to increase "max_locks_per_transaction", since MTDB_destroy needs to drop many objects).
This is what I do to reproduce the issue:
create a new database
create the three functions using the code attached
connect to the new created db and run the initialize scripts
-- Initialize
select MTDB_Initialize('tenant', 100, 100, true);
-- not sure if vacuum analyze is useful here, I just run it
vacuum analyze;
-- check the tables/views created
select table_schema, table_type, count(*) from information_schema.tables where table_schema like 'tenant%' group by table_schema, table_type order by table_schema, table_type;
open another connection to the new created db and run the test scripts
-- get backend process id for current connection
SELECT pg_backend_pid();
-- open a linux console and run ps -p and watch VIRT, RES and SHR
-- run tests
select MTDB_RunTests('tenant', 1);
Observations:
when the connection for running tests was first created,
VIRT = 182MB, RES = 6240K, SHR=4648K
after run the tests once, (took 175 seconds)
VIRT = 1661MB RES = 1.5GB SHR = 55MB
re-run the test again (took 167 seconds)
VIRT = 1661MB RES = 1.5GB SHR = 55MB
re-run the test again (took 165 seconds)
VIRT = 1661MB RES = 1.5GB SHR = 55MB
as we scale up the number of tables, the memory usage in the tests goes up too.
Can anyone help explain what's happening here?
Is there a way we can control memory usage of PostgreSQL backend process?
Thanks.
Samuel
-- MTDB_destroy
create or replace function MTDB_destroy (schemaNamePrefix varchar(100))
returns int as $$
declare
curs1 cursor(prefix varchar) is select schema_name from information_schema.schemata where schema_name like prefix || '%';
schemaName varchar(100);
count integer;
begin
count := 0;
open curs1(schemaNamePrefix);
loop
fetch curs1 into schemaName;
if not found then exit; end if;
count := count + 1;
execute 'drop schema ' || schemaName || ' cascade;';
end loop;
close curs1;
return count;
end $$ language plpgsql;
-- MTDB_Initialize
create or replace function MTDB_Initialize (schemaNamePrefix varchar(100), numberOfSchemas integer, numberOfTablesPerSchema integer, createViewForEachTable boolean)
returns integer as $$
declare
currentSchemaId integer;
currentTableId integer;
currentSchemaName varchar(100);
currentTableName varchar(100);
currentViewName varchar(100);
count integer;
begin
-- clear
perform MTDB_Destroy(schemaNamePrefix);
count := 0;
currentSchemaId := 1;
loop
currentSchemaName := schemaNamePrefix || ltrim(currentSchemaId::varchar(10));
execute 'create schema ' || currentSchemaName;
currentTableId := 1;
loop
currentTableName := currentSchemaName || '.' || 'table' || ltrim(currentTableId::varchar(10));
execute 'create table ' || currentTableName || ' (f1 integer, f2 integer, f3 varchar(100), f4 varchar(100), f5 varchar(100), f6 varchar(100), f7 boolean, f8 boolean, f9 integer, f10 integer)';
if (createViewForEachTable = true) then
currentViewName := currentSchemaName || '.' || 'view' || ltrim(currentTableId::varchar(10));
execute 'create view ' || currentViewName || ' as ' ||
'select t1.* from ' || currentTableName || ' t1 ' ||
' inner join ' || currentTableName || ' t2 on (t1.f1 = t2.f1) ' ||
' inner join ' || currentTableName || ' t3 on (t2.f2 = t3.f2) ' ||
' inner join ' || currentTableName || ' t4 on (t3.f3 = t4.f3) ' ||
' inner join ' || currentTableName || ' t5 on (t4.f4 = t5.f4) ' ||
' inner join ' || currentTableName || ' t6 on (t5.f5 = t6.f5) ' ||
' inner join ' || currentTableName || ' t7 on (t6.f6 = t7.f6) ' ||
' inner join ' || currentTableName || ' t8 on (t7.f7 = t8.f7) ' ||
' inner join ' || currentTableName || ' t9 on (t8.f8 = t9.f8) ' ||
' inner join ' || currentTableName || ' t10 on (t9.f9 = t10.f9) ';
end if;
currentTableId := currentTableId + 1;
count := count + 1;
if (currentTableId > numberOfTablesPerSchema) then exit; end if;
end loop;
currentSchemaId := currentSchemaId + 1;
if (currentSchemaId > numberOfSchemas) then exit; end if;
end loop;
return count;
END $$ language plpgsql;
-- MTDB_RunTests
create or replace function MTDB_RunTests(schemaNamePrefix varchar(100), rounds integer)
returns integer as $$
declare
curs1 cursor(prefix varchar) is select table_schema || '.' || table_name from information_schema.tables where table_schema like prefix || '%' and table_type = 'VIEW';
currentViewName varchar(100);
count integer;
begin
count := 0;
loop
rounds := rounds - 1;
if (rounds < 0) then exit; end if;
open curs1(schemaNamePrefix);
loop
fetch curs1 into currentViewName;
if not found then exit; end if;
execute 'select * from ' || currentViewName;
count := count + 1;
end loop;
close curs1;
end loop;
return count;
end $$ language plpgsql;
Are these connections idle in transaction or just idle? Sounds like unfinished transactions are holding onto memory, or maybe you've got a memory leak or something.
For people who see this thread when searching around (as I did), I found what appeared to be the same problem in a different context: idle processes slowly consuming more and more memory until the OOM killer takes them out (causing periodic DB crashes).
We traced the problem back to really long-running PHP scripts which kept one connection open for a long time. We were able to get the memory under control by periodically closing the connection and re-connecting.
From what I've read, Postgres does a lot of caching, so if you have one session hitting a lot of different tables/queries, this cache data can continue to grow and grow.
-Ken