Sample code as follows: the ALL or ANY operator is not working. I need to compare against ALL the values of the array.
CREATE OR REPLACE FUNCTION public.sample_function(
tt_sample_function text)
RETURNS TABLE (..... )
LANGUAGE 'plpgsql'
COST 100
VOLATILE
ROWS 1000
AS $BODY$
declare
e record;
v_cnt INTEGER:=0;
rec record;
str text;
a_v text [];
BEGIN
FOR rec IN ( SELECT * FROM json_populate_recordset(null::tt_sample_function, tt_sample_function::json) )
LOOP
a_v:= array_append(a_v, ''''||rec.key || '#~#' || rec.value||'''');
END LOOP;
SELECT MAInfo.userid FROM
(SELECT DISTINCT i.userid,
CASE WHEN (i.settingKey || '#~#' || i.settingvalue) = ALL (a_v)
THEN i.settingKey || '#~#' || 'Y'
ELSE i.settingKey || '#~#' || 'N' END
AS MatchResult
FROM public.sample_table i
WHERE (i.settingKey || '#~#' || i.settingvalue) = ALL (a_v)
GROUP BY i.userid, MatchResult) AS MAInfo
GROUP BY MAInfo.userid
HAVING COUNT(MAInfo.userid) >= 1;
RETURN QUERY (....);
END;
$BODY$;
CREATE TYPE tt_sample_function AS
(
key character varying,
value character varying
);
Inputs are
SELECT public.sample_function(
'[{"key":"devicetype", "value":"TestType"},{"key":"ostype", "value":"TestType"}]'
)
Any suggestions why my ALL operator is not working? It always returns false, when it should match all the array elements...
Note: of course the data is there in the table.
You are overcomplicating things. You don't need the FOR loop or the array to do the comparison; you can do it all in a single statement. There is no need for an extra TYPE or for generating an array.
The parameter to the function should be declared as jsonb as you clearly want to pass valid JSON there.
I don't understand what you are trying to achieve with the CASE expression. The WHERE clause only returns rows that match the first condition in the CASE, so the second one will never be reached.
I also don't understand why you have the CASE at all, as you discard the result of that in the outer query completely.
But keeping the original structure as close as possible, I think you can simplify this to a single CREATE TABLE AS statement and get rid of all the array processing.
CREATE OR REPLACE FUNCTION public.sample_function(p_settings jsonb)
RETURNS TABLE (..... )
LANGUAGE plpgsql
AS $BODY$
declare
...
begin
CREATE TEMP TABLE hold_userID AS
SELECT MAInfo.userid
FROM (
-- the distinct is useless as the GROUP BY already does that
SELECT i.userid,
CASE
-- this checks if the parameter contains the settings key/value from sample_table
-- but the WHERE clause already makes sure of that???
WHEN p_settings @> jsonb_build_object('key', i.settingKey, 'value', i.settingvalue)
THEN i.settingKey || '#~#' || 'Y'
ELSE i.settingKey || '#~#' || 'N'
END AS MatchResult
FROM public.sample_table i
WHERE (i.settingKey, i.settingvalue) IN (select t.element ->> 'key' as key,
t.element ->> 'value' as value
from jsonb_array_elements(p_settings) as t(element))
GROUP BY i.userid, MatchResult
) AS MAInfo
GROUP BY MAInfo.userid
HAVING COUNT(MAInfo.userid) >= 1;
return query ...;
end;
$body$
If you want to check whether certain users have all the settings passed to the function, you don't really need a CASE expression, just a proper HAVING condition.
So maybe you want this instead:
CREATE TEMP TABLE hold_userID AS
SELECT i.userid
FROM public.sample_table i
WHERE (i.settingKey, i.settingvalue) IN (select t.element ->> 'key' as key,
t.element ->> 'value' as value
from jsonb_array_elements(p_settings) as t(element))
GROUP BY i.userid
HAVING COUNT(*) = jsonb_array_length(p_settings);
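For illustration, here is a minimal self-contained sketch of that check; the table demo_settings and its rows are made up to mirror the question:
-- Hypothetical data mirroring the question's sample_table
CREATE TEMP TABLE demo_settings (userid int, settingkey text, settingvalue text);
INSERT INTO demo_settings VALUES
    (1, 'devicetype', 'TestType'),
    (1, 'ostype',     'TestType'),
    (2, 'devicetype', 'TestType');  -- user 2 lacks the ostype setting
-- Users that have *all* settings from the JSON array
SELECT d.userid
FROM demo_settings d
WHERE (d.settingkey, d.settingvalue) IN (SELECT t.element ->> 'key',
                                                t.element ->> 'value'
                                         FROM jsonb_array_elements('[{"key":"devicetype","value":"TestType"},
                                                                     {"key":"ostype","value":"TestType"}]'::jsonb) AS t(element))
GROUP BY d.userid
HAVING count(*) = 2;  -- i.e. jsonb_array_length() of the parameter; returns only userid 1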
Or alternatively (note: this variant assumes the parameter is passed as a single JSON object, e.g. {"devicetype":"TestType","ostype":"TestType"}, rather than an array of key/value objects):
SELECT i.userid
FROM (
select userid, settingkey as key, settingvalue as value
from public.sample_table
) i
group by i.userid
HAVING jsonb_object_agg(key, value) = p_settings
I have a function that runs a SELECT query with three variants of the HAVING clause:
having sum() > 0
having sum() <= 0
no HAVING clause
Here is my function:
DROP function getf(arg int);
create or replace function getf(arg int)
returns table (
option_id bigint,
importQuantity bigint,
sold bigint,
remain bigint
)
as $$
begin
if arg = 1 then
    return query
    select b.option_id,
           SUM(b.import_quantity)::bigint    as importQuantity,
           SUM(b.sold_quantity)::bigint      as sold,
           SUM(b.remaining_quantity)::bigint as remain
    from batch b
    where b.product_id = 220
      and b.option_id in (select o.id from "option" o
                          where o.barcode like '%%' or o.barcode is null)
    group by b.option_id
    having sum(b.remaining_quantity) > 0;
elsif arg = 2 then
    return query
    select b.option_id,
           SUM(b.import_quantity)::bigint    as importQuantity,
           SUM(b.sold_quantity)::bigint      as sold,
           SUM(b.remaining_quantity)::bigint as remain
    from batch b
    where b.product_id = 220
      and b.option_id in (select o.id from "option" o
                          where o.barcode like '%%' or o.barcode is null)
    group by b.option_id
    having sum(b.remaining_quantity) <= 0;
elsif arg = 3 then
    return query
    select b.option_id,
           SUM(b.import_quantity)::bigint    as importQuantity,
           SUM(b.sold_quantity)::bigint      as sold,
           SUM(b.remaining_quantity)::bigint as remain
    from batch b
    where b.product_id = 220
      and b.option_id in (select o.id from "option" o
                          where o.barcode like '%%' or o.barcode is null)
    group by b.option_id;
end if;
end; $$ language plpgsql;
And I call my function:
select getf(3);
Question
The function works fine, but the SELECT queries differ only in the HAVING clause.
How can I build a dynamic query that appends the HAVING clause based on an if-else condition?
Since you need to change the structure of the query, not just replace a parameter value within it, you need dynamic SQL. But first let's put the query on a diet, that is, remove unnecessary parts.
b.option_id in (select o.id from "option" o where o.barcode like '%%' or o.barcode is null)
This is unnecessary: the WHERE clause contains a tautology (a statement that is always true). Why is this? The predicate o.barcode like '%%' actually checks whether the barcode contains 0 or more characters. Only NULL makes this false, but in that case the other predicate, barcode is null, evaluates to true, so the overall condition is always true. As a result the sub-query always returns ALL ids in the option table, and b.option_id is always in the list. (That of course assumes batch.option_id is properly defined as NOT NULL and a FK to option.)
Now let's consider the HAVING clause. The first two variants are trivial replacements: just replace the rvalue with '> 0' or '<= 0'. The third presents a problem, as you need to remove the entire clause rather than change the rvalue. It's not all that difficult, but it can also be turned into a simple rvalue change: the HAVING predicate merely needs to evaluate to true, and we accomplish that by replacing the rvalue with 'is not null'. Finally, we can create an array of the rvalue replacements and avoid any if-then logic on the parameter (arg), other than validating it contains a valid value. So: see demo
create or replace function getf(arg int)
returns table (option_id bigint
,importQuantity bigint
,sold bigint
,remain bigint
)
language plpgsql
as $$
declare
k_having_arg constant text[] = array['> 0','<= 0', 'is not null']; -- Replacement values for RVALUE below
k_base_query constant text =
'select b.option_id'
', SUM(b.import_quantity)::bigint '
', SUM(b.sold_quantity)::bigint '
', SUM(b.remaining_quantity)::bigint '
' from batch b'
' where b.product_id = 220'
' group by b.option_id '
' having sum(b.remaining_quantity) %s; '; -- expression's rvalue to be replaced
l_exec_query text;
begin
if arg not between 1 and 3 then
raise exception 'Invalid Arg value (%) out of range, Must be 1, 2, or 3.', arg;
end if;
l_exec_query = format (k_base_query,k_having_arg[arg]);
raise notice E'Running query\n%',l_exec_query;
return query execute l_exec_query;
end;
$$;
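Calling it is unchanged; for a set-returning function, selecting from it is the idiomatic form:
SELECT * FROM getf(3);  -- runs the variant with the always-true 'is not null' replacement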
Try using a CTE (WITH clause): you can put your query (up to the GROUP BY) in the CTE, then use your if conditions to select from the CTE and append the HAVING clause (a sketch follows the reference below).
For reference you can check:
How to declare a variable in a PostgreSQL query
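A rough sketch of that suggestion, keeping the question's batch table and folding the three cases into one filter on the aggregated CTE (the function name getf_cte is made up; the redundant barcode sub-select is dropped as discussed above):
create or replace function getf_cte(arg int)
returns table (option_id bigint, importQuantity bigint, sold bigint, remain bigint)
as $$
begin
    return query
    with sums as (
        select b.option_id,
               sum(b.import_quantity)::bigint    as importquantity,
               sum(b.sold_quantity)::bigint      as sold,
               sum(b.remaining_quantity)::bigint as remain
        from batch b
        where b.product_id = 220
        group by b.option_id
    )
    select s.option_id, s.importquantity, s.sold, s.remain
    from sums s
    where (arg = 1 and s.remain > 0)
       or (arg = 2 and s.remain <= 0)
       or  arg = 3;
end;
$$ language plpgsql;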
I have a column with data that looks like this in a single field:
"a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b"
Using some sort of regex or SQL function I would like to make it look like this:
"a,b,c,a,b,a,c,a,b"
Essentially I am trying to get rid of repeated values that appear in order but keep the unique changes from one value to another.
My knowledge of regular expressions pretty much ends at removing duplicates. Any help is greatly appreciated!
use regexp:
SELECT regexp_replace('a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b', '(\w)(,\1)+', '\1', 'g')
(\w)(,\1)+ matches: (any word char) followed by (a comma and that same word char) one or more times, so each run of repeats collapses to its first character and the sample becomes a,b,c,a,b,a,c,a,b.
Fiddle example
RegExr example
You can convert the elements into rows, check if the previous row is different to the current and then keep only those where something changed. This can then be aggregated back into a comma separated list:
select string_agg(ch, ',' order by idx)
from (
select u.ch, u.idx,
coalesce(u.ch <> lag(u.ch) over (order by u.idx), true) as is_change
from unnest(string_to_array('a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b', ',')) with ordinality as u(ch, idx)
) t
where is_change
The with ordinality returns the original array index, so that we can sort the elements correctly when aggregating them.
This can also be put into a function:
create or replace function cleanup(p_input text)
returns text
as
$$
select string_agg(ch, ',' order by idx)
from (
select u.ch, u.idx,
coalesce(u.ch <> lag(u.ch) over (order by u.idx), true) as is_change
from unnest(string_to_array(p_input, ',')) with ordinality as u(ch, idx)
) t
where is_change;
$$
language sql;
Online example
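Called with the question's sample input, the function returns the expected result:
SELECT cleanup('a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b');
-- a,b,c,a,b,a,c,a,b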
My understanding is:
If a character is the same as the previous character, you want to remove it from the string.
So I will use a WHILE loop and an IF statement in this case:
--CREATE TABLE TEST (ID VARCHAR(100));
--INSERT INTO TEST VALUES ('a,a,b,b,c,a,b,b,b,a,a,a,a,a,a,c,a,a,b');
DO $$
DECLARE
V_NEWSTRING VARCHAR(100) := '';
V_I INTEGER := 1;
V_LENGTH INTEGER := 0;
V_CURRENT VARCHAR(10) := '';
V_LAST VARCHAR(10) := '';
BEGIN
SELECT LENGTH(ID) FROM TEST INTO V_LENGTH;
WHILE V_I <= V_LENGTH LOOP
SELECT SUBSTRING(ID,V_I,1) from TEST INTO V_CURRENT;
IF V_CURRENT <> V_LAST THEN
V_NEWSTRING = V_NEWSTRING || V_CURRENT || ',';
END IF;
V_LAST = V_CURRENT;
V_I = V_I + 2;
END LOOP;
raise notice 'Value: %', V_NEWSTRING;
END $$;
Test result (PostgreSQL 9.4): the notice prints Value: a,b,c,a,b,a,c,a,b, (note the loop leaves a trailing comma and assumes single-character values).
this is my function:
CREATE OR REPLACE FUNCTION SANDBOX.DAILYVERIFY_DATE(TABLE_NAME regclass, DATE_DIFF INTEGER)
RETURNS void AS $$
DECLARE
RESULT BOOLEAN;
DATE DATE;
BEGIN
EXECUTE 'SELECT VORHANDENES_DATUM AS DATE, CASE WHEN DATUM IS NULL THEN FALSE ELSE TRUE END AS UPDATED FROM
(SELECT DISTINCT DATE VORHANDENES_DATUM FROM ' || TABLE_NAME ||
' WHERE DATE > CURRENT_DATE -14-'||DATE_DIFF|| '
) A
RIGHT JOIN
(
WITH compras AS (
SELECT ( NOW() + (s::TEXT || '' day'')::INTERVAL )::TIMESTAMP(0) AS DATUM
FROM generate_series(-14, -1, 1) AS s
)
SELECT DATUM::DATE
FROM compras)
B
ON DATUM = VORHANDENES_DATUM'
INTO date,result;
RAISE NOTICE '%', result;
INSERT INTO SANDBOX.UPDATED_TODAY VALUES (TABLE_NAME, DATE, RESULT);
END;
$$ LANGUAGE plpgsql;
It is supposed to upload rows into the table SANDBOX.UPDATED_TODAY, which contains table name, a date and a boolean.
The boolean shows whether there was an entry for that date in the table. The whole part inside the EXECUTE ... INTO works fine and gives me those days.
However, this code only inserts the first row of the query's result. What I want is that all 14 rows get inserted. Obviously, I need to change it into something like a loop or something completely different, but how exactly would that work?
Side note: I removed some unnecessary parts regarding those 2 parameters you can see. It does not have to do with that at all.
Put the INSERT statement inside the EXECUTE. You don't need the result of the SELECT for anything other than inserting it into that table, right? So just insert it directly as part of the same query:
CREATE OR REPLACE FUNCTION SANDBOX.DAILYVERIFY_DATE(TABLE_NAME regclass, DATE_DIFF INTEGER)
RETURNS void AS
$$
BEGIN
EXECUTE
'INSERT INTO SANDBOX.UPDATED_TODAY
SELECT ' || QUOTE_LITERAL(TABLE_NAME) || ', VORHANDENES_DATUM, CASE WHEN DATUM IS NULL THEN FALSE ELSE TRUE END
FROM (
SELECT DISTINCT DATE VORHANDENES_DATUM FROM ' || TABLE_NAME ||
' WHERE DATE > CURRENT_DATE -14-'||DATE_DIFF|| '
) A
RIGHT JOIN (
WITH compras AS (
SELECT ( NOW() + (s::TEXT || '' day'')::INTERVAL )::TIMESTAMP(0) AS DATUM
FROM generate_series(-14, -1, 1) AS s
)
SELECT DATUM::DATE
FROM compras
) B
ON DATUM = VORHANDENES_DATUM';
END;
$$ LANGUAGE plpgsql;
The idiomatic way to loop through dynamic query results would be
FOR date, result IN
EXECUTE 'SELECT ...'
LOOP
INSERT INTO ...
END LOOP;
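Wired into this function, that pattern would look roughly like this (a sketch; the quoted SELECT stands for the same dynamic query as in the question, minus the INTO):
FOR date, result IN
    EXECUTE 'SELECT VORHANDENES_DATUM, DATUM IS NOT NULL FROM ...'  -- same dynamic SQL as above
LOOP
    INSERT INTO SANDBOX.UPDATED_TODAY VALUES (TABLE_NAME, date, result);
END LOOP;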
I am trying to create crosstab queries in PostgreSQL such that it automatically generates the crosstab columns instead of hardcoding it. I have written a function that dynamically generates the column list that I need for my crosstab query. The idea is to substitute the result of this function in the crosstab query using dynamic sql.
I know how to do this easily in SQL Server, but my limited knowledge of PostgreSQL is hindering my progress here. I was thinking of storing the result of function that generates the dynamic list of columns into a variable and use that to dynamically build the sql query. It would be great if someone could guide me regarding the same.
-- Table which has be pivoted
CREATE TABLE test_db
(
kernel_id int,
key int,
value int
);
INSERT INTO test_db VALUES
(1,1,99),
(1,2,78),
(2,1,66),
(3,1,44),
(3,2,55),
(3,3,89);
-- This function dynamically returns the list of columns for crosstab
CREATE FUNCTION test() RETURNS TEXT AS '
DECLARE
key_id int;
text_op TEXT = '' kernel_id int, '';
BEGIN
FOR key_id IN SELECT DISTINCT key FROM test_db ORDER BY key LOOP
text_op := text_op || key_id || '' int , '' ;
END LOOP;
text_op := text_op || '' DUMMY text'';
RETURN text_op;
END;
' LANGUAGE 'plpgsql';
-- This query works. I just need to convert the static list
-- of crosstab columns to be generated dynamically.
SELECT * FROM
crosstab
(
'SELECT kernel_id, key, value FROM test_db ORDER BY 1,2',
'SELECT DISTINCT key FROM test_db ORDER BY 1'
)
AS x (kernel_id int, key1 int, key2 int, key3 int); -- How can I replace ..
-- .. this static list with a dynamically generated list of columns ?
You can use the provided C function crosstab_hash for this.
The manual is not very clear in this respect. It's mentioned at the end of the chapter on crosstab() with two parameters:
You can create predefined functions to avoid having to write out the
result column names and types in each query. See the examples in the
previous section. The underlying C function for this form of crosstab
is named crosstab_hash.
For your example:
CREATE OR REPLACE FUNCTION f_cross_test_db(text, text)
RETURNS TABLE (kernel_id int, key1 int, key2 int, key3 int)
AS '$libdir/tablefunc','crosstab_hash' LANGUAGE C STABLE STRICT;
Call:
SELECT * FROM f_cross_test_db(
'SELECT kernel_id, key, value FROM test_db ORDER BY 1,2'
,'SELECT DISTINCT key FROM test_db ORDER BY 1');
Note that you need to create a distinct crosstab_hash function for every crosstab function with a different return type.
Related:
PostgreSQL row to columns
Your function to generate the column list is rather convoluted; it can be replaced with this SQL query:
SELECT 'kernel_id int, '
|| string_agg(DISTINCT key::text, ' int, ' ORDER BY key::text)
|| ' int, DUMMY text'
FROM test_db;
And it cannot be used dynamically anyway.
@erwin-brandstetter: The return type of the function isn't an issue if you're always returning a JSON type with the converted results.
Here is the function I came up with:
CREATE OR REPLACE FUNCTION report.test(
i_start_date TIMESTAMPTZ,
i_end_date TIMESTAMPTZ,
i_interval INT
) RETURNS TABLE (
tab JSON
) AS $ab$
DECLARE
_key_id TEXT;
_text_op TEXT = '';
_ret JSON;
BEGIN
-- SELECT DISTINCT for query results
FOR _key_id IN
SELECT DISTINCT at_name
FROM report.company_data_date cd
JOIN report.company_data_amount cda ON cd.id = cda.company_data_date_id
JOIN report.amount_types at ON cda.amount_type_id = at.id
WHERE date_start BETWEEN i_start_date AND i_end_date
AND interval_type_id = i_interval
LOOP
-- build function_call with datatype of column
IF char_length(_text_op) > 1 THEN
_text_op := _text_op || ', ' || _key_id || ' NUMERIC(20,2)';
ELSE
_text_op := _text_op || _key_id || ' NUMERIC(20,2)';
END IF;
END LOOP;
-- build query with parameter filters
RETURN QUERY
EXECUTE '
SELECT array_to_json(array_agg(row_to_json(t)))
FROM (
SELECT * FROM crosstab(''SELECT date_start, at.at_name, cda.amount ct
FROM report.company_data_date cd
JOIN report.company_data_amount cda ON cd.id = cda.company_data_date_id
JOIN report.amount_types at ON cda.amount_type_id = at.id
WHERE date_start between $$' || i_start_date::TEXT || '$$ AND $$' || i_end_date::TEXT || '$$
AND interval_type_id = ' || i_interval::TEXT || ' ORDER BY date_start'')
AS ct (date_start timestamptz, ' || _text_op || ')
) t;';
END;
$ab$ LANGUAGE 'plpgsql';
So, when you run it, you get the dynamic results in JSON, and you don't need to know how many values were pivoted:
select * from report.test(now()- '1 week'::interval, now(), 1);
tab
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
[{"date_start":"2015-07-27T08:40:01.277556-04:00","burn_rate":0.00,"monthly_revenue":5800.00,"cash_balance":0.00},{"date_start":"2015-07-27T08:50:02.458868-04:00","burn_rate":34000.00,"monthly_revenue":15800.00,"cash_balance":24000.00}]
(1 row)
Edit: If you have mixed datatypes in your crosstab, you can add logic to look it up for each column with something like this:
SELECT a.attname as column_name, format_type(a.atttypid, a.atttypmod) AS data_type
FROM pg_attribute a
JOIN pg_class b ON (a.attrelid = b.oid)
JOIN pg_catalog.pg_namespace n ON n.oid = b.relnamespace
WHERE n.nspname = $$schema_name$$ AND b.relname = $$table_name$$ and a.attstattarget = -1;
I realise this is an older post but I struggled for a little while with the same issue.
My Problem Statement:
I had a table with multiple values in a field and wanted to create a crosstab query with 40+ column headings per row.
My solution was to create a function which looped through the table column to grab the values that I wanted to use as column headings within the crosstab query.
Within this function I could then create the crosstab query. In my use case I added the crosstab result into a separate table.
E.g.
CREATE OR REPLACE FUNCTION field_values_ct ()
RETURNS VOID AS $$
DECLARE rec RECORD;
DECLARE str text;
BEGIN
str := '"Issue ID" text,';
-- looping to get column heading string
FOR rec IN SELECT DISTINCT field_name
FROM issue_fields
ORDER BY field_name
LOOP
str := str || '"' || rec.field_name || '" text' ||',';
END LOOP;
str:= substring(str, 0, length(str));
EXECUTE 'CREATE EXTENSION IF NOT EXISTS tablefunc;
DROP TABLE IF EXISTS temp_issue_fields;
CREATE TABLE temp_issue_fields AS
SELECT *
FROM crosstab(''select issue_id, field_name, field_value from issue_fields order by 1'',
''SELECT DISTINCT field_name FROM issue_fields ORDER BY 1'')
AS final_result ('|| str ||')';
END;
$$ LANGUAGE plpgsql;
The approach described here worked well for me.
Instead of retrieving the pivot table directly, the easier approach is to let the function generate a SQL query string, then execute the resulting string on demand.
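For instance, a minimal sketch of that two-step idea against the test_db table from the question (the function name gen_crosstab_sql is made up):
-- Step 1: a function that only *builds* the crosstab statement as text
CREATE OR REPLACE FUNCTION gen_crosstab_sql() RETURNS text AS $$
SELECT 'SELECT * FROM crosstab('
    || '''SELECT kernel_id, key, value FROM test_db ORDER BY 1,2'','
    || '''SELECT DISTINCT key FROM test_db ORDER BY 1'')'
    || ' AS x (kernel_id int, '
    || string_agg('key' || key || ' int', ', ' ORDER BY key)
    || ')'
FROM (SELECT DISTINCT key FROM test_db) d;
$$ LANGUAGE sql;
-- Step 2: execute the generated string on demand, e.g. in psql:
-- SELECT gen_crosstab_sql() \gexec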
I'm attempting to write an area of a function in PL/pgSQL that loops through an hstore and sets a record's column(the key of the hstore) to a specific value (the value of the hstore). I'm using Postgres 9.1.
The hstore will look like: ' "column1"=>"value1","column2"=>"value2" '
Generally, here is what I want from a function that takes in an hstore and has a record with values to modify:
FOR my_key, my_value IN
SELECT key,
value
FROM EACH( in_hstore )
LOOP
EXECUTE 'SELECT $1'
INTO my_row.my_key
USING my_value;
END LOOP;
The error which I am getting with this code:
"myrow" has no field "my_key". I've been searching for quite a while now for a solution, but everything else I've tried to achieve the same result hasn't worked.
Simpler alternative to your posted answer. Should perform much better.
This function retrieves a row from a given table (in_table_name) and primary key value (in_row_pk), and inserts it as new row into the same table, with some values replaced (in_override_values). The new primary key value as per default is returned (pk_new).
CREATE OR REPLACE FUNCTION f_clone_row(in_table_name regclass
, in_row_pk int
, in_override_values hstore
, OUT pk_new int)
LANGUAGE plpgsql AS
$func$
DECLARE
_pk text; -- name of PK column
_cols text; -- list of names of other columns
BEGIN
-- Get name of PK column
SELECT INTO _pk a.attname
FROM pg_catalog.pg_index i
JOIN pg_catalog.pg_attribute a ON a.attrelid = i.indrelid
AND a.attnum = i.indkey[0] -- single PK col!
WHERE i.indrelid = in_table_name
AND i.indisprimary;
-- Get list of columns excluding PK column
SELECT INTO _cols string_agg(quote_ident(attname), ',')
FROM pg_catalog.pg_attribute
WHERE attrelid = in_table_name -- regclass used as OID
AND attnum > 0 -- exclude system columns
AND attisdropped = FALSE -- exclude dropped columns
AND attname <> _pk; -- exclude PK column
-- INSERT cloned row with override values, returning new PK
EXECUTE format('
INSERT INTO %1$I (%2$s)
SELECT %2$s
FROM (SELECT (t #= $1).* FROM %1$I t WHERE %3$I = $2) x
RETURNING %3$I'
, in_table_name, _cols, _pk)
USING in_override_values, in_row_pk -- use override values directly
INTO pk_new; -- return new pk directly
END
$func$;
Call:
SELECT f_clone_row('tbl', 1, '"col1"=>"foo_new","col2"=>"bar_new"');
db<>fiddle here
Old sqlfiddle
Use regclass as input parameter type, so only valid table names can be used to begin with and SQL injection is ruled out. The function also fails earlier and more gracefully if you should provide an illegal table name.
Use an OUT parameter (pk_new) to simplify the syntax.
No need to figure out the next value for the primary key manually. It is inserted automatically and returned after the fact. That's not only simpler and faster, you also avoid wasted or out-of-order sequence numbers.
Use format() to simplify the assembly of the dynamic query string and make it less error-prone. Note how I use positional parameters for identifiers and unquoted strings respectively.
I build on your implicit assumption that allowed tables have a single primary key column of type integer with a column default. Typically serial columns.
Key element of the function is the final INSERT:
Merge override values with the existing row using the #= operator in a subselect and decompose the resulting row immediately.
Then you can select only relevant columns in the main SELECT.
Let Postgres assign the default value for the PK and get it back with the RETURNING clause.
Write the returned value into the OUT parameter directly.
All done in a single SQL command, that is generally fastest.
Since I didn't want to have to use any external functions for speed purposes, I created a solution using hstores to insert a record into a table:
CREATE OR REPLACE FUNCTION fn_clone_row(in_table_name character varying, in_row_pk integer, in_override_values hstore)
RETURNS integer
LANGUAGE plpgsql
AS $function$
DECLARE
my_table_pk_col_name varchar;
my_key text;
my_value text;
my_row record;
my_pk_default text;
my_pk_new integer;
my_pk_new_text text;
my_row_hstore hstore;
my_row_keys text[];
my_row_keys_list text;
my_row_values text[];
my_row_values_list text;
BEGIN
-- Get the next value of the pk column for the table.
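-- Note: pg_attrdef.adsrc was removed in PostgreSQL 12;
-- on newer versions use pg_get_expr(ad.adbin, ad.adrelid) instead.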
SELECT ad.adsrc,
at.attname
INTO my_pk_default,
my_table_pk_col_name
FROM pg_attrdef ad
JOIN pg_attribute at
ON at.attnum = ad.adnum
AND at.attrelid = ad.adrelid
JOIN pg_class c
ON c.oid = at.attrelid
JOIN pg_constraint cn
ON cn.conrelid = c.oid
AND cn.contype = 'p'
AND cn.conkey[1] = at.attnum
JOIN pg_namespace n
ON n.oid = c.relnamespace
WHERE c.relname = in_table_name
AND n.nspname = 'public';
-- Get the next value of the pk in a local variable
EXECUTE ' SELECT ' || my_pk_default
INTO my_pk_new;
-- Set the integer value back to text for the hstore
my_pk_new_text := my_pk_new::text;
-- Add the next value statement to the hstore of changes to make.
in_override_values := in_override_values || hstore( my_table_pk_col_name, my_pk_new_text );
-- Copy over only the given row to the record.
EXECUTE ' SELECT * '
' FROM ' || quote_ident( in_table_name ) ||
' WHERE ' || quote_ident( my_table_pk_col_name ) ||
' = ' || quote_nullable( in_row_pk )
INTO my_row;
-- Replace the values that need to be changed in the column name array
my_row := my_row #= in_override_values;
-- Create an hstore of my record
my_row_hstore := hstore( my_row );
-- Create a string of comma-delimited, quote-enclosed column names
my_row_keys := akeys( my_row_hstore );
SELECT array_to_string( array_agg( quote_ident( x.colname ) ), ',' )
INTO my_row_keys_list
FROM ( SELECT unnest( my_row_keys ) AS colname ) x;
-- Create a string of comma-delimited, quote-enclosed column values
my_row_values := avals( my_row_hstore );
SELECT array_to_string( array_agg( quote_nullable( x.value ) ), ',' )
INTO my_row_values_list
FROM ( SELECT unnest( my_row_values ) AS value ) x;
-- Insert the values into the columns of a new row
EXECUTE 'INSERT INTO ' || in_table_name || '(' || my_row_keys_list || ')'
' VALUES (' || my_row_values_list || ')';
RETURN my_pk_new;
END
$function$;
It's quite a bit longer than what I had envisioned, but it works and is actually quite speedy.
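Usage mirrors the simpler alternative above (table name and override values are hypothetical):
SELECT fn_clone_row('tbl', 1, '"col1"=>"foo_new","col2"=>"bar_new"'::hstore);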