Dynamic RETURN QUERY EXECUTE with ILIKE in PostgreSQL

The search filter in the function below doesn't seem to work. If I don't provide a search parameter, it works; otherwise I get no recordset back. I'm assuming I'm making a mess of the single-quoting and ILIKE, but I'm not sure how to rewrite this properly. Suggestions?
CREATE OR REPLACE FUNCTION get_operator_basic_by_operator(
    _operator_id UUID DEFAULT NULL,
    _search TEXT DEFAULT NULL,
    _page_number INTEGER DEFAULT 1,
    _page_size INTEGER DEFAULT 10,
    _sort_col TEXT DEFAULT 'username',
    _sort_dir TEXT DEFAULT 'asc',
    _include_deleted BOOLEAN DEFAULT FALSE
)
RETURNS TABLE (
    id UUID,
    party_id UUID,
    party_name TEXT,
    username VARCHAR(32),
    profile_picture_uri VARCHAR(512),
    first_name VARCHAR(64),
    last_name VARCHAR(64),
    street VARCHAR(128),
    specifier VARCHAR(128),
    city VARCHAR(64),
    state VARCHAR(2),
    zipcode VARCHAR(9),
    primary_email CITEXT,
    primary_phone VARCHAR(10),
    secondary_email CITEXT,
    secondary_phone VARCHAR(10),
    last_login TIMESTAMP WITH TIME ZONE,
    created TIMESTAMP WITH TIME ZONE,
    deleted TIMESTAMP WITH TIME ZONE
)
AS $$
DECLARE
    _offset BIGINT;
BEGIN
    IF (_page_number < 1 OR _page_number IS NULL) THEN
        RAISE EXCEPTION '_page_number cannot be null or less than 1.';
    END IF;
    IF (_page_size < 1 OR _page_size IS NULL) THEN
        RAISE EXCEPTION '_page_size cannot be null or less than 1.';
    END IF;
    IF (_sort_dir <> 'asc' AND _sort_dir <> 'desc') THEN
        RAISE EXCEPTION '_sort_dir must be "asc" or "desc".';
    END IF;
    _offset := (_page_size * (_page_number - 1));
    RETURN QUERY EXECUTE '
        SELECT
            o.id,
            p.id,
            p.party_name,
            o.username,
            o.profile_picture_uri,
            o.first_name,
            o.last_name,
            o.street,
            o.specifier,
            o.city,
            o.state,
            o.zipcode,
            o.primary_email,
            o.primary_phone,
            o.secondary_email,
            o.secondary_phone,
            o.last_login,
            o.created,
            o.deleted
        FROM
            operator o
        LEFT JOIN
            party p ON o.party_id = p.id
        WHERE ( -- include all or only those active, based on _include_deleted
            $1 OR o.deleted > statement_timestamp()
        )
        AND o.party_id IN ( -- limit to operators in same party
            SELECT oi.party_id FROM operator oi WHERE oi.id = $2
        )
        AND ( -- use optional search filter
            $3 IS NULL
            OR o.username ILIKE ''%$3%''
            OR o.first_name ILIKE ''%$3%''
            OR o.last_name ILIKE ''%$3%''
            OR o.primary_email ILIKE ''%$3%''
        )
        ORDER BY ' || quote_ident(_sort_col) || ' ' || _sort_dir || '
        LIMIT $4
        OFFSET $5'
    USING _include_deleted, _operator_id, _search, _page_size, _offset;
END;
$$ LANGUAGE plpgsql;

Parameter placeholders are passed as values and are not expanded inside string literals, so ''%$3%'' matches the literal text %$3%. Concatenate the wildcards around the parameter instead:
...
o.username ILIKE concat(''%'', $3, ''%'')
...
Personally I would use format() and dollar-quotes.
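A minimal sketch of that approach for the ORDER BY part (assuming the operator table from the question; %I quote-escapes the identifier, and %s is only safe here because _sort_dir was already validated against 'asc'/'desc'):
RETURN QUERY EXECUTE format(
    $q$
    SELECT o.id, o.username
    FROM   operator o
    ORDER  BY %I %s
    LIMIT  $1 OFFSET $2
    $q$, _sort_col, _sort_dir)
USING _page_size, _offset;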

Strings are handled as-is; you should pass ready-to-use values:
EXECUTE '... o.username ILIKE $3 ...' using ..., '%' || _search || '%', ...
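Applied to the function in the question, a sketch of the corrected search block; the wildcards are attached to the value in the USING list, and since '%' || NULL || '%' is NULL, the $3 IS NULL branch still covers a missing search term:
        AND ( -- use optional search filter
            $3 IS NULL
            OR o.username ILIKE $3
            OR o.first_name ILIKE $3
            OR o.last_name ILIKE $3
            OR o.primary_email ILIKE $3
        )
        ORDER BY ' || quote_ident(_sort_col) || ' ' || _sort_dir || '
        LIMIT $4
        OFFSET $5'
    USING _include_deleted, _operator_id, '%' || _search || '%', _page_size, _offset;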

Related

(Select Procedure) SUM of two different SMALLINT fields from different tables. I am using SQL Manager for Interbase and Firebird

I want to get the SUM of two different SMALLINT fields from different tables in select procedure. I am using SQL Manager for Interbase and Firebird
The values of field TRAINING_SRCVDAYS in Table E are 1 and 2 while the value of field LEAVE_WITHPAY_NUMDAY in Table A is 2.
Table E    Table A
   1          2
   2

Expected output (SUM):           3    2
Actual output of my procedure:   3    4
Here is my code; please help.
CREATE PROCEDURE AAAASAMPLE(
    UPDATEHANDLER VARCHAR(50) CHARACTER SET ISO8859_1 COLLATE ISO8859_1,
    WHAT_YEAR VARCHAR(50) CHARACTER SET ISO8859_1 COLLATE ISO8859_1)
RETURNS(
    FULLNAME VARCHAR(100) CHARACTER SET ISO8859_1 COLLATE ISO8859_1,
    TRAINING_SRCVDAYS SMALLINT,
    LEAVE_WITHPAY_NUMDAY SMALLINT,
    SICKLEAVE SMALLINT)
AS
BEGIN
  FOR
    SELECT
      C.EMP_SURNAME || ', ' || C.EMP_FIRSTNAME || ' ' || C.EMP_MIDDLENAME || ' ' || C.EMP_SUFFIXNAME,
      SUM(E.TRAINING_SRCVDAYS),
      SUM(A.LEAVE_WITHPAY_NUMDAY),
      SUM(E.TRAINING_SRCVDAYS) - SUM(A.LEAVE_WITHPAY_NUMDAY)
    FROM LEAVE_TABLE A, POSITION_TABLE B, EMP_TABLE C, TRAINTRN_TABLE D, TRAINTYP_TABLE E
    WHERE
      (A.EMP_PK = :UPDATEHANDLER AND A.LEAVE_FROM_YEAR = :WHAT_YEAR AND A.EMP_PK = C.EMP_PK
       AND A.POSITION_PK = B.POSITION_PK AND B.POSITION_CLASS_REF = 0)
      AND
      (D.EMP_PK = C.EMP_PK AND D.TRAINING_PK = E.TRAINING_PK AND D.EMP_PK = :UPDATEHANDLER
       AND E.TRAINING_SRVCCRDT = 'Yes' AND E.TRAINING_FROMYEAR = :WHAT_YEAR)
    GROUP BY
      C.EMP_SURNAME || ', ' || C.EMP_FIRSTNAME || ' ' || C.EMP_MIDDLENAME || ' ' || C.EMP_SUFFIXNAME
    INTO
      :FULLNAME,
      :TRAINING_SRCVDAYS,
      :LEAVE_WITHPAY_NUMDAY,
      :SICKLEAVE
  DO
  BEGIN
    SUSPEND;
  END
END;

Postgresql, select a "fake" row

In Postgres 8.4 or higher, what is the most efficient way to get a row of data populated by defaults without actually creating the row. Eg, as a transaction (pseudocode):
create table "mytable"
(
id serial PRIMARY KEY NOT NULL,
parent_id integer NOT NULL DEFAULT 1,
random_id integer NOT NULL DEFAULT random(),
)
begin transaction
fake_row = insert into mytable (id) values (0) returning *;
delete from mytable where id=0;
return fake_row;
end transaction
Basically I'd expect a query with a single row where parent_id is 1 and random_id is a random number (or other function return value) but I don't want this record to persist in the table or impact on the primary key sequence serial_id_seq.
My options seem to be using a transaction like above or creating views which are copies of the table with the fake row added but I don't know all the pros and cons of each or whether a better way exists.
I'm looking for an answer that assumes no prior knowledge of the datatypes or default values of any column except id or the number or ordering of the columns. Only the table name will be known and that a record with id 0 should not exist in the table.
In the past I created the fake record 0 as a permanent record but I've come to consider this record a type of pollution (since I typically have to filter it out of future queries).
You can copy the table definition and its defaults to a temp table with:
CREATE TEMP TABLE table_name_rt (LIKE table_name INCLUDING DEFAULTS);
And use this temp table to generate dummy rows. Such a table is dropped at the end of the session (or transaction) and is only visible to the current session.
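A minimal usage sketch, assuming the mytable definition from the question; supplying id explicitly means the original serial sequence is never advanced, while the other columns fall back to their copied defaults:
CREATE TEMP TABLE mytable_rt (LIKE mytable INCLUDING DEFAULTS) ON COMMIT DROP;

INSERT INTO mytable_rt (id) VALUES (0)  -- other columns get their defaults
RETURNING *;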
You can query the catalog and build a dynamic query. Say we have this table:
create table test10 (
  id serial primary key,
  first_name varchar(100),
  last_name varchar(100) default 'Tom',
  age int not null default 38,
  salary float default 100.22
);
When you run the following query:
SELECT string_agg( txt, ' ' order by id )
FROM (
  select 1 id, 'SELECT ' txt
  union all
  select 2, -9999 || ' as id '
  union all
  select 3, ', '
         || coalesce( column_default, 'null' || '::' || c.data_type )
         || ' as ' || c.column_name
  from information_schema.columns c
  where table_schema = 'public'
    and table_name = 'test10'
    and ordinal_position > 1
) xx;
you will get this string as a result:
"SELECT -9999 as id , null::character varying as first_name ,
'Tom'::character varying as last_name , 38 as age , 100.22 as salary"
then execute this query and you will get the "phantom row".
We can build a function that builds and executes the query and returns our row as a result:
CREATE OR REPLACE FUNCTION get_phantom_rec( p_i test10.id%type )
RETURNS test10 AS $$
DECLARE
  v_sql text;
  myrow test10%rowtype;
BEGIN
  SELECT string_agg( txt, ' ' order by id )
  INTO v_sql
  FROM (
    select 1 id, 'SELECT ' txt
    union all
    select 2, p_i || ' as id '
    union all
    select 3, ', '
           || coalesce( column_default, 'null' || '::' || c.data_type )
           || ' as ' || c.column_name
    from information_schema.columns c
    where table_schema = 'public'
      and table_name = 'test10'
      and ordinal_position > 1
  ) xx;
  EXECUTE v_sql INTO myrow;
  RETURN myrow;
END $$ LANGUAGE plpgsql;
and then this simple query gives you what you want:
select * from get_phantom_rec ( -9999 );
  id   | first_name | last_name | age | salary
-------+------------+-----------+-----+--------
 -9999 |            | Tom       | 38  | 100.22
I would just select the fake values as literals:
select 1 id, 1 parent_id, 1 user_id
The returned row will be (virtually) indistinguishable from a real row.
To get the values from the catalog:
select
  0 as id, -- special case for the serial type, just return 0
  (select column_default::int -- cast to int, because we know the column is int
   from INFORMATION_SCHEMA.COLUMNS
   where table_name = 'mytable'
     and column_name = 'parent_id') as parent_id,
  (select column_default::int -- cast to int, because we know the column is int
   from INFORMATION_SCHEMA.COLUMNS
   where table_name = 'mytable'
     and column_name = 'user_id') as user_id;
Note that you must know what the columns are and their type, but this is reasonable. If you change the table schema (except default value), you would need to tweak the query.

Test for null in function with varying parameters

I have a Postgres function:
create function myfunction(integer, text, text, text, text, text, text)
RETURNS table(id int, match text, score int, nr int, nr_extra character varying,
              info character varying, postcode character varying,
              street character varying, place character varying,
              country character varying, the_geom geometry)
AS $$
BEGIN
  return query (
    select a.id, 'address' as match, 1 as score, a.ad_nr, a.ad_nr_extra,
           a.ad_info, a.ad_postcode, s.name as street, p.name as place,
           c.name as country, a.wkb_geometry as wkb_geometry
    from "Addresses" a
    left join "Streets" s on a.street_id = s.id
    left join "Places" p on s.place_id = p.id
    left join "Countries" c on p.country_id = c.id
    where c.name = $7
      and p.name = $6
      and s.name = $5
      and a.ad_nr = $1
      and a.ad_nr_extra = $2
      and a.ad_info = $3
      and ad_postcode = $4);
END;
$$
LANGUAGE plpgsql;
This function fails to give the right result when one or more of the variables passed in are NULL, because a comparison like ad_postcode = NULL yields NULL and the row is filtered out.
What can I do to test for NULL inside the query?
I disagree with some of the advice in other answers. This can be done with PL/pgSQL, and I think it is mostly far superior to assembling queries in a client application. It is faster and cleaner, and the app only sends the bare minimum across the wire in requests. SQL statements are kept inside the database, which makes them easier to maintain (unless you want to collect all business logic in the client application; this depends on the general architecture).
PL/pgSQL function with dynamic SQL
CREATE OR REPLACE FUNCTION func(
     _ad_nr int = NULL
   , _ad_nr_extra text = NULL
   , _ad_info text = NULL
   , _ad_postcode text = NULL
   , _sname text = NULL
   , _pname text = NULL
   , _cname text = NULL)
  RETURNS TABLE(id int, match text, score int, nr int, nr_extra text
              , info text, postcode text, street text, place text
              , country text, the_geom geometry)
  LANGUAGE plpgsql AS
$func$
BEGIN
   -- RAISE NOTICE '%',  -- for debugging
   RETURN QUERY EXECUTE concat(
      $$SELECT a.id, 'address'::text, 1 AS score, a.ad_nr, a.ad_nr_extra
           , a.ad_info, a.ad_postcode$$
    , CASE WHEN (_sname, _pname, _cname) IS NULL THEN ', NULL::text' ELSE ', s.name' END  -- street
    , CASE WHEN (_pname, _cname) IS NULL THEN ', NULL::text' ELSE ', p.name' END          -- place
    , CASE WHEN _cname IS NULL THEN ', NULL::text' ELSE ', c.name' END                    -- country
    , ', a.wkb_geometry'
    , concat_ws('
       JOIN   '
      , '
       FROM   "Addresses" a'
      , CASE WHEN NOT (_sname, _pname, _cname) IS NULL THEN '"Streets" s ON s.id = a.street_id' END
      , CASE WHEN NOT (_pname, _cname) IS NULL THEN '"Places" p ON p.id = s.place_id' END
      , CASE WHEN _cname IS NOT NULL THEN '"Countries" c ON c.id = p.country_id' END
      )
    , concat_ws('
       AND    '
      , '
       WHERE  TRUE'
      , CASE WHEN $1 IS NOT NULL THEN 'a.ad_nr = $1' END
      , CASE WHEN $2 IS NOT NULL THEN 'a.ad_nr_extra = $2' END
      , CASE WHEN $3 IS NOT NULL THEN 'a.ad_info = $3' END
      , CASE WHEN $4 IS NOT NULL THEN 'a.ad_postcode = $4' END
      , CASE WHEN $5 IS NOT NULL THEN 's.name = $5' END
      , CASE WHEN $6 IS NOT NULL THEN 'p.name = $6' END
      , CASE WHEN $7 IS NOT NULL THEN 'c.name = $7' END
      )
   )
   USING $1, $2, $3, $4, $5, $6, $7;
END
$func$;
Call:
SELECT * FROM func(1, '_ad_nr_extra', '_ad_info', '_ad_postcode', '_sname');
SELECT * FROM func(1, _pname := 'foo');
Since all function parameters have default values, you can use positional notation, named notation or mixed notation in the function call, as you choose. See:
Functions with variable number of input parameters
More explanation for basics of dynamic SQL:
Refactor a PL/pgSQL function to return the output of various SELECT queries
The concat() function is instrumental for building the string. It was introduced with Postgres 9.1.
The ELSE branch of a CASE expression defaults to NULL when not present, which simplifies the code.
The USING clause for EXECUTE makes SQL injection impossible, since values are passed as values rather than interpolated into the query string, exactly like in prepared statements.
NULL values are used to ignore parameters here. They are not actually used to search.
You don't need parentheses around the SELECT with RETURN QUERY.
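One construct above that is easy to misread: a row value like (_sname, _pname, _cname) IS NULL is true only if all elements are NULL, which is exactly what the CASE branches test. A quick illustration:
SELECT (NULL, NULL) IS NULL   AS all_null       -- true
     , (1, NULL)    IS NULL   AS some_null      -- false
     , (1, NULL) IS NOT NULL  AS all_not_null   -- false: not ALL elements are non-null
     , NOT (1, NULL) IS NULL  AS any_not_null;  -- true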
Simple SQL function
You could do it with a plain SQL function and avoid dynamic SQL. In some cases this may be faster, but I wouldn't expect it here. Planning the query without unnecessary joins and predicates typically produces the best results, and the planning cost for a simple query like this is almost negligible.
CREATE OR REPLACE FUNCTION func_sql(
     _ad_nr int = NULL
   , _ad_nr_extra text = NULL
   , _ad_info text = NULL
   , _ad_postcode text = NULL
   , _sname text = NULL
   , _pname text = NULL
   , _cname text = NULL)
  RETURNS TABLE(id int, match text, score int, nr int, nr_extra text
              , info text, postcode text, street text, place text
              , country text, the_geom geometry)
  LANGUAGE sql AS
$func$
SELECT a.id, 'address' AS match, 1 AS score, a.ad_nr, a.ad_nr_extra
     , a.ad_info, a.ad_postcode
     , s.name AS street, p.name AS place
     , c.name AS country, a.wkb_geometry
FROM   "Addresses" a
LEFT   JOIN "Streets" s ON s.id = a.street_id
LEFT   JOIN "Places" p ON p.id = s.place_id
LEFT   JOIN "Countries" c ON c.id = p.country_id
WHERE ($1 IS NULL OR a.ad_nr = $1)
AND   ($2 IS NULL OR a.ad_nr_extra = $2)
AND   ($3 IS NULL OR a.ad_info = $3)
AND   ($4 IS NULL OR a.ad_postcode = $4)
AND   ($5 IS NULL OR s.name = $5)
AND   ($6 IS NULL OR p.name = $6)
AND   ($7 IS NULL OR c.name = $7)
$func$;
Identical call.
To effectively ignore parameters with NULL values:
($1 IS NULL OR a.ad_nr = $1)
To actually use NULL values as parameters, use this construct instead:
($1 IS NULL AND a.ad_nr IS NULL OR a.ad_nr = $1) -- AND binds before OR
This also allows for indexes to be used.
For the case at hand, replace all instances of LEFT JOIN with JOIN.
Asides
Don't use name and id as column names. They are not descriptive, and when you join a bunch of tables (as you do a lot in a relational database), you end up with several columns all named name or id and have to attach aliases to sort out the mess.
Please format your SQL properly, at least when asking public questions. But do it privately as well, for your own good.
If you can modify the query, you could do something like
and (ad_postcode = $4 OR $4 IS NULL)
You can use
c.name IS NOT DISTINCT FROM $7
It will return true if c.name and $7 are equal or both are null.
Or you can use
(c.name = $7 or $7 is null )
It will return true if c.name and $7 are equal or $7 is null.
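A quick comparison of the two constructs with NULL involved:
SELECT NULL = NULL                     AS with_equals       -- NULL, which is not true
     , NULL IS NOT DISTINCT FROM NULL  AS not_distinct      -- true
     , 1 IS NOT DISTINCT FROM 2        AS different_values; -- false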
Several things...
First, as a side note: the semantics of your query might need a revisit. Some of the stuff in your where clauses might actually belong in your join clauses, like:
from ...
left join ... on ... and ...
left join ... on ... and ...
When they don't, you should most probably be using an inner join rather than a left join.
Second, there is a is not distinct from operator, which can occasionally be handy in place of =. a is not distinct from b is basically equivalent to a = b or a is null and b is null.
Note, however, that is not distinct from does NOT use an index, whereas = and is null actually do. You could use (field = $i or $i is null) instead in your particular case, and it will yield the optimal plan if you're using the latest version of Postgres:
https://gist.github.com/ddebernardy/5884267

How to approach data warehouse (PostgreSQL) documentation?

We have a small data warehouse in a PostgreSQL database and I have to document all the tables.
I thought I could add a comment to every column and table, using a pipe "|" separator to pack in more attributes. Then I can use the information schema and an array function to get the documentation and use any reporting software to create the desired output.
select
  ordinal_position,
  column_name,
  data_type,
  character_maximum_length,
  numeric_precision,
  numeric_scale,
  is_nullable,
  column_default,
  (string_to_array(descr.description, '|'))[1] as cs_name,
  (string_to_array(descr.description, '|'))[2] as cs_description,
  (string_to_array(descr.description, '|'))[3] as en_name,
  (string_to_array(descr.description, '|'))[4] as en_description,
  (string_to_array(descr.description, '|'))[5] as other
from
  information_schema.columns columns
  join pg_catalog.pg_class klass
    on (columns.table_name = klass.relname and klass.relkind = 'r')
  left join pg_catalog.pg_description descr
    on (descr.objoid = klass.oid and descr.objsubid = columns.ordinal_position)
where
  columns.table_schema = 'data_warehouse'
order by
  columns.ordinal_position;
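The comments themselves would be maintained in that pipe-separated format, for example (table and texts made up for illustration):
comment on column data_warehouse.dim_date.date_key is
  'cs name|cs description|en name|en description|other notes';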
Is it a good idea, or is there a better approach?
Unless you must include descriptions of the system tables, I wouldn't try to shoehorn your descriptions into pg_catalog.pg_description. Make your own table. That way you get to keep the columns as columns and don't have to use clunky string functions.
Alternatively, consider adding specially formatted comments to your master schema file, along the lines of javadoc. Then write a tool to extract those comments and create a document. That way the comments stay close to the thing they're commenting, and you don't have to mess with the database at all to produce the report. For example:
--* Used for authentication.
create table users
(
  --* standard Rails-friendly primary key. Also an example of
  --* a long comment placed before the item, rather than on
  --* the same line.
  id serial primary key,
  name text not null,  --* Real name (hopefully)
  login text not null, --* Name used for authentication
  ...
);
Your documentation tool reads the file, looks for the --* comments, figures out what comments go with what things, and produces some kind of report, e.g.:
table users: Used for authentication
id: standard Rails-friendly primary key. Also an example of a
long comment placed before the item, rather than on the same
line.
name: Real name
login: Name used for authentication
You might note that with appropriate comments, the master schema file itself is a pretty good report in its own right, and that perhaps nothing else is needed.
If anyone is interested, here is what I've used for the initial load of my small documentation project. The documentation lives in two tables, one describing tables and one describing columns and constraints. I'd appreciate any feedback.
/* -- Initial Load - Tables */
drop table dw_description_table cascade;

create table dw_description_table (
  table_description_key serial primary key,
  physical_full_name character varying,
  physical_schema_name character varying,
  physical_table_name character varying,
  table_type character varying,      -- Fact / Dimension / ETL / Transformation
  logical_name_cs character varying,
  description_cs character varying,
  logical_name_en character varying,
  description_en character varying,
  todo character varying,
  table_load_type character varying, -- Manually / TruncateLoad / AddNewRows
  known_exclusions character varying,
  table_clover_script character varying
);

insert into dw_description_table (physical_full_name, physical_schema_name, physical_table_name)
select
  table_schema || '.' || table_name as physical_full_name,
  table_schema,
  table_name
from
  information_schema.tables
where
  table_name like 'dw%' or table_name like 'etl%';
/* -- Initial Load - Columns */
CREATE TABLE dw_description_column (
  column_description_key serial,
  table_description_key bigint,
  physical_full_name text,
  physical_schema_name character varying,
  physical_table_name character varying,
  physical_column_name character varying,
  ordinal_position character varying,
  column_default character varying,
  is_nullable character varying,
  data_type character varying,
  logical_name_cs character varying,
  description_cs character varying,
  logical_name_en character varying,
  description_en character varying,
  derived_rule character varying,
  todo character varying,
  pk_name character varying,
  fk_name character varying,
  foreign_table_name character varying,
  foreign_column_name character varying,
  is_primary_key boolean,
  is_foreign_key boolean,
  CONSTRAINT dw_description_column_pkey PRIMARY KEY (column_description_key),
  CONSTRAINT fk_dw_description_table_key FOREIGN KEY (table_description_key)
    REFERENCES dw_description_table (table_description_key) MATCH SIMPLE
    ON UPDATE NO ACTION ON DELETE NO ACTION
);
insert into dw_description_column (
  table_description_key,
  physical_full_name,
  physical_schema_name,
  physical_table_name,
  physical_column_name,
  ordinal_position,
  column_default,
  is_nullable,
  data_type,
  logical_name_cs,
  description_cs,
  logical_name_en,
  description_en,
  derived_rule,
  todo,
  pk_name,
  fk_name,
  foreign_table_name,
  foreign_column_name,
  is_primary_key,
  is_foreign_key )
with dw_constraints as (
  SELECT
    tc.constraint_name,
    tc.constraint_schema || '.' || tc.table_name || '.' || kcu.column_name as physical_full_name,
    tc.constraint_schema,
    tc.table_name,
    kcu.column_name,
    ccu.table_name AS foreign_table_name,
    ccu.column_name AS foreign_column_name,
    tc.constraint_type
  FROM
    information_schema.table_constraints AS tc
    JOIN information_schema.key_column_usage AS kcu
      ON (tc.constraint_name = kcu.constraint_name and tc.table_name = kcu.table_name)
    JOIN information_schema.constraint_column_usage AS ccu
      ON ccu.constraint_name = tc.constraint_name
  WHERE
    constraint_type in ('PRIMARY KEY', 'FOREIGN KEY')
    AND tc.constraint_schema = 'bizdata'
    and (tc.table_name like 'dw%' or tc.table_name like 'etl%')
  group by
    tc.constraint_name,
    tc.constraint_schema,
    tc.table_name,
    kcu.column_name,
    ccu.table_name,
    ccu.column_name,
    tc.constraint_type
)
select
  dwdt.table_description_key,
  col.table_schema || '.' || col.table_name || '.' || col.column_name as physical_full_name,
  col.table_schema as physical_schema_name,
  col.table_name as physical_table_name,
  col.column_name as physical_column_name,
  col.ordinal_position,
  col.column_default,
  col.is_nullable,
  col.data_type,
  null as logical_name_cs,
  null as description_cs,
  null as logical_name_en,
  null as description_en,
  null as derived_rule,
  null as todo,
  dwc1.constraint_name as pk_name,
  dwc2.constraint_name as fk_name,
  dwc2.foreign_table_name,
  dwc2.foreign_column_name,
  case when dwc1.constraint_name is not null then true else false end as is_primary_key,
  case when dwc2.constraint_name is not null then true else false end as is_foreign_key
from
  information_schema.columns col
  join dw_description_table dwdt
    on (col.table_schema || '.' || col.table_name = dwdt.physical_full_name)
  left join dw_constraints dwc1
    on ((col.table_schema || '.' || col.table_name || '.' || col.column_name) = dwc1.physical_full_name
        and dwc1.constraint_type = 'PRIMARY KEY')
  left join dw_constraints dwc2
    on ((col.table_schema || '.' || col.table_name || '.' || col.column_name) = dwc2.physical_full_name
        and dwc2.constraint_type = 'FOREIGN KEY')
where
  col.table_name like 'dw%' or col.table_name like 'etl%';

in T-SQL, is it possible to find names of columns containing NULL in a given row (without knowing all column names)?

Is it possible in T-SQL to write a proper query reflecting this pseudo-code:
SELECT {primary_key}, {column_name}
FROM {table}
WHERE {any column_name value} is NULL
i.e. without referencing each column-name explicitly.
Sounds simple enough but I've searched pretty extensively and found nothing.
You have to use dynamic SQL to solve that problem. I have demonstrated how it could be done below.
With this SQL you can pick a table and check the row with ID = 1 for columns that are NULL, as well as for primary key columns. I included a test table at the bottom of the script. The code will not display anything if there are no primary keys and no columns are NULL.
DECLARE @table_name VARCHAR(20)
DECLARE @chosencolumn VARCHAR(20)
DECLARE @sqlstring VARCHAR(MAX)
DECLARE @sqlstring2 VARCHAR(100)
DECLARE @text VARCHAR(8000)
DECLARE @t TABLE (col1 VARCHAR(30), dummy INT)

SET @table_name = 'test_table'  -- replace with your table name if you want
SET @chosencolumn = 'ID=1'      -- replace with the criteria for the selected row

SELECT @sqlstring = COALESCE(@sqlstring, '') + 'UNION ALL SELECT '',''''NULL '''' '' + '''+t1.column_name+''', 1000 ordinal_position FROM ['+@table_name+'] WHERE [' +t1.column_name+ '] is null and ' +@chosencolumn+ ' '
FROM INFORMATION_SCHEMA.COLUMNS t1
LEFT JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE t2
       ON t1.column_name = t2.column_name
      AND t1.table_name = t2.table_name
      AND t1.table_schema = t2.table_schema
WHERE t1.table_name = @table_name
  AND t2.column_name IS NULL

SET @sqlstring = STUFF('UNION ALL SELECT '',''''PRIMARY KEY'''' ''+ column_name + '' '' col1, ordinal_position
FROM INFORMATION_SCHEMA.KEY_COLUMN_USAGE
WHERE table_name = ''' + @table_name + '''' + @sqlstring, 1, 10, '') + 'order by 2'

INSERT @t
EXEC (@sqlstring)

SELECT @text = COALESCE(@text, '') + col1
FROM @t

SET @sqlstring2 = 'select ' + STUFF(@text, 1, 1, '')
EXEC (@sqlstring2)
Result:
id host_id date col1
PRIMARY KEY PRIMARY KEY PRIMARY KEY NULL
Test table
CREATE TABLE [dbo].[test_table] (
  [id] [int] NOT NULL,
  [host_id] [int] NOT NULL,
  [date] [datetime] NOT NULL,
  [col1] [varchar](20) NULL,
  [col2] [varchar](20) NULL,
  CONSTRAINT [PK_test_table] PRIMARY KEY CLUSTERED
  (
    [id] ASC,
    [host_id] ASC,
    [date] ASC
  )
)
Test data
INSERT test_table VALUES (1, 1, getdate(), null, 'somevalue')