ad_org table with column id & name
ad_org
ad_org_id | name
----------------------------------+-----------
357947E87C284935AD1D783CF6F099A1 | Spain
43D590B4814049C6B85C6545E8264E37 | Main
5EFF95EB540740A3B10510D9814EFAD5 | USA
2878085215E54C73A04D394BFD170733 | India
22669845D93A49A98932CE29AE02E0FD | Honkong
How can I get all the names as one string, in a single SELECT statement, like this:
Spain | Main | USA | India | Honkong
Use string_agg.
SELECT string_agg("name", ' | ') FROM thetable;
For older PostgreSQL, you must use array_agg and array_to_string:
SELECT array_to_string( array_agg("name"), ' | ') FROM thetable;
If you want a particular order, put it in the aggregate, e.g. for alphabetical order:
SELECT string_agg("name", ' | ' ORDER BY "name") FROM thetable;
Use the code below:
DECLARE @cols AS NVARCHAR(MAX),
        @query AS NVARCHAR(MAX)

SELECT @cols = STUFF((SELECT ',' + QUOTENAME(ColumnName)
                      FROM yourtable
                      GROUP BY ColumnName, id
                      ORDER BY id
                      FOR XML PATH(''), TYPE
                      ).value('.', 'NVARCHAR(MAX)')
                     , 1, 1, '')

SET @query = 'SELECT ' + @cols + ' from
             (
                 select value, ColumnName
                 from yourtable
             ) x
             pivot
             (
                 max(value)
                 for ColumnName in (' + @cols + ')
             ) p '

EXECUTE(@query)
Got it by searching:
Equivalent to PostgreSQL array() / array_to_string() functions in Oracle 9i
select array_to_string(array(select name from ad_org), '|') as names;
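On later Oracle versions (11gR2 and up) the built-in LISTAGG aggregate gives the same result directly; a minimal sketch against the same ad_org table:
SELECT LISTAGG(name, ' | ') WITHIN GROUP (ORDER BY name) AS names
FROM ad_org;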
I am trying to aggregate distinct values from JSONB arrays in a SQL GROUP BY statement:
One dataset has many cfiles, and a cfile only ever belongs to one dataset.
SELECT * FROM cfiles;
id | dataset_id | property_values (jsonb)
----+------------+-----------------------------------------------
1 | 1 | {"Sample Names": ["SampA", "SampB", "SampC"]}
2 | 1 | {"Sample Names": ["SampA", "SampB", "SampD"]}
3 | 1 | {"Sample Names": ["SampE"]}
4 | 2 | {"Sample Names": ["SampA", "SampF"]}
5 | 2 | {"Sample Names": ["SampG"]}
This query works and returns the correct result, but it's a mess:
SELECT distinct(datasets.id) as dataset_id,
ARRAY_TO_STRING(
ARRAY(
SELECT DISTINCT * FROM unnest(
STRING_TO_ARRAY(
STRING_AGG(
DISTINCT REPLACE(
REPLACE(
REPLACE(
REPLACE(
cfiles.property_values ->> 'Sample Names', '",' || chr(32) || '"', ';'
), '[' , ''
), '"' , ''
), ']' , ''
), ';'
), ';'
)
) ORDER BY 1 ASC
), '; '
) as sample_names
FROM datasets
JOIN cfiles ON cfiles.dataset_id=datasets.id
GROUP BY datasets.id
dataset_id | sample_names
------------+-----------------------------------
1 | SampA; SampB; SampC; SampD; SampE
2 | SampA; SampF; SampG
Is there a better way to write this query without all the string manipulation?
I tried jsonb_array_elements, but it gave me the error subquery uses ungrouped column "cfiles.property_values" from outer query. I then added cfiles.property_values to the GROUP BY, but the result was no longer grouped by dataset_id alone.
Not the result I want:
SELECT DISTINCT datasets.id as dataset_id,
ARRAY_TO_STRING(
ARRAY(
SELECT DISTINCT * FROM jsonb_array_elements(
cfiles.property_values -> 'Sample Names'
) ORDER BY 1 ASC
), '; '
) as sample_names
FROM datasets
JOIN cfiles ON cfiles.dataset_id=datasets.id
GROUP BY datasets.id, cfiles.property_values
dataset_id | sample_names
------------+---------------------------
1 | "SampA"; "SampB"; "SampC"
1 | "SampA"; "SampB"; "SampD"
1 | "SampE"
2 | "SampA"; "SampF"
2 | "SampG"
SQL for creating demo
CREATE TABLE datasets (
id INT PRIMARY KEY
);
CREATE TABLE cfiles (
id INT PRIMARY KEY,
dataset_id INT,
property_values JSONB,
FOREIGN KEY (dataset_id) REFERENCES datasets(id)
);
INSERT INTO datasets values (1),(2);
INSERT INTO cfiles values
(1,1,'{"Sample Names":["SampA", "SampB", "SampC"]}'),
(2,1,'{"Sample Names":["SampA", "SampB", "SampD"]}'),
(3,1,'{"Sample Names":["SampE"]}');
INSERT INTO cfiles values
(4,2,'{"Sample Names":["SampA", "SampF"]}'),
(5,2,'{"Sample Names":["SampG"]}');
jsonb_array_elements is a set-returning function and should be used in the FROM clause. Using it in the SELECT list makes things unnecessarily complicated:
select c.dataset_id, string_agg(distinct n.name, '; ' order by n.name)
from cfiles c
cross join jsonb_array_elements_text(c.property_values -> 'Sample Names') as n(name)
group by c.dataset_id
order by c.dataset_id;
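Against the demo data above, this should return the same grouping as the original query:
 dataset_id |            string_agg
------------+-----------------------------------
          1 | SampA; SampB; SampC; SampD; SampE
          2 | SampA; SampF; SampG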
I have a list of tables that contain a specific column name:
SELECT table_name
FROM information_schema.columns
WHERE column_name = 'column1'
I need to find the max value of column1 for each of these tables. I expect a result like the following:
|--------|--------------|
| Table | Max column1 |
|--------|--------------|
| Table1 | 100 |
| Table2 | 200 |
| ... | ... |
|--------|--------------|
How can I construct a query?
You can use a variation of the row count for all tables approach:
select t.table_name,
(xpath('/row/max/text()', xmax))[1]::text::int
from (
SELECT table_name, data_type,
query_to_xml(format('select max(%I) from %I.%I', column_name, table_schema, table_name), true, true, '') as xmax
FROM information_schema.columns
WHERE column_name = 'column1'
and table_schema = 'public'
) as t;
query_to_xml() runs the generated select max(..) from .. statement once for each row returned by the query on information_schema.columns. The result of that is something like:
<row xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<max>42</max>
</row>
The xpath() function is then used to extract the value from the XML. The derived table (sub-query) is not really needed, but makes the xpath() expression more readable (in my opinion).
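A minimal sketch of the same query with query_to_xml() inlined and no derived table (assuming the same column1 / public schema filter as above):
select table_name,
       (xpath('/row/max/text()',
              query_to_xml(format('select max(%I) from %I.%I', column_name, table_schema, table_name), true, true, '')))[1]::text::int as max_column1
from information_schema.columns
where column_name = 'column1'
  and table_schema = 'public';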
You may create a generic function that returns a TABLE type by constructing a UNION ALL query from information_schema.columns:
CREATE OR REPLACE FUNCTION public.get_max(TEXT)
RETURNS TABLE(t_table_name TEXT, t_max TEXT)
LANGUAGE plpgsql
AS $BODY$
DECLARE
    p_colname       TEXT := $1;
    v_sql_statement TEXT;
BEGIN
    SELECT STRING_AGG( 'SELECT '''||table_name||''','||' MAX('
                       || column_name||'::text'
                       || ') FROM '
                       || table_name
                     , ' UNION ALL ' ) INTO v_sql_statement
    FROM information_schema.columns
    WHERE column_name = p_colname;
    --AND table_schema = 'public'

    IF v_sql_statement IS NOT NULL THEN
        RETURN QUERY EXECUTE v_sql_statement;
    END IF;
END
$BODY$;
Execute it and get the results like this:
knayak=# select * FROM get_max('id');
t_table_name | t_max
--------------+-------
f | 2
t2 | 1
friends | id4
person | 2
customer |
employe |
diary | 4
jsontable | 6
atable |
t_json | 2
ingredients | 1
test | 2
accts |
mytable | 30
(14 rows)
I am trying to generate a pivot table with dynamic column names, but failing miserably.
My table has the following structure:
id, int() PKEY
prod_no, VARCHAR(20)
f_month, INT
f_year, INT
f_value, INT
with sample data looking like this (id column omitted):

prod_no | f_month | f_year | f_value
--------+---------+--------+--------
AB1234  |       1 |   2016 |   15698
AB1234  |       2 |   2016 |   25438
AB1234  |       3 |   2016 |   53323
AB1234  |       1 |   2017 |   34535
AB1234  |       2 |   2017 |   66244
AB1234  |       3 |   2017 |   54534
CD9876  |       1 |   2016 |   43278
CD9876  |       2 |   2016 |   11245
CD9876  |       3 |   2016 |   82432
CD9876  |       1 |   2017 |   93563
CD9876  |       2 |   2017 |   89356
CD9876  |       3 |   2017 |   45724
the result I'm after is something like this:
prod_no|1-2016|2-2016|3-2016|1-2017|2-2017|3-2017|
--------------------------------------------------
AB1234 |15698 |25438 |53323 |34535 |66244 |54534 |
--------------------------------------------------
CD9876 |43278 |11245 |82432 |93563 |89356 |45724 |
So the columns are prod_no, followed by dynamic columns that are the concatenation of f_month-f_year, and the data is the product number and the value corresponding to the month-year in that column.
I played around with some dynamic pivot examples from the web, but so far I have had no luck getting this to work.
Try this:
DECLARE @cols AS NVARCHAR(MAX),
        @query AS NVARCHAR(MAX);

SET @cols = STUFF((SELECT DISTINCT ',' + QUOTENAME(convert(varchar, c.f_month) + '-' + convert(varchar, c.f_year))
                   FROM dynpi c
                   FOR XML PATH(''), TYPE
                   ).value('.', 'NVARCHAR(MAX)')
                  , 1, 1, '')

PRINT @cols

SET @query = 'SELECT prod_no,' + @cols + ' from
             (
                 select prod_no, f_value, convert(varchar, f_month) + ''-'' + convert(varchar, f_year) as dyn
                 from dynpi
             ) x
             pivot
             (
                 max(f_value)
                 for dyn in (' + @cols + ')
             ) p '

EXECUTE(@query)
The result is:
*---------*---------*-------*-------*-------*-------*------*
|prod_no |1-2016 |1-2017 |2-2016 |2-2017 |3-2016 |3-2017|
*---------*---------*-------*-------*-------*-------*------*
|AB1234 |15698 |34535 |25438 |66244 |53323 |54534 |
*---------*---------*-------*-------*-------*-------*------*
|CD9876 |43278 |93563 |11245 |89356 |82432 |45724 |
*---------*---------*-------*-------*-------*-------*------*
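For reference, the dynamic SQL above effectively builds and runs a static pivot like this (a sketch; the column list is what @cols evaluates to for the sample data):
SELECT prod_no, [1-2016], [1-2017], [2-2016], [2-2017], [3-2016], [3-2017]
FROM (
    select prod_no, f_value, convert(varchar, f_month) + '-' + convert(varchar, f_year) as dyn
    from dynpi
) x
PIVOT (
    max(f_value) for dyn in ([1-2016], [1-2017], [2-2016], [2-2017], [3-2016], [3-2017])
) p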
The following alternative will maintain the desired column sequence. Example:
Declare @SQL varchar(max) = '
Select *
 From (
        Select prod_no
              ,item  = concat(f_year,''-'',f_month)
              ,value = f_value
         From  YourTable
      ) A
 Pivot (sum([value]) For [item] in (' + Stuff((Select ','+QuoteName(concat(f_year,'-',f_month))
                                               From (Select Distinct Top 100 f_year, f_month From YourTable Order By f_year, f_month) A
                                               For XML Path('')),1,1,'') + ') ) p'

Exec(@SQL)
--Print @SQL
Returns the same data pivoted into one column per period, named year-month (2016-1, 2016-2, ..., 2017-3) and ordered chronologically.
I have a table with three columns; columns 2 and 3 contain comma-separated values.
-col1----col2---col3--
| 1 | 1,2,3 | 4,5 |
----------------------
What is the most efficient way to get a table of three columns that contains all the combinations of values of these three columns, like this:
1 | 1 | 4
1 | 2 | 4
1 | 3 | 4
1 | 1 | 5
1 | 2 | 5
1 | 3 | 5
Using the XML query() and nodes() methods:
DECLARE @t TABLE (col1 VARCHAR(100), col2 VARCHAR(100), col3 VARCHAR(100))
INSERT @t VALUES ('1', '1,2,3', '4,5')

;WITH cte AS
(
    SELECT
        col1 = CAST('<x>' + REPLACE(col1, ',', '</x><x>') + '</x>' AS XML),
        col2 = CAST('<x>' + REPLACE(col2, ',', '</x><x>') + '</x>' AS XML),
        col3 = CAST('<x>' + REPLACE(col3, ',', '</x><x>') + '</x>' AS XML)
    FROM @t
)
SELECT
    col1.n.query('.[1]').value('.', 'int'),
    col2.n.query('.[1]').value('.', 'int'),
    col3.n.query('.[1]').value('.', 'int')
FROM
    cte
    CROSS APPLY col1.nodes('x') AS col1(n)
    CROSS APPLY col2.nodes('x') AS col2(n)
    CROSS APPLY col3.nodes('x') AS col3(n)
Try this:
DECLARE @T1 TABLE (COL1 VARCHAR(25), COL2 VARCHAR(25), COL3 VARCHAR(25))
INSERT INTO @T1 (COL1, COL2, COL3)
VALUES ('1', '1,2,3', '4,5')

DECLARE @COL1 TABLE (VAL1 VARCHAR(25))
DECLARE @COL2 TABLE (VAL2 VARCHAR(25))
DECLARE @COL3 TABLE (VAL3 VARCHAR(25))

INSERT INTO @COL1 (VAL1)
SELECT DISTINCT Split.a.value('.', 'VARCHAR(max)') AS String
FROM (SELECT CAST('<M>' + REPLACE(CAST(COL1 AS VARCHAR), ',', '</M><M>') + '</M>' AS XML) AS String
      FROM @T1) AS A
CROSS APPLY String.nodes('/M') AS Split(a)

INSERT INTO @COL2 (VAL2)
SELECT DISTINCT Split.a.value('.', 'VARCHAR(max)') AS String
FROM (SELECT CAST('<M>' + REPLACE(CAST(COL2 AS VARCHAR), ',', '</M><M>') + '</M>' AS XML) AS String
      FROM @T1) AS A
CROSS APPLY String.nodes('/M') AS Split(a)

INSERT INTO @COL3 (VAL3)
SELECT DISTINCT Split.a.value('.', 'VARCHAR(max)') AS String
FROM (SELECT CAST('<M>' + REPLACE(CAST(COL3 AS VARCHAR), ',', '</M><M>') + '</M>' AS XML) AS String
      FROM @T1) AS A
CROSS APPLY String.nodes('/M') AS Split(a)

SELECT *
FROM @COL1
CROSS APPLY @COL2
CROSS APPLY @COL3
ORDER BY VAL1, VAL2, VAL3
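As a side note, on SQL Server 2016 or later the built-in STRING_SPLIT function avoids the XML round-trip entirely; a minimal sketch against the same @T1 table variable (not part of either answer above):
SELECT s1.value AS col1, s2.value AS col2, s3.value AS col3
FROM @T1 t
CROSS APPLY STRING_SPLIT(t.COL1, ',') s1
CROSS APPLY STRING_SPLIT(t.COL2, ',') s2
CROSS APPLY STRING_SPLIT(t.COL3, ',') s3
ORDER BY col1, col2, col3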
I have successfully constructed the output that I have been looking for by using dynamic SQL to create a pivot table with dynamically created column names.
My code is:
IF OBJECT_ID('tempdb..#TempDB') IS NOT NULL
DROP TABLE #TempDB
SELECT CASEID, FORMNAME, NAME, VALUE INTO #TempDB FROM dbo.EFORM WHERE FORMNAME='IncidentReporting'
IF OBJECT_ID('tempdb..#TempDB1') IS NOT NULL
DROP TABLE #TempDB1
SELECT DISTINCT Name INTO #TempDB1 FROM #TempDB
DECLARE @columns varchar(max)
DECLARE @query varchar(max)

SELECT @columns = COALESCE(@columns + ',[' + cast([Name] as varchar(100)) + ']',
                           '[' + cast([Name] as varchar(100)) + ']')
FROM #TempDB1

SET @query = 'SELECT * FROM #TempDB AS PivotData '
SET @query = @query +
    'PIVOT (MAX(VALUE) FOR [NAME] IN (' + @columns + ')) AS p'

EXEC (@query)
This successfully gives me results like:
CASEID FORMNAME Column1 Column2 Column3
501000000621 IncidentReporting Value1 Valuea Valuev
501000000622 IncidentReporting Value2 Valueb Valuew
601000000126 IncidentReporting Value3 Valuec Valuex
601000000127 IncidentReporting Value4 Valued Valuey
601000000128 IncidentReporting Value5 Valuee Valuez
These results, output from the @query variable, are in exactly the format that I want a table of these results to be in.
Can anyone tell me how to get the results that are in the @query variable into a standard SQL table?
I have tried a statement like this, but I get the message "Incorrect syntax near ' + @columns + '":
SELECT *
INTO #TempDB4
FROM (SELECT * FROM #TempDB AS PivotData
      PIVOT (MAX(VALUE) FOR [NAME] IN (' + @columns + ')) AS p)
Many thanks in advance.
In your existing code, add your INTO to this line:
SET @query = 'SELECT * FROM #TempDB AS PivotData '
so that you get:
SET @query = 'SELECT * INTO #TempDB4 FROM #TempDB AS PivotData '
Or add an INSERT in the same manner.
To get your unsuccessful query to work as you expect, you'd have to turn it into dynamic SQL as well, much like your successful query, and call it using EXEC or sp_executesql.
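One caveat worth knowing: a local temp table created inside EXEC(...) is dropped as soon as that dynamic batch ends, so if you need the pivoted result afterwards, target a permanent table (or a global ##temp table) instead. A minimal sketch, using a hypothetical table name dbo.IncidentPivot:
SET @query = 'SELECT * INTO dbo.IncidentPivot FROM #TempDB AS PivotData ' +
             'PIVOT (MAX(VALUE) FOR [NAME] IN (' + @columns + ')) AS p'
EXEC (@query)
The #TempDB created in the outer batch is still visible inside the dynamic SQL, so only the target of the INTO changes.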