Concatenating a single column in T-SQL

I am using SSMS 2008 and trying to concatenate the values of one column together based on another field's grouping. I have two columns, people_id and address_desc. They look like this:
address_desc people_id
---------- ------------
Murfreesboro, TN 37130 F15D1135-9947-4F66-B778-00E43EC44B9E
11 Mohawk Rd., Burlington, MA 01803 C561918F-C2E9-4507-BD7C-00FB688D2D6E
Unknown, UN 00000 C561918F-C2E9-4507-BD7C-00FB688D2D6E
Jacksonville, NC 28546 FC7C78CD-8AEA-4C8E-B93D-010BF8E4176D
Memphis, TN 38133 8ED8C601-5D35-4EB7-9217-012905D6E9F1
44 Maverick St., Fitchburg, MA 8ED8C601-5D35-4EB7-9217-012905D6E9F1
Now I want to concatenate the address_desc values per people_id. So the first person here should just display "Murfreesboro, TN 37130" for address_desc, but the second person should have just one line instead of two, which says "11 Mohawk Rd., Burlington, MA 01803;Unknown, UN 00000" for address_desc.
How do I do this? I tried using a CTE, but it gave me an ambiguity error:
WITH CTE ( people_id, address_list, address_desc, length )
AS ( SELECT people_id, CAST( '' AS VARCHAR(8000) ), CAST( '' AS VARCHAR(8000) ), 0
FROM dbo.address_view
GROUP BY people_id
UNION ALL
SELECT p.people_id, CAST( address_list +
CASE WHEN length = 0 THEN '' ELSE ', ' END + c.address_desc AS VARCHAR(8000) ),
CAST( c.address_desc AS VARCHAR(8000)), length + 1
FROM CTE c
INNER JOIN dbo.address_view p
ON c.people_id = p.people_id
WHERE p.address_desc > c.address_desc )
SELECT people_id, address_list
FROM ( SELECT people_id, address_list,
RANK() OVER ( PARTITION BY people_id ORDER BY length DESC )
FROM CTE ) D ( people_id, address_list, rank )
WHERE rank = 1 ;
Here was my initial SQL query:
SELECT a.address_desc, a.people_id
FROM dbo.address_view a
INNER JOIN (SELECT people_id
FROM dbo.address_view
GROUP BY people_id
HAVING COUNT(*) > 1) t
ON a.people_id = t.people_id
order by a.people_id

You can use FOR XML PATH('') like this:
DECLARE @TestData TABLE
(
address_desc NVARCHAR(100) NOT NULL
,people_id UNIQUEIDENTIFIER NOT NULL
);
INSERT @TestData
SELECT 'Murfreesboro, TN 37130', 'F15D1135-9947-4F66-B778-00E43EC44B9E'
UNION ALL
SELECT '11 Mohawk Rd., Burlington, MA 01803', 'C561918F-C2E9-4507-BD7C-00FB688D2D6E'
UNION ALL
SELECT 'Unknown, UN 00000', 'C561918F-C2E9-4507-BD7C-00FB688D2D6E'
UNION ALL
SELECT 'Memphis, TN 38133', '8ED8C601-5D35-4EB7-9217-012905D6E9F1'
UNION ALL
SELECT '44 Maverick St., Fitchburg, MA', '8ED8C601-5D35-4EB7-9217-012905D6E9F1';
SELECT a.people_id,
(SELECT SUBSTRING(
(SELECT ';'+b.address_desc
FROM @TestData b
WHERE a.people_id = b.people_id
FOR XML PATH(''), TYPE).value('.', 'NVARCHAR(MAX)')
,2
,4000)
) GROUP_CONCATENATE
FROM @TestData a
GROUP BY a.people_id
Results:
people_id GROUP_CONCATENATE
------------------------------------ ------------------------------------------------------
F15D1135-9947-4F66-B778-00E43EC44B9E Murfreesboro, TN 37130
C561918F-C2E9-4507-BD7C-00FB688D2D6E 11 Mohawk Rd., Burlington, MA 01803;Unknown, UN 00000
8ED8C601-5D35-4EB7-9217-012905D6E9F1 Memphis, TN 38133;44 Maverick St., Fitchburg, MA
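For readers on SQL Server 2017 or later (not the asker's 2008 instance), the same grouping concatenation can be expressed with STRING_AGG instead of FOR XML PATH. A minimal sketch, reusing the @TestData table variable declared above:
SELECT a.people_id,
       STRING_AGG(a.address_desc, ';') AS GROUP_CONCATENATE -- concatenates grouped values with ';'
FROM @TestData a
GROUP BY a.people_id;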

Related

How do I convert this code into a PostgreSQL stored procedure?

I am new to PostgreSQL. I want to convert this code into a stored procedure.
WITH RECURSIVE t(i) AS (
SELECT * FROM unnest((select regexp_split_to_array('signature',''))::char[])
), cte AS (
SELECT i AS combo, i, 1 AS ct
FROM t
UNION ALL
SELECT cte.combo || t.i, t.i, ct + 1
FROM cte, t
WHERE ct <= 8
AND position(t.i in cte.combo) = 0
)
SELECT distinct cc.combo,ww.word
FROM cte cc
inner join words ww ON ww.word=cc.combo
WHERE length(combo)>1
AND ww.source_id in(1,2,19,21,24,26,33,34)
ORDER BY cc.combo ASC;
Just put it into a function:
create function generate_anagrams(p_word text)
returns table(combo text, word text)
as
$$
WITH RECURSIVE t(i) AS (
SELECT *
FROM unnest((select regexp_split_to_array(p_word,''))::char[])
), cte AS (
SELECT i AS combo, i, 1 AS ct
FROM t
UNION ALL
SELECT cte.combo || t.i, t.i, ct + 1
FROM cte, t
WHERE ct <= 8
AND position(t.i in cte.combo) = 0
)
SELECT distinct cc.combo,ww.word
FROM cte cc
inner join words ww ON ww.word=cc.combo
WHERE length(combo)>1
AND ww.source_id in(1,2,19,21,24,26,33,34)
ORDER BY cc.combo ASC
$$
language sql;
You can use it like this:
select *
from generate_anagrams('signature');
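If you specifically need a stored procedure rather than a function (PostgreSQL 11 or later supports CREATE PROCEDURE), one hedged option is to keep the set-returning function above and have the procedure write its output somewhere, since procedures do not return result sets directly. The anagram_results table below is hypothetical; create it yourself with columns (combo text, word text):
create procedure store_anagrams(p_word text)
language sql
as $$
  -- persist the generated anagrams into a results table (assumed to exist)
  insert into anagram_results(combo, word)
  select combo, word from generate_anagrams(p_word);
$$;

call store_anagrams('signature');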

postgresql combining several periods into one

I'm trying to combine overlapping ranges.
WITH a AS (
select '2017-09-16 07:12:57' as begat,'2017-09-16 11:30:22' as endat
union
select '2017-09-18 17:05:21' ,'2017-09-19 13:18:01'
union
select '2017-09-19 15:34:40' ,'2017-09-22 13:29:37'
union
select '2017-09-22 12:24:16' ,'2017-09-22 13:18:29'
union
select '2017-09-28 09:48:54' ,'2017-09-28 13:39:13'
union
select '2017-09-20 13:52:43' ,'2017-09-20 14:14:43'
), b AS (
SELECT *, lag(endat) OVER (ORDER BY begat) < begat OR NULL AS step
FROM a
)
, c AS (
SELECT *, count(step) OVER (ORDER BY begat) AS grp
FROM b
)
SELECT min(begat), coalesce( max(endat), 'infinity' ) AS range
FROM c
GROUP BY grp
ORDER BY 1
Result
1 "2017-09-16 07:12:57";"2017-09-16 11:30:22"
2 "2017-09-18 17:05:21";"2017-09-19 13:18:01"
3 "2017-09-19 15:34:40";"2017-09-22 13:29:37"
4 "2017-09-22 12:24:16";"2017-09-22 13:18:29"
5 "2017-09-28 09:48:54";"2017-09-28 13:39:13"
Positions 3 and 4 intersect (endat > next begat).
How do I merge all the intersecting ranges into one large interval?
I need this result:
1 "2017-09-16 07:12:57";"2017-09-16 11:30:22"
2 "2017-09-18 17:05:21";"2017-09-19 13:18:01"
3 "2017-09-19 15:34:40";"2017-09-22 13:29:37"
4 "2017-09-28 09:48:54";"2017-09-28 13:39:13"
Hey, I would suggest using the following process:
1- Identify when a row is new, giving a value of 1 to rows that do not overlap the previous ones (CTE b)
2- Sequence together the rows that overlap one another, so they share a common identifier that lets you take the MIN and MAX of begat and endat (CTE c)
3- For each sequence, take the MIN of begat and the MAX of endat to get your final values
WITH a AS (
select '2017-09-16 07:12:57' as begat,'2017-09-16 11:30:22' as endat
union
select '2017-09-18 17:05:21' ,'2017-09-19 13:18:01'
union
select '2017-09-19 15:34:40' ,'2017-09-22 13:29:37'
union
select '2017-09-22 12:24:16' ,'2017-09-22 13:18:29'
union
select '2017-09-28 09:48:54' ,'2017-09-28 13:39:13'
union
select '2017-09-20 13:52:43' ,'2017-09-20 14:14:43'
)
, b AS (
SELECT
begat
, endat
, (begat > MAX(endat) OVER w IS TRUE)::INT is_new
FROM a
WINDOW w AS (ORDER BY begat ROWS BETWEEN UNBOUNDED PRECEDING AND 1 PRECEDING)
)
, c AS (
SELECT
begat
, endat
, SUM((is_new)) OVER (ORDER BY begat) seq
FROM b
)
SELECT
MIN(begat) beg_at
, MAX(endat) end_at
FROM c
GROUP BY seq
EDITED
If you need speed you can use a PL/pgSQL function:
create or replace function append_ranges_in_a() returns setof a
language plpgsql
as
$BODY$
declare
v_current a%rowtype;
v_new a%rowtype;
v_first boolean:=true;
begin
for v_current in select begat, endat from a order by begat, endat
loop
if v_first then
v_first := false;
v_new.begat := v_current.begat;
elsif v_new.endat < v_current.begat then
return next v_new;
v_new.begat := v_current.begat;
end if;
v_new.endat := greatest(v_current.endat,v_new.endat);
end loop;
return next v_new;
return;
end;
$BODY$;
select * from append_ranges_in_a()
I tested it with ~400,000 rows:
delete from a;
insert into a (begat, endat)
select time::text, (time+'1 day'::interval)::text
from (select t+(round(random()*23.0)||' hours')::interval as time
from generate_series('1401-01-01'::timestamp,'2018-08-21'::timestamp,'1 day'::interval) t
) t;
select count(*) from a;
select * from append_ranges_in_a() offset 100000 limit 10
and it is twice as fast as the O(n^2) pure SQL version below.
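To reproduce such a comparison yourself, one simple approach (assuming you are running this in psql) is to enable client-side timing or wrap each variant in EXPLAIN ANALYZE:
\timing on
select count(*) from append_ranges_in_a();
explain analyze select * from append_ranges_in_a();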
OLD slow solution:
You can use a recursive WITH query (https://www.postgresql.org/docs/current/static/queries-with.html) to construct the result row by row:
I create the table.
The first row becomes the candidate first row, but it is not yet "ready".
Then I look at the next row (step): if it does not intersect, I emit a ready row.
I also add a not-ready row carrying the currently observed (last) range.
When there are no more rows, I compute the last row.
I keep the ready rows plus the last row.
Here is the code:
CREATE TABLE a as
select '2017-09-16 07:12:57' as begat,'2017-09-16 11:30:22' as endat
union
select '2017-09-18 17:05:21' ,'2017-09-19 13:18:01'
union
select '2017-09-19 15:34:40' ,'2017-09-22 13:29:37'
union
select '2017-09-22 12:24:16' ,'2017-09-22 13:18:29'
union
select '2017-09-28 09:48:54' ,'2017-09-28 13:39:13'
union
select '2017-09-20 13:52:43' ,'2017-09-20 14:14:43';
WITH RECURSIVE t(begat, endat, ready, step) AS (
select * from (
select *,false,1 from a order by begat, endat limit 1) a
UNION ALL
SELECT new_rows.*
FROM (SELECT * FROM t ORDER BY begat DESC limit 1) t,
lateral (SELECT * FROM a ORDER BY begat, endat OFFSET step LIMIT 1) a,
lateral (
SELECT t.begat, t.endat, true as ready, step WHERE t.endat < a.begat
UNION SELECT CASE WHEN t.endat < a.begat THEN a.begat ELSE t.begat END, greatest(a.endat, t.endat), false, step+1
) new_rows
)
select begat, endat
from (
select begat, endat, ready, row_number() over (order by begat desc, endat desc)=1 is_last
from t
order by begat, endat) t
where ready or is_last;
I use the range type:
https://www.postgresql.org/docs/9.3/static/rangetypes.html
WITH tmp AS (
-- preparation range type
select begat, coalesce( endat, 'infinity' ) as endAt, tsrange( begat, coalesce( endat, 'infinity' ) ) as rg
from (
select '2017-09-11 17:13:03'::timestamp as begat ,'2017-09-12 12:24:09'::timestamp as endat union
select '2017-09-19 15:34:40','2017-09-20 11:04:45' union
select '2017-09-20 08:32:00','2017-09-22 13:28:37' union
select '2017-09-20 13:52:43','2017-09-20 14:14:43' union
select '2017-09-21 12:24:16','2017-09-21 13:28:29' union
select '2017-09-22 12:24:16','2017-09-22 13:28:29' union
select '2017-09-22 12:34:16','2017-09-23 13:28:29' union
select '2017-09-22 12:25:16','2017-09-24 13:28:29' union
select '2017-09-28 09:48:54','2017-09-28 13:39:13' union
select '2017-09-28 14:22:16','2017-09-28 15:52:15' union
select '2017-10-05 12:17:45','2017-10-06 12:35:38' union
select '2017-10-06 16:20:44','2017-10-07 10:11:09' union
select '2017-10-07 20:38:32','2017-10-09 14:42:29' union
select '2017-10-12 18:22:14','2017-10-12 20:52:45'
) a
),a as (
-- group intersecting range
select l.*
from tmp l left join tmp r on l.begAt > r.begAt and r.rg #> l.rg
where r.begAt is null
),
b AS (
SELECT *, lag(endat) OVER (ORDER BY begat) < begat OR NULL AS step
FROM a
)
, c AS (
SELECT *, count(step) OVER (ORDER BY begat) AS grp
FROM b
)
SELECT min(begat), coalesce( max(endat), 'infinity' ) AS range
FROM c
GROUP BY grp
ORDER BY 1
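On PostgreSQL 14 or later (well past the 9.3 docs linked above), the overlap merging can be delegated entirely to multiranges: range_agg collapses overlapping tsrange values into a multirange, and unnest splits it back into the merged intervals. A minimal sketch against the sample table a created earlier in this thread, whose begat/endat columns are text and therefore need a cast:
with tmp as (
  select tsrange(begat::timestamp, coalesce(endat::timestamp, 'infinity')) as rg
  from a
)
select merged_range
from (select unnest(range_agg(rg)) as merged_range from tmp) m -- overlapping ranges are merged inside the multirange
order by merged_range;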

SQL - How to roll up results into 1 row

If I have a table:
ID NAME
1 Red
2 Blue
3 Green
How can I return a query so that my result is:
Col1 Col2 Col3
Red Blue Green
Would I just do an inner join on itself or would I need a pivot table?
Yes, you can do it with a join, e.g.:
select t1.name col1, t2.name col2, t3.name col3
from yourtable t1
join yourtable t2 on t2.id=2
join yourtable t3 on t3.id=3
where t1.id=1;
Or you can simply do it with embedded select statements, like:
In MySQL:
select
(select name from yourtable where id=1) col1,
(select name from yourtable where id=2) col2,
(select name from yourtable where id=3) col3;
In Oracle:
select
(select name from yourtable where id=1) col1,
(select name from yourtable where id=2) col2,
(select name from yourtable where id=3) col3
from dual;
Of course, the number of columns in that query is fixed; you must edit it if you add more rows to roll up.
You can use dynamic SQL with PIVOT:
DECLARE @cols AS NVARCHAR(MAX),
@query AS NVARCHAR(MAX)
select @cols = STUFF((SELECT ',' + QUOTENAME(id)
from yourtable
group by ColumnName, id
order by id
FOR XML PATH(''), TYPE
).value('.', 'NVARCHAR(MAX)')
,1,1,'')
set @query = N'SELECT ' + @cols + N' from
(
select id, ColumnName
from yourtable
) x
pivot
(
max(ColumnName)
for id in (' + @cols + N')
) p '
exec sp_executesql @query;
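For the three-row sample table from the question (using name as the ColumnName placeholder above), @cols would expand to [1],[2],[3], so sp_executesql effectively runs a statement equivalent to:
SELECT [1], [2], [3] from
(
    select id, name as ColumnName
    from yourtable
) x
pivot
(
    max(ColumnName)
    for id in ([1], [2], [3])
) p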

partition over two columns

I want to partition by two columns (PROJECT_ID and CATEGORY_NAME) and I'm having trouble writing the correct syntax. My query below is functional, but when I attempt to add the additional column to the OVER clause it doesn't work correctly. The recursive query was used to concatenate rows partitioned over project_id, creating a list of admins by combining and concatenating name_last and name_first. I need to also include CATEGORY_NAME in the partition, because admins in the list work in different categories ('INVISION' and 'INSIGHT') but are under the same project_id. The first subquery
SELECT
RowNumber() over (PARTITION BY F13.DIM_PROJECT_ID, F13.CATEGORY_NAME ORDER BY F13.PROJECT_NAME),
F13.DIM_PROJECT_ID.....etc.
extracts the correct data; I'm just unsure how to pull it out partitioned by both project and category. I'm using DB2.
with
t1(rowNum, PROJECT_ID, NAME_LAST, NAME_FIRST, POINT_OF_CONTACT, PROJECT_NAME, BUSINESS_NAME) as
(
SELECT
RowNumber() over (PARTITION BY F13.DIM_PROJECT_ID, F13.CATEGORY_NAME ORDER BY F13.PROJECT_NAME),
F13.DIM_PROJECT_ID,
F2P.NAME_LAST,
F2P.NAME_FIRST,
REPLACE(F2P.POINT_OF_CONTACT, ',', ' |') AS POINT_OF_CONTACT,
F13.PROJECT_NAME,
F2H.CATEGORY_NAME
FROM FACT_TABLE AS F13
INNER JOIN ADMIN AS F2P ON F13.DIM_PROJECT_ID = F2P.DIM_PROJECT_ID
LEFT JOIN HOURS AS F2H ON F13.DIM_PROJECT_ID = F2H.DIM_PROJECT_ID
WHERE F2H.CATEGORY_NAME = ('INVISION')
group by
F13.DIM_PROJECT_ID,
F13.PROJECT_NAME,
F2P.NAME_LAST,
F2P.NAME_FIRST,
F2P.POINT_OF_CONTACT,
F2H.CATEGORY_NAME
) ,
t2(PROJECT_ID, LIST, POINT_OF_CONTACT, PROJECT_NAME, BUSINESS_NAME, cnt) AS
( SELECT PROJECT_ID,
VARCHAR(NAME_FIRST CONCAT ' ' CONCAT NAME_LAST, 6000),
POINT_OF_CONTACT,
PROJECT_NAME,
CATEGORY_NAME,
1
FROM t1
WHERE rowNum = 1
UNION ALL
SELECT t2.PROJECT_ID,
t2.list || ' | ' || t1.NAME_FIRST CONCAT ' ' CONCAT t1.NAME_LAST,
t1.POINT_OF_CONTACT,
t1.PROJECT_NAME,
t1.CATEGORY_NAME,
t2.cnt + 1
FROM t2, t1
WHERE t2.project_id = t1.project_id
AND t2.cnt + 1 = t1.rowNum )
SELECT PROJECT_ID,
PROJECT_NAME,
POINT_OF_CONTACT,
CATEGORY_NAME,
list
FROM t2
WHERE ( PROJECT_ID, cnt ) IN (
SELECT PROJECT_ID, MAX(rowNum)
FROM t1
GROUP BY PROJECT_ID )
The results that I'm getting are producing duplicates, but only when the second column (category_name) is included in the partition clause. Current results:
Desired results:
I figured it out. I added an ID for category and partitioned by category_id and project_id.
with
t1(rowNum, PROJECT_ID, NAME_LAST, NAME_FIRST, POINT_OF_CONTACT, PROJECT_NAME, CATEGORY_ID, CATEGORY_NAME) as
(
SELECT
RowNumber() over (PARTITION BY F13.DIM_PROJECT_ID, F13.CATEGORY_ID ORDER BY F13.PROJECT_NAME, F13.CATEGORY_NAME),
F13.DIM_PROJECT_ID,
F2P.NAME_LAST,
F2P.NAME_FIRST,
REPLACE(F2P.POINT_OF_CONTACT, ',', ' |') AS POINT_OF_CONTACT,
F13.PROJECT_NAME,
F13.CATEGORY_ID,
F13.CATEGORY_NAME
FROM FACT_TABLE AS F13
INNER JOIN ADMIN AS F2P ON F13.DIM_PROJECT_ID = F2P.DIM_PROJECT_ID
LEFT JOIN HOURS AS F2H ON F13.DIM_PROJECT_ID = F2H.DIM_PROJECT_ID
WHERE F13.CATEGORY_NAME = ('INVISION')
group by
F13.DIM_PROJECT_ID,
F13.PROJECT_NAME,
F2P.NAME_LAST,
F2P.NAME_FIRST,
F2P.POINT_OF_CONTACT,
F13.CATEGORY_ID,
F13.CATEGORY_NAME
) ,
t2(PROJECT_ID, LIST, POINT_OF_CONTACT, PROJECT_NAME, CATEGORY_ID, CATEGORY_NAME, cnt) AS
( SELECT PROJECT_ID,
VARCHAR(NAME_FIRST CONCAT ' ' CONCAT NAME_LAST, 6000),
POINT_OF_CONTACT,
PROJECT_NAME,
CATEGORY_ID,
CATEGORY_NAME,
1
FROM t1
WHERE rowNum = 1
UNION ALL
SELECT t2.PROJECT_ID,
t2.list || ' | ' || t1.NAME_FIRST CONCAT ' ' CONCAT t1.NAME_LAST,
t1.POINT_OF_CONTACT,
t1.PROJECT_NAME,
t1.CATEGORY_ID,
t1.CATEGORY_NAME,
t2.cnt + 1
FROM t2, t1
WHERE t2.project_id = t1.project_id
AND t2.category_id = t1.category_id
AND t2.cnt + 1 = t1.rowNum )
SELECT PROJECT_ID,
PROJECT_NAME,
POINT_OF_CONTACT,
CATEGORY_ID,
CATEGORY_NAME,
list
FROM t2
WHERE ( PROJECT_ID, CATEGORY_ID, cnt ) IN (
SELECT PROJECT_ID, CATEGORY_ID, MAX(rowNum)
FROM t1
GROUP BY PROJECT_ID, CATEGORY_ID )
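Depending on your DB2 version (LUW 9.7 and later), LISTAGG can often replace the recursive concatenation altogether, grouping by both columns in one pass. A hedged sketch reusing the table and column names from the query above:
SELECT F13.DIM_PROJECT_ID,
       F13.PROJECT_NAME,
       F2H.CATEGORY_NAME,
       LISTAGG(F2P.NAME_FIRST CONCAT ' ' CONCAT F2P.NAME_LAST, ' | ')
           WITHIN GROUP (ORDER BY F2P.NAME_LAST) AS LIST
FROM FACT_TABLE AS F13
INNER JOIN ADMIN AS F2P ON F13.DIM_PROJECT_ID = F2P.DIM_PROJECT_ID
LEFT JOIN HOURS AS F2H ON F13.DIM_PROJECT_ID = F2H.DIM_PROJECT_ID
WHERE F2H.CATEGORY_NAME = 'INVISION'
GROUP BY F13.DIM_PROJECT_ID, F13.PROJECT_NAME, F2H.CATEGORY_NAME;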

T-SQL -- convert comma-delimited column into multiple columns

From the table below, how can I convert the Values column into multiple columns, populated with individual values that are currently separated by commas? Before the conversion:
Name Values
---- ------
John val,val2,val3
Peter val5,val7,val9,val14
Lesli val8,val34,val36,val65,val71,val
Amy val3,val5,val99
The result of the conversion should look like:
Name Col1 Col2 Col3 Col4 Col5 Col6
---- ---- ---- ---- ---- ---- ----
John val val2 val3
Peter val5 val7 val9 val14
Lesli val8 val34 val36 val65 val71 val
Amy val3 val5 val99
First, what database product and version are you using? If you are using SQL Server 2005 or later, you can write a Split user-defined function like so:
CREATE FUNCTION [dbo].[Split]
(
@DelimitedList nvarchar(max)
, @Delimiter varchar(2) = ','
)
RETURNS TABLE
AS
RETURN
(
With CorrectedList As
(
Select Case When Left(@DelimitedList, DataLength(@Delimiter)) <> @Delimiter Then @Delimiter Else '' End
+ @DelimitedList
+ Case When Right(@DelimitedList, DataLength(@Delimiter)) <> @Delimiter Then @Delimiter Else '' End
As List
, DataLength(@Delimiter) As DelimiterLen
)
, Numbers As
(
Select TOP (Coalesce(Len(@DelimitedList),1)) Row_Number() Over ( Order By c1.object_id ) As Value
From sys.objects As c1
Cross Join sys.columns As c2
)
Select CharIndex(@Delimiter, CL.list, N.Value) + CL.DelimiterLen As Position
, Substring (
CL.List
, CharIndex(@Delimiter, CL.list, N.Value) + CL.DelimiterLen
, CharIndex(@Delimiter, CL.list, N.Value + 1)
- ( CharIndex(@Delimiter, CL.list, N.Value) + CL.DelimiterLen )
) As Value
From CorrectedList As CL
Cross Join Numbers As N
Where N.Value < Len(CL.List)
And Substring(CL.List, N.Value, CL.DelimiterLen) = @Delimiter
)
You can then split out the values you want using something akin to:
Select Name, [Values]
From Table1 As T1
Where Exists (
Select 1
From Table2 As T2
Cross Apply dbo.Split (T1.[Values], ',') As T1Values
Cross Apply dbo.Split (T2.[Values], ',') As T2Values
Where T2Values.Value = T1Values.Value
And T1.Name = T2.Name
)
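To sanity-check the function on its own, a quick call against one of the sample value lists might look like this (the Position and Value column names come from the function definition above):
Select Position, Value
From dbo.Split('val5,val7,val9,val14', ',');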
Here is a solution that uses a stack of cross-joined CTEs to generate a "table of numbers" (courtesy of Itzik Ben-Gan), which is useful for all manner of problems including string splitting, and PIVOT. SQL Server 2005 onwards. Full table create, insert and select script included.
CREATE TABLE dbo.Table1
(
Name VARCHAR(30),
[Values] VARCHAR(128)
)
GO
INSERT INTO dbo.Table1 VALUES ('John', 'val,val2,val3')
INSERT INTO dbo.Table1 VALUES ('Peter', 'val5,val7,val9,val14')
INSERT INTO dbo.Table1 VALUES ('Lesli', 'val8,val34,val36,val65,val71,val')
INSERT INTO dbo.Table1 VALUES ('Amy', 'val3,val5,val99')
GO
SELECT * FROM dbo.Table1;
GO
WITH
L0 AS(SELECT 1 AS c UNION ALL SELECT 1),
L1 AS(SELECT 1 AS c FROM L0 AS A, L0 AS B),
L2 AS(SELECT 1 AS c FROM L1 AS A, L1 AS B),
L3 AS(SELECT 1 AS c FROM L2 AS A, L2 AS B),
Numbers AS(SELECT ROW_NUMBER() OVER(ORDER BY c) AS n FROM L3)
SELECT Name, [1] AS Column1, [2] AS Column2, [3] AS Column3, [4] AS Column4, [5] AS Column5, [6] AS Column6, [7] AS Column7
FROM
(SELECT Name,
ROW_NUMBER() OVER (PARTITION BY Name ORDER BY nums.n) AS PositionInList,
LTRIM(RTRIM(SUBSTRING(valueTable.[Values], nums.n, charindex(N',', valueTable.[Values] + N',', nums.n) - nums.n))) AS [Value]
FROM Numbers AS nums INNER JOIN dbo.Table1 AS valueTable ON nums.n <= CONVERT(int, LEN(valueTable.[Values])) AND SUBSTRING(N',' + valueTable.[Values], n, 1) = N',') AS SourceTable
PIVOT
(
MAX([VALUE]) FOR PositionInList IN ([1], [2], [3], [4], [5], [6], [7])
) AS Table2
GO
--DROP TABLE dbo.Table1
Which converts this output
Name Values
John val,val2,val3
Peter val5,val7,val9,val14
Lesli val8,val34,val36,val65,val71,val
Amy val3,val5,val99
to
Name Column1 Column2 Column3 Column4 Column5 Column6 Column7
Amy val3 val5 val99 NULL NULL NULL NULL
John val val2 val3 NULL NULL NULL NULL
Lesli val8 val34 val36 val65 val71 val NULL
Peter val5 val7 val9 val14 NULL NULL NULL
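As an aside for readers on SQL Server 2022 or Azure SQL Database (not needed for the answers above), STRING_SPLIT with its optional ordinal argument can stand in for the hand-rolled splitters before the same PIVOT step. A minimal sketch against dbo.Table1:
SELECT Name, [1] AS Column1, [2] AS Column2, [3] AS Column3, [4] AS Column4, [5] AS Column5, [6] AS Column6, [7] AS Column7
FROM
(
    SELECT t.Name, s.value AS [Value], s.ordinal AS PositionInList
    FROM dbo.Table1 AS t
    CROSS APPLY STRING_SPLIT(t.[Values], ',', 1) AS s -- third argument enables the ordinal column (SQL Server 2022+)
) AS SourceTable
PIVOT
(
    MAX([Value]) FOR PositionInList IN ([1], [2], [3], [4], [5], [6], [7])
) AS p;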