Group by in pivot table in SQL Server 2008 R2

How can I group the results of this pivot query?
SELECT *
FROM
(
    SELECT
        ProductionID, ProductionDetailID,
        [DeviceID], [DeviceSpeed], [LattNO]
    FROM
        (SELECT * FROM view_3 WHERE ProductionID = 6) x
    PIVOT
    (
        MAX(Value) FOR PropertyName IN ([DeviceID], [DeviceSpeed], [LattNO])
    ) AS pvt
) AS pp
Result:
ProductionID  ProductionDetailID  DeviceID  DeviceSpeed  LattNO
6             2                   5         NULL         NULL
6             2                   NULL      8            NULL
6             2                   NULL      NULL         6
6             3                   1         NULL         NULL
6             3                   NULL      2            NULL
and how can I get this result:
ProductionID  ProductionDetailID  DeviceID  DeviceSpeed  LattNO
6             2                   5         8            6
6             3                   1         2            NULL

SELECT
    ProductionID,
    ProductionDetailID,
    SUM(CAST(ISNULL([DeviceID], 0) AS INT)) AS [DeviceID],
    SUM(CAST(ISNULL([DeviceSpeed], 0) AS INT)) AS [DeviceSpeed],
    CASE SUM(CAST(ISNULL([LattNO], 0) AS INT))
        WHEN 0 THEN NULL
        ELSE SUM(CAST(ISNULL([LattNO], 0) AS INT))
    END AS [LattNO]
FROM
    (SELECT * FROM dbo.View_3) x
PIVOT
(
    MAX(Value) FOR PropertyName IN ([DeviceID], [DeviceSpeed], [LattNO])
) AS pvt
GROUP BY ProductionID, ProductionDetailID
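A simpler variant (a sketch, not taken from the original answer): because the pivot already yields at most one non-NULL value per property within each ProductionID/ProductionDetailID pair, grouping with MAX collapses the rows directly and keeps NULL where no value exists, without the ISNULL/SUM casting.

-- Hedged alternative: assumes each (ProductionID, ProductionDetailID, PropertyName)
-- appears at most once in View_3, so MAX simply picks the single pivoted value
-- and leaves NULL when the property is missing.
SELECT
    ProductionID,
    ProductionDetailID,
    MAX([DeviceID])    AS [DeviceID],
    MAX([DeviceSpeed]) AS [DeviceSpeed],
    MAX([LattNO])      AS [LattNO]
FROM
    (SELECT * FROM dbo.View_3) x
PIVOT
(
    MAX(Value) FOR PropertyName IN ([DeviceID], [DeviceSpeed], [LattNO])
) AS pvt
GROUP BY ProductionID, ProductionDetailID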

Related

Columns to Rows in T-SQL

I have a table tbl_Survey:
SurveyID  1  2  3  4
7         4  4  4  4
8         3  3  3  3
9         2  2  2  2
My goal is to turn the table headers - 1, 2, 3, 4 - into rows, as follows:
SurveyID  Question  Rating
7         1         4
7         2         4
7         3         4
7         4         4
8         1         3
8         2         3
8         3         3
8         4         3
9         1         2
9         2         2
9         3         2
9         4         2
My code is (trying to follow help recommendations):
SELECT [SurveyID]
,[Question]
,[Rating]
FROM
[tbl_Survey]
cross apply
(
values
('1', 1 ),
('2', 2 ),
('3', 3 ),
('4', 4 )
) c (Question, Rating);
Results are not fully correct (Rating column is a problem):
SurveyID  Question  Rating
7         1         1
7         2         2
7         3         3
7         4         4
8         1         1
8         2         2
8         3         3
8         4         4
9         1         1
9         2         2
9         3         3
9         4         4
Please, help...
My problem (the reason I couldn't proceed) was that I hadn't used brackets around the column names in my code.
Here is the updated code for this:
SELECT [SurveyID], [Question], [Rating]
FROM [dbo].[tbl_Survey]
UNPIVOT
(
[Rating]
FOR [Question] in ([1], [2], [3], [4])
) AS SurveyUnpivot
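For comparison, the original CROSS APPLY attempt can also be made to work once the second element of each VALUES row references the bracketed column rather than a literal number; a sketch of that fix (not part of the accepted answer):

-- Sketch: the second element of each VALUES row must be the column [1]..[4],
-- not the literal 1..4, so Rating picks up the stored survey value.
SELECT [SurveyID]
     , [Question]
     , [Rating]
FROM [tbl_Survey]
CROSS APPLY
(
    VALUES
        ('1', [1]),
        ('2', [2]),
        ('3', [3]),
        ('4', [4])
) c (Question, Rating);

One behavioural difference worth noting: UNPIVOT silently drops rows whose value is NULL, while this CROSS APPLY form keeps them.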
How about this:
DECLARE @T TABLE (SurveyID int, q1 int, q2 int, q3 int, q4 int)
INSERT @T (SurveyID, q1, q2, q3, q4)
VALUES (7,4,4,4,4), (8,3,3,3,3), (9,2,2,2,2)

SELECT SurveyID, REPLACE(Question, 'q', '') AS Question, Rating
FROM @T
UNPIVOT (Rating FOR Question IN (q1, q2, q3, q4)) AS UPV
Same approach, but with dynamic SQL. Just make sure you use a global temporary table here: a table variable like the one in the previous example would not be visible inside the EXEC'd batch. This should work with any column names and any number of columns.
IF OBJECT_ID('tempdb..##T') IS NOT NULL DROP TABLE ##T
CREATE TABLE ##T (SurveyID int, xxxxx int, yyyyy int, zzzzzz int, tttttt int)
INSERT ##T VALUES (7,4,4,4,4), (8,3,3,3,3), (9,2,2,2,2)

-- Build the column list from the table's metadata, skipping the key column
DECLARE @Colnames nvarchar(4000)
SELECT @Colnames = STUFF((SELECT ',[' + [name] + ']' FROM tempdb.sys.columns WHERE object_id = OBJECT_ID('tempdb..##T') AND name <> 'SurveyID' FOR XML PATH('')), 1, 1, '')

DECLARE @SQL nvarchar(4000)
SET @SQL = 'SELECT SurveyID, Question, Rating FROM ##T UNPIVOT (Rating FOR Question IN (' + @Colnames + ')) AS UPV'
EXEC(@SQL)

Improve performance on CTE with sub-queries

I have a table with this structure:
WorkerID  Value         GroupID  Sequence  Validity
1         '20%'         1        1         2018-01-01
1         '10%'         1        1         2017-06-01
1         'Yes'         1        2         2017-06-01
1         '2018-01-01'  2        1         2017-06-01
1         '17.2'        2        2         2017-06-01
2         '10%'         1        1         2017-06-01
2         'No'          1        2         2017-06-01
2         '2016-03-01'  2        1         2017-06-01
2         '15.9'        2        2         2017-06-01
This structure was created so that the client can create customized data for a worker. For example, Group 1 can be something like "Salary", and Sequence is one value that belongs to that group, like "Overtime Compensation". The Value column is a VARCHAR(150) field; the proper validation and conversion is done in another part of the application.
The Validity column exists mainly for historical reasons.
Now I would like to show, for the different workers, the information in a grid where each row is one worker (displaying the value with the most recent Validity):
Worker  1_1  1_2  2_1         2_2
1       20%  Yes  2018-01-01  17.2
2       10%  No   2016-03-01  15.9
To accomplish this I created a CTE that looks like this:
WITH CTE_worker_grid
AS
(
    SELECT
        wk.WorkerID AS Worker,
        /* 1 */
        (
            SELECT TOP 1 w.Value
            FROM worker_values AS w
            WHERE w.WorkerID = wk.WorkerID
              AND w.GroupID = 1
              AND w.Sequence = 1
            ORDER BY w.Validity DESC
        ) AS [1_1],
        (
            SELECT TOP 1 w.Value
            FROM worker_values AS w
            WHERE w.WorkerID = wk.WorkerID
              AND w.GroupID = 1
              AND w.Sequence = 2
            ORDER BY w.Validity DESC
        ) AS [1_2],
        /* 2 */
        (
            SELECT TOP 1 w.Value
            FROM worker_values AS w
            WHERE w.WorkerID = wk.WorkerID
              AND w.GroupID = 2
              AND w.Sequence = 1
            ORDER BY w.Validity DESC
        ) AS [2_1],
        (
            SELECT TOP 1 w.Value
            FROM worker_values AS w
            WHERE w.WorkerID = wk.WorkerID
              AND w.GroupID = 2
              AND w.Sequence = 2
            ORDER BY w.Validity DESC
        ) AS [2_2]
    FROM (SELECT DISTINCT WorkerID FROM worker_values) AS wk
)
SELECT * FROM CTE_worker_grid
GO
This produces the correct result, but it's very slow: it builds this grid for over 18,000 workers with almost 30 groups and up to 20 sequences in each group.
How could one speed up a CTE of this magnitude? Should a CTE even be used here? Can the sub-queries be changed or refactored to speed up the execution?
Use a PIVOT!
+----------+---------+---------+------------+---------+
| WorkerId | 001_001 | 001_002 | 002_001 | 002_002 |
+----------+---------+---------+------------+---------+
| 1 | 20% | Yes | 2018-01-01 | 17.2 |
| 2 | 10% | No | 2016-03-01 | 15.9 |
+----------+---------+---------+------------+---------+
SQL Fiddle: http://sqlfiddle.com/#!18/6e768/1
CREATE TABLE WorkerAttributes
(
WorkerID INT NOT NULL
, [Value] VARCHAR(50) NOT NULL
, GroupID INT NOT NULL
, [Sequence] INT NOT NULL
, Validity DATE NOT NULL
)
INSERT INTO WorkerAttributes
(WorkerID, Value, GroupID, Sequence, Validity)
VALUES
(1, '20%', 1, 1, '2018-01-01')
, (1, '10%', 1, 1, '2017-06-01')
, (1, 'Yes', 1, 2, '2017-06-01')
, (1, '2018-01-01', 2, 1, '2017-06-01')
, (1, '17.2', 2, 2, '2017-06-01')
, (2, '10%', 1, 1, '2017-06-01')
, (2, 'No', 1, 2, '2017-06-01')
, (2, '2016-03-01', 2, 1, '2017-06-01')
, (2, '15.9', 2, 2, '2017-06-01')
;WITH CTE_WA_RANK
AS
(
SELECT
ROW_NUMBER() OVER (PARTITION BY WorkerID, GroupID, [Sequence] ORDER BY Validity DESC) AS VersionNumber
, WA.WorkerID
, WA.GroupID
, WA.[Sequence]
, WA.[Value]
FROM
WorkerAttributes AS WA
),
CTE_WA
AS
(
SELECT
WA_RANK.WorkerID
, RIGHT('000' + CAST(WA_RANK.GroupID AS VARCHAR(3)), 3)
+ '_'
+ RIGHT('000' + CAST(WA_RANK.[Sequence] AS VARCHAR(3)), 3) AS SMART_KEY
, WA_RANK.[Value]
FROM
CTE_WA_RANK AS WA_RANK
WHERE
WA_RANK.VersionNumber = 1
)
SELECT
WorkerId
, [001_001] AS [001_001]
, [001_002] AS [001_002]
, [002_001] AS [002_001]
, [002_002] AS [002_002]
FROM
(
SELECT
CTE_WA.WorkerId
, CTE_WA.SMART_KEY
, CTE_WA.[Value]
FROM
CTE_WA
) AS WA
PIVOT
(
MAX([Value])
FOR
SMART_KEY IN
(
[001_001]
, [001_002]
, [002_001]
, [002_002]
)
) AS PVT
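With roughly 30 groups and up to 20 sequences the IN list gets long, so it may help to build the column list dynamically from the data, along the lines of the FOR XML PATH trick shown earlier. A hedged sketch (the SMART_KEY logic mirrors the answer above; the variable names and the dynamic-SQL wrapper are assumptions):

-- Sketch: derive the pivot column list from the distinct Group/Sequence pairs,
-- then run the same ranked-PIVOT query through dynamic SQL.
DECLARE @Cols nvarchar(max), @SQL nvarchar(max)

SELECT @Cols = STUFF((
    SELECT DISTINCT ',[' + RIGHT('000' + CAST(GroupID AS varchar(3)), 3)
                  + '_' + RIGHT('000' + CAST([Sequence] AS varchar(3)), 3) + ']'
    FROM WorkerAttributes
    FOR XML PATH('')), 1, 1, '')

SET @SQL = N'
WITH CTE_WA_RANK AS (
    SELECT ROW_NUMBER() OVER (PARTITION BY WorkerID, GroupID, [Sequence]
                              ORDER BY Validity DESC) AS VersionNumber,
           WorkerID,
           RIGHT(''000'' + CAST(GroupID AS varchar(3)), 3) + ''_''
         + RIGHT(''000'' + CAST([Sequence] AS varchar(3)), 3) AS SMART_KEY,
           [Value]
    FROM WorkerAttributes
)
SELECT WorkerID, ' + @Cols + '
FROM (SELECT WorkerID, SMART_KEY, [Value] FROM CTE_WA_RANK WHERE VersionNumber = 1) AS WA
PIVOT (MAX([Value]) FOR SMART_KEY IN (' + @Cols + ')) AS PVT'

EXEC sp_executesql @SQL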

TSQL - First and last number in range

I have a table with:
1
2
3
4
5
6
9
10
11
12
and I need to receive:
1-6
9-12
How can I do that?
I need to see that I have two or more ranges of numbers in the table: one from 1 to 6 and one from 9 to 12.
SELECT
    CONCAT(MIN(A.b), '-', MAX(A.b))
FROM
(
    SELECT
        *,
        -- Consecutive values keep a constant difference b - ROW_NUMBER(),
        -- so that difference identifies each unbroken "island" of numbers.
        ROW_NUMBER() OVER (ORDER BY b) AS RowId
    FROM
        (VALUES (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12)) a(b)
    --WHERE
    --    (a.b >= 1 AND a.b <= 6) OR
    --    (a.b >= 9 AND a.b <= 12)
) A
GROUP BY
    A.b - A.RowId
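Applied to an actual table rather than the inline VALUES list, the same gaps-and-islands pattern might look like this (a sketch; the table and column names dbo.Numbers / n are assumptions):

-- Sketch: group consecutive values of dbo.Numbers.n into ranges.
SELECT
    CONCAT(MIN(n), '-', MAX(n)) AS NumberRange
FROM
(
    SELECT
        n,
        n - ROW_NUMBER() OVER (ORDER BY n) AS grp  -- constant within each consecutive run
    FROM dbo.Numbers
) AS islands
GROUP BY grp
ORDER BY MIN(n);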

Postgres - bind results of equal type by year - long to wide data

Please excuse my not very proper way of asking this, as I am new to Postgres...
Having the following two tables:
CREATE TABLE pub (
id int
, time timestamp
);
id  time
1   2010-02-10 01:00:00
2   2011-02-10 01:00:00
3   2012-02-10 01:00:00
And
CREATE TABLE val (
id int
, type text
, val int
);
id  type  val
1   A     1
1   B     2
1   C     3
2   A     4
2   B     5
3   D     6
I would like to get the following output (for id <= 2):
type  2010  2011
A     1     4
B     2     5
C     3     NULL
So type is the superset of all types present in table val.
NULL means that there is no value for type C.
Ideally the column headings are the years from the time column; alternatively, the id itself...
There are at least two ways to do this.
If your table does not have many categories, you can use a CTE with filtered aggregates:
WITH x AS (
SELECT type,
sum(val) FILTER (WHERE date_part('year', time) = 2010) AS "2010",
sum(val) FILTER (WHERE date_part('year', time) = 2011) AS "2011"
FROM pub AS p JOIN val AS v ON (v.id = p.id)
GROUP BY type
)
SELECT * FROM x
WHERE "2010" is NOT NULL OR "2011" IS NOT NULL
ORDER BY type
;
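The FILTER clause requires PostgreSQL 9.4 or newer; on older versions the same conditional aggregation can be written with CASE expressions (a small sketch using the tables above):

-- Sketch: CASE without ELSE yields NULL, which sum() ignores,
-- so this behaves like the FILTER version above.
SELECT v.type,
       sum(CASE WHEN date_part('year', p.time) = 2010 THEN v.val END) AS "2010",
       sum(CASE WHEN date_part('year', p.time) = 2011 THEN v.val END) AS "2011"
FROM pub AS p
JOIN val AS v ON v.id = p.id
GROUP BY v.type
ORDER BY v.type;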
But if you have many or dynamic categories, you should use crosstab from the tablefunc extension:
CREATE EXTENSION tablefunc;
SELECT * FROM crosstab(
$$
SELECT type,
date_part('year', time)::text as time,
sum(val) AS val
FROM pub AS p JOIN val AS v ON (v.id = p.id)
GROUP BY type, 2
ORDER BY 1, 2
$$,
$$VALUES ('2010'::text), ('2011'), ('2012') $$
) AS ct (type text, "2010" int, "2011" int, "2012" int);
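The category list in the second crosstab argument has to line up with the output column definitions, so when the years are not known in advance it may be easier to read them from the data first (a small sketch; this query is an assumption, not part of the original answer):

-- Sketch: list the distinct years present in pub, so the VALUES list and the
-- "AS ct (...)" column definition list can be kept in sync with the data.
SELECT DISTINCT date_part('year', p.time)::text AS yr
FROM pub AS p
ORDER BY yr;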

How to read all records recursively and show by level depth TSQL

Is there a way to read records recursively from a table like the one below and order the results by depth level?
#table:
id int | parent int | value string
-----------------------------------
1      | -1         | some
2      | 1          | some2
3      | 2          | some3
4      | 2          | some4
5      | 3          | some5
6      | 4          | some6
7      | 3          | some5
8      | 3          | some5
9      | 8          | some5
10     | 8          | some5
So is there a way to select recursively so that, starting from
select * from #table where id = 3
the result table would look like this?
id int | parent int | value string | depth
-------------------------------------------
3      | 2          | some3        | 0
5      | 3          | some5        | 1
7      | 3          | some5        | 1
8      | 3          | some5        | 1
9      | 8          | some5        | 2
10     | 8          | some5        | 2
So if I choose id = 3, I would see the recursion for id = 3 and its children.
Thank you
;with C as
(
select id,
parent,
value,
0 as depth
from YourTable
where id = 3
union all
select T.id,
T.parent,
T.value,
C.depth + 1
from YourTable as T
inner join C
on T.parent = C.id
)
select *
from C
You can accomplish this using CTEs, in particular recursive CTEs (rCTEs). Example:
DECLARE @targetId int = 3;  -- the node to start from

WITH sampleCTE (id, parent, value, depth)
AS (
    -- Anchor definition
    SELECT id
         , parent
         , value
         , 0
    FROM #table
    WHERE id = @targetId
    -- Recursive definition
    UNION ALL
    SELECT child.id
         , child.parent
         , child.value
         , sampleCTE.depth + 1
    FROM #table child
    INNER JOIN sampleCTE ON sampleCTE.id = child.parent
)
SELECT id, parent, value, depth
FROM sampleCTE
ORDER BY depth, id;