Dynamic SQL to select a specific value from a column using joins - T-SQL

I am attempting to use dynamic SQL to select a value based on a field. I have a table of field references that I am using for the column names. What I am having trouble with is, of course, the dynamic SQL. My returned result is, for example, (SELECT ecoa_code FROM CRA_METRO2_BASE WHERE id = 568470), but I really want that SELECT statement to be run. Executing only returns the last row, because the variable assignment below keeps only the statement generated for the final row.
DECLARE @BaseCol VARCHAR(250)
SELECT
    @BaseCol = '(SELECT ' + FR_base.field_name + ' FROM CRA_METRO2_BASE WHERE id = ' + CONVERT(VARCHAR(15), B.id) + ')'
FROM CRA_INNOVIS_AUDIT_ERROR_FIELDS E
LEFT JOIN CRA_METRO2_BASE B
    ON B.id = E.base_id
LEFT JOIN CRA_METRO2_FIELD_REF FR_base
    ON FR_base.id = E.base_field_ref
WHERE E.audit_id = @audit_id
EXEC(@BaseCol)
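
If the goal is to run every generated statement rather than only the last one, one option (a rough sketch, assuming the same tables and the existing @audit_id variable) is to concatenate all of the generated SELECTs into a single batch and execute that:

DECLARE @AllSelects VARCHAR(MAX) = '';

SELECT
    @AllSelects = @AllSelects
        + 'SELECT ' + FR_base.field_name
        + ' FROM CRA_METRO2_BASE WHERE id = ' + CONVERT(VARCHAR(15), B.id) + ';'
FROM CRA_INNOVIS_AUDIT_ERROR_FIELDS E
LEFT JOIN CRA_METRO2_BASE B
    ON B.id = E.base_id
LEFT JOIN CRA_METRO2_FIELD_REF FR_base
    ON FR_base.id = E.base_field_ref
WHERE E.audit_id = @audit_id;

EXEC(@AllSelects); -- each generated SELECT runs and returns its own result set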

I am not sure I understand your premise correctly, and without a mock-up it is hard to tell, so please take this answer with a grain of salt. :)
DECLARE @sqlstring VARCHAR(MAX)
SELECT @sqlstring = 'SELECT ' + a.column_name + ' FROM ' + a.[Schema] + '.' + a.table_name
FROM (
    SELECT TOP 1
        T.object_id,
        OBJECT_SCHEMA_NAME(T.[object_id], DB_ID()) AS [Schema],
        T.[name] AS [table_name],
        AC.[name] AS [column_name]
        --, TY.[name] AS system_data_type
        , AC.[max_length], AC.[precision], AC.[scale],
        AC.[is_nullable], AC.[is_ansi_padded], AC.column_id
    FROM sys.tables AS T
    INNER JOIN sys.[all_columns] AC ON T.[object_id] = AC.[object_id]
) a
SELECT @sqlstring
EXEC(@sqlstring)

So I used my query above, and now I am using a CTE to build my basic result list. In the CTE I create UPDATE statements, which are all put into a temp table.
I then extract the UPDATE statements and execute them against the temp table. And voilà, I have my results!
IF (OBJECT_ID('tempdb..#Temp') IS NOT NULL)
BEGIN
    DROP TABLE #Temp
END
CREATE TABLE #Temp
(
    id INT,
    usb_data VARCHAR(500),
    cra_data VARCHAR(500)
    -- other result columns (field, AUD, SOR, acceptable_variance, is_variance_known, is_reviewed) omitted here for brevity
);
DECLARE @usb_data VARCHAR(MAX) = '',
        @cra_data VARCHAR(MAX) = '';
WITH ErrorFieldsCTE(id, usb_data, cra_data)
AS(
    SELECT
        E.id,
        'UPDATE #Temp SET usb_data = (SELECT ' + FR_base.field_name + ' FROM CRA_METRO2_BASE WHERE id = ' + CONVERT(VARCHAR(25), B.id) + ') WHERE id = ' + CONVERT(VARCHAR(15), E.id) + ' ' AS [usb_data],
        'UPDATE #Temp SET cra_data = (SELECT ' + FR_audit.field_name + ' FROM CRA_INNOVIS_INBOUND_AUDIT_INFORMATION WHERE id = ' + CONVERT(VARCHAR(25), A.id) + ') WHERE id = ' + CONVERT(VARCHAR(15), E.id) + ' ' AS [cra_data]
    FROM CRA_INNOVIS_AUDIT_ERROR_FIELDS E
    LEFT JOIN CRA_METRO2_BASE B
        ON B.id = E.base_id
    LEFT JOIN CRA_INNOVIS_INBOUND_AUDIT_INFORMATION A
        ON A.id = E.audit_id
    LEFT JOIN CRA_METRO2_FIELD_REF FR_audit
        ON FR_audit.id = E.audit_field_ref
    LEFT JOIN CRA_METRO2_FIELD_REF FR_base
        ON FR_base.id = E.base_field_ref
    WHERE E.audit_id = @audit_id
)
INSERT INTO #Temp (id, usb_data, cra_data)
SELECT
    id, usb_data, cra_data
FROM ErrorFieldsCTE
SELECT -- extract the generated UPDATE statements
    @usb_data += usb_data + '',
    @cra_data += cra_data + ''
FROM #Temp
EXEC(@usb_data) -- updating temp table, selects usb-data
EXEC(@cra_data) -- updating temp table, selects cra-data
SELECT -- return to web
    id, usb_data, cra_data
FROM #Temp
IF (OBJECT_ID('tempdb..#Temp') IS NOT NULL)
BEGIN
    DROP TABLE #Temp
END

Related

Error converting data type varchar to numeric in standard audit trigger

I have been using a standard block of T-SQL for auditing various tables for some time now. However, I now have a problem when running the trigger on a new table: "Error converting data type varchar to numeric". This occurs when running the EXEC (@sql) line. I've determined that the code for @sql is:
insert Audit_AppointmentsWS
(Type,
TableName,
PK,
FieldName,
OldValue,
NewValue,
UpdateDate,
UserName)
SELECT 'U',
'AppointmentsWorkshop',
+convert(varchar(100), coalesce(i.UniqueID,d.UniqueID)),
'[JobHours]',
convert(varchar(1000),d.[JobHours]),
convert(varchar(1000),i.[JobHours]),
'20220816 12:32:43:410',
'DELLXPS\ian'
from #ins i full outer join #del d on i.UniqueID = d.UniqueID where ISNULL(i.[JobHours],'') <> ISNULL(d.[JobHours],'')
I've tried deleting the trigger and the audit table and then recreating them, but no joy. I've also tried copying an existing trigger and just changing the table details, but I still get the same error. I'm completely stumped on this and would appreciate some feedback. Many thanks in advance!
Here is the trigger:
/****** Object: Trigger [dbo].[tr_AppointmentsWS] Script Date: 16/08/2022 12:02:10 ******/
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
create TRIGGER [dbo].[tr_AppointmentsWS] ON [dbo].AppointmentsWorkshop FOR UPDATE, DELETE
AS
DECLARE @bit INT ,
    @field INT ,
    @maxfield INT ,
    @char INT ,
    @fieldname VARCHAR(128) ,
    @TableName VARCHAR(128) ,
    @AuditTable VARCHAR(128) ,
    @PKCols VARCHAR(MAX) ,
    @sql VARCHAR(2000),
    @UpdateDate VARCHAR(21) ,
    @UserName VARCHAR(128) ,
    @Type CHAR(1) ,
    @PKSelect VARCHAR(MAX)
--Changes required:
-- 1. Change the name of the trigger and the table, above
-- 2. Change @TableName to match the table to be audited
-- 3. Change @AuditTable to the table holding the changes
SELECT @TableName = 'AppointmentsWorkshop'
SELECT @AuditTable = 'Audit_AppointmentsWS'
-- date and user
SELECT @UserName = SYSTEM_USER ,
    @UpdateDate = CONVERT(VARCHAR(8), GETDATE(), 112) + ' ' + CONVERT(VARCHAR(12), GETDATE(), 114)
-- Action
IF EXISTS (SELECT * FROM inserted)
    IF EXISTS (SELECT * FROM deleted)
        SELECT @Type = 'U'
    ELSE
        SELECT @Type = 'I'
ELSE
    SELECT @Type = 'D'
-- get list of columns
SELECT * INTO #ins FROM inserted
SELECT * INTO #del FROM deleted
-- Get primary key columns for full outer join
SELECT @PKCols = COALESCE(@PKCols + ' and', ' on') + ' i.' + c.COLUMN_NAME + ' = d.' + c.COLUMN_NAME
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk, INFORMATION_SCHEMA.KEY_COLUMN_USAGE c
WHERE pk.TABLE_NAME = @TableName
    AND CONSTRAINT_TYPE = 'PRIMARY KEY'
    AND c.TABLE_NAME = pk.TABLE_NAME
    AND c.CONSTRAINT_NAME = pk.CONSTRAINT_NAME
-- Get primary key select for insert
SELECT @PKSelect = COALESCE(@PKSelect + '+', '') + '+convert(varchar(100), coalesce(i.' + COLUMN_NAME + ',d.' + COLUMN_NAME + '))'
FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS pk, INFORMATION_SCHEMA.KEY_COLUMN_USAGE c
WHERE pk.TABLE_NAME = @TableName
    AND CONSTRAINT_TYPE = 'PRIMARY KEY'
    AND c.TABLE_NAME = pk.TABLE_NAME
    AND c.CONSTRAINT_NAME = pk.CONSTRAINT_NAME
IF @PKCols IS NULL
BEGIN
    RAISERROR('no PK on table %s', 16, -1, @TableName)
    RETURN
END
SELECT @field = 0, @maxfield = MAX(COLUMNPROPERTY(OBJECT_ID(TABLE_SCHEMA + '.' + @TableName), COLUMN_NAME, 'ColumnID'))
FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = @TableName
WHILE @field < @maxfield
BEGIN
    SELECT @field = MIN(COLUMNPROPERTY(OBJECT_ID(TABLE_SCHEMA + '.' + @TableName), COLUMN_NAME, 'ColumnID'))
    FROM INFORMATION_SCHEMA.COLUMNS
    WHERE TABLE_NAME = @TableName
        AND COLUMNPROPERTY(OBJECT_ID(TABLE_SCHEMA + '.' + @TableName), COLUMN_NAME, 'ColumnID') > @field
    SELECT @bit = (@field - 1) % 8 + 1
    SELECT @bit = POWER(2, @bit - 1)
    SELECT @char = ((@field - 1) / 8) + 1
    IF SUBSTRING(COLUMNS_UPDATED(), @char, 1) & @bit > 0 OR @Type IN ('I', 'D')
    BEGIN
        SELECT @fieldname = COLUMN_NAME
        FROM INFORMATION_SCHEMA.COLUMNS
        WHERE TABLE_NAME = @TableName
            AND COLUMNPROPERTY(OBJECT_ID(TABLE_SCHEMA + '.' + @TableName), COLUMN_NAME, 'ColumnID') = @field
        SELECT @sql = 'insert ' + @AuditTable + '
            (Type,
            TableName,
            PK,
            FieldName,
            OldValue,
            NewValue,
            UpdateDate,
            UserName)
            SELECT ''' + @Type + ''','''
            + @TableName + ''',' + @PKSelect
            + ',''[' + @fieldname + ']'''
            + ',convert(varchar(1000),d.[' + @fieldname + '])'
            + ',convert(varchar(1000),i.[' + @fieldname + '])'
            + ',''' + @UpdateDate + ''''
            + ',''' + @UserName + ''''
            + ' from #ins i full outer join #del d'
            + @PKCols
            + ' where ISNULL(i.[' + @fieldname + '],'''') <> ISNULL(d.[' + @fieldname + '],'''')' -- skip identical values (note: this also skips NULL vs empty string changes)
        EXEC (@sql)
    END
END
Well, I finally figured it out. The error is generated for columns of data type decimal, and it is down to the ISNULL section of the last SELECT. I've fixed it by checking for the decimal type and then using the following code (which uses a zero rather than an empty string):
+ ' where ISNULL(i.[' + @fieldname + '],''0'') <> ISNULL(d.[' + @fieldname + '],''0'')' -- skip identical values (compares against 0 instead of an empty string)
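
A possible way to wire that type check into the trigger (a sketch only, not the original code; @isnulldefault is a new variable introduced here, and the lookup assumes INFORMATION_SCHEMA.COLUMNS for the audited table) is to pick the ISNULL default per column before building @sql:

DECLARE @isnulldefault VARCHAR(10)

SELECT @isnulldefault = CASE
        WHEN DATA_TYPE IN ('decimal', 'numeric', 'money', 'int', 'bigint', 'smallint', 'tinyint', 'float', 'real')
            THEN '''0'''   -- numeric columns: compare against 0
        ELSE ''''''''      -- everything else: compare against an empty string
    END
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_NAME = @TableName
  AND COLUMN_NAME = @fieldname

-- then, when building @sql:
-- + ' where ISNULL(i.[' + @fieldname + '],' + @isnulldefault + ') <> ISNULL(d.[' + @fieldname + '],' + @isnulldefault + ')'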

T-SQL: select column names from all tables where there is at least 1 null value

Context: I am exploring a new database (in MS SQL server), and I want to know for each table, all columns that have null values.
I.e. result would look something like this:
table column nulls
Tbl1 Col1 8
I found this code here on Stack Overflow, which builds a list of table and column names - the WHERE statement is my addition.
I tried to filter for nulls in the WHERE statement, but then the result ends up empty, and I see why - I am checking whether the column name itself is null, not its contents. But I can't figure out how to proceed.
select schema_name(tab.schema_id) as schema_name,
tab.name as table_name,
col.name as column_name
from sys.tables as tab
inner join sys.columns as col
on tab.object_id = col.object_id
left join sys.types as t
on col.user_type_id = t.user_type_id
-- in this where statement, I am trying to filter for nulls, but i get an empty result. and i know there are nulls
where col.name is null
order by schema_name, table_name, column_id
I also tried this (see 4th line):
select schema_name(tab.schema_id) as schema_name,
tab.name as table_name,
col.name as column_name
,(select count(*) from tab.name where col.name is null) as countnulls
from sys.tables as tab
inner join sys.columns as col
on tab.object_id = col.object_id
left join sys.types as t
on col.user_type_id = t.user_type_id
order by schema_name, table_name, column_id
The last one returns the error "Invalid object name 'tab.name'."
A column name can't be null, but if you mean a nullable column (a column that accepts null values) that has at least one null value, you can use the following statement:
declare @schema varchar(255), @table varchar(255), @col varchar(255), @cmd varchar(max)
DECLARE getinfo cursor for
SELECT schema_name(tab.schema_id) as schema_name, tab.name, col.name from sys.tables as tab
inner join sys.columns as col on tab.object_id = col.object_id
where col.is_nullable = 1
order by schema_name(tab.schema_id), tab.name, col.name
OPEN getinfo
FETCH NEXT FROM getinfo into @schema, @table, @col
WHILE @@FETCH_STATUS = 0
BEGIN
    set @schema = QUOTENAME(@schema)
    set @table = QUOTENAME(@table)
    set @col = QUOTENAME(@col)
    SELECT @cmd = 'IF EXISTS (SELECT 1 FROM '+ @schema +'.'+ @table +' WHERE ' + @col + ' IS NULL) BEGIN SELECT '''+@schema+''' as schemaName, '''+@table+''' as tablename, '''+@col+''' as columnName, * FROM '+ @schema +'.'+ @table +' WHERE ' + @col + ' IS NULL end'
    EXEC(@cmd)
    FETCH NEXT FROM getinfo into @schema, @table, @col
END
CLOSE getinfo
DEALLOCATE getinfo
This uses a cursor over all nullable columns in every table in the database, then checks whether each column has at least one null value; if it does, it selects the schema name, table name, column name, and all records that have a null value in that column.
If you only want the count of nulls, you can use the following statement:
declare @schema varchar(255), @table varchar(255), @col varchar(255), @cmd varchar(max)
DECLARE getinfo cursor for
SELECT schema_name(tab.schema_id) as schema_name, tab.name, col.name from sys.tables as tab
inner join sys.columns as col on tab.object_id = col.object_id
where col.is_nullable = 1
order by schema_name(tab.schema_id), tab.name, col.name
OPEN getinfo
FETCH NEXT FROM getinfo into @schema, @table, @col
WHILE @@FETCH_STATUS = 0
BEGIN
    set @schema = QUOTENAME(@schema)
    set @table = QUOTENAME(@table)
    set @col = QUOTENAME(@col)
    SELECT @cmd = 'IF EXISTS (SELECT 1 FROM '+ @schema +'.'+ @table +' WHERE ' + @col + ' IS NULL) BEGIN SELECT '''+@schema+''' as schemaName, '''+@table+''' as tablename, '''+@col+''' as columnName, count(*) as nulls FROM '+ @schema +'.'+ @table +' WHERE ' + @col + ' IS NULL end'
    EXEC(@cmd)
    FETCH NEXT FROM getinfo into @schema, @table, @col
END
CLOSE getinfo
DEALLOCATE getinfo
This uses a cursor over all nullable columns in every table in the database, then checks whether each column has at least one null value; if it does, it selects the schema name, table name, column name, and a count of the records that have a null value in that column.
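If you would rather get back the single result set shown in the question (table, column, nulls) instead of one result set per column, a variation (a sketch only; #NullCounts is a hypothetical temp table introduced here) is to have the dynamic command insert into a temp table and select from it once at the end:

CREATE TABLE #NullCounts (schemaName sysname, tableName sysname, columnName sysname, nulls INT);

-- inside the cursor loop, build the command so it inserts instead of selecting:
SELECT @cmd = 'INSERT INTO #NullCounts SELECT ''' + @schema + ''', ''' + @table + ''', ''' + @col + ''', COUNT(*) '
            + 'FROM ' + @schema + '.' + @table + ' WHERE ' + @col + ' IS NULL HAVING COUNT(*) > 0'
EXEC(@cmd)

-- after the cursor is closed and deallocated:
SELECT schemaName, tableName, columnName, nulls
FROM #NullCounts
ORDER BY schemaName, tableName, columnName;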

Rework query to include the Schema in the name

I am leeching off this post: Query to list number of records in each table in a database
With this procedure:
CREATE PROCEDURE ListTableRowCounts
AS
BEGIN
SET NOCOUNT ON
CREATE TABLE #TableCounts
(
TableName VARCHAR(500),
CountOf INT
)
INSERT #TableCounts
EXEC sp_msForEachTable
'SELECT PARSENAME(''?'', 1),
COUNT(*) FROM ? WITH (NOLOCK)'
SELECT TableName , CountOf
FROM #TableCounts
ORDER BY TableName
DROP TABLE #TableCounts
END
GO
The procedure works well enough but I need it to output the name as Schema.Name and sort by that.
Is that possible? I'm not sure how to change this, but you can see what it is doing below. I have several instances where the table names are the same across different schemas.
CREATE PROCEDURE ListTableRowCounts
AS
BEGIN
SET NOCOUNT ON
CREATE TABLE #TableCounts
( SchemaName VARCHAR(500),
TableName VARCHAR(500),
CountOf INT
)
INSERT #TableCounts
EXEC sp_msForEachTable
'SELECT PARSENAME(''?'', 2), PARSENAME(''?'', 1),
COUNT(*) FROM ? WITH (NOLOCK)'
SELECT SchemaName, TableName , CountOf
FROM #TableCounts
ORDER BY SchemaName, TableName
DROP TABLE #TableCounts
END
GO
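For reference, a quick usage sketch (assuming the procedure above has been created in the target database):

EXEC ListTableRowCounts;
-- Returns one row per table with SchemaName, TableName and CountOf.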
Taking some code from: https://stackoverflow.com/a/1443723/4584335
and from: How do I list all tables in all databases in SQL Server in a single result set?
Could I suggest this one (just in case "sp_msForEachTable" doesn't exist anymore):
declare @sql nvarchar(max);
select @sql = isnull(@sql + N'union all ', '')
+ N'
select b.name as "DB"
,a.name collate Latin1_General_CI_AI
,a.object_id
,a.schema_id
,' + cast(database_id as nvarchar(10)) + N'
,p.[Rows]
from ' + quotename(name) + N'.sys.tables a
join
' + quotename(name) + N'.sys.indexes i
on a.OBJECT_ID = i.object_id
and i.index_id <= 1
join
' + quotename(name) + N'.sys.partitions p
on i.object_id = p.OBJECT_ID
and i.index_id = p.index_id
join sys.databases b
on database_id=' + cast(database_id as nvarchar(10)) + ' '
from sys.databases
where state = 0
and user_access = 0;
exec sp_executesql @sql;

Dynamically generated insert and select statements

I have a database with some tables in it, and I want to generate insert and select statements dynamically without needing a case statement in the select clause for each table.
The select statement is quite easy; the challenge is the insert one, because I have to deal with each column and its data type. I managed to get around it with a case statement, but I think it's hard work for tables with lots of columns and for databases with many tables.
I would like to avoid hardcoding table and column names for every table and column for which I need the dynamically generated SQL command.
I worked out the following select statement for the given tables of my database (testDB):
use testDB;
go
set dateformat dmy;
select
'select * from ' + s.name + '.' + t.name + ';' as cmd_select,
'insert into ' + s.name + '.' + t.name + ' (' +
stuff(( select ', ' + column_name
from information_schema.columns
where table_name = t.name and ordinal_position > 1
order by ordinal_position
for xml path(''), type).value('.', 'nvarchar(max)'),
1, 2, '')
+ ') values (' +
case t.name
when 'Person' then '''xxx'''
when 'WeightHistory' then '0.0, ''' + convert(varchar, current_timestamp, 103) + ''', ''' + left(convert(varchar, current_timestamp, 108), 5) + ''', 1'
when 'WorkTime' then '''' + convert(varchar, current_timestamp, 103) + ''', ''' + left(convert(varchar, current_timestamp, 108), 5) + ''', 1, null'
when 'TimeReference' then '''07:00'', ''' + convert(varchar, current_timestamp, 103) + ''', null'
end
+ ');'as cmd_insert --,
--t.lob_data_space_id
--, s.name, t.name, *
from sys.tables as t
inner join sys.schemas as s on t.schema_id = s.schema_id
where t.lob_data_space_id = 0; /* tables that don't have LOB columns (sysdiagrams, varchar(max), xml, etc.) */
What exactly I want to know is:
Is there a better way of making the dynamically generated insert statement without using a case statement for each table and column of the database?
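
One direction worth considering (a sketch only, using the same system views as above; the per-type placeholder values are assumptions, and foreign key columns would still need real key values) is to drive the VALUES list from each column's data type instead of a CASE per table:

select
    'insert into ' + s.name + '.' + t.name + ' (' +
    stuff(( select ', ' + c.column_name
            from information_schema.columns c
            where c.table_name = t.name and c.ordinal_position > 1
            order by c.ordinal_position
            for xml path(''), type).value('.', 'nvarchar(max)'), 1, 2, '')
    + ') values (' +
    stuff(( select ', ' +
                case
                    when c.data_type in ('varchar', 'nvarchar', 'char', 'nchar') then '''xxx'''
                    when c.data_type in ('int', 'bigint', 'smallint', 'tinyint', 'decimal', 'numeric', 'money') then '0'
                    when c.data_type = 'date' then '''' + convert(varchar, current_timestamp, 103) + ''''
                    when c.data_type like 'time%' then '''' + left(convert(varchar, current_timestamp, 108), 5) + ''''
                    else 'null'
                end
            from information_schema.columns c
            where c.table_name = t.name and c.ordinal_position > 1
            order by c.ordinal_position
            for xml path(''), type).value('.', 'nvarchar(max)'), 1, 2, '')
    + ');' as cmd_insert
from sys.tables as t
inner join sys.schemas as s on t.schema_id = s.schema_id
where t.lob_data_space_id = 0;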
ADDITIONAL INFO
The table definition for the above code is the following:
if not exists (select * from sys.tables where lower(name) = N'person' )
begin
create table Person.Person (
PersonID int
constraint PK_Person
primary key
identity (1, 1),
Name varchar(100)
);
end;
go
if not exists (select * from sys.tables where lower(name) = N'weighthistory' )
begin
create table dbo.WeightHistory (
WeightHistoryID int
constraint PK_WeightHistory
primary key
identity (1, 1),
MeasureValue money,
MeasureDate date,
MeasureTime time(0),
PersonID int,
constraint FK_Weight_Person foreign key (PersonID) references Person.Person (PersonID)
);
end;
go
if not exists (select * from sys.tables where lower(name) = N'worktime')
begin
create table WorkTime (
WorkTimeID int
constraint PK_WorkTime primary key
identity(1, 1),
WorkDate date,
WorkTime time(0),
PersonID int,
TimeReferenceID int,
constraint FK_WorkTime_Person foreign key (PersonID) references Person.Person (PersonID) on delete cascade,
constraint FK_WorkTime_Reference foreign key (TimeReferenceID) references Work.Timereference (TimeReferenceID)
);
end;
go
if not exists (select * from sys.tables where lower(name) = N'timereference')
begin
create table Work.TimeReference (
TimeReferenceID int
constraint PK_TimeReferene primary key
identity (1, 1),
WorkTime time(0),
WorkTimeStartDate date,
WorkTimeEndDate date
);
end;

Dynamic field definitions - can this be done in T-SQL?

I have a requirement that I'm struggling to implement. If possible, I'd like to achieve this with native T-SQL.
I have the following tables:
CUSTOMER
========
ID,
Name
FIELDDEF
========
ID,
Name
FieldType (Char T, N, D for Text, Number or Date)
CUSTOMERFIELD
=============
ID,
CustomerID,
FieldDefID,
CaptureDate,
ValueText,
ValueNumber,
ValueDate
Basically, the purpose of these tables is to provide an extensible custom field system. The idea is that the user creates new field definitions that can be a text, number or date field. Then, they create values for these fields in the ValueText, ValueNumber OR ValueDate field.
Example:
*Customer*
1,BOB
2,JIM
*FieldDef*
1,Mobile,T
2,DateOfBirth,D
*CustomerField*
ID,CustomerID,FieldDefID,CaptureDate,ValueText,ValueNumber,ValueDate
1,1,1,2011-01-1,07123456789,NULL,NULL
2,1,2,2011-01-1,NULL,NULL,09-DEC-1980
3,1,1,2011-01-2,07123498787,NULL,NULL
I need to create a view that looks like this:
*CustomerView*
ID,Name,Mobile,DateOfBirth
1,BOB,07123498787,09-DEC-1980
Note that Bob's mobile is the second one in the list, because it uses the most recent capture date.
Ideally, I need this to be extensible, so if I create a new field def in the future, it is automatically picked up in the CustomerView.
Is this possible in T-SQL at all?
Thanks,
Simon.
This would not be possible with a view, unless the view is dynamically recreated on the fly every time FieldDef changes, because view schemas are locked in at creation time. However, it may be possible with a stored procedure, which may or may not work depending on how you are using it.
Edit 1
Here is a sample query that works just for your current field names, and would have to be modified by dynamic SQL to work in general:
Edit 2
Modified to grab the newest values from the customer field table
with CustomerFieldNewest as (
select
cf1.*
from
customerfield cf1
inner join
(
select
customerid,
fielddefid,
max(capturedate) as maxcapturedate
from
customerfield cf2
group by
customerid,
fielddefid
) cf2 on cf1.customerid = cf2.customerid
and cf1.fielddefid = cf2.fielddefid
and cf1.capturedate = cf2.maxcapturedate
)
,CustomerFieldPivot as (
select
C.ID as ID
,max(case when F.Name = 'Mobile' then CF.ValueText end) as Mobile
,max(case when F.Name = 'DateOfBirth' then CF.ValueDate end) as DateOfBirth
from
Customer C
left join
CustomerFieldNewest CF on C.ID = CF.CustomerID
left join
FieldDef F on F.ID = CF.FieldDefID
group by
C.ID
)
select
C.*
,P.Mobile
,P.DateOfBirth
from
Customer C
left join
CustomerFieldPivot P on C.ID = P.ID
Edit 3
Here is T-SQL code to generate the view on the fly based on the current set of fields in FieldDef (this assumes the view CustomerView already exists, so you will need to create it first as a blank definition or you will get an error). I'm not sure about the performance of all this, but it should work correctly.
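A small sketch of that pre-creation step (a throwaway placeholder definition; the ALTER VIEW below replaces it):

if object_id('CustomerView', 'V') is null
    exec('create view [CustomerView] as select 1 as placeholder');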
declare @sql varchar(max)
declare @fielddef varchar(max)
declare @fieldlist varchar(max)
select
    @fielddef = coalesce(@fielddef + ', ' + CHAR(13) + CHAR(10), '') +
        ' max(case when F.Name = ''' + F.Name + ''' then CF.' +
        case F.FieldType
            when 'T' then 'ValueText'
            when 'N' then 'ValueNumber'
            when 'D' then 'ValueDate'
        end
        + ' end) as [' + F.Name + ']'
    ,@fieldlist = coalesce(@fieldlist + ', ' + CHAR(13) + CHAR(10), '') +
        ' [' + F.Name + ']'
from
    FieldDef F
set @sql = '
alter view [CustomerView] as
with CustomerFieldNewest as (
select
cf1.*
from
customerfield cf1
inner join
(
select
customerid,
fielddefid,
max(capturedate) as maxcapturedate
from
customerfield cf2
group by
customerid,
fielddefid
) cf2 on cf1.customerid = cf2.customerid
and cf1.fielddefid = cf2.fielddefid
and cf1.capturedate = cf2.maxcapturedate
)
,CustomerFieldPivot as (
select
C.ID as ID,
' + @fielddef + '
from
Customer C
left join
CustomerFieldNewest CF on C.ID = CF.CustomerID
left join
FieldDef F on F.ID = CF.FieldDefID
group by
C.ID
)
select
C.*,
' + @fieldlist + '
from
Customer C
left join
CustomerFieldPivot P on C.ID = P.ID
'
print @sql
exec(@sql)
select * from CustomerView
You need to build a crosstab, which you can do with the PIVOT statement in T-SQL. Here's an article that talks about how to build the pivot dynamically:
http://sqlserver-qa.net/blogs/t-sql/archive/2008/08/27/4809.aspx
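For illustration, here is a minimal static PIVOT over the question's CustomerField table (a sketch only: the FieldDefID values in the IN list are hardcoded, all three value columns are coalesced into one varchar, and the newest-CaptureDate rule handled in the answer above is ignored):

select CustomerID, [1] as Mobile, [2] as DateOfBirth
from
(
    select CustomerID, FieldDefID,
           coalesce(ValueText,
                    convert(varchar(30), ValueNumber),
                    convert(varchar(30), ValueDate, 106)) as Value
    from CustomerField
) src
pivot
(
    max(Value) for FieldDefID in ([1], [2])
) p;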
Just for completeness there is sql_variant:
declare @t table (typ varchar(1), yuk sql_variant)
insert @t values ('d', getdate())
insert @t values ('i', 1234)
insert @t values ('s', 'bleep bloop')
select
    yuk,
    case typ
        when 'd' then convert(datetime, yuk, 106) + 50
        when 'i' then cast(yuk as int) * 2
        when 's' then reverse(cast(yuk as varchar))
        else yuk
    end
from @t