SQL Server MERGE statement compilation throws error "Incorrect syntax near the keyword WHEN" - tsql

I am using a SQL Server MERGE statement, and when compiling my stored procedure I get this error:
Incorrect syntax near the keyword WHEN
Please have a look at my code and tell me where I have made the mistake.
CREATE TABLE #TmpTenQKData
(
Section NVARCHAR(MAX),
LineItem NVARCHAR(MAX),
XFundCode NVARCHAR(MAX),
StandardDate NVARCHAR(MAX),
StandardValue VARCHAR(MAX),
ActualProvidedByCompany VARCHAR(MAX)
)
BEGIN
INSERT INTO #TmpTenQKData
SELECT col.value('(Section/text())[1]', 'NVARCHAR(MAX)') AS Section
,col.value('(LineItem/text())[1]', 'NVARCHAR(MAX)') AS LineItem
,col.value('(XFundCode/text())[1]', 'NVARCHAR(MAX)') AS XFundCode
,col.value('(StandardDate/text())[1]', 'NVARCHAR(MAX)') AS StandardDate
,col.value('(StandardValue/text())[1]', 'VARCHAR(MAX)') AS StandardValue
,col.value('(ActualProvidedByCompany/text())[1]', 'VARCHAR(MAX)') AS ActualProvidedByCompany
FROM @BogyXML.nodes('/Root/PeriodicalData') AS tab (col)
END
BEGIN
Merge into TblLineItemTemplate as Trg
Using #TmpTenQKData as Src
on UPPER(TRIM(Trg.LineItem))=UPPER(TRIM(Src.LineItem)) AND Trg.TickerID=@TickerID
WHEN MATCHED THEN
UPDATE SET
Trg.XFundCode=Src.XFundCode,
Action='U',
Insertdate=GETDATE();
WHEN NOT MATCHED BY Trg THEN
INSERT TblLineItemTemplate
(
TickerID,
LineItem,
XFundCode,
Action,
UserID,
Insertdate
)
VALUES
(
TRIM(@TickerID),
TRIM(Src.LineItem),
TRIM(Src.XFundCode),
'I',
@UserID, GETDATE()
);
END
Please tell me what to change in the code. Thanks.

This parses correctly; whether it does what you want is another question. Your version fails because the semicolon after GETDATE() terminates the MERGE statement before its WHEN NOT MATCHED clause, because WHEN NOT MATCHED BY Trg is not valid syntax (the valid options are BY TARGET, which is the default, and BY SOURCE), and because the insert clause of a MERGE must not repeat the target table name:
MERGE INTO TblLineItemTemplate Trg
USING #TmpTenQKData Src ON UPPER(TRIM(Trg.LineItem)) = UPPER(TRIM(Src.LineItem)) --Do you really need UPPER()? Are you using a case sensitive collation?
AND Trg.TickerID = @TickerID
WHEN MATCHED THEN UPDATE SET XFundCode = Src.XFundCode,
Action = 'U',
Insertdate = GETDATE()
WHEN NOT MATCHED THEN INSERT (TickerID,
LineItem,
XFundCode,
Action,
UserID,
Insertdate)
VALUES (TRIM(@TickerID), TRIM(Src.LineItem), TRIM(Src.XFundCode), 'I', @UserID, GETDATE());

Related

DB2: Insert new rows and ignore duplicates

I have about 100 rows to insert into a table - some of them have already been inserted before and some have not.
This is my insert, which works fine if the primary key doesn't exist. I'm going to run it 100 times with different values each time. However, if the primary key exists, it fails and stops the following commands from running.
How can I ignore the failure and keep going, or simply ignore duplicates?
INSERT INTO MY_TABLE
VALUES( 12342, 'fdbvdfb', 'svsdv', '5019 teR','' , 'saa', 'AL',35005 , 'C', 37, '0',368 , 'P', '2023-02-13', '2023-01-01', '2023-01-10', '2023-01-20','' , 'Test', 'Test', 'Test', 'JFK', '', null, 'Y', 'Y', '', '', '', '', '', '',2385 ,2 , '', 'N', '2023-01-16', '2023-01-20', '', NULL,NULL, NULL, NULL, 'Y', 'Test', 'Test', '', 'N', 'Test', '')
This is the error:
SQL0803N One or more values in the INSERT statement, UPDATE statement, or foreign key update caused by a DELETE statement are not valid because the primary key, unique constraint or unique index identified by "XPS01ME1" constrains
INSERT IGNORE INTO throws:
The use of the reserved word "IGNORE" following "" is not valid.
If it helps, I'm using WinSQL 10.0.157.697.
You don't mention which platform and version of Db2 you're on, so I'll point you to the Linux/Unix/Windows (LUW) documentation for the MERGE statement...
Since I don't know your table or column names, I'll just give you an example with dummy names.
merge into MYTABLE as tgt
using (select *
from table( values(1,1,'CMW',5,1)
) tmp ( tblKey, fld1, fld2, fld3, fld4)
) as src
on src.tblKey = tgt.tblKey
when not matched then
insert ( tblKey, fld1, fld2, fld3, fld4)
values ( src.tblKey, src.fld1, src.fld2, src.fld3, src.fld4);
You're basically building a one-row temporary table on the fly:
table( values(1,1,'CMW',5,1) ) tmp ( tblKey, fld1, fld2, fld3, fld4)
Then, if there's no matching record via on src.tblKey = tgt.tblKey, you do an insert.
Note that while you could do this 100 times, it is a much better-performing solution to do all 100 rows at once.
merge into MYTABLE as tgt
using (select *
from table( values (1,1,'CMW1',5,1)
, (2,11,'CMW2',50,11)
, (3,21,'CMW3',8,21)
-- , <more rows here>
) tmp ( tblKey, fld1, fld2, fld3, fld4)
) as src
on src.tblKey = tgt.tblKey
when not matched then
insert ( tblKey, fld1, fld2, fld3, fld4)
values ( src.tblKey, src.fld1, src.fld2, src.fld3, src.fld4);
Optionally, you could create an actual temporary table, insert the 100 rows (preferably with a single insert), and then use MERGE.
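A minimal sketch of that variant, reusing the dummy names from above (the temporary table name is illustrative, and a declared temporary table requires a user temporary tablespace to exist):
DECLARE GLOBAL TEMPORARY TABLE SESSION.TMP_ROWS
( tblKey INT, fld1 INT, fld2 VARCHAR(10), fld3 INT, fld4 INT )
ON COMMIT PRESERVE ROWS NOT LOGGED;
-- one multi-row insert instead of 100 single-row inserts
INSERT INTO SESSION.TMP_ROWS (tblKey, fld1, fld2, fld3, fld4)
VALUES (1,1,'CMW1',5,1)
, (2,11,'CMW2',50,11)
, (3,21,'CMW3',8,21);
MERGE INTO MYTABLE AS tgt
USING SESSION.TMP_ROWS AS src
ON src.tblKey = tgt.tblKey
WHEN NOT MATCHED THEN
INSERT (tblKey, fld1, fld2, fld3, fld4)
VALUES (src.tblKey, src.fld1, src.fld2, src.fld3, src.fld4);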
You may do it with a compound statement like below:
--#SET TERMINATOR #
CREATE TABLE MY_TABLE (ID INT NOT NULL PRIMARY KEY)#
BEGIN
DECLARE CONTINUE HANDLER FOR SQLSTATE '23505' BEGIN END;
INSERT INTO MY_TABLE (ID) VALUES (1);
END#
BEGIN
DECLARE CONTINUE HANDLER FOR SQLSTATE '23505' BEGIN END;
INSERT INTO MY_TABLE (ID) VALUES (1), (2), (3);
END#
BEGIN
DECLARE CONTINUE HANDLER FOR SQLSTATE '23505' BEGIN END;
INSERT INTO MY_TABLE (ID) VALUES (4);
END#
SELECT * FROM MY_TABLE#
ID
-----------
1
4
This is how you ignore an error in Db2 --
note: this is not the correct SQLSTATE for your problem -- replace it with the one you need.
DECLARE CONTINUE HANDLER FOR SQLSTATE '23505'
BEGIN -- ignore error for duplicate value
END;
Documented here
https://www.ibm.com/docs/en/db2-for-zos/12?topic=procedure-ignoring-condition-in-sql

SQL Server: during a MERGE, can I store a source table value into a variable?

See my code to understand what I am trying to do.
DECLARE @LI NVARCHAR(100)
DECLARE @XFundCode VARCHAR(20)
SET @LI = ''
MERGE INTO TblLineItemTemplate Trg
USING
(
SELECT MAX(Section) AS Section,
MAX(LineItem) AS LineItem,
MAX(XFundCode) AS XFundCode,
MAX(StandardDate) AS StandardDate,
MAX(StandardValue) AS StandardValue,
MAX(ActualProvidedByCompany) AS ActualProvidedByCompany,
MAX(TickerID) AS TickerID
FROM #TmpTenQKData
GROUP BY LineItem
) AS Src
ON UPPER(TRIM(Trg.LineItem)) = UPPER(TRIM(Src.LineItem)) AND Trg.TickerID = Src.TickerID
WHEN MATCHED THEN
UPDATE SET
XFundCode = Src.XFundCode,
Action = 'U',
Insertdate = GETDATE()
WHEN NOT MATCHED THEN
SET @LI = Src.LineItem
SET @XFundCode = Src.XFundCode
INSERT
(
TickerID,
LineItem,
XFundCode,
Action,
UserID,
Insertdate
)
VALUES
(
TRIM(@TickerID),
TRIM(@LI),
TRIM(@XFundCode),
'I', @UserID,
GETDATE()
);
I want to store the values this way, without an OUTPUT clause:
SET @LI = Src.LineItem
SET @XFundCode = Src.XFundCode
Is it possible?
Please tell me a way to store a source table value into a variable during an insert/update from a MERGE statement.
If not, how can I store the source table values for each insert with the OUTPUT clause? Thanks.
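Variable assignment is not allowed inside a MERGE action clause, which is why the SET lines above will not compile. What MERGE does support is referencing source columns in an OUTPUT clause and capturing them row by row. A minimal sketch of that pattern, assuming an illustrative @Captured table variable (not from the original code):
DECLARE @Captured TABLE (LineItem NVARCHAR(100), XFundCode VARCHAR(20));
MERGE INTO TblLineItemTemplate AS Trg
USING #TmpTenQKData AS Src
ON UPPER(TRIM(Trg.LineItem)) = UPPER(TRIM(Src.LineItem))
WHEN NOT MATCHED THEN
INSERT (LineItem, XFundCode, Action, Insertdate)
VALUES (Src.LineItem, Src.XFundCode, 'I', GETDATE())
-- MERGE's OUTPUT clause may reference source columns, one row per action:
OUTPUT Src.LineItem, Src.XFundCode INTO @Captured (LineItem, XFundCode);
A scalar variable can only hold one value at a time, so per-row capture has to go through a table such as @Captured.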

Taking result from SQL/T-SQL Subselect into the parent select statement

I want to extend ListA with Company coming from @MyList.CompanyNo; please refer to the code listing.
Data&Init:
begin /*Just the init data*/
DECLARE @MyList TABLE (Mail nvarchar(max), CompanyNo int)
INSERT INTO @MyList VALUES ('...com',20)
INSERT INTO @MyList VALUES ('...com',230)
INSERT INTO @MyList VALUES ('...com',120)
INSERT INTO @MyList VALUES ('...com',223)
end
--DECLARE
DECLARE @ListA TABLE (Id nvarchar(max), Mail nvarchar(max))
DECLARE @ListB TABLE (Id nvarchar(max), Mail nvarchar(max),Company int)
Starting point (this works):
INSERT INTO @ListA(Id,Mail) select someId,name from [somedb].[dbo].aers where name IN (SELECT Mail FROM @MyList)
I was trying to do it the following way:
INSERT INTO @ListB(Id,Mail,Company) select someId,name,@MyList.CompanyNo from [somedb].[dbo].aers where name IN (SELECT Mail FROM @MyList)
So actually I want to extend ListB with the corresponding @MyList.CompanyNo.
Thanks, what can I do?
You could use a JOIN based on the condition from your WHERE clause:
INSERT INTO @ListB(Id,Mail,Company)
select a.someId,a.name,m.CompanyNo
from [somedb].[dbo].aers a
join @MyList m
ON a.name = m.Mail;

EF5 "the selected stored procedure or function returns no columns"

I am using EF 5 and this is my SP:
USE [MYDatabase]
GO
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
ALTER PROCEDURE [dbo].[SP_FirstAttend] @serviceStart date, @serviceEnd date AS
BEGIN
SET NOCOUNT OFF
SET FMTONLY OFF
--IF (1=0)
--BEGIN
--SET FMTONLY ON
BEGIN
DROP TABLE #temp1
CREATE TABLE #temp1 (id int, sid int, npi int, fiscal int, serviceStart date, serviceEnd date, fcode varchar(10), tid int, StudName varchar(200), TherName varchar (200))
INSERT INTO #temp1
SELECT ID,
mand.SID,
mand.NPI,
FiscalYear,
ServiceStart,
ServiceEnd,
FundingCode,
ther.TID,
RTRIM(stud.StudentLastName) + ' ' + RTRIM(stud.StudentFirstName),
RTRIM(ther.LastName) + ' ' + RTRIM(ther.FirstName)
FROM MandateMaster AS mand
JOIN TherapistMaster AS ther ON ther.NPI = mand.NPI
JOIN StudentMaster AS stud ON stud.SID = mand.SID
SELECT *,
(SELECT top(1) sid
FROM SessionDetail
WHERE SID = tb1.sid
AND TID = tb1.tid) AS val1
FROM #temp1 AS tb1
WHERE ServiceStart >= @serviceStart
AND ServiceStart <= @serviceEnd;
END
-- END
END
and it's still giving me "Stored procedure returns no columns".
I read somewhere to set integrated security=True; in the connection string in web.config, but that didn't work either.
I've been trying to find a solution for this but keep getting the same message. Please let me know what to do.
Thanks.
You got nothing as a result because the condition IF (1=0) always evaluates to false, so your select statement is never hit.
Just remove the IF (1=0) and your stored procedure will return data.
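If removing it still doesn't help: EF discovers the result shape by probing the procedure with SET FMTONLY ON, and temp tables are not created in that mode, so the probe sees no columns. A common workaround, shown here as a sketch with a stand-in body (not part of the original answer), is to force FMTONLY off at the top of the procedure:
ALTER PROCEDURE [dbo].[SP_FirstAttend] @serviceStart date, @serviceEnd date
AS
BEGIN
SET NOCOUNT ON;
-- EF's metadata probe runs with FMTONLY ON, under which temp tables are
-- never materialized; turning it off lets the probe see #temp1's columns.
SET FMTONLY OFF;
CREATE TABLE #temp1 (id int); -- stands in for the real #temp1 definition
INSERT INTO #temp1 (id) VALUES (1);
SELECT * FROM #temp1; -- the result set EF should now detect
END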

Most succinct way to transform a CSV string to a table in T-SQL?

-- Given a CSV string like this:
declare @roles varchar(800)
select @roles = 'Pub,RegUser,ServiceAdmin'
-- Question: How to get roles into a table view like this:
select 'Pub'
union
select 'RegUser'
union
select 'ServiceAdmin'
After posting this, I started playing with some dynamic SQL. This seems to work, but there might be some security risks in using dynamic SQL - thoughts on this?
declare @rolesSql varchar(800)
select @rolesSql = 'select ''' + replace(@roles, ',', ''' union select ''') + ''''
exec(@rolesSql)
If you're working with SQL Server compatibility level 130, then the STRING_SPLIT function is the most succinct method available.
Reference link: https://msdn.microsoft.com/en-gb/library/mt684588.aspx
Usage:
SELECT * FROM string_split('Pub,RegUser,ServiceAdmin',',')
RESULT:
value
-----------
Pub
RegUser
ServiceAdmin
See my answer from here
But basically you would:
Create this function in your DB:
CREATE FUNCTION dbo.Split(@origString varchar(max), @Delimiter char(1))
returns @temptable TABLE (items varchar(max))
as
begin
declare @idx int
declare @split varchar(max)
select @idx = 1
if len(@origString) < 1 or @origString is null return
while @idx != 0
begin
set @idx = charindex(@Delimiter, @origString)
if @idx != 0
set @split = left(@origString, @idx - 1)
else
set @split = @origString
if(len(@split) > 0)
insert into @temptable(Items) values(@split)
set @origString = right(@origString, len(@origString) - @idx)
if len(@origString) = 0 break
end
return
end
and then call the function and pass in the string you want to split.
Select * From dbo.Split(@roles, ',')
Here's a thorough discussion of your options:
Arrays and Lists in SQL Server
What I do in this case is just use some string replacement to convert the CSV to JSON and open the JSON like a table. It may not be suitable for every use case, but it is very simple to get running and works with strings and files. With files you just need to watch your line-break character; mostly I find it to be Char(13)+Char(10).
declare @myCSV nvarchar(MAX)= N'"Id";"Duration";"PosX";"PosY"
"•P001";223;-30;35
"•P002";248;-28;35
"•P003";235;-26;35'
--CSV to JSON
--convert to json by replacing some stuff
declare @myJson nvarchar(MAX)= '[['+ replace(@myCSV, Char(13)+Char(10), '],[' ) +']]'
set @myJson = replace(@myJson, ';',',') -- Optional: ensure comma delimiters for json if the current delimiter differs
-- set @myJson = replace(@myJson, ',,',',null,') -- Optional: empty in between
-- set @myJson = replace(@myJson, ',]',',null]') -- Optional: empty before linebreak
SELECT
ROW_NUMBER() OVER (ORDER BY (SELECT 0))-1 AS LineNumber, *
FROM OPENJSON( @myJson )
with (
col0 varchar(255) '$[0]'
,col1 varchar(255) '$[1]'
,col2 varchar(255) '$[2]'
,col3 varchar(255) '$[3]'
,col4 varchar(255) '$[4]'
,col5 varchar(255) '$[5]'
,col6 varchar(255) '$[6]'
,col7 varchar(255) '$[7]'
,col8 varchar(255) '$[8]'
,col9 varchar(255) '$[9]'
--any name column count is possible
) csv
order by (SELECT 0) OFFSET 1 ROWS --hide header row
Using SQL Server's built-in XML parsing is also an option. Of course, this glosses over all the nuances of an RFC 4180-compliant CSV.
-- Given a CSV string like this:
declare @roles varchar(800)
select @roles = 'Pub,RegUser,ServiceAdmin'
-- Here's the XML way
select split.csv.value('.', 'varchar(100)') as value
from (
select cast('<x>' + replace(@roles, ',', '</x><x>') + '</x>' as xml) as data
) as csv
cross apply data.nodes('/x') as split(csv)
If you are using SQL Server 2016+, string_split is better, but this was a common way to do it prior to SQL Server 2016.
Using BULK INSERT, you can import a CSV file into your SQL table:
http://blog.sqlauthority.com/2008/02/06/sql-server-import-csv-file-into-sql-server-using-bulk-insert-load-comma-delimited-file-into-sql-server/
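A minimal sketch of that approach (the table name and file path are illustrative, and it assumes the file holds one value per line):
CREATE TABLE dbo.Roles (RoleName varchar(100));
BULK INSERT dbo.Roles
FROM 'C:\data\roles.csv' -- hypothetical path
WITH (ROWTERMINATOR = '\n');
SELECT RoleName FROM dbo.Roles;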
Even though the accepted answer works fine, I found this function much faster, even for thousands of records. Create the function below and use it.
IF EXISTS (
SELECT 1
FROM Information_schema.Routines
WHERE Specific_schema = 'dbo'
AND specific_name = 'FN_CSVToStringListTable'
AND Routine_Type = 'FUNCTION'
)
BEGIN
DROP FUNCTION [dbo].[FN_CSVToStringListTable]
END
GO
CREATE FUNCTION [dbo].[FN_CSVToStringListTable] (@InStr VARCHAR(MAX))
RETURNS @TempTab TABLE (Id NVARCHAR(max) NOT NULL)
AS
BEGIN
;-- Ensure input ends with comma
SET @InStr = REPLACE(@InStr + ',', ',,', ',')
DECLARE @SP INT
DECLARE @VALUE VARCHAR(1000)
WHILE PATINDEX('%,%', @INSTR) <> 0
BEGIN
SELECT @SP = PATINDEX('%,%', @INSTR)
SELECT @VALUE = LEFT(@INSTR, @SP - 1)
SELECT @INSTR = STUFF(@INSTR, 1, @SP, '')
INSERT INTO @TempTab (Id)
VALUES (@VALUE)
END
RETURN
END
GO
---Test like this.
declare @v as NVARCHAR(max) = N'asdf,,as34df,234df,fs,,34v,5fghwer,56gfg,';
SELECT Id FROM dbo.FN_CSVToStringListTable(@v)
I was about to use the solution mentioned in the accepted answer, but doing more research led me to use table value types:
These are far more efficient, and you don't need a TVF (table-valued function) just to create a table from CSV. You can use one directly in your scripts or pass it to a stored procedure as a table-valued parameter. The type can be created as:
CREATE TYPE [UniqueIdentifiers] AS TABLE(
[Id] [varchar](20) NOT NULL
)
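A short usage sketch (the procedure name is hypothetical): declare a variable of the type, fill it, and use it directly or pass it on; table-valued parameters must be declared READONLY on the receiving procedure:
DECLARE @ids UniqueIdentifiers;
INSERT INTO @ids (Id) VALUES ('Pub'), ('RegUser'), ('ServiceAdmin');
SELECT Id FROM @ids; -- use it directly in a script
-- Or pass it to a stored procedure as a table-valued parameter:
-- CREATE PROCEDURE dbo.ProcessIds (@Ids UniqueIdentifiers READONLY)
-- AS SELECT Id FROM @Ids;
-- EXEC dbo.ProcessIds @Ids = @ids;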