Insert multiple rows into a table in Firebird

While working on SQL Server, I added one thousand rows to one of my tables using something like:
DECLARE @cnt2 INT = 0;
WHILE @cnt2 < 1000
BEGIN
insert into [MyDB].[dbo].[Table] (ID, LastName, FirstName, StreetAddress, City, ZipCode, PhoneNumber, Email, EnteringDate, GroupID)
values (00+ Convert(varchar(5), @cnt2), 'StudentLastName-' + Convert(varchar(5), @cnt2), 'FirstName', 'Street', 'City', 'xx-xxx', '500-000-000', 'email@student.xyz', GETDATE(), 0)
SET @cnt2 = @cnt2 + 1;
END;
It works correctly, but now I need similar code to insert values when using Firebird.
Could you help me with this?

The equivalent for this in Firebird would require an execute block (an anonymous procedure), or a stored procedure.
The code would be something like:
execute block as
declare variable cnt2 integer = 0;
begin
while (cnt2 < 1000) do
begin
insert into Table (ID, LastName, FirstName, StreetAddress, City, ZipCode, PhoneNumber, Email, EnteringDate, GroupID)
values ('00' || :cnt2, 'StudentLastName-' || :cnt2, 'FirstName', 'Street', 'City', 'xx-xxx', '500-000-000', 'email@student.xyz', CURRENT_DATE, 0);
cnt2 = cnt2 + 1;
end
end
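One practical note: if you run this from isql (or another tool that treats ; as the statement terminator), you need to switch the terminator around the block so the semicolons inside it don't end the statement early. A minimal sketch of the mechanics (the block body is trimmed down here just to show the terminator handling):
SET TERM ^ ;
execute block as
declare variable cnt2 integer = 0;
begin
while (cnt2 < 3) do
begin
cnt2 = cnt2 + 1;
end
end^
SET TERM ; ^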

Related

Query to send mail to multiple users with their corresponding tickets

I have a table which has a list of ticket owners and the high-priority tickets assigned to them.
For example: [screenshot of the tickets table]
I need to send mail from SQL Server using sp_send_dbmail to John with all the tickets assigned to him in a single mail. This is a huge table, so I want to send each user one mail containing all of their assigned tickets instead of sending one mail per ticket.
Any help would be appreciated.
A very quick google search would give you all the information you need:
https://msdn.microsoft.com/en-gb/library/ms190307.aspx
This page tells you exactly what you need to know.
This should do the trick...
SET NOCOUNT ON;
--sample of your tickets table
CREATE TABLE #Tickets(
TicketId INT IDENTITY,
TicketOwner VARCHAR(100),
TicketDetails VARCHAR(MAX)
)
;
--some sample values
INSERT INTO #Tickets(
TicketOwner,
TicketDetails
)
VALUES
('John', 'This is the first ticket'),
('John', 'This is the second ticket'),
('Jeremy', 'This is the third ticket')
;
--gets the dense_rank so you can iterate through the list of distinct users
SELECT
TicketId,
TicketOwner,
TicketDetails,
DENSE_RANK() OVER (ORDER BY TicketOwner) AS RowNum
INTO #Temp
FROM #Tickets
ORDER BY TicketOwner, TicketId
;
--holds the details of each email
DECLARE
@RowNum INT = (SELECT MAX(RowNum) FROM #Temp),
@MySubject VARCHAR(100) = '',
@MyBody VARCHAR(MAX) = ''
;
--iterate through distinct TicketOwners by using the dense rank value above
WHILE @RowNum > 0
BEGIN
--assign variables
SELECT @MySubject = TicketOwner + '''s tickets:',
@MyBody = @MyBody + 'Ticket #' + CAST(TicketId AS VARCHAR(10)) + ': ' + TicketDetails + CHAR(10) + CHAR(13)
FROM #Temp
WHERE RowNum = @RowNum
;
--send mail
EXEC msdb.dbo.sp_send_dbmail
@recipients = 'jgiaco@fanatics.com',
@subject = @MySubject,
@body = @MyBody
;
SET @RowNum = @RowNum - 1;
SET @MyBody = '';
SET @MySubject = '';
END
;
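As a side note, if you'd rather not build the body with a variable-concatenation loop, the per-owner body can also be assembled set-based with a correlated FOR XML PATH query (a sketch against the same #Tickets sample; you would still loop or cursor over this result to call sp_send_dbmail once per owner):
SELECT
t.TicketOwner,
t.TicketOwner + '''s tickets:' AS MailSubject,
--concatenate every ticket for this owner into one string, one line per ticket
STUFF((SELECT CHAR(13) + CHAR(10) + 'Ticket #' + CAST(t2.TicketId AS VARCHAR(10)) + ': ' + t2.TicketDetails
FROM #Tickets t2
WHERE t2.TicketOwner = t.TicketOwner
FOR XML PATH(''), TYPE).value('.', 'VARCHAR(MAX)'), 1, 2, '') AS MailBody
FROM #Tickets t
GROUP BY t.TicketOwner
;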

TSQL Pivoting Issue - looking for better approach

This is a T-SQL related question. I am using SQL Server 2012.
I have a table like this:
I would like to have output like this:
Explanation:
For each employee there will be one row. An employee has one or more assignments, and BatchId identifies each of them. Based on the BatchId, the column names change (e.g. Country 1, Country 2, etc.).
Approach so far:
Un-pivot the source table like the following:
select
EmpId, 'Country ' + cast(BatchId as varchar) as [ColumnName],
Country as [ColumnValue]
from
SourceTable
UNION
select
EmpId, 'Pass ' + cast(BatchId as varchar) as [ColumnName],
Pass as [ColumnValue]
from
SourceTable
which gives each column's values as rows. Then, this result can be pivoted to get the desired output.
Questions:
Is there a better way of doing this?
At the moment, I know there will be a fixed number of batches, but if, in the future, I want to make the pivoting part dynamic, what is the best approach?
Using tools like SSIS or SSRS, is it easier to handle the pivot dynamically?
Screw doing it in SQL.
Let SSRS do the work for you with a MATRIX. It will PIVOT for you without having to create dynamic SQL to handle the terrible limitation of needing to know all the columns.
For your data, you would have EMP ID as the ROW Group and PASS as your column grouping.
https://msdn.microsoft.com/en-us/library/dd207149.aspx
There are many possible solutions to achieve what you want (search for Dynamic Pivot on multiple columns)
SqlFiddleDemo
Warning: I assume that columns Country and Pass are NOT NULL
CREATE TABLE SourceTable(EmpId INT, BatchId INT,
Country NVARCHAR(100) NOT NULL, Pass NVARCHAR(5) NOT NULL);
INSERT INTO SourceTable(EmpId, BatchId, Country, Pass)
VALUES
(100, 1, 'UK', 'M'), (200, 2, 'USA', 'U'),
(100, 2, 'Romania', 'M'), (100, 3, 'India', 'MA'),
(100, 4, 'Hongkong', 'MA'), (300, 1, 'Belgium', 'U'),
(300, 2, 'Poland', 'U'), (200, 1, 'Australia', 'M');
/* Get Number of Columns Groups Country1..Country<MaxCount> */
DECLARE @max_count INT
,@sql NVARCHAR(MAX) = ''
,@columns NVARCHAR(MAX) = ''
,@i INT = 0
,@i_s NVARCHAR(10);
WITH cte AS
(
SELECT EmpId
,[cnt] = COUNT(*)
FROM SourceTable
GROUP BY EmpId
)
SELECT @max_count = MAX(cnt)
FROM cte;
WHILE @i < @max_count
BEGIN
SET @i += 1;
SET @i_s = CAST(@i AS NVARCHAR(10));
SET @columns += N',MAX(CASE WHEN [row_no] = ' + @i_s + ' THEN Country END) AS Country' + @i_s +
',MAX(CASE WHEN [row_no] = ' + @i_s + ' THEN Pass END) AS Pass' + @i_s;
END
SELECT @sql =
N';WITH cte AS (
SELECT EmpId, Country, Pass, [row_no] = ROW_NUMBER() OVER (PARTITION BY EmpId ORDER BY BatchId)
FROM SourceTable)
SELECT EmpId ' + @columns + N'
FROM cte
GROUP BY EmpId';
/* Debug */
/* SELECT @sql */
EXEC(@sql);
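For the sample data above, EmpId 100 has four rows, so @max_count ends up as 4 and the statement built in @sql looks roughly like the following (uncomment the debug SELECT to see the exact text):
;WITH cte AS (
SELECT EmpId, Country, Pass, [row_no] = ROW_NUMBER() OVER (PARTITION BY EmpId ORDER BY BatchId)
FROM SourceTable)
SELECT EmpId
,MAX(CASE WHEN [row_no] = 1 THEN Country END) AS Country1
,MAX(CASE WHEN [row_no] = 1 THEN Pass END) AS Pass1
,MAX(CASE WHEN [row_no] = 2 THEN Country END) AS Country2
,MAX(CASE WHEN [row_no] = 2 THEN Pass END) AS Pass2
,MAX(CASE WHEN [row_no] = 3 THEN Country END) AS Country3
,MAX(CASE WHEN [row_no] = 3 THEN Pass END) AS Pass3
,MAX(CASE WHEN [row_no] = 4 THEN Country END) AS Country4
,MAX(CASE WHEN [row_no] = 4 THEN Pass END) AS Pass4
FROM cte
GROUP BY EmpId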
Or:
SQLFiddleDemo2
DECLARE @cols NVARCHAR(MAX),
@sql NVARCHAR(MAX) = '';
;WITH cte(col_name, rn) AS(
SELECT DISTINCT col_name = col_name + CAST(BatchId AS VARCHAR(10)),
rn = ROW_NUMBER() OVER(PARTITION BY EmpId ORDER BY BatchId)
FROM SourceTable
CROSS APPLY (VALUES ('Country', Country), ('Pass', Pass)) AS c(col_name, val)
)
SELECT @cols = STUFF((SELECT ',' + QUOTENAME(col_name)
FROM cte
ORDER BY rn /* If column order is important for you */
FOR XML PATH(''), TYPE
).value('.', 'NVARCHAR(MAX)')
, 1, 1, '');
SET @sql =
N';WITH cte AS
(
SELECT EmpId, col_name = col_name + CAST(BatchId AS VARCHAR(10)), val
FROM SourceTable
CROSS APPLY (VALUES (''Country'', Country), (''Pass'', Pass)) AS c(col_name, val)
)
SELECT *
FROM cte
PIVOT
(
MAX(val)
FOR col_name IN (' + @cols + ')
) piv';
EXEC(@sql);
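Since you say the number of batches is fixed for now (at most four in the sample data), the second approach can also be written without dynamic SQL by hard-coding the column list; a minimal sketch:
;WITH cte AS
(
SELECT EmpId, col_name = col_name + CAST(BatchId AS VARCHAR(10)), val
FROM SourceTable
CROSS APPLY (VALUES ('Country', Country), ('Pass', Pass)) AS c(col_name, val)
)
SELECT *
FROM cte
PIVOT
(
MAX(val)
FOR col_name IN ([Country1], [Pass1], [Country2], [Pass2], [Country3], [Pass3], [Country4], [Pass4])
) piv;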

Conversion failed when converting the varchar value '1, 2, 3' to data type int

I can't seem to find a solution to this problem. Following is my query:
Declare @MY_STUDENT_ID Varchar(100)
Select @MY_STUDENT_ID = COALESCE(@MY_STUDENT_ID + ',', '') + Convert(varchar, STUDENT_ID) From Some_TABLE Where FISCAL_YEAR = '2014'
SELECT * FROM Table_Students WHERE STUDENT_ID IN (@MY_STUDENT_ID)
Basically, the first query runs and gives me all student IDs as a comma-separated string, e.g. 1,2,3.
This value is then passed into the second query, but the second query gives the error posted in the title. I have no idea what to do, so any help will be appreciated.
The STUDENT_ID column is of type int.
There is absolutely no need to mess about with comma delimited lists here.
Just use the sub query directly
SELECT *
FROM Table_Students
WHERE STUDENT_ID IN (SELECT StudentId
From Some_TABLE Where FISCAL_YEAR = '2014')
Your approach does not work because it ends up generating something with the semantics of
SELECT *
FROM Table_Students
WHERE STUDENT_ID IN ('1,2,3')
Which is not the same as
SELECT *
FROM Table_Students
WHERE STUDENT_ID IN (1,2,3)
That is just a single string parameter whose contents happen to resemble an IN list, rather than three separate int values.
You could do so using dynamic SQL, but in this scenario Martin Smith's answer seems to be better. Should you, however, wish to use dynamic SQL, this would be the way to do so (untested pseudo-code):
Declare @MY_STUDENT_ID varchar(100);
DECLARE @sql nvarchar(max);
Select @MY_STUDENT_ID = COALESCE(@MY_STUDENT_ID + ',', '') + Convert(varchar, STUDENT_ID) From Some_TABLE Where FISCAL_YEAR = '2014'
SELECT @sql = 'SELECT * FROM Table_Students WHERE STUDENT_ID IN (' + @MY_STUDENT_ID + ')';
EXEC sp_executesql @sql;
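For completeness, if you are on SQL Server 2016 or later and genuinely have to start from a comma-separated string (rather than using the subquery directly), STRING_SPLIT avoids dynamic SQL entirely; a sketch, assuming the string was built as above:
DECLARE @MY_STUDENT_ID varchar(100) = '1,2,3'; -- e.g. built by the COALESCE query above
SELECT s.*
FROM Table_Students AS s
WHERE s.STUDENT_ID IN (SELECT CAST(value AS int)
FROM STRING_SPLIT(@MY_STUDENT_ID, ','));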

What's wrong with this T-SQL MERGE statement?

I am new to MERGE, and I'm sure I have some error in my code.
This code will run and create my scenario:
I have two tables: one called TempUpsert that is filled from a SqlBulkCopy operation (hundreds of millions of records), and a Sales table that holds the production data, which is to be indexed and used.
I wish to merge the TempUpsert table with the Sales one
I am obviously doing something wrong as it fails with even the smallest example
IF EXISTS (SELECT * FROM sys.objects WHERE object_id = OBJECT_ID(N'[dbo].[TempUpsert]') )
drop table TempUpsert;
CREATE TABLE [dbo].[TempUpsert](
[FirstName] [varchar](200) NOT NULL,
[LastName] [varchar](200) NOT NULL,
[Score] [int] NOT NULL
) ON [PRIMARY] ;
CREATE TABLE [dbo].[Sales](
[FullName] [varchar](200) NOT NULL,
[LastName] [varchar](200) NOT NULL,
[FirstName] [varchar](200) NOT NULL,
[lastUpdated] [date] NOT NULL,
CONSTRAINT [PK_Sales] PRIMARY KEY CLUSTERED
(
[FullName] ASC
)
) ON [PRIMARY];
---- PROC
CREATE PROCEDURE [dbo].[sp_MoveFromTempUpsert_to_Sales]
(@HashMod int)
AS
BEGIN
-- SET NOCOUNT ON added to prevent extra result sets from
-- interfering with SELECT statements.
SET NOCOUNT ON;
MERGE Sales AS trget
USING (
SELECT
--- Edit: Thanks to Mikal added DISTINCT
DISTINCT
FirstName, LastName , [Score], LastName+'.'+FirstName AS FullName
FROM TempUpsert AS ups) AS src (FirstName, LastName, [Score], FullName)
ON
(
src.[Score] = @hashMod
AND
trget.FullName=src.FullName
)
WHEN MATCHED
THEN
UPDATE SET trget.lastUpdated = GetDate()
WHEN NOT MATCHED
THEN INSERT ([FullName], [LastName], [FirstName], [lastUpdated])
VALUES (FullName, src.LastName, src.FirstName, GetDate())
OUTPUT $action, Inserted.*, Deleted.* ;
--print @@rowcount
END
GO
--- Insert dummy data
INSERT INTO TempUpsert (FirstName, LastName, Score)
VALUES ('John','Smith',2);
INSERT INTO TempUpsert (FirstName, LastName, Score)
VALUES ('John','Block',2);
INSERT INTO TempUpsert (FirstName, LastName, Score)
VALUES ('John','Smith',2); --make multiple on purpose
----- EXECUTE PROC
GO
DECLARE @return_value int
EXEC @return_value = [dbo].[sp_MoveFromTempUpsert_to_Sales]
@HashMod = 2
SELECT 'Return Value' = @return_value
GO
This returns:
(1 row(s) affected)
(1 row(s) affected)
(1 row(s) affected)
Msg 2627, Level 14, State 1, Procedure sp_MoveFromTempUpsert_to_Sales, Line 12
Violation of PRIMARY KEY constraint 'PK_Sales'. Cannot insert duplicate key in object
'dbo.Sales'. The statement has been terminated.
(1 row(s) affected)
What am I doing wrong please?
Greatly appreciated
The first and third rows in your staging table will give you the duplicate PK violation: FullName is the PK, and you insert LastName + '.' + FirstName with the same value ('Smith.John') twice.
In summation:
MERGE requires its USING input to be free of duplicates (with respect to the ON condition).
The USING clause takes a regular SQL statement, so you can use GROUP BY, DISTINCT and HAVING as well as WHERE clauses.
My final MERGE looks like this:
MERGE Sales AS trget
USING (
SELECT FirstName, LastName, Score, LastName + '.' + FirstName AS FullName
FROM TempUpsert AS ups
WHERE Score = @hashMod
GROUP BY FirstName, LastName, Score, LastName + '.' + FirstName
) AS src (FirstName, LastName, [Score], FullName)
ON
(
-- src.[Score] = @hashMod
--AND
trget.FullName=src.FullName
)
WHEN MATCHED
THEN
UPDATE SET trget.lastUpdated = GetDate()
WHEN NOT MATCHED
THEN INSERT ([FullName], [LastName], [FirstName], [lastUpdated])
VALUES (FullName, src.LastName, src.FirstName, GetDate())
OUTPUT $action, Inserted.*, Deleted.* ;
--print @@rowcount
END
And it works!
Thanks to you all :)
Without DISTINCT or a proper aggregate function in the subquery used in the USING part of the MERGE, there will be two rows that satisfy the criteria in the ON part (two John.Smith rows), which is not allowed.
AND
Move the condition src.[Score] = @hashMod inside the subquery.
If it stays in the ON clause and does not match (for example, John.Smith has a score of 2 while @HashMod = 1), the source row counts as NOT MATCHED, and if you already have a John.Smith row in the target table the attempted insert fails with a primary key constraint violation.

How to create a date lookup table to speed up stored procs?

I want to reduce the time taken by one of my stored procs, which currently uses the following logic to calculate the date field, both in the SELECT and the GROUP BY portion:
left(datename(month, a.QXP_REPORT_DATE), 3) + ' ''' + right(datename(year, a.QXP_REPORT_DATE), 2)
Would a simple lookup table take less time? If so, how would I populate the following fields for all dates in the last 2 years?
CREATE TABLE #CALENDAR(
FULLDATE DATETIME,
MONTHNAME NVARCHAR(3),
sYEAR SMALLINT
)
INSERT INTO #CALENDAR
SELECT '4/19/2011', left(datename(month, '4/19/2011'), 3), right(datename(year, '4/19/2011'), 2)
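For what it's worth, a minimal sketch of one way to fill that table for the last two years, using a recursive CTE (the @StartDate/@EndDate names are just for illustration; MAXRECURSION 0 is needed because two years exceeds the default recursion limit of 100):
DECLARE @StartDate DATETIME = DATEADD(YEAR, -2, CAST(GETDATE() AS DATE)); -- illustrative names
DECLARE @EndDate DATETIME = CAST(GETDATE() AS DATE);
;WITH Dates AS
(
SELECT @StartDate AS FULLDATE
UNION ALL
SELECT DATEADD(DAY, 1, FULLDATE)
FROM Dates
WHERE FULLDATE < @EndDate
)
INSERT INTO #CALENDAR (FULLDATE, MONTHNAME, sYEAR)
SELECT FULLDATE,
left(datename(month, FULLDATE), 3),
right(datename(year, FULLDATE), 2)
FROM Dates
OPTION (MAXRECURSION 0);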
I'm starting to think maybe a function call would be better than a lookup table. Here is the full SQL of my stored proc:
DECLARE
@FirstMonthDate DATETIME,
@LastMonthDate DATETIME,
@TheLevel INT,
@ProductGroup VARCHAR(255),
@TheCategory VARCHAR(255),
@ListNumber VARCHAR(50)
--AS
-- SET NOCOUNT ON;
--ComplaintTrendingDrillDown3p '3/1/10', '3/31/11 23:59:59', 3 , 'RealTime IVD', 'Procedure Not Followed', ''
SET @FirstMonthDate = '3/1/11'
SET @LastMonthDate = '3/31/11 23:59:59'
SET @TheLevel = 3
SET @ProductGroup = 'RealTime IVD'
SET @TheCategory = 'Procedure Not followed'
--SET @ListNumber = '2G31-90'
DECLARE @SelectedLevels table (LevelId int not null primary key)
declare @OneYearAgo datetime
set @OneYearAgo = dateadd(year, -1, @FirstMonthDate)
IF @TheLevel = 3
BEGIN
INSERT INTO @SelectedLevels (LevelId) VALUES (1)
INSERT INTO @SelectedLevels (LevelId) VALUES (2)
END
ELSE if @TheLevel = 5
BEGIN
INSERT INTO @SelectedLevels (LevelId) VALUES (0)
INSERT INTO @SelectedLevels (LevelId) VALUES (1)
INSERT INTO @SelectedLevels (LevelId) VALUES (2)
END
ELSE
BEGIN
INSERT INTO @SelectedLevels (LevelId) VALUES (@TheLevel)
END
SELECT count(distinct a.QXP_EXCEPTION_NO) AS QXP_EXCEPTION_NO, PRODUCT_CODE_STD, a.qxp_short_desc,
left(datename(month, a.QXP_REPORT_DATE), 3) + ' ''' +
right(datename(year, a.QXP_REPORT_DATE), 2) AS MonthYear ,
CASE WHEN a.QXP_SHORT_DESC = @TheCategory OR ISNULL(@TheCategory, '') = '' THEN 1 ELSE 0 END AS SELECTED_CATEGORY
FROM ALL_COMPLAINTS a
INNER JOIN @SelectedLevels F ON A.[LEVEL] = F.LevelId
LEFT OUTER JOIN MANUAL.PRODUCTS b ON a.EPA_PRD_CODE = b.LIST_NUMBER
LEFT OUTER JOIN SMARTSOLVE.V_CXP_CUSTOMER_PXP c ON a.QXP_ID = c.QXP_ID
WHERE a.QXP_REPORT_DATE >= @OneYearAgo AND
a.QXP_REPORT_DATE <= @LastMonthDate AND a.QXP_SHORT_DESC <> 'Design Control'
AND (c.QXP_EXCEPTION_TYPE <> 'Non-Diagnostic' OR c.QXP_EXCEPTION_TYPE IS NULL)
AND PRODUCT_GROUP= @ProductGroup
AND (PRODUCT_CODE_STD = @ListNumber OR ISNULL(@ListNumber, '') = '')
and left(datename(month, a.QXP_REPORT_DATE), 3) = 'may'
GROUP BY PRODUCT_CODE_STD, left(datename(month, a.QXP_REPORT_DATE), 3) + ' ''' + right(datename(year, a.QXP_REPORT_DATE), 2) , a.qxp_short_desc
order by left(datename(month, a.QXP_REPORT_DATE), 3) + ' ''' +
right(datename(year, a.QXP_REPORT_DATE), 2), product_code_std, qxp_short_desc
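If the lookup-table route is taken, the idea would be to join the calendar table on the date portion of QXP_REPORT_DATE and select, group and order by a precomputed label instead of recomputing the DATENAME expression three times. A rough sketch of just that part, assuming #CALENDAR is populated as above and given a computed MonthYear column (the column and alias names here are illustrative, not from the original proc):
-- illustrative: precompute the "Mar '11" style label once in the lookup table
ALTER TABLE #CALENDAR ADD MonthYear AS (MONTHNAME + ' ''' + CAST(sYEAR AS VARCHAR(2)));
SELECT c.MonthYear, COUNT(DISTINCT a.QXP_EXCEPTION_NO) AS QXP_EXCEPTION_NO
FROM ALL_COMPLAINTS a
INNER JOIN #CALENDAR c ON c.FULLDATE = CAST(a.QXP_REPORT_DATE AS DATE)
GROUP BY c.MonthYear
ORDER BY c.MonthYear;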
Execution plan recommendations:
CREATE NONCLUSTERED INDEX [<Name of Missing Index, sysname,>]
ON [SMARTSOLVE].[V_CXP_CUSTOMER_PXP] ([QXP_REPORT_DATE],[QXP_UDF_STRING_8],[QXP_XRS_DESCRIPTION])
INCLUDE ([QXP_ID],[QXP_EXCEPTION_NO],[QXP_BASE_EXCEPTION],[QXP_OCCURENCE_DATE],[QXP_COORD_ID],[QXP_ROOT_CAUSE],[QXP_DESCRIPTION],[QXP_QEI_ID],[QXP_EXCEPTION_TYPE],[QXP_UDF_STRING_2],[QXP_UDF_STRING_5],[CXP_ID],[CXP_AWARE_DATE],[QXP_XSV_CODE],[QXP_COORD_NAME],[QXP_ORU_NAME],[QXP_RESOLUTION_DESC],[QXP_CLOSED_DATE],[CXP_CLIENT_CODE],[CXP_CLIENT_NAME])