How to delete rows from a heap with a batch size of 10000 - sql-delete

None of these is supported:
DELETE TOP(10000) FROM dataArchival.MyTable
WHERE DateLocal BETWEEN '2018-03-01' AND '2018-10-01'

DELETE dataArchival.MyTable
FROM dataArchival.MyTable d, #myTemp d2
WHERE d.DateLocal = d2.DateLocal

DELETE A FROM
(
SELECT
*,
RN = ROW_NUMBER() OVER(ORDER BY (SELECT NULL))
FROM dataArchival.MyTable
WHERE DateLocal BETWEEN '2018-03-01' AND '2018-10-01'
) A WHERE A.RN <= 10000
And of course CTEs and @@ROWCOUNT are also not supported.

As per the documentation, Azure SQL Data Warehouse does not support TOP(n) with DELETE at this point:
-- Syntax for Azure SQL Data Warehouse and Parallel Data Warehouse
DELETE FROM [database_name . [ schema ] . | schema. ] table_name
[ WHERE <search_condition> ]
[ OPTION ( <query_options> [ ,...n ] ) ]
[; ]
However, you can script a similar effect; e.g. this example has been adapted to delete by date:
CREATE TABLE #tmp
WITH (
DISTRIBUTION = ROUND_ROBIN
)
AS
SELECT
ROW_NUMBER() OVER( ORDER BY ( SELECT NULL ) ) AS rowId,
DateLocal
FROM ( SELECT DISTINCT DateLocal FROM dataArchival.MyTable ) x;
DECLARE @sql_code NVARCHAR(4000) = 'DELETE dataArchival.MyTable WHERE DateLocal = @p1';
DECLARE @nbr_statements INT = ( SELECT COUNT(*) FROM #tmp ),
@i INT = 1;
WHILE @i <= @nbr_statements
BEGIN
DECLARE @p1 DATE = ( SELECT DateLocal FROM #tmp WHERE rowId = @i );
EXEC sp_executesql @sql_code, N'@p1 DATE', @p1;
SET @i += 1;
END
You could easily alter this example to delete in batches of 10,000 if deleting by date is unsatisfactory. There is an additional example of batching up deletes here:
https://learn.microsoft.com/en-us/azure/sql-data-warehouse/sql-data-warehouse-develop-best-practices-transactions#minimize-logging-with-small-batches
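If you need true 10,000-row batches rather than per-date deletes, a variation on the same scripting trick is to pre-assign batch numbers to the target rows. This is only a sketch: it assumes MyTable has a unique Id column (hypothetical here), and that your service version accepts an IN subquery as the DELETE search condition, so verify both before relying on it.
CREATE TABLE #batches
WITH ( DISTRIBUTION = ROUND_ROBIN )
AS
SELECT Id,   -- hypothetical unique key column
( ROW_NUMBER() OVER ( ORDER BY ( SELECT NULL ) ) - 1 ) / 10000 AS batchId
FROM dataArchival.MyTable
WHERE DateLocal BETWEEN '2018-03-01' AND '2018-10-01';

DECLARE @b INT = 0,
@maxb INT = ( SELECT MAX(batchId) FROM #batches );
WHILE @b <= @maxb
BEGIN
DELETE dataArchival.MyTable
WHERE Id IN ( SELECT Id FROM #batches WHERE batchId = @b );
SET @b += 1;
END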

Related

Issue with PK violation on insert

I have a scenario where almost all of the tables have issues with the PK value as follows: inserts fail with a database error due to the PK violation.
When using DBCC CHECKIDENT it displays an inconsistency between the next value and the current one.
Does anyone have an idea why the mismatch is happening on several tables?
Since this database is then replicated, I'm afraid this error will propagate across the environment.
I adapted the script below to fix it, but I'm really trying to figure out the root of the problem.
/** Version 3.0 **/
if object_id('tempdb..#temp') is not null
drop table #temp
;
with cte as (
SELECT
distinct
A.TABLE_CATALOG AS CATALOG,
A.TABLE_SCHEMA AS "SCHEMA",
A.TABLE_NAME AS "TABLE",
B.COLUMN_NAME AS "COLUMN",
IDENT_SEED (A.TABLE_NAME) AS Seed,
IDENT_INCR (A.TABLE_NAME) AS Increment,
IDENT_CURRENT (A.TABLE_NAME) AS Curr_Value
, DBPS.row_count AS NumberOfRows
FROM INFORMATION_SCHEMA.TABLES A
inner join INFORMATION_SCHEMA.COLUMNS B on b.TABLE_NAME = a.TABLE_NAME and b.TABLE_SCHEMA = a.TABLE_SCHEMA
inner join sys.identity_columns IC on OBJECT_NAME (IC.object_id) = a.TABLE_NAME
inner join sys.dm_db_partition_stats DBPS ON DBPS.object_id = IC.object_id
inner join sys.indexes as IDX ON DBPS.index_id = IDX.index_id
WHERE A.TABLE_CATALOG = B.TABLE_CATALOG AND
A.TABLE_SCHEMA = B.TABLE_SCHEMA AND
A.TABLE_NAME = B.TABLE_NAME AND
COLUMNPROPERTY (OBJECT_ID (B.TABLE_NAME), B.COLUMN_NAME, 'IsIdentity') = 1 AND
OBJECTPROPERTY (OBJECT_ID (A.TABLE_NAME), 'TableHasIdentity') = 1 AND
A.TABLE_TYPE = 'BASE TABLE'
)
select 'DBCC CHECKIDENT ('''+A.[SCHEMA]+'.'+a.[TABLE]+''', reseed)' command
, ROW_NUMBER() OVER(ORDER BY a.[SCHEMA], a.[TABLE] asc) AS ID
, A.Curr_Value
, a.[TABLE]
into #temp
from cte A
ORDER BY A.[SCHEMA], A.[TABLE]
declare @i int = 1, @count int = (select max(ID) from #temp)
declare @text varchar(max) = ''
select @count = count(1) FROM #temp
WHILE @i <= @count
BEGIN
SET @text = (SELECT command from #temp where ID = @i)
EXEC (@text + ';')
print @text
select Curr_Value OldValue, ident_current([TABLE]) FixValue, [TABLE] from #temp where ID = @i
SET @i = @i + 1
SET @text = '';
END
go
Maybe someone or something with enough permissions made a mistake by reseeding? It can be as simple as this:
create table testid (
id int not null identity (1,1) primary key,
data varchar (3)
)
insert into testid (data) values ('abc'),('cde')  -- generates ids 1 and 2
DBCC CHECKIDENT ('testid', RESEED, 1)             -- next generated id will be 2 again
insert into testid (data) values ('bad')          -- PK violation: id 2 already exists
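For checking and repairing a single table, DBCC CHECKIDENT can both report and fix the mismatch:
DBCC CHECKIDENT ('testid', NORESEED)  -- report current identity value and column maximum; changes nothing
DBCC CHECKIDENT ('testid', RESEED)    -- correct the identity value to match the column maximum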

Can I insert a dynamic number of rows into a table using values from the table?

I want to insert a dynamic number of rows into a table, based on information in that table.
I can do it using the code below, but I'm wondering if there's a way to avoid the loop.
The commented out section was my best attempt at what I was trying to do, but it gave me an error of:
"The reference to column "iCount" is not allowed in an argument to a TOP, OFFSET, or FETCH clause. Only references to columns at an outer scope or standalone expressions and subqueries are allowed here."
DECLARE @TableX TABLE (
TDate DATE
, TType INT
, Fruit NVARCHAR(20)
, Vegetable NVARCHAR(20)
, Meat NVARCHAR(20)
, Bread NVARCHAR(20)
)
INSERT INTO @TableX VALUES
('2016-11-10',1,'Apple','Artichoke',NULL,NULL)
, ('2016-11-10',1,'Banana','Beet',NULL,NULL)
, ('2016-11-10',1,'Canteloupe','Cauliflower',NULL,NULL)
, ('2016-11-10',1,'Durian','Daikon',NULL,NULL)
, ('2016-11-10',2,NULL,NULL,'Rabbit','Rye')
, ('2016-11-10',2,NULL,NULL,'Sausage','Sourdough')
, ('2016-11-11',1,'Elderberry','Eggplant',NULL,NULL)
, ('2016-11-11',2,NULL,NULL,'Turkey','Tortilla')
, ('2016-11-11',2,NULL,NULL,'Venison','Vienna')
SELECT * FROM @TableX
DECLARE @BlankRow TABLE (
ID INT IDENTITY
, TDate DATE
, TType INT
, iCount INT
)
DECLARE @Counter1 INT = 0
, @RowCount INT
; WITH BR1
AS (
SELECT TDate, TType, COUNT(*) AS iCount
FROM @TableX
WHERE TType = 1
GROUP BY TDate, TType
)
, BR2
AS (
SELECT TDate, TType, COUNT(*) AS iCount
FROM @TableX
WHERE TType = 2
GROUP BY TDate, TType
)
INSERT INTO @BlankRow
SELECT ISNULL(BR1.TDate, BR2.TDate) AS TDate,
CASE WHEN ISNULL(BR1.iCount,0) < ISNULL(BR2.iCount,0) THEN 1 ELSE 2 END AS TType,
ABS(ISNULL(BR1.iCount,0) - ISNULL(BR2.iCount,0)) AS iCount
FROM BR1
FULL JOIN BR2
ON BR1.TDate = BR2.TDate
WHILE @Counter1 < (SELECT MAX(ID) FROM @BlankRow)
BEGIN
SET @Counter1 += 1
SET @RowCount = (SELECT iCount FROM @BlankRow WHERE ID = @Counter1)
INSERT INTO @TableX
SELECT TOP (@RowCount) tx.TDate, br.TType, NULL, NULL, NULL, NULL
FROM @TableX tx
LEFT JOIN @BlankRow br
ON tx.TDate = br.TDate
WHERE br.ID = @Counter1
END
/*INSERT INTO @TableX
SELECT TOP (tx.iCount) tx.TDate, br.TType, NULL, NULL, NULL, NULL
FROM @TableX tx
JOIN @BlankRow br
ON tx.TDate = br.TDate*/
SELECT *
FROM @TableX
ORDER BY TDate, TType,
ISNULL(Fruit,REPLICATE(CHAR(255),20)),
ISNULL(Vegetable,REPLICATE(CHAR(255),20)),
ISNULL(Meat,REPLICATE(CHAR(255),20)),
ISNULL(Bread,REPLICATE(CHAR(255),20))
The data is silly, I know, but my end goal is to have two different Tablixes in Report Builder that end up with the same number of rows, so that my group headers show up at the same place on the page.
Something like this:
declare @TableX table(TDate date
,TType int
,Fruit nvarchar(20)
,Vegetable nvarchar(20)
,Meat nvarchar(20)
,Bread nvarchar(20)
);
insert into @TableX values
('2016-11-10',1,'Apple','Artichoke',NULL,NULL)
,('2016-11-10',1,'Banana','Beet',NULL,NULL)
,('2016-11-10',1,'Canteloupe','Cauliflower',NULL,NULL)
,('2016-11-10',1,'Durian','Daikon',NULL,NULL)
,('2016-11-10',2,NULL,NULL,'Rabbit','Rye')
,('2016-11-10',2,NULL,NULL,'Sausage','Sourdough')
,('2016-11-11',1,'Elderberry','Eggplant',NULL,NULL)
,('2016-11-11',2,NULL,NULL,'Turkey','Tortilla')
,('2016-11-11',2,NULL,NULL,'Venison','Vienna');
with DataRN as
(
select *
,row_number() over (partition by TDate, TType order by TDate) rn
from @TableX
)
,RowsRN as
(
select tt.TDate
,tt.TType
,td.rn
from (select distinct TDate, TType
from @TableX
) tt
full join (select distinct t1.TDate
,row_number() over (partition by t1.TDate, t1.TType order by t1.TDate) rn
from @TableX t1
) td
on(tt.TDate = td.TDate)
)
select r.TDate
,r.TType
,d.Fruit
,d.Vegetable
,d.Meat
,d.Bread
from DataRN d
full join RowsRN r
on(d.TDate = r.TDate
and d.TType = r.TType
and d.rn = r.rn
)
order by r.TDate
,r.TType
,isnull(d.Fruit,REPLICATE(CHAR(255),20))
,isnull(d.Vegetable,REPLICATE(CHAR(255),20))
,isnull(d.Meat,REPLICATE(CHAR(255),20))
,isnull(d.Bread,REPLICATE(CHAR(255),20))
In response to your comment, here is how you would use another CTE to generate the full list of dates that you need, if you haven't already got a Dates reference table (these are tremendously useful):
declare @MinDate date = (select min(TDate) from @TableX);
declare @MaxDate date = (select max(TDate) from @TableX);
with Dates as
(
select @MinDate as DateValue
union all
select dateadd(d,1,DateValue)
from Dates
where DateValue < @MaxDate
)
select DateValue
from Dates
option (maxrecursion 0);
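If you want a permanent Dates reference table, you can persist that same output once; a minimal sketch (the date range and the dbo.Dates name are arbitrary choices):
declare @MinDate date = '2000-01-01';
declare @MaxDate date = '2049-12-31';
with Dates as
(
select @MinDate as DateValue
union all
select dateadd(d,1,DateValue)
from Dates
where DateValue < @MaxDate
)
select DateValue
into dbo.Dates
from Dates
option (maxrecursion 0);
create unique clustered index UX_Dates on dbo.Dates (DateValue);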

Query to find duplicate rows in a table

I am running the following query, which is terribly inefficient and can take hours. I am having SQL brain farts today and I do not know how to improve it. There are several nullable varchar fields, and I need to identify the duplicate rows (rows where every column contains identical values to another row).
select * from transactions x where exists (
select Coalesce(ColA, ''),
Coalesce(ColB, ''),
Coalesce(ColC, '')
from transactions y
where Coalesce(x.ColA, '') = Coalesce(y.ColA, '') and
Coalesce(x.ColB, '') = Coalesce(y.ColB, '') and
Coalesce(x.ColC, '') = Coalesce(y.ColC, '')
group by Coalesce(ColA, ''),
Coalesce(ColB, ''),
Coalesce(ColC, '')
having count(*) > 1
)
Why does this take so long to run? There has to be a better way.
You could improve it by:
removing unnecessary checks
putting a composite index on ColA, ColB and ColC (a sketch of the index follows the query below)
What is unnecessary? It seems to be unnecessary to join the table with itself. Why don't you use a simple GROUP BY? You also don't need the WHERE:
SELECT COALESCE(ColA, '') AS ColA,
COALESCE(ColB, '') AS ColB,
COALESCE(ColC, '') AS ColC,
Count(*) As Cnt
FROM transactions t
GROUP BY COALESCE(ColA, ''), COALESCE(ColB, ''), COALESCE(ColC, '')
HAVING Count(*) > 1
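For the composite index suggested above, something along these lines (the index name is made up; include every column that takes part in the comparison):
CREATE INDEX IX_transactions_dupcheck ON transactions (ColA, ColB, ColC)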
Does this work?
DECLARE @transactions TABLE (
ColA INT
, ColB INT
, ColC INT
, ColD INT
, ColE INT
, ColF INT
)
DECLARE @Counter1 INT = 0
WHILE @Counter1 < 10000
BEGIN
SET @Counter1 += 1
INSERT INTO @transactions
SELECT ROUND(RAND()*10,0)
, ROUND(RAND()*10,0)
, ROUND(RAND()*10,0)
, ROUND(RAND()*10,0)
, ROUND(RAND()*10,0)
, ROUND(RAND()*10,0)
END
;WITH Dupe
AS (
SELECT *, ROW_NUMBER() OVER
(PARTITION BY ColA, ColB, ColC, ColD, ColE, ColF
ORDER BY ColA, ColB, ColC, ColD, ColE, ColF) AS rn
FROM @transactions
)
SELECT * FROM Dupe WHERE rn > 1
You can use an ISNULL on anything where you need to compare a value that might be null. Note that most of this I've written is just to generate a useful data set. With 6 columns and 10,000 rows I got 42 identical rows in less than a second. No triples. Bumped it up to 100,000 rows and I got 3,489 duplicate rows, including some triples. Took 3 seconds.
Here's an example using text. This whole thing took 25 seconds on 100,000 records, although my timer shows that less than 4 seconds of that was finding the duplicates, with the remainder being the table population.
DECLARE @transactions2 TABLE (
ColA NVARCHAR(30)
, ColB NVARCHAR(30)
, ColC NVARCHAR(30)
, ColD NVARCHAR(30)
, ColE NVARCHAR(30)
, ColF NVARCHAR(30)
)
DECLARE @names TABLE (
ID INT IDENTITY
, Name NVARCHAR(30)
)
DECLARE @Counter2 INT = 0
, @ColA NVARCHAR(30)
, @ColB NVARCHAR(30)
, @ColC NVARCHAR(30)
, @ColD NVARCHAR(30)
, @ColE NVARCHAR(30)
, @ColF NVARCHAR(30)
INSERT INTO @names VALUES
('Anderson, Arthur')
, ('Broberg, Bruce')
, ('Chan, Charles')
, ('Davidson, Darwin')
, ('Eggert, Emily')
, ('Fox, Francesca')
, ('Garbo, Greta')
, ('Hollande, Hortense')
, ('Iguadolla, Ignacio')
, ('Jackson, Jurimbo')
, ('Katana, Ken')
, ('Lawrence, Larry')
, ('McDonald, Michael')
, ('Nyugen, Nathan')
, ('O''Dell, Oliver')
, ('Peterson, Phillip')
, ('Quigley, Quentin')
, ('Ramallah, Rodolfo')
, ('Smith, Samuel')
, ('Turner, Theodore')
, ('Uno, Umberto')
, ('Victor, Victoria')
, ('Wallace, William')
, ('Xing, Xiopan')
, ('Young, Yvette')
, ('Zapata, Zorro')
, (NULL)
WHILE @Counter2 < 100000
BEGIN
SET @Counter2 += 1
SET @ColA = (SELECT Name FROM @names WHERE ID = ROUND(RAND()*27 +.5,0))
SET @ColB = (SELECT Name FROM @names WHERE ID = ROUND(RAND()*27 +.5,0))
SET @ColC = (SELECT Name FROM @names WHERE ID = ROUND(RAND()*27 +.5,0))
SET @ColD = (SELECT Name FROM @names WHERE ID = ROUND(RAND()*27 +.5,0))
SET @ColE = (SELECT Name FROM @names WHERE ID = ROUND(RAND()*27 +.5,0))
SET @ColF = (SELECT Name FROM @names WHERE ID = ROUND(RAND()*27 +.5,0))
INSERT INTO @transactions2
SELECT @ColA, @ColB, @ColC, @ColD, @ColE, @ColF
END
PRINT CAST(GETDATE() AS DateTime2 (3))
;WITH Dupe
AS (
SELECT *, ROW_NUMBER() OVER
(PARTITION BY ISNULL(ColA,''), ISNULL(ColB,''), ISNULL(ColC,''), ISNULL(ColD,''), ISNULL(ColE,''), ISNULL(ColF,'')
ORDER BY ISNULL(ColA,''), ISNULL(ColB,''), ISNULL(ColC,''), ISNULL(ColD,''), ISNULL(ColE,''), ISNULL(ColF,'')) AS rn
FROM @transactions2
)
SELECT * FROM Dupe WHERE rn > 1 ORDER BY rn
PRINT CAST(GETDATE() AS DateTime2 (3))
Here is a much faster way using a subquery join. It ran in under 10 seconds:
select x.* from transactions x
join (
select Coalesce(ColA, '') AS ColA,
Coalesce(ColB, '') AS ColB,
Coalesce(ColC, '') AS ColC
from transactions
group by Coalesce(ColA, ''),
Coalesce(ColB, ''),
Coalesce(ColC, '')
having count(*) > 1
) dups on
dups.ColA = Coalesce(x.ColA, '') and
dups.ColB = Coalesce(x.ColB, '') and
dups.ColC = Coalesce(x.ColC, '')
The important thing about this query is that it returns all copies of each duplicated row, not just the extras.
If this is a one-time job that involves a huge number of rows and doesn't need to be a view, then perhaps you'd opt to INSERT...SELECT into a table that has a UNIQUE index with the IGNORE_DUP_KEY option, as sketched below.
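That approach might look something like this sketch (the staging table name and column types are assumptions):
CREATE TABLE transactions_dedup (
ColA varchar(100) NOT NULL,
ColB varchar(100) NOT NULL,
ColC varchar(100) NOT NULL
)
-- IGNORE_DUP_KEY turns duplicate keys into a warning instead of an error,
-- so only the first copy of each key survives the insert
CREATE UNIQUE INDEX UX_transactions_dedup
ON transactions_dedup (ColA, ColB, ColC)
WITH (IGNORE_DUP_KEY = ON)
INSERT INTO transactions_dedup (ColA, ColB, ColC)
SELECT COALESCE(ColA, ''), COALESCE(ColB, ''), COALESCE(ColC, '')
FROM transactions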

reuse table data in round robin manner

Let us say I have some data I would like to repeat N times. A naive approach would be this:
IF OBJECT_ID('dbo.Data', 'U') IS NOT NULL
DROP TABLE dbo.Data
CREATE TABLE Data
(
DataId INT NOT NULL PRIMARY KEY,
DataValue NVARCHAR(MAX) NOT NULL
)
INSERT INTO Data (DataId, DataValue)
SELECT 1, 'Value1' UNION ALL
SELECT 2, 'Value2' UNION ALL
SELECT 3, 'Value3' UNION ALL
SELECT 4, 'Value4' UNION ALL
SELECT 5, 'Value5'
DECLARE @RowsRequired INT
DECLARE @Counter INT
DECLARE @NumberOfRows INT
SET @RowsRequired = 22
IF OBJECT_ID('tempdb..#TempData') IS NOT NULL DROP TABLE #TempData
CREATE TABLE #TempData
(
Id INT IDENTITY(1,1),
DataValue NVARCHAR(MAX)
)
SELECT @NumberOfRows = COUNT(*) FROM Data
SET @Counter = 1
WHILE @RowsRequired > 0
BEGIN
INSERT INTO #TempData
SELECT DataValue FROM Data WHERE DataId = @Counter
SET @Counter = @Counter + 1
SET @RowsRequired = @RowsRequired - 1
IF(@Counter > @NumberOfRows)
BEGIN
SET @Counter = 1
END
END
SELECT * FROM #TempData
Here @RowsRequired determines how many rows are required. Could this be rephrased in a set-based form? Thanks.
Here is a SQLFiddle with the code.
Try this instead:
DECLARE @RowsRequired INT = 22
;WITH CTE AS
(
SELECT DataId, DataValue, ROW_NUMBER() over (PARTITION BY DataId ORDER BY DataId) sort
FROM Data
CROSS JOIN
(
SELECT TOP (@RowsRequired) 0 d
FROM master..spt_values
) d
)
SELECT TOP (@RowsRequired) ROW_NUMBER() over (order by sort) AS Id, DataValue
FROM CTE
ORDER BY sort, 1
I tried this and it worked for me.
declare @requiredrows int
set @requiredrows = 22;
declare @foreachrow int
select @foreachrow = @requiredrows / Count(*) from Data;
select top (@requiredrows) * from
(
select *, ROW_NUMBER() over(partition by DataId order by number) rno
from Data
Cross Join master..spt_values
) A
where rno <= @foreachrow + 1
Hope it will help.
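Another set-based phrasing is a modulo join against a numbers sequence. A sketch against the Data table above (note that master..spt_values only yields a couple of thousand numbers, so a very large @RowsRequired would need a bigger tally source):
declare @RowsRequired int = 22;
with Nums as
(
-- one row per required output row, numbered 0, 1, 2, ...
select top (@RowsRequired)
row_number() over (order by (select null)) - 1 as n
from master..spt_values
),
Src as
(
select DataValue,
row_number() over (order by DataId) - 1 as r,
count(*) over () as cnt
from Data
)
select n + 1 as Id, s.DataValue
from Nums
join Src s on s.r = Nums.n % s.cnt
order by n;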

Implementing and applying a string split in T-SQL

I have this statement in T-SQL.
SELECT Bay From TABLE where uid in (
select B_Numbers from Info_Step WHERE uid = 'number'
)
I am selecting "multiple" BAYs from TABLE where their uid is equal to a string of numbers like this:
B_Numbers = 1:45:34:98
Therefore, I should be selecting 4 different BAYs from TABLE. I basically need to split the string 1:45:34:98 up into 4 different numbers.
I'm thinking that Split() would work, but it doesn't and I get a syntax error.
Any thoughts from the T-SQL gods would be awesome!
Here is an implementation of a split function that returns the list of numbers as a table:
http://rbgupta.blogspot.com/2007/03/split-function-tsql.html
Looks like this would set you on your way...
Here is a method that uses an auxiliary numbers table to parse the input string. The logic can easily be added to a function that returns a table. That table can then be joined to lookup the correct rows.
Step 1: Create the Numbers table
SET NOCOUNT ON
GO
IF EXISTS
(
SELECT 1
FROM INFORMATION_SCHEMA.TABLES
WHERE TABLE_NAME = 'Numbers'
AND TABLE_SCHEMA = 'dbo'
AND TABLE_TYPE = 'BASE TABLE'
)
BEGIN
DROP TABLE dbo.Numbers
END
GO
CREATE TABLE dbo.Numbers
(
Number smallint IDENTITY(1, 1) PRIMARY KEY
)
GO
WHILE 1 = 1
BEGIN
INSERT INTO dbo.Numbers DEFAULT VALUES
IF SCOPE_IDENTITY() = 32767
BEGIN
BREAK
END
END
GO
Step 2: Parse the Input String
CREATE FUNCTION dbo.ParseString(@input_string varchar(8000), @delim varchar(8000) = ' ')
RETURNS TABLE
AS RETURN
(
SELECT Number
FROM dbo.Numbers
WHERE CHARINDEX
(
@delim + CONVERT(VARCHAR(12),Number) + @delim,
@delim + @input_string + @delim
) > 0
)
GO
Example:
SELECT * FROM dbo.ParseString('1:45:34:98',':')
Step 3: Use the results however you want/need
Number
------
1
34
45
98
End-To-End Example
Create function that returns the appropriate BNumber (of course change it to use the commented out SQL)
SET ANSI_NULLS ON
GO
SET QUOTED_IDENTIFIER ON
GO
CREATE FUNCTION dbo.GetBNumber (@uid int)
RETURNS VARCHAR(8000)
AS
BEGIN
RETURN '1:45:34:98'
--select B_Numbers from Info_Step WHERE uid = @uid
END
GO
Use the functions together to return the desired results:
-- Using Test Data
SELECT N.Number FROM Numbers N
JOIN dbo.ParseString(dbo.GetBNumber(12345), ':') Q ON Q.Number = N.Number
-- Using Your Data (untested, but should work)
SELECT N.Bay
FROM TABLE N
JOIN dbo.ParseString(dbo.GetBNumber(ENTER YOUR NUMBER HERE), ':') Q ON Q.Number = N.uid
Results
Number
------
1
34
45
98
You should really keep your arrays as rows, but if I understand your question correctly, I think this will work:
SELECT Bay
FROM TABLE
JOIN Info_Step
ON B_Numbers LIKE '%' + uid + '%'
WHERE Info_Step.uid = 'number'
This query will do a full table scan because of the LIKE operator (and it can also match partial numbers; e.g. a uid of 4 would match inside the 45 in '1:45:34:98').
What you can do is loop through the B_Numbers entries and do your own split on ':'. Insert those entries into a temp table and then perform your query.
DECLARE @i int
DECLARE @start int
DECLARE @B_Numbers nvarchar(20)
DECLARE @temp table (
number nvarchar(10)
)
-- SELECT B_Numbers FROM Info_Step WHERE uid = 'number'
SELECT @B_Numbers = '1:45:34:98'
SET @i = 0
SET @start = 0
-- Parse out characters delimited by ":";
-- would make a nice user-defined function.
WHILE @i < len(@B_Numbers)
BEGIN
IF substring(@B_Numbers, @i, 1) = ':'
BEGIN
INSERT INTO @temp
VALUES (substring(@B_Numbers, @start, @i - @start))
SET @start = @i + 1
END
SET @i = @i + 1
END
-- Insert last item
INSERT INTO @temp
VALUES (substring(@B_Numbers, @start, @i - @start + 1))
-- Do query with parsed values
SELECT Bay FROM TABLE WHERE uid in (SELECT * FROM @temp)
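For what it's worth, on SQL Server 2016 and later the built-in STRING_SPLIT function reduces all of this to a single query (written against the placeholder names from the question):
SELECT Bay
FROM TABLE  -- "TABLE" is the placeholder table name from the question
WHERE uid IN (SELECT value FROM STRING_SPLIT('1:45:34:98', ':'))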
You can even try this
declare @str varchar(50)
set @str = '1:45:34:98'
;with numcte as(
select 1 as rn union all select rn+1 from numcte where rn<LEN(@str)),
getchars as(select
ROW_NUMBER() over(order by rn) slno,
rn,chars from numcte
cross apply(select SUBSTRING(@str,rn,1) chars)X where chars = ':')
select top 1
Bay1 = SUBSTRING(@str,0,(select rn from getchars where slno = 1))
,Bay2 = SUBSTRING(@str,
(select rn from getchars where slno = 1) + 1,
(((select rn from getchars where slno = 2)-
(select rn from getchars where slno = 1)
)-1))
,Bay3 = SUBSTRING(@str,
(select rn from getchars where slno = 2) + 1,
(((select rn from getchars where slno = 3)-
(select rn from getchars where slno = 2)
)-1))
,Bay4 = SUBSTRING(@str,
(select rn from getchars where slno = 3)+1,
LEN(@str))
from getchars
Output:
Bay1 Bay2 Bay3 Bay4
1 45 34 98