update multiple columns using case statement in sql - tsql

update UserTransaction set DP=(case
    when (TotalBV >= 201   and TotalBV < 600)   then (0.1*TotalBV)
    when (TotalBV >= 601   and TotalBV < 1600)  then (0.15*TotalBV)
    when (TotalBV >= 1601  and TotalBV < 5000)  then (0.18*TotalBV)
    when (TotalBV >= 5001  and TotalBV < 15000) then (0.21*TotalBV)
    when (TotalBV >= 15001 and TotalBV < 30000) then (0.24*TotalBV)
    when (TotalBV >= 30001 and TotalBV < 50000) then (0.27*TotalBV)
    when (TotalBV >= 50001)                     then (0.30*TotalPBV)
    else null end)
where User_Id=#UserId and Sponsor_Id=#SponsorId
Above is the update query I am using. Right now it updates only one row, with the specific User_Id and Sponsor_Id. How can I run this for multiple User_Id and Sponsor_Id values at once?

Instead of the equality operator, use the IN clause:
UPDATE UserTransaction
SET DP = (CASE .. END)
WHERE user_id IN (SELECT user_id FROM TABLE)
  AND sponsorid IN (SELECT sponsorid FROM TABLE)
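Note that two independent IN lists match every combination of the listed user and sponsor ids, not specific pairs. If only particular (User_Id, Sponsor_Id) pairs should be updated, one T-SQL option is to join the target table to a list of pairs. A minimal sketch, assuming SQL Server 2008+ and with placeholder pair values:

UPDATE ut
SET DP = (CASE
              WHEN ut.TotalBV >= 201 AND ut.TotalBV < 600 THEN 0.1 * ut.TotalBV
              -- ...remaining brackets exactly as in the original CASE...
              ELSE NULL
          END)
FROM UserTransaction AS ut
JOIN (VALUES (1, 10), (2, 20)) AS pairs (User_Id, Sponsor_Id)  -- hypothetical id pairs
    ON ut.User_Id = pairs.User_Id
   AND ut.Sponsor_Id = pairs.Sponsor_Id;

On versions older than 2008, a temp table of pairs joined the same way works just as well.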

Related

Optimize Query contains join and SubQuery

I need to run this query, but it takes so long that I get a timeout exception.
Would you please help me decrease the execution time of this query, or show me how to make it simpler?
Here is my Postgres query:
select
    AR1.patient_id,
    CONCAT(AC."firstName", ' ', AC."lastName") as doctor_full_name,
    to_json(AC.expertise::json->0->'id')::text as expertise_id,
    to_json(AC.expertise::json->0->'title')::text as expertise_title,
    AP."phoneNumbers" as mobile,
    AC.account_id as account_id,
    AC.city_id
from tb1 as AR1
LEFT JOIN tb2 as AA on AR1.appointment_id = AA.id
LEFT JOIN tb3 as AC on AC.account_id = AA.appointment_owner_id
LEFT JOIN tb4 as AP on AP.id = AR1.patient_id
where AR1.status = 'canceled'
  and AR1.updated_at >= '2022-12-30 00:00:00'
  and AR1.updated_at < '2022-12-30 23:59:59'
  and AP."phoneNumbers" <> ''
  and patient_id not in (
      select AR2.patient_id
      from tb1 as AR2
      LEFT JOIN tb2 as AA2 on AR2.appointment_id = AA2.id
      LEFT JOIN tb3 as AC2 on AC2.account_id = AA2.appointment_owner_id
      where AR2.status = 'submited'
        and AR2.created_at >= '2022-12-30 00:00:00'
        and (to_json(AC2.expertise::json->0->'id')::text = to_json(AC.expertise::json->0->'id')::text
             or AC2.account_id = AC.account_id)
  )
Try creating an index on tb1 to handle the WHERE clauses you use in your outer query.
CREATE INDEX status_updated ON tb1
(status, updated_at, patient_id);
And, create this index to handle your inner query.
CREATE INDEX status_created ON tb1
(status, created_at, patient_id);
These work because the query planner can randomly access these BTREE indexes to find the first eligible row by status and date, and then sequentially scan the index until the last eligible row.
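To confirm the planner actually uses the new index, you can wrap the filtered part of the query in EXPLAIN; a quick sketch (the exact plan output will vary):

EXPLAIN (ANALYZE, BUFFERS)
SELECT patient_id
FROM tb1
WHERE status = 'canceled'
  AND updated_at >= '2022-12-30 00:00:00'
  AND updated_at <  '2022-12-31 00:00:00';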
The comments about avoiding f(column) expressions in WHERE and ON conditions are correct. You want those conditions to be sargable whenever possible.
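On the same theme, the NOT IN (...) subquery can be rewritten as NOT EXISTS, which Postgres generally plans as an anti-join and which also sidesteps NOT IN's surprising behavior when the subquery returns NULLs. A sketch of just that clause, keeping the question's own expressions and table names (untested against your schema):

and not exists (
    select 1
    from tb1 as AR2
    LEFT JOIN tb2 as AA2 on AR2.appointment_id = AA2.id
    LEFT JOIN tb3 as AC2 on AC2.account_id = AA2.appointment_owner_id
    where AR2.patient_id = AR1.patient_id
      and AR2.status = 'submited'
      and AR2.created_at >= '2022-12-30 00:00:00'
      and (to_json(AC2.expertise::json->0->'id')::text = to_json(AC.expertise::json->0->'id')::text
           or AC2.account_id = AC.account_id)
)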
And, by the way, you want this for a timestamp range:
and AR1.updated_at >= '2022-12-30 00:00:00'
and AR1.updated_at < '2022-12-31 00:00:00'
You have
and AR1.updated_at >= '2022-12-30 00:00:00'
and AR1.updated_at < '2022-12-30 23:59:59'
which excludes, rather than includes, rows from the last moment of 2022-12-30. It can be very hard to figure out what went wrong if you improperly exclude a row with a date-range off-by-one error. (Ask me how I know this sometime. :-)

how to select count in select condition mysql

So I want to count the buyers who made transactions in January where the sum of their transactions is >= 600000. Can you tell me the exact syntax? Here's my syntax, which is wrong:
select count in (select users_id, total_price_star_member from order_star_member where createdAt >= '2020-01-01' and createdAt < '2020-02-01' group by users_id having sum(total_price_star_member) >= 600000);
The point is, I want to know how many buyers made transactions in January totalling >= 600000.
Here is the right syntax for MySQL:
select count(*)
from (select users_id, sum(total_price_star_member)
      from order_star_member
      where createdAt >= '2020-01-01' and createdAt < '2020-02-01'
      group by users_id
      having sum(total_price_star_member) >= 600000) inner_query;
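On MySQL 8.0 or later, the same logic reads a bit more clearly as a CTE; a sketch using the same table and conditions:

with january_totals as (
    select users_id, sum(total_price_star_member) as total
    from order_star_member
    where createdAt >= '2020-01-01' and createdAt < '2020-02-01'
    group by users_id
    having sum(total_price_star_member) >= 600000
)
select count(*) from january_totals;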

SELECT DISTINCT ON optimization in Postgresql 10

I want to select the latest row per track based on time, so:
SELECT DISTINCT ON (track) *
FROM eco.tracks
WHERE id > (SELECT id
            FROM eco.tracks
            WHERE time_track < ((SELECT time_track FROM eco.tracks ORDER BY id DESC LIMIT 1) - INTERVAL '300 seconds')
            ORDER BY id DESC LIMIT 1)
ORDER BY track, time_track DESC;
It takes 20 s; that's too slow.
If I replace the subquery on id with its literal value, it takes 2 ms:
SELECT DISTINCT ON (track) *
FROM eco.tracks WHERE id > 48000000
ORDER BY track, time_track DESC;
That query
SELECT id FROM eco.tracks WHERE time_track < ((SELECT time_track FROM eco.tracks ORDER BY id DESC LIMIT 1) - INTERVAL '300 seconds') ORDER BY id DESC LIMIT 1
run on its own takes only 2 ms.
What's wrong?!

Count Distinct with Answer side by side instead of underneath

Here is my query:
SELECT substring(date,1,10), count(distinct id),
CASE WHEN name IS NOT NULL THEN 1 ELSE 0 END
FROM table
WHERE (date >= '2015-09-01')
GROUP BY substring(date,1,10), CASE WHEN name IS NOT NULL THEN 1 ELSE 0 END
ORDER BY substring(date,1,10)
This is my result:
substring count case
2015-09-01 20472 0
2015-09-01 7 1
2015-09-02 20465 0
2015-09-02 470 1
What I want it to look like is this:
substring count count
2015-09-01 20472 7
2015-09-02 20465 470
Thank you!
With PostgreSQL 9.4 or newer, we can filter an aggregate directly with the FILTER clause:
SELECT substring(date,1,10),
count(distinct id),
count(*) FILTER (WHERE name IS NOT NULL)
FROM table
WHERE (date >= '2015-09-01')
GROUP BY 1
ORDER BY 1
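Since the original query uses count(distinct id), it is worth knowing that FILTER also composes with DISTINCT aggregates, in case the same id can repeat within a day; a sketch of that variant:

SELECT substring(date,1,10),
       count(distinct id),
       count(distinct id) FILTER (WHERE name IS NOT NULL)
FROM table
WHERE (date >= '2015-09-01')
GROUP BY 1
ORDER BY 1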
Use CASE inside count to get a column per condition (name IS NOT NULL), like this:
SELECT substring(date,1,10)
     , count(distinct CASE WHEN name IS NOT NULL THEN id ELSE null END) AS count1
     , count(distinct CASE WHEN name IS NOT NULL THEN null ELSE id END) AS count2
FROM table
WHERE (date >= '2015-09-01')
GROUP BY substring(date,1,10)
ORDER BY substring(date,1,10)
You can also use a subquery to create the columns:
SELECT dt, Count(id1) count1, Count(distinct id2) count2
FROM (
  SELECT distinct substring(date,1,10) AS dt
       , CASE WHEN name IS NOT NULL THEN id ELSE null END AS id1
       , CASE WHEN name IS NOT NULL THEN null ELSE id END AS id2
  FROM table
  WHERE (date >= '2015-09-01')) d
GROUP BY dt
ORDER BY dt
Running the first query against my event table gave me an answer like this, which is exactly what I wanted, so thank you so much:
substring   count1  count2
2015-09-01       7   20472
2015-09-02     470   20465

Need to retrieve n-rows that are not at the beginning or in the end of the selected list

I have written this SQL statement:
select * from (
select count(*) as NumberofSignals,signals.transmitter_account,signals.class,signals.type,signals.signal_mode,
signals.area_id,signals.sector_id,signals.region_info_id,signals.zone_info_id,signals.user_id,signals.device_id,
signals.panel_name,signals.panel_id,signals.sector_name,signals.region_code,signals.area_name,signals.zone_code,
signals.description,signals.transmitter_name,signals.transmitter_id,signals.color,'event' as Event,get_name(signals.id,'event') as event_value,
'packetnumber' as packetnumber,get_name(signals.id,'packetnumber') as packetnumber_value,wm_concat(distinct get_name(signals.id,'repeater')) as repeater,
round(avg(get_name(signals.id,'signallevel'))) as avg_signallevel,min(to_char(signals.signal_forming_time, 'yyyy/mm/dd hh24:mi:ss')) as formingtime,
get_name(signals.id,'address') as address,get_name(signals.id,'username') as username,get_name(signals.id,'chaneltype') as channeltype,
get_name(signals.id,'code') as code,get_name(signals.id,'account') as account
from signals,signal_custom_fields where signals.id = signal_custom_fields.signal_id and
signals.id in (select id from (select id,rownum num from((select signals.id
from signals,signal_custom_fields where signal_custom_fields.field_name = 'event'
and signal_custom_fields.field_value is not null and signals.id = signal_custom_fields.signal_id
and signals.signal_forming_time >= to_date('2011/5/10 14:34:44', 'yyyy/mm/dd hh24:mi:ss')
AND signals.signal_forming_time <= to_date('2011/5/10 15:34:44', 'yyyy/mm/dd hh24:mi:ss'))
intersect (select distinct signals.id from signals,signal_custom_fields
where signal_custom_fields.field_name = 'packetnumber' and signal_custom_fields.field_value is not null
and signals.id = signal_custom_fields.signal_id
and signals.signal_forming_time >= to_date('2011/5/10 14:34:44', 'yyyy/mm/dd hh24:mi:ss')
AND signals.signal_forming_time <= to_date('2011/5/10 15:34:44', 'yyyy/mm/dd hh24:mi:ss')))
order by id desc)) group by 'event',signals.transmitter_account,signals.class,
signals.type,signals.signal_mode,signals.area_id,signals.sector_id,signals.region_info_id,signals.zone_info_id,
signals.user_id,signals.device_id,signals.panel_name,signals.panel_id,signals.sector_name,signals.region_code,
signals.area_name,signals.zone_code,signals.description,signals.transmitter_name,signals.transmitter_id,
signals.color, get_name(signals.id,'event'), 'packetnumber',get_name(signals.id,'username'),
get_name(signals.id,'chaneltype'),
get_name(signals.id,'code'),
get_name(signals.id,'account'), get_name(signals.id,'packetnumber'),get_name(signals.id,'address'),
TO_CHAR(signals.signal_forming_time ,'dd/mm/yyyy hh24'),
TRUNC(to_number(to_char(signals.signal_forming_time ,'mi'))/(30))
order by event) where rownum < 300
Here I get the first 300 rows, but how do I need to rewrite this statement to retrieve the second 300 rows?
Your query doesn't have rownum listed in the first nested table. Add a rownum column in the first nested table; then you can use BETWEEN in the WHERE clause at the top level:
--create a demo table
DROP TABLE paging_test;
CREATE TABLE paging_test AS
(SELECT rownum x FROM user_tables
);
--count how many records exist (in my case there is 821)
SELECT COUNT(*)
FROM paging_test;
--get the first 300 rows
SELECT *
FROM
(SELECT rownum rn, x FROM paging_test ORDER BY x
) pt
WHERE pt.rn BETWEEN 1 AND 300 ;
--get the next 300 rows
SELECT *
FROM
(SELECT rownum rn, x FROM paging_test ORDER BY x
) pt
WHERE pt.rn BETWEEN 301 AND 600 ;
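One caveat worth adding (my note, not part of the original answer): Oracle assigns rownum before ORDER BY takes effect within the same query block, so SELECT rownum rn, x FROM paging_test ORDER BY x numbers the rows in whatever order they happen to be read, not in x order. For pages that reliably follow the sort, assign rownum around an already-ordered subquery; a sketch:

--number the rows only after sorting, so pages follow the ORDER BY
SELECT *
FROM
  (SELECT rownum rn, t.x
   FROM (SELECT x FROM paging_test ORDER BY x) t
  ) pt
WHERE pt.rn BETWEEN 301 AND 600 ;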
You might also be interested in this reference:
http://asktom.oracle.com/pls/asktom/f?p=100:11:0::::P11_QUESTION_ID:948366252775