ERROR: UNION types integer and character varying cannot be matched [SQL State=42804] - amazon-redshift

I am doing union of two views but getting above error. Please help me to resolve this issue.
some values are hardcoded there and need to extract the same in final view.
I am using below query in redshift-
query:
DROP VIEW IF EXISTS jgbl.vw_jgvcc_crm_case_activity CASCADE;
CREATE OR REPLACE VIEW jgbl.vw_jgvcc_crm_case_activity
AS
SELECT case_number as "CASE Number",
parent_case_number as "Parent Case Number",
date_opened as "Date Opened",
number_of_questions as "Number of Questions",
case_record_type as "CASE Record Type",
NULL as "Sub Type",
category as "Category",
NULL as "Sub Category",
country as Country,
customer_type as "Customer Type",
primary_account_subtype as "Primary Account Subtype",
source as Source,
call_center_location as "Call Center Location",
region as Region,
customer_region as "Customer Region",
NULL as "AE",
NULL as "PQC",
'ASPAC' as datasource
FROM JG_ASPAC.vw_jgvcc_aspac_crm_activity
UNION ALL
SELECT case_num as "CASE Number",
NULL as "Parent Case Number",
open_dt as "Date Opened",
cast(num_of_ques as integer) as "Number of Questions",
rec_type_nm as "CASE Record Type",
rec_sub_type as "Sub Type",
cat_desc as "Category",
sctgy_desc as "Sub Category",
custm_latam_ctry_nm as Country,
acct_type as "Customer Type",
NULL as "Primary Account Subtype",
src_in as Source,
case when alph_fl='Y' then 'Alphanumeric' else 'LATAM Center' end as "Call Center Location",
'LATAM' as Region,
NULL as "Customer Region",
Case when rec_type_nm='AE/PQC' and rec_sub_type in ('ADVERSE EVENT','AE + PQC') then 'Y' else 'N' End as "AE",
Case when rec_type_nm='AE/PQC' and rec_sub_type in ('AE + PQC', 'PRODUCT QUALITY COMPLAINT') then 'Y' else 'N' End as "PQC",
'LATAM' as datasource
FROM JG_LTM.vw_jgvcc_latam_crm_activity as v2
--LEFT JOIN jgbl.dim_iso_reg_cntry as t1 on t1.region = v2.ctry_iso2_cd
with no schema binding;
error:
An error occurred when executing the SQL command:
select * from jgbl.vw_jgvcc_crm_case_activity limit 10
ERROR: Invalid digit, Value '.', Pos 1, Type: Integer
Detail:
error: Invalid digit, Value '.', Pos 1, Type: Integer
code: 1207
context: 1.0
query: 87576
location: :0
process: query1_562_87576 [pid=0]
----------------------------------------------- [SQL State=XX000]
1 statement failed.
Execution time: 4.81s

Related

Snowflake MERGE with columns that have spaces in their names

I'm using Snowflake and have a table that has columns with spaces in their names. The code to create the table is:
create or replace MYTABLE (
"string 1" VARCHAR,
"number 1" NUMBER(38,0)
);
I want to now MERGE some data into the table using the following code:
merge into mytable using (
select * from
(
values('a', 4),('b', 5)
)
) as T ("string 1", "number 1") on FALSE
when not matched then
insert
("string 1", "number 1")
values
('string 1', 'number 1');
When I run the above, I get the following error:
Numeric value 'number 1' is not recognized
I've tried all manners of wrapping the column names in single quotes, double quotes, and $$ all to no avail. Suggestions?
Column could be referenced as T."""string 1""":
merge into mytable
using (
select * from
(
values('a', 4),('b', 5)
)
) as T ("string 1", "number 1") on FALSE
when not matched then
insert
("string 1", "number 1")
values
(T."""string 1""", T."""number 1""");
Output:
Rationale:
select *
from (values('a', 4),('b', 5)
) as T ("string 1", "number 1");
DESCRIBE RESULT LAST_QUERY_ID();
-- name
-- "string 1"
-- "number 1"
" is part of the column name "string 1" therefore when referencing """string 1"""

Postgresql RIGHT function giving an error

SELECT DISTINCT
purchase_order_master.po1_num AS "PO #",
RIGHT(purchase_order_history.poh1_gl, 2) AS "Dept",
purchase_order_history.poh1_gl,
purchase_order_master.po1_podate AS "PO Date",
purchase_order_history.poh1_gl AS "GL Account #",
purchase_order_master.po1_dept,
purchase_order_master.po1_vname AS "Vendor Name",
purchase_order_master.po1_vnum AS "Vendor Number"
FROM
purchase_order_master
INNER JOIN
purchase_order_line_items
ON
purchase_order_master.po1_num = purchase_order_line_items.po2_num AND
purchase_order_master.po1_arid = purchase_order_line_items.po2_arid
INNER JOIN
purchase_order_history
ON
purchase_order_line_items.po2_num = purchase_order_history.poh1_ponum AND
purchase_order_line_items.po2_arid = purchase_order_history.poh1_arid
ORDER BY
"PO Date" DESC;
Gives error:
ERROR: function right(numeric, integer) does not exist
LINE 3: RIGHT(purchase_order_history.poh1_gl, 2) AS Dept,
^
HINT: No function matches the given name and argument types. You might need to add explicit type >casts.
Do I need to change the column values before I try?

Merge queries matching on dates and leave null if no match found

I have got two tables below (examples):
Table SFID
Sales Force ID
Type
Name
Assistant
From
To
123
ABC
Store A
Ben
01/04/2020
30/04/2020
123
ABC
Store A
Jen
01/05/2020
31/05/2020
123
ABC
Store A
Ben
01/06/2020
21/06/2020
126
DEF
Store B
Tim
01/04/2020
30/04/2020
126
DEF
Store B
Tim
01/04/2020
null
and
Table Activity
Transaction ID
Date
Sales Force ID
1
03/05/2020
123
2
03/06/2020
200
3
01/01/2021
123
4
02/01/2021
126
I want my end result to be
Transaction ID
Date
Sales Force ID
Type
Name
Assistant
1
03/05/2020
123
ABC
Store A
Jen
2
03/06/2020
200
null
null
null
3
01/01/2021
123
null
null
null
4
02/01/2021
126
DEF
Store B
Tim
To do this, the best solution was the one posted in here with some modifications (allow both To and From to be null). However, only the row on transaction ID 2 disappears because that Sales Force had already had assistant entries (they get wiped out on the filtered row step). I also tried the solution presented in here but it takes ages to load.
I would like to know if there was a way to guarantee all transactions appear without having to introduce lines to table SFID for periods they don't have assistants and without making the query a really slow one.
This is my code:
let
Source = Source,
#"Merged Queries" = Table.NestedJoin(Source,{"Sales Force ID"},SFID,{"SFID"},"SFID",JoinKind.LeftOuter),
#"Expanded SFID" = Table.ExpandTableColumn(#"Merged Queries", "SFID", {"Type", "Name", "Assistant", "From", "To"}, {"Type", "Name", "Assistant", "From", "To"}),
#"Changed Type" = Table.TransformColumnTypes(#"SFID",{{"Date", type date}, {"From", type date}, {"To", type date}}),
FilteredRows = Table.SelectRows(#"Changed Type", each ([Date] >= [From] and [Date] <= [To]) or ([Date] >= [From] and [To] = null)or ([From] = null and [To] = null)),
#"Removed Columns" = Table.RemoveColumns(FilteredRows,{"From", "To"})
in
#"Removed Columns"
So after almost 2 weeks with no response and an unjustified downgrade, I found a solution!
I created a query that would basically produce this table with the above mentioned code.
Transaction ID
Date
Assistant
1
01/01/2021
Jen
4
02/01/2021
Tim
let
Source = Source,
#"Merged Queries" = Table.NestedJoin(Source,{"Sales Force ID"},SFID,{"SFID"},"SFID",JoinKind.LeftOuter),
#"Expanded SFID" = Table.ExpandTableColumn(#"Merged Queries", "SFID", {"Type", "Name", "Assistant", "From", "To"}, {"Type", "Name", "Assistant", "From", "To"}),
#"Changed Type" = Table.TransformColumnTypes(#"Expanded SFID",{{"Date", type date}, {"From", type date}, {"To", type date}}),
FilteredRows = Table.SelectRows(#"Changed Type", each ([Date] >= [From] and [Date] <= [To]) or ([Date] >= [From] and [To] = null)),
#"Removed Columns" = Table.RemoveColumns(FilteredRows,{"From", "To"})
in
#"Removed Columns"
And left-joined it on my initial version of Table Activity
I kept the information of type Type and Name in a separate Query (as they don't change) and then again left-joined it on the Table Activity.
Final query looks like this:
let
Source = Source,
#"Merged Queries1" = Table.NestedJoin(Source,{"Sales Force Code"},Info,{"SFID"},"Info",JoinKind.LeftOuter),
#"Expanded Info" = Table.ExpandTableColumn(#"Merged Queries1", "Info", {"Type", "Name"}, {"Type", "Name"}),
#"Merged Queries" = Table.NestedJoin(#"Expanded Info",{"ID"},IDvsAssistant,{"ID"},"IDvsAssistant",JoinKind.LeftOuter),
#"Expanded IDvsAssistant" = Table.ExpandTableColumn(#"Merged Queries", "IDvsAssistant", {"Assistant"}, {"Assistant"})
in
#"Expanded IDvsAssistant"

postgresql/pgAdmin - accepting start_date & end_date arguments as input on query run

This is a postgresql db I'm working with using pgAdmin.
Forgive me if this is somewhat common knowledge, I'm new to postgresql in particular... and I didn't find any direct answers through prior searching.
I'm wondering if there's a simple way to implement start_time/end_time arguments as inputs when the query runs using pgadmin and any of it's built in features.
The data type I'm working with here is "timestamp with timezone".
Looking for some direction on the best way to implement this.
I considered declaring start_time and end_time as variables, then using WHERE to filter based on those, but without 3rd party/application level solutions, is there a way to prompt for input when the query runs inside of pgadmin?
I appreciate any suggestions- here's my attempt at getting something working, but it errors out: query has no destination for result data.
do $$
DECLARE
start_date timestamp := '2020-10-1';
end_date timestamp := '2020-10-5';
begin
select distinct on (account.id, menu.name, kitchen_item.name)
account.id as "Account ID",
account.firstname as "Seller First Name",
account.lastname as "Seller Last Name",
account.email as "Seller Email",
account.phone as "Seller Phone",
address.address as "Seller Address (Street)",
address.address_2 as "Seller Address 2",
account.zip_code as "Seller Zip",
address.neighborhood as "Seller Neighborhood",
menu.name as "Name of active menu",
kitchen_item.name as "Dishes",
kitchen_item.price as "Price",
kitchen_item.daily_max_orders as "Quantity",
menu.pickup_start_time as "Start time",
menu.pickup_end_time as "End time",
menu.repeat_mon as "Monday",
menu.repeat_tues as "Tuesday",
menu.repeat_wed as "Wednesday",
menu.repeat_thurs as "Thursday",
menu.repeat_fri as "Friday",
menu.repeat_sat as "Saturday",
menu.repeat_sun as "Sunday",
order_item.created as "Date of last sale"
from account
left join store on account.id = store.account_id
left join menu on store.id = menu.store_id
left join menu_item on menu.id = menu_item.menu_id
left join kitchen_item on (menu_item.kitchen_item_id = kitchen_item.id and store.id = kitchen_item.store_id)
left join orders on (orders.store_id = store.id)
left join order_item on (order_item.order_id = orders.id)
join store_address on store.id = store_address.store_id
join address on store_address.address_id = address.id
where orders.placed BETWEEN start_date AND end_date
order by account.id asc, menu.name, kitchen_item.name asc, order_item.created desc;
end $$;
DO creates an anonymous function that returns no data.
You can use WITH:
WITH input (start_date, end_date) AS
(SELECT '2020-10-01'::timestamp AS start_date,
'2020-10-05'::timestamp AS end_date)
SELECT ...
FROM...
JOIN input
WHERE orders.placed BETWEEN input.start_date AND input.end_date

Recursive PostgreSQL dynamic average

I have PostgreSQL dynamic averaging problem that I cannot solve:
I have data for individuals with start and finish dates for employment are as follows:
"parentid" "Name" "startdate" "enddate"
"01e7de72-843d-4aa5-b3ae-2e2887d1b342" "Isabelle Smith" "2011-05-23" "2016-04-16"
"027ee658-8c4d-4910-b93e-62c0900f2147" "Emelie Blogs" "2012-09-17" "2016-03-16"
"02cbb478-adf3-4a8b-a5aa-ae9f03943ce4" "Joshauh Jow" "2015-04-04" NULL
"0328f382-2845-4623-a940-ab68af5d11cc" "VICTORIA Fred" "2015-05-11" NULL
"03823a20-51bc-4ae5-ab73-79056355ea36" "Elin Tree" "2014-03-24" NULL
"03878ef8-1c3a-4310-b3d5-7b8d18634707" "Michaela Apple" "2011-07-08" NULL
"03c36926-395b-4e3c-9f77-c6214ce763a2" "Immad Cheese" "2012-05-15" NULL
"0436824c-29a6-4140-ba4a-d0f56facd8fc" "Burak Teal" "2009-06-22" NULL
"04d7a07a-0ad4-4091-98d2-a7ff35798b6f" "Roberto Purple" "2015-03-30" "2016-03-01"
"04f32c2f-887f-4e03-be67-bc023aa3a7c2" "Iftikar Orange" "2012-06-27" NULL
"055b690a-153a-49c8-8ac0-112681f79551" "Josef Red" "2014-02-21" "2016-04-13"
"055be2f6-baec-4626-b876-7ff16dc95464" "Harry Green" "2016-03-27" NULL
"05a570b0-ec76-49d9-a742-5bf08f215fec" "Sofie Blue" "2010-06-15" "2016-05-16"
"05c92e7a-efde-44f0-a57c-298cbe129259" "BANARAS Yellow" "2015-06-22" NULL
"05fe0113-9bda-407b-bd72-5bf2a9deae15" "Bengt Drury" "2015-03-30" "2016-06-16"
"063c454f-2e97-48a8-96fc-9e84d29f5d96" "Son That" "2016-03-27" NULL
"07b76b47-8086-4df6-a3da-50dcfcd2de89" "Sam This" "2015-03-21" "2016-05-24"
"082771ee-2f02-4623-abc2-696447f9f791" "Felix This" "2014-11-24" "2016-05-31"
"08e39639-176b-4f44-ae75-1025219730c6" "ROBIN That" "2015-10-26" NULL
"09ab8491-9d9a-4091-b448-8315e3b5d3f0" "Kaziah This" "2016-05-14" NULL
"0a74dd0c-e1ee-4b32-a893-c486f7402363" "Luke Him" "2015-12-16" NULL
"0b098799-7d92-47df-9778-b48edf948af9" "MARIA Her" "2015-05-11" NULL
"0b480b25-8d2b-441b-8039-48b4e9188769" "That Adebayor" "2015-04-09" NULL
"0b86b44e-f3e0-4ddf-8e72-e0d7f9470279" "This Ă…lund" "2012-02-07" "2016-06-05"
"0c3e13d0-f602-41da-b10c-f70072605e63" "First Ekmark" "2013-02-08" NULL
"0d2367f4-a6b4-4381-b7dc-3e0c9063285f" "Anna Check" "2015-03-13" NULL
"0e31731b-0384-43ef-adeb-503ad5a137f9" "Assign Test1" "2015-05-22" NULL
"0e3f8b57-cba2-4240-abd4-d157832ef421" "Ramises Person "2016-10-11" NULL
"0f6af1c8-7672-4f0b-912c-91675cf52845" "Lars Surname" "2016-03-28" NULL
For this report a user would input two dates startOfPeriod and endOfPeriod
I need an SQL statement that for those dynamic dates would give me a week by week output on the number of people who were employed for each week during that period.
(A week would constitute each 7 days from the startOfPeriod date)
Is this possible in PostgreSQL and how would I do it?
Use the type daterange and the overlap operator &&.
The first query in WITH defines the period, the second generates series of weeks:
with period(start_of_period, end_of_period) as (
values ('2012-01-20'::date, '2012-02-15'::date)
),
weeks as (
select daterange(d::date, d::date+ 7) a_week
from period,
lateral generate_series (start_of_period, end_of_period, '7d'::interval) d
)
select lower(a_week) start_of_week, count(*)
from weeks
left join a_table
on daterange(startdate, enddate) && a_week
group by 1
order by 1;
start_of_week | count
---------------+-------
2012-01-20 | 4
2012-01-27 | 4
2012-02-03 | 5
2012-02-10 | 5
(4 rows)
Idea is generate series of week between start and end date, select starting and ending week from employment, then for each week count.
I've not tested it for bound cases but something OP coud starts with
WITH startDate(d) as (VALUES ('2010-01-01'::DATE))
, endDate(d) as (VALUES ('2016-06-06'::DATE))
, weeks as (select to_char(startDate.d+s.a,'YYYY-WW') as w
from startDate,endDate,generate_series(0,(endDate.d - startDate.d),7) as s(a))
, emp as (select name,to_char(sd,'YYYY-WW') as sw
, to_char(coalesce(ed,endDate.d),'YYYY-WW') as ew
from startDate,endDate,public.so where sd > startDate.d )
SELECT
w.w
,(select ARRAY_AGG(name) from emp Where w.w BETWEEN sw AND ew ) as emps
,(select count(name) from emp Where w.w BETWEEN sw AND ew ) as empCount
FROM weeks w
Test setup
create table public.so (
name TEXT
,sd DATE
,ed DATE
);
INSERT INTO public.so (name,sd,ed) VALUES
('a','2011-05-23','2016-04-16')
,('b','2012-09-17','2016-03-16')
,('c','2009-12-12',null)
,('d','2015-03-30','2016-03-01')
,('e','2012-06-27',null)
,('f','2014-02-21','2016-04-13')
,('g','2016-03-27',null)
,('h','2010-06-15','2016-05-16')
;