I have populate a table using the copy from command which in turn will create record in summary table. While after the copy command successfully run, I can not see any record in the summary table. Anyone can shed some light on me? Pls find the table as well as the store procedure below:-
CREATE TABLE apache_log (
log_name character varying(255),
line integer,
client_address character varying(255),
rfc1413 character varying(32),
user_name character varying(32),
local_time timestamp with time zone,
log_date date,
log_hour smallint,
tenminute_bucket smallint,
fiveminute_bucket smallint,
method character varying(10),
url character varying(8192),
protocol character varying(10),
status_code smallint,
bytes_sent integer,
referer character varying(8192),
agent character varying(8192),
canon_name character varying(512)
);
CREATE INDEX apache_log_local_time ON apache_log USING btree (local_time);
CREATE INDEX apache_log_client_address ON apache_log USING btree (client_address);
CREATE INDEX apache_log_user_name ON apache_log USING btree (user_name);
CREATE INDEX apache_log_canon_name ON apache_log USING btree (canon_name);
CREATE INDEX apache_log_url ON apache_log USING btree (url);
CREATE INDEX apache_log_method ON apache_log USING btree (method);
CREATE INDEX apache_log_status_code ON apache_log USING btree (status_code);
CREATE UNIQUE INDEX apache_log_name_line ON apache_log (log_name, line);
CREATE TABLE tenminute_summary (
log_date date,
log_hour smallint,
bucket smallint,
hit integer,
bytes_sent bigint,
status_code smallint
);
CREATE INDEX tenminute_summary_log_date_log_hour_bucket ON tenminute_summary (log_date, log_hour, bucket);
CREATE UNIQUE INDEX tenminute_summary_log_date_log_hour_bucket_status_code ON tenminute_summary (log_date, log_hour, bucket, status_code);
CREATE TABLE fiveminute_summary (
log_date date,
log_hour smallint,
bucket smallint,
hit integer,
bytes_sent bigint,
status_code smallint
);
CREATE INDEX fiveminute_summary_log_date_log_hour_bucket ON fiveminute_summary (log_date, log_hour, bucket);
CREATE UNIQUE INDEX fiveminute_summary_log_date_log_hour_bucket_status_code ON fiveminute_summary (log_date, log_hour, bucket, status_code);
CREATE OR REPLACE FUNCTION update_history(history_log_date date, history_log_hour smallint, history_status_code smallint, history_fiveminute_bucket smallint, history_tenminute_bucket smallint, history_fiveminute_bytes_sent bigint, history_fiveminute_hit integer, history_fiveminute_bytes_sent bigint, history_fiveminute_hit integer) RETURNS INTEGER AS
$update_history$
BEGIN
IF ( history_fiveminute_bucket IS NOT NULL) THEN
<<fiveminute_update>>
LOOP
UPDATE fiveminute_summary
SET bytes_sent = bytes_sent + history_fiveminute_bytes_sent,
hit = hit + history_fiveminute_hit
WHERE log_date = history_log_date AND
log_hour = history_log_hour AND
bucket = history_fiveminute_bucket AND
status_code = history_status_code;
EXIT fiveminute_update WHEN found;
BEGIN
INSERT INTO fiveminute_summary (
log_date,
log_hour,
bucket,
status_code,
bytes_sent,
hit)
VALUES (
history_log_date,
history_log_hour,
history_fiveminute_bucket,
history_status_code,
history_fiveminute_bytes_sent,
history_fiveminute_hit);
EXIT fiveminute_update;
EXCEPTION
WHEN UNIQUE_VIOLATION THEN
-- do nothing
END;
END LOOP fiveminute_update;
END IF;
IF ( history_tenminute_bucket IS NOT NULL) THEN
<<tenminute_update>>
LOOP
UPDATE tenminute_summary
SET bytes_sent = bytes_sent + history_tenminute_bytes_sent,
hit = hit + history_tenminute_hit
WHERE log_date = history_log_date AND
log_hour = history_log_hour AND
bucket = history_tenminute_bucket AND
status_code = history_status_code;
EXIT tenminute_update WHEN found;
BEGIN
INSERT INTO tenminute_summary (
log_date,
log_hour,
bucket,
status_code,
bytes_sent,
hit)
VALUES (
history_log_date,
history_log_hour,
history_tenminute_bucket,
history_status_code,
history_tenminute_bytes_sent,
history_tenminute_hit);
EXIT tenminute_update;
EXCEPTION
WHEN UNIQUE_VIOLATION THEN
-- do nothing
END;
END LOOP tenminute_update;
END IF;
RETURN 0;
END;
$update_history$
LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION update_apache_log() RETURNS TRIGGER AS $update_apache_log$
DECLARE
history_log_date date := null;
history_log_hour smallint := null;
history_status_code smallint := null;
history_fiveminute_bucket smallint := null;
history_tenminute_bucket smallint := null;
history_fiveminute_bytes_sent bigint := null;
history_fiveminute_hit integer := null;
history_tenminute_bytes_sent bigint := null;
history_tenminute_hit integer := null;
future_log_date date := null;
future_log_hour smallint := null;
future_status_code smallint := null;
future_fiveminute_bucket smallint := null;
future_tenminute_bucket smallint := null;
future_fiveminute_bytes_sent bigint := null;
future_fiveminute_hit integer := null;
future_tenminute_bytes_sent bigint := null;
future_tenminute_hit integer := null;
dummy integer := 0;
BEGIN
IF (TG_OP = 'DELETE') THEN
history_log_date := OLD.log_date;
history_log_hour := OLD.log_hour;
history_fiveminute_bucket := OLD.fiveminute_bucket;
history_tenminute_bucket := OLD.tenminute_bucket;
history_status_code := OLD.status_code;
history_fiveminute_bytes_sent := 0 - OLD.bytes_sent;
history_fiveminute_hit := -1;
history_tenminute_bytes_sent := 0 - OLD.bytes_sent;
history_tenminute_hit := -1;
dummy:=update_history(history_log_date, history_log_hour, history_status_code, history_fiveminute_bucket, history_tenminute_bucket, history_fiveminute_bytes_sent, history_fiveminute_hit, history_fiveminute_bytes_sent, history_fiveminute_hit);
RETURN OLD;
ELSIF (TG_OP = 'INSERT') THEN
NEW.log_date := extract(date from NEW.log_date AT TIME ZONE 'GMT+8');
NEW.log_hour := extract(hour from NEW.log_date AT TIME ZONE 'GMT+8');
NEW.fiveminute_bucket := floor(extract(minute from NEW.log_date AT TIME ZONE 'GMT+8') / 5);
NEW.tenminute_bucket := floor(extract(minute from NEW.log_date AT TIME ZONE 'GMT+8') / 10);
future_log_date := NEW.log_date;
future_log_hour := NEW.log_hour;
future_status_code := NEW.status_code;
future_fiveminute_bucket := NEW.fiveminute_bucket;
future_tenminute_bucket := NEW.tenminute_bucket;
future_fiveminute_bytes_sent := NEW.bytes_sent;
future_fiveminute_hit := 1;
future_tenminute_bytes_sent := NEW.bytes_sent;
future_tenminute_hit := 1;
dummy:=update_history(future_log_date, future_log_hour, future_status_code, future_fiveminute_bucket, future_tenminute_bucket, future_fiveminute_bytes_sent, future_fiveminute_hit, future_fiveminute_bytes_sent, future_fiveminute_hit);
RETURN NEW;
ELSIF (TG_OP = 'UPDATE') THEN
IF (NEW.log_date <> OLD.log_date) THEN
NEW.date := extract(date from NEW.log_date AT TIME ZONE 'GMT+8');
NEW.hour := extract(hour from NEW.log_date AT TIME ZONE 'GMT+8');
NEW.fiveminute_bucket := floor(extract(minute from NEW.log_date AT TIME ZONE 'GMT+8') / 5);
NEW.tenminute_bucket := floor(extract(minute from NEW.log_date AT TIME ZONE 'GMT+8') / 10);
history_log_date := OLD.log_date;
history_log_hour := OLD.log_hour;
history_fiveminute_bucket := OLD.fiveminute_bucket;
history_tenminute_bucket := OLD.tenminute_bucket;
history_status_code := OLD.status_code;
IF (OLD.status_code = NEW.status_code) THEN
history_fiveminute_bytes_sent := 0 - OLD.bytes_sent;
history_fiveminute_hit := -1;
history_tenminute_bytes_sent := 0 - OLD.bytes_sent;
history_tenminute_hit := -1;
future_log_date := NEW.log_date;
future_log_hour := NEW.log_hour;
future_status_code := NEW.status_code;
future_fiveminute_bucket := NEW.fiveminute_bucket;
future_tenminute_bucket := NEW.tenminute_bucket;
future_fiveminute_bytes_sent := NEW.bytes_sent;
future_fiveminute_hit := 1;
future_tenminute_bytes_sent := NEW.bytes_sent;
future_tenminute_hit := 1;
dummy:=update_history(future_log_date, future_log_hour, future_status_code, future_fiveminute_bucket, future_tenminute_bucket, future_fiveminute_bytes_sent, future_fiveminute_hit, future_fiveminute_bytes_sent, future_fiveminute_hit);
ELSE
IF (OLD.fiveminute_bucket = NEW.fiveminute_bucket AND OLD.log_date = NEW.log_date AND OLD.log_hour = OLD.log_hour) THEN
history_fiveminute_bytes_sent := NEW.bytes_sent - OLD.bytes_sent;
history_tenminute_bytes_sent := NEW.bytes_sent - OLD.bytes_sent;
history_tenminute_hit := 0;
ELSE
history_fiveminute_bytes_sent := 0 - OLD.bytes_sent;
history_fiveminute_hit := -1;
future_log_date := NEW.log_date;
future_log_hour := NEW.log_hour;
future_status_code := NEW.status_code;
future_fiveminute_bucket := NEW.fiveminute_bucket;
future_fiveminute_bytes_sent := NEW.bytes_sent;
future_fiveminute_hit := 1;
IF (OLD.tenminute_bucket = NEW.tenminute_bucket) THEN
history_tenminute_bytes_sent := NEW.bytes_sent - OLD.bytes_sent;
history_tenminute_hit := 0;
ELSE
history_tenminute_bytes_sent := 0 - OLD.bytes_sent;
history_tenminute_hit := -1;
future_tenminute_bucket := NEW.tenminute_bucket;
future_tenminute_bytes_sent := NEW.bytes_sent;
future_tenminute_hit := 1;
END IF;
dummy:=update_history(future_log_date, future_log_hour, future_status_code, future_fiveminute_bucket, future_tenminute_bucket, future_fiveminute_bytes_sent, future_fiveminute_hit, future_fiveminute_bytes_sent, future_fiveminute_hit);
END IF;
END IF;
ELSE
history_log_date := OLD.log_date;
history_log_hour := OLD.log_hour;
history_status_code := OLD.status_code;
history_fiveminute_bucket := OLD.fiveminute_bucket;
history_tenminute_bucket := OLD.tenminute_bucket;
IF (OLD.status_code <> NEW.status_code) THEN
history_fiveminute_bytes_sent := 0 - OLD.bytes_sent;
history_fiveminute_hit := -1;
history_tenminute_bytes_sent := 0 - OLD.bytes_sent;
history_tenminute_hit := -1;
future_log_date := NEW.log_date;
future_log_hour := NEW.log_hour;
future_status_code := NEW.status_code;
future_fiveminute_bucket := NEW.fiveminute_bucket;
future_tenminute_bucket := NEW.tenminute_bucket;
future_fiveminute_bytes_sent := NEW.bytes_sent;
future_fiveminute_hit := 1;
future_tenminute_bytes_sent := NEW.bytes_sent;
future_tenminute_hit := 1;
dummy:=update_history(future_log_date, future_log_hour, future_status_code, future_fiveminute_bucket, future_tenminute_bucket, future_fiveminute_bytes_sent, future_fiveminute_hit, future_fiveminute_bytes_sent, future_fiveminute_hit);
ELSIF (OLD.bytes_sent <> NEW.bytes_sent) THEN
history_fiveminute_bytes_sent := NEW.bytes_sent - OLD.bytes_sent;
history_tenminute_bytes_sent := NEW.bytes_sent - OLD.bytes_sent;
END IF;
END IF;
dummy:=update_history(history_log_date, history_log_hour, history_status_code, history_fiveminute_bucket, history_tenminute_bucket, history_fiveminute_bytes_sent, history_fiveminute_hit, history_fiveminute_bytes_sent, history_fiveminute_hit);
RETURN NEW;
END IF;
RETURN NULL;
END;
$update_apache_log$ LANGUAGE plpgsql;
CREATE TRIGGER update_apache_log
BEFORE INSERT OR UPDATE OR DELETE ON apache_log
FOR EACH ROW EXECUTE PROCEDURE update_apache_log();
The function update_history uses two parameters with the same name twice:
ERROR: parameter name "history_fiveminute_bytes_sent" used more than once
SQL status:42P13
See:
CREATE OR REPLACE FUNCTION update_history(
history_log_date date,
history_log_hour smallint,
history_status_code smallint,
history_fiveminute_bucket smallint,
history_tenminute_bucket smallint,
history_fiveminute_bytes_sent bigint, <=== See errormessage
history_fiveminute_hit integer, <=== And this one as well
history_fiveminute_bytes_sent bigint, <===
history_fiveminute_hit integer <===
) RETURNS INTEGER AS
PostgreSQL 9.0 beta doesn't like this and it doesn't make sense. Older versions might not complain but might have the same problems with execution. Did you check the errorlogs?
And raise in both functions a notice, just to see if the trigger is activated.
RAISE NOTICE 'function X is doing something';
I found a lot of errors while Postgresql 8.4 didn't complaint at all. Anyway, I have given up the store procedure approach and decided to populate the table using sql directly since I am doing batch update onto the table in a very adhoc manner. Also populating the table using sql is much more efficient in terms of time spent on the whole process.
Related
CREATE OR REPLACE PROCEDURE addl_field_pkg.del_symm_dopl224_sp ( i_ORDE_NO varchar(30),
i_ACTN_CODE varchar(30), i_CIRC_NO varchar(30), i_ADDL_FIELD text,
i_ADDL_FIELD_ACTN text, i_ADDL_CONTENT text, o_ERR_CODE INOUT bigint, o_ERR_MSG INOUT text ) AS $body$
DECLARE
ORDER_COMPLETED_EX exception;
w_order_no varchar(20);
BEGIN
if ( current_setting('addl_field_pkg.w_completed')::char = milestone_pkg.CHK_IPLC_ORDER_STATUS_F(i_orde_no) ) then
raise ORDER_COMPLETED_EX;
end if;
select orde_no into STRICT w_order_no
from symm_dopl224
where orde_no = i_orde_no
and actn_code = i_actn_code
and circ_no = i_circ_no
and addl_field = i_addl_field
and addl_field_actn = i_addl_field_actn
and addl_content = i_addl_content;
delete from symm_dopl224
where orde_no = i_orde_no
and actn_code = i_actn_code
and circ_no = i_circ_no
and addl_field = i_addl_field
and addl_field_actn = i_addl_field_actn
and addl_content = i_addl_content;
Exception
when no_data_found then
ROLLBACK;
o_err_code := SQLSTATE;
o_err_msg := 'Update failure - Record does not exist';
when others then
ROLLBACK;
o_err_code := SQLSTATE;
o_err_msg := 'System error - '||sqlerrm;
END;
$body$
LANGUAGE PLPGSQL
;
I have a database with a tree hierarchy. It has some tables, but the main ones are two tables, one of them containing the nodes, with a code field like PK, and another one with the relationships between nodes, with a id field like PK.
Something like that (the relationships):
id(PK) - codefather - codeson - more...
0 - CODE1 - CODE11 -
1 - CODE1 - CODE12 -
2 - CODE11 - CODE112 -
3 - CODE12 - CODE121 -
4 - CODE12 - CODE122 -
---------------------
When I want to see the entire tree I use this function:
CREATE OR REPLACE FUNCTION public.recorrer_principal(
IN nombretabla character varying,
IN codigopadre character varying DEFAULT NULL::character varying,
IN _nivel integer DEFAULT 0,
IN primer_elemento boolean DEFAULT true)
RETURNS TABLE(codigo character varying, naturaleza integer, ud character varying, resumen character varying, preciomed numeric, nivel integer) AS
$BODY$
DECLARE
tablaconceptos character varying := nombretabla || '_Conceptos';
tablarelacion character varying := nombretabla || '_Relacion';
existe boolean;
nombre_codigo character varying;
codpadre character varying := codigopadre;
c tp_concepto%ROWTYPE;
indice integer;
str_null_case character varying;
BEGIN
IF (codigopadre = '') IS NOT FALSE THEN
str_null_case := ' IS NULL';
ELSE
str_null_case := ' = '||quote_literal(codigopadre);
END IF;
--ROOT ELEMENT
IF primer_elemento IS TRUE THEN
EXECUTE FORMAT ('SELECT codhijo FROM %I WHERE codpadre %s',tablarelacion,str_null_case) INTO nombre_codigo;
EXECUTE FORMAT ('SELECT * FROM %I WHERE codigo = %s',tablaconceptos, quote_literal(nombre_codigo)) INTO c;
codigo := c.codigo;
nivel := _nivel;
ud := c.ud;
naturaleza := c.naturaleza;
resumen := c.resumen;
preciomed := c.preciomed;
RETURN NEXT;
codpadre := c.codigo;--if it is the first level I change codpadre to that instead the argument of the function
END IF;
--BEGIN TO ITERATE
FOR nombre_codigo in EXECUTE FORMAT ('SELECT codhijo FROM %I WHERE codpadre = %s', tablarelacion, quote_literal(codpadre))
LOOP
EXECUTE FORMAT ('SELECT * FROM %I WHERE codigo = %s', tablaconceptos, quote_literal(nombre_codigo)) INTO c;
codigo := nombre_codigo;
nivel := _nivel+1;
ud := c.ud;
naturaleza := c.naturaleza;
resumen := c.resumen;
preciomed := c.preciomed;
RETURN NEXT;
EXECUTE FORMAT ('SELECT EXISTS (SELECT * FROM %I WHERE codpadre = %s )', tablarelacion , quote_literal(nombre_codigo)) INTO existe;
--SI QUEDAN MAS HIJOS:
IF existe = TRUE THEN
RETURN QUERY SELECT * FROM recorrer_principal(nombretabla,nombre_codigo,nivel,'false');
END IF;
END LOOP;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100
ROWS 1000;
ALTER FUNCTION public.recorrer_principal(character varying, character varying, integer, boolean)
OWNER TO postgres;
The function basically runs over the table of relationships, creating a table with the sons of each node. After that, with the sons of each son, and so on recursively.
Well, surprisingly for me, in a table with 12587 nodes, 39084 relationships, the function returns 52117 rows in 02:25 minutes. I think that it is too much time for this task, but maybe is a normal performance. (In this case I'll have to change the function or even the strategy, because always I open my program this tree is created, and it is too much time for wait)
Thank you in advance
Converted a Standalone Procedure from Oracle to Postgres but not sure why there is an run error even the code is successfully compiled
Converted the below code from Oracle to Postgres
CREATE OR REPLACE FUNCTION ssp2_pcat.pop_hoa_contracts_for_prod(
)
RETURNS void
LANGUAGE 'plpgsql'
COST 100
VOLATILE
AS $BODY$
DECLARE
C1 CURSOR for
SELECT MARKET_CODE, CONTRACT_COMBO_ID, COUNT(*) FROM
ssp2_pcat.VPF_HOA_CONTRACTS_FOR_PROD A
WHERE start_Date IN
(SELECT MAX(start_date)
FROM VPF_HOA_CONTRACTS_FOR_PROD b
WHERE A.MARKET_CODE = b.MARKET_CODE
AND A.CONTRACT_COMBO_ID = b.CONTRACT_COMBO_ID
AND A.CONTRACT_ID = B.CONTRACT_ID
AND b.start_date <= current_date
AND b.end_date > current_date )
GROUP BY MARKET_CODE, CONTRACT_COMBO_ID
ORDER BY MARKET_CODE, CONTRACT_COMBO_ID;
C2 CURSOR(iMktCode VARCHAR, iCombo integer) for
SELECT MARKET_CODE, CONTRACT_COMBO_ID, CONTRACT_ID
FROM ssp2_pcat.VPF_HOA_CONTRACTS_FOR_PROD A
WHERE start_Date IN
(SELECT MAX(start_date)
FROM ssp2_pcat.VPF_HOA_CONTRACTS_FOR_PROD b
WHERE A.MARKET_CODE = b.MARKET_CODE
AND A.CONTRACT_COMBO_ID = b.CONTRACT_COMBO_ID
AND A.CONTRACT_ID = B.CONTRACT_ID
AND b.start_date <= current_date
AND b.end_date > current_date )
AND MARKET_CODE = iMktCode
AND CONTRACT_COMBO_ID = iCombo
ORDER BY MARKET_CODE, CONTRACT_COMBO_ID, START_DATE;
Contracts VARCHAR(32000);
Contract_Val1 VARCHAR(4000) := NULL;
Contract_Val2 VARCHAR(4000) := NULL;
Contract_Val3 VARCHAR(4000) := NULL;
Contract_Val4 VARCHAR(4000) := NULL;
Contract_Val5 VARCHAR(4000) := NULL;
Contract_Val6 VARCHAR(4000) := NULL;
Contract_Val7 VARCHAR(4000) := NULL;
Contract_Val8 VARCHAR(4000) := NULL;
Num INTEGER;
Cont_Num INTEGER;
l_start TIMESTAMP := clock_timestamp();
l_end TIMESTAMP := clock_timestamp();
Time_Taken VARCHAR(20);
i record;
j record;
BEGIN
l_start := clock_timestamp();
DELETE FROM ssp2_pcat.HOA_CONTRACTS_KH;
FOR i IN C1 LOOP
BEGIN
Num := 0;
Contracts := NULL;
Cont_Num := 1;
FOR j IN C2 (i.MARKET_CODE, i.CONTRACT_COMBO_ID) LOOP
Num := Num + 1;
IF Num = 1 THEN
Contracts := '|' || j.CONTRACT_ID || '|';
ELSE
IF LENGTH(Contracts || j.CONTRACT_ID || '|') > 4000 THEN
PERFORM ssp2_pcat.Assign (Cont_Num, SUBSTRING(Contracts, 1,
LENGTH(Contracts)-1));
Num := 1;
Contracts := '|' || j.CONTRACT_ID || '|';
Cont_Num := Cont_Num + 1;
ELSE
Contracts := Contracts || j.CONTRACT_ID || '|';
END IF;
END IF;
END LOOP;
PERFORM ssp2_pcat.Assign (Cont_Num, Contracts);
IF Cont_Num > 5 THEN
raise notice'%', ('MARKET_CODE: ' || i.MARKET_CODE || ', CONTRACT_COMBO_ID: ' || i.CONTRACT_COMBO_ID || ' has more than 32K in size. These Contracts are left out: ' || Contracts);
END IF;
INSERT INTO HOA_CONTRACTS_KH
(
MARKET_CODE,
CONTRACT_COMBO_ID,
CONTRACT_ID,
CONTRACT_ID2,
CONTRACT_ID3,
CONTRACT_ID4,
CONTRACT_ID5,
LAST_UPDATED
)
VALUES
(
i.MARKET_CODE,
i.CONTRACT_COMBO_ID,
Contract_Val1,
Contract_Val2,
Contract_Val3,
Contract_Val4,
Contract_Val5,
CURRENT_TIMESTAMP::TIMESTAMP(0)
);
Contract_Val1 := NULL;
Contract_Val2 := NULL;
Contract_Val3 := NULL;
Contract_Val4 := NULL;
Contract_Val5 := NULL;
Contract_Val6 := NULL;
Contract_Val7 := NULL;
Contract_Val8 := NULL;
EXCEPTION
WHEN OTHERS THEN
ROLLBACK;
raise notice'%', ('1) POP_HOA_CONTRACTS_FOR_PROD: ' || SQLERRM);
END;
END LOOP;
RAISE NOTICE 'Function excution time Took: %', l_start;
RAISE NOTICE 'Function excution time Took: %',l_end-l_start;
SELECT l_end-l_start INTO Time_Taken;
raise notice'%',('POP_HOA_CONTRACTS_FOR_PROD Took: ' || Time_Taken );
EXCEPTION
WHEN OTHERS THEN
raise notice'%', ('2) POP_HOA_CONTRACTS_FOR_PROD: ' || SQLERRM);
END;
$BODY$;
The code is compiled successfully, but giving a run time error as follows,
NOTICE: 2) POP_HOA_CONTRACTS_FOR_PROD: cannot begin/end transactions in PL/pgSQL
Debugged the whole code and looks like still I'm unable to identify the issue, can any one help me in making me understand more about Postgres as I'm new to this Database. Found out in unit testing that its not calling the assign function mentioned in the code,
Please suggest me how I can achieve this (From Oracle to Postgres).
For these following functions.
ENCRYPTION
PROCEDURE sp_encrypt
(
i_ustring IN VARCHAR2,
o_estring OUT VARCHAR2
)
IS
crypt_part VARCHAR2(10);
BEGIN
o_estring := NULL;
FOR pos in 1..length(i_ustring)
loop
crypt_part := NULL;
crypt_part := utl_raw.bit_xor(HEXTORAW(utl_raw.cast_to_raw(SUBSTR(i_ustring,pos,1))),HEXTORAW(fn_num_to_hex(MOD(pos,256))));
if length(crypt_part)<2
then
crypt_part := '0'||crypt_part;
end if;
o_estring := o_estring||crypt_part;
end loop;
o_estring := utl_raw.cast_to_varchar2(utl_raw.reverse(utl_raw.cast_to_raw(o_estring)));
END sp_encrypt;
DECRYPTION
PROCEDURE sp_decrypt
(
i_estring IN VARCHAR2,
o_ustring OUT VARCHAR2
)
IS
estring VARCHAR2(32767);
crypt_part VARCHAR2(10);
pos NUMBER;
BEGIN
estring := utl_raw.cast_to_varchar2(utl_raw.reverse(utl_raw.cast_to_raw(i_estring)));
pos := 1;
FOR cnt in 1..length(estring)/2
loop
crypt_part := NULL;
crypt_part := CHR(fn_hex_to_num(utl_raw.bit_xor(HEXTORAW(SUBSTR(estring,pos,2)),HEXTORAW(fn_num_to_hex(MOD(NVL(LENGTH(o_ustring),0)+1,256))))));
o_ustring := o_ustring||crypt_part;
pos:=pos+2;
end loop;
END sp_decrypt;
I need to insert a big batch rows to my PostgreSQL(v9.5) table.
Here is the table:
create table TaskLog(
id serial,
taskid integer,
x double precision,
y double precision,
loc character varying(50),
speed smallint,
gpstime timestamp without time zone,
veh character varying(16),
vin character(17),
regdate date,
enabled boolean,
remake character varying(100),
isdel boolean,
alarm integer,
CONSTRAINT pk_delphi_id PRIMARY KEY (id)
)
I use FireDAC(Delphi XE10.1) to insert the rows:
function RandomStr(aLength : Integer) : string;
var
X: Integer;
begin
if aLength <= 0 then exit;
SetLength(Result, aLength);
for X:=1 to aLength do
Result[X] := Chr(Random(26) + 65);
end;
procedure TForm7.Button6Click(Sender: TObject);
var
i: Integer;
Watch: TStopwatch;
begin
Watch := TStopwatch.StartNew;
try
FDQuery1.SQL.Text :=
'insert into TaskLog values(default, :f1, :f2, :f3, :f4, :f5, :f6, ' +
':f7, :f8, :f9, :f10, :f11, :f12, :f13)';
FDQuery1.Params.ArraySize := StrToInt(Edit1.text); //<--- Change the ArraySize
for i := 0 to FDQuery1.Params.ArraySize - 1 do
begin
FDQuery1.Params[0].AsIntegers[i] := Random(9999999);
FDQuery1.Params[1].AsFloats[i] := Random(114) + Random;
FDQuery1.Params[2].AsFloats[i] := Random(90) + Random;
FDQuery1.Params[3].AsStrings[i] := RandomStr(Random(50));
FDQuery1.Params[4].AsSmallInts[i] := Random(1990);
FDQuery1.Params[5].AsDateTimes[i] := IncSecond(IncDay(Now, -(Random(100) + 1)), Random(99999));
FDQuery1.Params[6].AsStrings[i] := RandomStr(Random(16));
FDQuery1.Params[7].AsStrings[i] := RandomStr(Random(17));
FDQuery1.Params[8].AsDates[i] := IncDay(Now, -(Random(365) + 1));
FDQuery1.Params[9].AsBooleans[i] := Odd(Random(200));
FDQuery1.Params[10].AsStrings[i] := RandomStr(Random(100));
FDQuery1.Params[11].AsBooleans[i] := Odd(Random(100));
FDQuery1.Params[12].AsIntegers[i] := Random(100000);
end;
FDQuery1.Execute(FDQuery1.Params.ArraySize);
Watch.Stop;
Memo1.Lines.Add('Should be inserted ' + IntToStr(FDQuery1.Params.ArraySize) + ' lines');
Memo1.Lines.Add('Actually inserted ' + IntToStr(FDQuery1.RowsAffected) + ' lines');
Memo1.Lines.Add('Take ' + Watch.ElapsedMilliseconds.ToString + ' seconds');
except
Memo1.Lines.Add(Exception(ExceptObject).Message);
end;
end;
It work fine when I set the FDQuery1.Params.ArraySize:=1000,
It work failed when I set the FDQuery1.Params.ArraySize:=10000, no record can been inserted.
Is the ArraySize property have a size limit with PostgreSQL?
const batch_part = 2000;
****
PostgreSQL.Query.Params.ArraySize := Total;
****
// отправка пакетами в цикле / sending packets in a loop
if Total > batch_part then begin
j:= Total div batch_part;
for n := 1 to j do begin
PostgreSQL.Query.Execute(n*batch_part, (n-1)*batch_part); // #Batch INSERT operation
LogFile.Write(Format('FDPostgreSQL.Query.Execute(%d,%d)', [n*batch_part, (n-1)*batch_part]));
end;
if (Total mod batch_part) > 0 then begin // остаток / remnant
PostgreSQL.Query.Execute((Total mod batch_part)+(n-1)*batch_part, (n-1)*batch_part); // #Batch INSERT operation
LogFile.Write(Format('FDPostgreSQL.Query.Execute(%d,%d)', [(Total mod batch_part)+(n-1)*batch_part, (n-1)*batch_part]));
end;
end
else begin
PostgreSQL.Query.Execute(Total, 0);
LogFile.Write(Format('FDPostgreSQL.Query.Execute(%d,0)', [Total]));
end;
LogFile:
[04.08.2022 10:23:21.678] Batch INSERT in FDPostgreSQL...
[04.08.2022 10:23:22.079] FDPostgreSQL Execute(2000,0)
[04.08.2022 10:23:22.154] FDPostgreSQL Execute(4000,2000)
[04.08.2022 10:23:22.227] FDPostgreSQL Execute(6000,4000)
[04.08.2022 10:23:22.299] FDPostgreSQL Execute(8000,6000)
[04.08.2022 10:23:22.373] FDPostgreSQL Execute(10000,8000)
[04.08.2022 10:23:22.443] FDPostgreSQL Execute(12000,10000)
[04.08.2022 10:23:22.516] FDPostgreSQL Execute(13014,12000)
[04.08.2022 10:23:22.540] Batch INSERT finished: 13014 records.
Server side SQL:
INSERT INTO propose_sid VALUES(DEFAULT, $1, $2, $3, $4, $5, $6, $7, $8),(DEFAULT, $9, $10, $11, $12, $13, $14, $15, $16),(DEFAULT, $17, $18, $19, $20, $21, $22, $23, $24)... -- 1999 times