CREATE FUNCTION get_or_create_id(scheduleid integer, member_id character varying, user_id integer, role_id integer, _appointment_date timestamp without time zone, active boolean) RETURNS INT AS
$$
WITH get AS (
    SELECT memid FROM member_appointment_details WHERE appointment_date = _appointment_date
), new AS (
    INSERT INTO member_appointment_details (schedule_id, memberid, userid, roleid, appointment_date, active)
    VALUES (scheduleid, member_id, user_id, role_id, _appointment_date, active)
    ON CONFLICT (appointment_date) DO NOTHING
    RETURNING memid
)
SELECT memid FROM get
UNION ALL
SELECT memid FROM new
$$
LANGUAGE sql;
table 1: appointment_details
    id integer NOT NULL GENERATED ALWAYS AS IDENTITY
    appointment_date timestamp without time zone
table 2: appointment_schedule
    id integer NOT NULL GENERATED ALWAYS AS IDENTITY
    time_duration integer
Now I want the conflict on appointment_date from table 1 to take a given duration into account dynamically, e.g. 15 mins, 20 mins, etc.
Can I use a join in this function? And how do I get my exact output?
Currently, if an existing appointment is at 10:33:00 and I give 10:34:00, it is accepted, but I want 10:33:00 + 15 mins = 10:48:00; i.e. it should only accept a time at least the given duration after an existing appointment. One approach is sketched below.
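ON CONFLICT can only match a unique or exclusion constraint, so a duration-aware conflict is usually modelled as an exclusion constraint over a timestamp range rather than ON CONFLICT on a plain column. A minimal sketch, assuming the slot length lives in appointment_schedule.time_duration (in minutes), that the btree_gist extension can be installed, and with an illustrative constraint name no_overlap:

CREATE EXTENSION IF NOT EXISTS btree_gist;  -- lets = and && share one gist constraint

ALTER TABLE member_appointment_details
    ADD COLUMN appointment_range tsrange,
    ADD CONSTRAINT no_overlap
        EXCLUDE USING gist (schedule_id WITH =, appointment_range WITH &&);

-- in the function, a join to appointment_schedule supplies the duration;
-- ON CONFLICT ... DO NOTHING does work against an exclusion constraint
INSERT INTO member_appointment_details
    (schedule_id, memberid, userid, roleid, appointment_date, appointment_range, active)
SELECT scheduleid, member_id, user_id, role_id, _appointment_date,
       tsrange(_appointment_date,
               _appointment_date + s.time_duration * interval '1 minute'),
       active
FROM appointment_schedule s
WHERE s.id = scheduleid
ON CONFLICT ON CONSTRAINT no_overlap DO NOTHING
RETURNING memid;

With this in place, an existing row at 10:33:00 with a 15-minute duration blocks any insert before 10:48:00 for the same schedule.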
Is there a better way of doing this?
Basically, I have a users table, and one of the columns is birth_date (date).
I am supposed to filter by age range, meaning I will get a range like 18-24.
This will be passed to a function in a jsonb parameter, as an array of 2 integers.
So I have done the following
create or replace function my_filter_function(
p_search_parameters jsonb
)
returns TABLE(
user_id bigint,
birth_date date,
age interval,
years double precision
)
security definer
language plpgsql
as
$$
begin
return query
select u.user_id, u.birth_date, age(u.birth_date), date_part('year', age(u.birth_date))
from users u
where u.birth_date is not null
and ( (p_search_parameters#>>'{age,0}') is null or u.birth_date <= (now() - ((p_search_parameters#>>'{age,0}')::integer * interval '1 year'))::date)
and ( (p_search_parameters#>>'{age,1}') is null or u.birth_date >= (now() - ((p_search_parameters#>>'{age,1}')::integer * interval '1 year'))::date)
;
end;
$$;
-- this is just a little helper function to better post and explain the question
This seems to be doing the job, but I was hoping to find other ways of doing this while still taking a jsonb parameter with an array of 2 integers.
Any ideas?
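One idea, as a sketch: build a daterange once from the jsonb bounds and test containment with <@. A NULL bound leaves the range unbounded on that side, which replaces the explicit "is null or" checks. This assumes the jsonb shape {"age": [18, 24]} from the question; the _v2 name is illustrative:

create or replace function my_filter_function_v2(
p_search_parameters jsonb
)
returns TABLE(
user_id bigint,
birth_date date,
age interval,
years double precision
)
security definer
language sql stable
as
$$
select u.user_id, u.birth_date, age(u.birth_date), date_part('year', age(u.birth_date))
from users u
where u.birth_date is not null
-- the lower bound of the range comes from the upper age, and vice versa
and u.birth_date <@ daterange(
(now() - ((p_search_parameters#>>'{age,1}')::int * interval '1 year'))::date,
(now() - ((p_search_parameters#>>'{age,0}')::int * interval '1 year'))::date,
'[]');
$$;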
I have two tables with the most common first and last names. Each table basically has two relevant fields:
Tables
CREATE TABLE "common_first_name" (
"first_name" text PRIMARY KEY, --The text representing the name
"ratio" numeric NOT NULL, -- the % of how many times it occurs compared to the other names.
"inserted_at" timestamp WITH time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
"updated_at" timestamp WITH time zone DEFAULT timezone('utc'::text, now()) NOT NULL
);
CREATE TABLE "common_last_name" (
"last_name" text PRIMARY KEY, --The text representing the name
"ratio" numeric NOT NULL, -- the % of how many times it occurs compared to the other names.
"inserted_at" timestamp WITH time zone DEFAULT timezone('utc'::text, now()) NOT NULL,
"updated_at" timestamp WITH time zone DEFAULT timezone('utc'::text, now()) NOT NULL
);
P.S.: The top name occurs only ~1.8% of the time. The tables have 1000 rows each.
Function (Pseudo, not READY)
CREATE OR REPLACE FUNCTION create_sample_data(p_number_of_records INT)
RETURNS VOID
AS $$
DECLARE
SUM_OF_WEIGHTS CONSTANT INT := 100;
BEGIN
FOR i IN 1..coalesce(p_number_of_records, 0) LOOP
-- Get a random first and last name, taking their probability (ratio)
-- into account, e.g. using round(random() * SUM_OF_WEIGHTS);
-- create_person(random_first_name || ' ' || random_last_name);
END LOOP;
END
$$
LANGUAGE plpgsql VOLATILE;
P.S.: The sum of all ratios for each name (per table) sums up to 100%.
I want to run a function N times, getting a name and a surname each time to create sample data; both tables have 1000 rows each.
The sample size can be anywhere from 1000 full names to 1000000 names, so if there is a "fast" way of doing this random weighted function, even better.
Any suggestion of how to do it in PL/PGSQL?
I am using PG 13.3 on SUPABASE.IO.
Thanks
Given the small input dataset, it's straightforward to do this in pure SQL. Use CTEs to build lower and upper bound columns for each row in each of the common_FOO_name tables, then use generate_series() to generate sets of random numbers. Join everything together, and filter in the WHERE clause to the rows whose bounds contain the random values.
with first_names_weighted as (
select first_name,
sum(ratio) over (order by first_name) - ratio as lower_bound,
sum(ratio) over (order by first_name) as upper_bound
from common_first_name
),
last_names_weighted as (
select last_name,
sum(ratio) over (order by last_name) - ratio as lower_bound,
sum(ratio) over (order by last_name) as upper_bound
from common_last_name
),
randoms as (
select random() * (select sum(ratio) from common_first_name) as f_random,
random() * (select sum(ratio) from common_last_name) as l_random
from generate_series(1, 32)
)
select r, first_name, last_name
from randoms r
cross join first_names_weighted f
cross join last_names_weighted l
where f.lower_bound <= r.f_random and r.f_random <= f.upper_bound
and l.lower_bound <= r.l_random and r.l_random <= l.upper_bound;
Change the value passed to generate_series() to control how many names to generate. If it's important that it be a function, you can use a LANGUAGE SQL function definition to parameterize that number:
https://www.db-fiddle.com/f/mmGQRhCP2W1yfhZTm1yXu5/3
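For instance, a sketch of that function form (the generate_full_names name and output column names are illustrative):

create or replace function generate_full_names(p_count int)
returns table (fname text, lname text)
language sql volatile
as $$
with first_names_weighted as (
select first_name,
sum(ratio) over (order by first_name) - ratio as lower_bound,
sum(ratio) over (order by first_name) as upper_bound
from common_first_name
),
last_names_weighted as (
select last_name,
sum(ratio) over (order by last_name) - ratio as lower_bound,
sum(ratio) over (order by last_name) as upper_bound
from common_last_name
),
randoms as (
select random() * (select sum(ratio) from common_first_name) as f_random,
random() * (select sum(ratio) from common_last_name) as l_random
from generate_series(1, p_count)
)
select f.first_name, l.last_name
from randoms r
cross join first_names_weighted f
cross join last_names_weighted l
where f.lower_bound <= r.f_random and r.f_random <= f.upper_bound
and l.lower_bound <= r.l_random and r.l_random <= l.upper_bound;
$$;

-- usage:
select * from generate_full_names(1000);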
I'm trying to write a PL/PGSQL function that takes delimited text as input and inserts rows into a table that includes points. I've got a test function that works OK:
rowsaz := string_to_array(input,'?');
INSERT INTO test (num1, num2, my_name)
SELECT * FROM unnest(
    string_to_array(rowsaz[1], ',')::integer[],
    string_to_array(rowsaz[2], ',')::integer[],
    string_to_array(rowsaz[3], ',')::varchar[]
);
return true;
So if you call the function with
SELECT myfunction('1,2,3?4,5,6?a,b,c')
Then you get a table like
1 4 a
2 5 b
3 6 c
But how do you do this when you have a table like
CREATE TABLE public.gps_points
(
id integer NOT NULL DEFAULT nextval('gps_id_seq'::regclass),
location geometry(Point,4326),
created_at timestamp with time zone DEFAULT CURRENT_TIMESTAMP,
user_id integer
)
An insert would look like
INSERT INTO gps_points (location, user_id) VALUES (ST_GeomFromText('POINT(-71.060316 48.432044)', 4326),2);
But this gets tricky with the unnest because you have to pass the coordinates into the ST_POINT function. Then I would call the function with:
SELECT myfunction('36.98,36.99,36.97?45.85,45.86,45.87?1,2,3')
I'm trying to do the following, but it won't work
insert into gps_points( geom, user_id)
select unnest( (ST_GeomFromText('POINT(string_to_array(rowsaz[1],',')::double precision[] string_to_array(rowsaz[2],',')::double precision[])', 4326),string_to_array(rowsaz[3],',')::double precision[]));
Somebody helped me get it working (note the column is location, matching the table definition above):
insert into gps_points (location, user_id)
select ST_SetSRID(ST_MakePoint(lng, lat), 4326), uid
from unnest(
    string_to_array(rowsaz[1], ',')::double precision[],
    string_to_array(rowsaz[2], ',')::double precision[],
    string_to_array(rowsaz[3], ',')::integer[]
) as u(lng, lat, uid);
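Wrapped up as a complete function, a sketch (the myfunction name and the '?' delimiter follow the question; returning boolean matches the test function's "return true"):

CREATE OR REPLACE FUNCTION myfunction(input text) RETURNS boolean
LANGUAGE plpgsql AS $$
DECLARE
    rowsaz text[] := string_to_array(input, '?');
BEGIN
    INSERT INTO gps_points (location, user_id)
    SELECT ST_SetSRID(ST_MakePoint(lng, lat), 4326), uid
    FROM unnest(
        string_to_array(rowsaz[1], ',')::double precision[],  -- longitudes
        string_to_array(rowsaz[2], ',')::double precision[],  -- latitudes
        string_to_array(rowsaz[3], ',')::integer[]            -- user ids
    ) AS u(lng, lat, uid);
    RETURN true;
END;
$$;

-- called as in the question:
SELECT myfunction('36.98,36.99,36.97?45.85,45.86,45.87?1,2,3');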
My database has a table with tons of geometry(PointZ,4326) rows. I am doing a lot of my processing on the database side, and I've noticed that when I use ST_MakeLine I seem to be hitting a cap on the number of points it will make a line from. My table and function/query are below.
It works as long as the number of track_points returned from the subquery is less than 97. I know this because the insert puts data in the table for all columns when there are 96 points or fewer. For all records with 97 or more points, all it inserts is the track_id, start_time and end_time.
I'm wondering if this is a bug in the ST_MakeLine function of PostGIS, or a setting in Postgres that I need to modify.
CREATE TABLE track_line_strings(
track_id bigint NOT NULL,
linestring geometry(LINESTRINGZ,4326),
start_time bigint NOT NULL,
end_time bigint NOT NULL,
CONSTRAINT track_line_strings_pk PRIMARY KEY (track_id)
);
CREATE OR REPLACE FUNCTION create_track_line_string() RETURNS trigger
LANGUAGE plpgsql
AS $$
DECLARE
TRACKITEMID bigint := new.track_item_id;
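-- Note: plpgsql evaluates each initializer below as an implicit SELECT,
-- which is why "track_id from track_item ..." works as a default expression.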
TRACKID bigint := track_id from track_item ti where ti.id = TRACKITEMID;
STARTTIME bigint := MIN(ti.item_time) from track_item ti where ti.track_id = TRACKID;
ENDTIME bigint := MAX(ti.item_time) from track_item ti where ti.track_id = TRACKID;
BEGIN
IF EXISTS (SELECT track_id from track_line_strings where track_id = TRACKID)
THEN
UPDATE track_line_strings
SET start_time = STARTTIME, end_time = ENDTIME, linestring = (
SELECT ST_Makeline(e.trackPosition) FROM
(
Select track_id, tp.track_position AS trackPosition
FROM track_point tp JOIN track_item ti ON tp.track_item_id = ti.id
where ti.track_id = TRACKID ORDER BY ti.item_time ASC
) E )
WHERE track_id = TRACKID;
ELSE
INSERT INTO track_line_strings(track_id, linestring, start_time, end_time)
SELECT TRACKID, ST_Makeline(e.trackPosition), STARTTIME, ENDTIME FROM
(
Select track_id, tp.track_position AS trackPosition
FROM track_point tp JOIN track_item ti ON tp.track_item_id = ti.id
where ti.track_id = TRACKID ORDER BY ti.item_time ASC
)E;
END IF;
RETURN new;
END;
$$;
The database limits are pretty high: 1 GB worth of geometry data in a field. Depending on the kind of point geometry, that is on the order of tens of millions of point geometries that can be used to construct a LineString.
If you were hitting a real limitation, you would see a proper error message saying something about "exceeded size".
Apparent empty or missing data with pgAdminIII is a common question, but not related to database limitations:
http://postgis.net/2013/10/05/tip_pgAdmin_shows_no_data
http://postgis.net/docs/manual-dev/PostGIS_FAQ.html#pgadmin_shows_no_data_in_geom
There doesn't appear to be a limit. I was viewing results in pgAdminIII, and there must be a limit on the number of characters the data output can display for each column. I only realized this by copy-pasting the results into a text file and seeing that it did in fact return a value for the lines that have more than 96 points.
I am trying to get a query like the following one to work:
SELECT EXTRACT(DAY FROM INTERVAL to_date - from_date) FROM histories;
In the referenced table, to_date and from_date are of type timestamp without time zone. A regular query like
SELECT to_date - from_date FROM histories;
Gives me interval results such as '65 days 04:58:09.99'. But using this expression inside the first query gives me an error: invalid input syntax for type interval. I've tried various quotations and even nesting the query without luck. Can this be done?
SELECT EXTRACT(DAY FROM INTERVAL to_date - from_date) FROM histories;
This makes no sense. INTERVAL xxx is syntax for interval literals. So INTERVAL to_date is a syntax error, since to_date isn't a literal. If your code really looks more like INTERVAL '2012-02-01', then that's going to fail too, because 2012-02-01 is not valid syntax for an INTERVAL.
The INTERVAL keyword here is just noise. I suspect you misunderstood an example from the documentation. Remove it and the expression will be fine.
I'm guessing you're trying to get the number of days between two dates represented as timestamp or timestamptz.
If so, either cast both to date:
SELECT to_date::date - from_date::date FROM histories;
or get the interval, then extract the day component:
SELECT extract(day from to_date - from_date) FROM histories;
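For example, with the interval value from the question:

SELECT extract(day from interval '65 days 04:58:09.99');  -- returns 65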
This example demonstrates the creation of a table with a trigger that updates the difference between stop_time and start_time in DDD HH24:MI:SS format, where DDD stands for the number of days ...
DROP TABLE IF EXISTS benchmarks ;
SELECT 'create the "benchmarks" table'
;
CREATE TABLE benchmarks (
guid UUID NOT NULL DEFAULT gen_random_uuid()
, id bigint UNIQUE NOT NULL DEFAULT cast (to_char(current_timestamp, 'YYMMDDHH12MISS') as bigint)
, git_hash char (8) NULL DEFAULT 'hash...'
, start_time timestamp NOT NULL DEFAULT DATE_TRUNC('second', NOW())
, stop_time timestamp NOT NULL DEFAULT DATE_TRUNC('second', NOW())
, diff_time varchar (20) NOT NULL DEFAULT 'HH:MI:SS'
, update_time timestamp DEFAULT DATE_TRUNC('second', NOW())
, CONSTRAINT pk_benchmarks_guid PRIMARY KEY (guid)
) WITH (
OIDS=FALSE
);
create unique index idx_uniq_benchmarks_id on benchmarks (id);
-- START trigger trg_benchmarks_upsrt_diff_time
-- hrt = human readable time
CREATE OR REPLACE FUNCTION fnc_benchmarks_upsrt_diff_time()
RETURNS TRIGGER
AS $$
BEGIN
-- NEW.diff_time = age(NEW.stop_time::timestamp-NEW.start_time::timestamp);
NEW.diff_time = to_char(NEW.stop_time-NEW.start_time, 'DDD HH24:MI:SS');
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_benchmarks_upsrt_diff_time
BEFORE INSERT OR UPDATE ON benchmarks
FOR EACH ROW EXECUTE PROCEDURE fnc_benchmarks_upsrt_diff_time();
--
-- STOP trigger trg_benchmarks_upsrt_diff_time
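A hypothetical usage sketch (the timestamps are made up):

INSERT INTO benchmarks (start_time, stop_time)
VALUES ('2021-01-01 10:00:00', '2021-01-03 12:30:45');

-- the trigger has filled diff_time from stop_time - start_time
SELECT start_time, stop_time, diff_time FROM benchmarks;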
Just remove the keyword INTERVAL:
SELECT EXTRACT(DAY FROM to_date - from_date) FROM histories;