I have wkt data and I am trying to create a JSON output in PostgreSQL.
I know that there is a function ST_AsGeoJSON (https://postgis.net/docs/ST_AsGeoJSON.html) which creates for example:
SELECT ST_AsGeoJSON('LINESTRING(77.29 29.07,77.42 29.26,77.27 29.31,77.29 29.07)');
Output:
{"type":"LineString","coordinates":[[77.29,29.07],[77.42,29.26],[77.27,29.31],[77.29,29.07]]}
But I am looking to create an output as shown below:
{"type":"LineString","coordinates":[{"x":77.29,"y":29.07},{"x":77.42,"y":29.26},{"x":77.27,"y":29.31},{"x":77.29,"y":29.07}]}
Please note that I am looking for a generic solution for all types of geometry objects.
Thank you
You could use regex to replace the [a,b] with {"x":a,"y":b} with something like this:
-- Render a geometry as GeoJSON, but with each coordinate pair emitted as an
-- {"x":a,"y":b} object instead of the standard [a,b] array. Works for any
-- geometry type because it post-processes the text ST_AsGeoJSON produces.
CREATE OR REPLACE FUNCTION ST_AsCustomGeoJson(geom geometry)
RETURNS TEXT
AS
$$
-- Look for each coordinate pair [number_a,number_b] and replace it with
-- {"x":number_a,"y":number_b}.
-- The number pattern accepts an optional leading minus, an optional decimal
-- part, and an optional scientific-notation exponent with EITHER sign
-- (the original pattern only matched 'e+NN', so values such as 1e-5 or an
-- uppercase 'E' exponent were left untouched).
SELECT REGEXP_REPLACE(
ST_AsGeoJSON(geom),
'\[(-?[0-9]+\.?[0-9]*(?:[eE][+-]?[0-9]+)?),(-?[0-9]+\.?[0-9]*(?:[eE][+-]?[0-9]+)?)\]',
'{"x":\1,"y":\2}',
'g');
$$
LANGUAGE SQL IMMUTABLE STRICT PARALLEL SAFE;
Using the new function, you get the expected response:
# Select ST_AsCustomGeoJson('LINESTRING(77.29 29.07,77.42 29.26,77.27 29.31,77.29 29.07)'::geometry); st_ascustomgeojson
---------------------------------------------------------------------------------------------------------------
{"type":"LineString","coordinates":[{"x":77.29,"y":29.07},{"x":77.42,"y":29.26},{"x":77.27,"y":29.31},{"x":77.29,"y":29.07}]}
(1 row)
And it should work with other geometry types too:
# Select ST_AsCustomGeoJson('POLYGON((0 0, 0 10, 10 10, 10 0, 0 0),(1 1, 1 9, 9 9, 9 1, 1 1))'::geometry);
st_ascustomgeojson
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
{"type":"Polygon","coordinates":[[{"x":0,"y":0},{"x":0,"y":10},{"x":10,"y":10},{"x":10,"y":0},{"x":0,"y":0}],[{"x":1,"y":1},{"x":1,"y":9},{"x":9,"y":9},{"x":9,"y":1},{"x":1,"y":1}]]}
(1 row)
# Select ST_AsCustomGeoJson('LINESTRING(3e20 3e20, 1e100 40)'::geometry);
st_ascustomgeojson
---------------------------------------------------------------------------------
{"type":"LineString","coordinates":[{"x":3e+20,"y":3e+20},{"x":1e+100,"y":40}]}
(1 row)
Even geometry collections:
# Select ST_AsCustomGeoJson('GEOMETRYCOLLECTION (POINT(-1 0), LINESTRING(4 4,5 5))');
st_ascustomgeojson
-----------------------------------------------------------------------------------------------------------------------------
---------------------------------
{"type":"GeometryCollection","geometries":[{"type":"Point","coordinates":{"x":-1,"y":0}},{"type":"LineString","coordinates":
[{"x":4,"y":4},{"x":5,"y":5}]}]}
I believe a simple loop with jsonb_build_object over a result set from ST_DumpPoints would suffice. If you also want to apply this function to multipart geometries, you have to build another loop that extracts all the component geometries beforehand using ST_Dump:
-- Build a custom GeoJSON-like document whose coordinates are {"x":..,"y":..}
-- objects, by dumping every point of the input geometry.
-- Single-part geometries get a flat coordinate list; multipart geometries get
-- one nested list per component.
CREATE OR REPLACE FUNCTION generate_custom_geojson(g GEOMETRY)
RETURNS json AS $$
DECLARE
j geometry;
i geometry;
coords jsonb[];
coords_multi jsonb[] := '{}';
BEGIN
FOR j IN SELECT (ST_Dump(g)).geom LOOP
-- Reset per component: without this, each component of a multipart
-- geometry accumulated all previous components' points as well.
coords := '{}';
FOR i IN SELECT (ST_DumpPoints(j)).geom LOOP
coords := coords || jsonb_build_object('x',ST_X(i),'y',ST_Y(i));
END LOOP;
IF ST_NumGeometries(g)=1 THEN
coords_multi := coords;
ELSE
-- to_jsonb turns the jsonb[] into ONE jsonb array element.
-- (jsonb_agg here acted as a one-row aggregate and wrapped the
-- component in a spurious extra array level.)
coords_multi := coords_multi || to_jsonb(coords);
END IF;
END LOOP;
-- ST_GeometryType returns e.g. 'ST_MultiPolygon'; strip the prefix to get
-- the GeoJSON-style type name.
RETURN json_build_object('type',replace(ST_GeometryType(g),'ST_',''),
'coordinates',coords_multi);
END;
$$ LANGUAGE plpgsql;
This function simply extracts all points of a given geometry and puts them into an array - appended using ||. This array is later on used to create the coordinates set of x,y pairs. The geometry type is extracted using ST_GeometryType.
Test:
-- Exercise the function with one geometry of each common type
-- (the WKT literals are implicitly cast to geometry by the function call).
WITH j (g) AS (
VALUES ('LINESTRING(77.29 29.07,77.42 29.26,77.27 29.31,77.29 29.07)'),
('POLYGON ((30 10, 40 40, 20 40, 10 20, 30 10))'),
('MULTILINESTRING ((10 10, 20 20, 10 40),(40 40, 30 30, 40 20, 30 10))'),
('MULTIPOLYGON (((30 20, 45 40, 10 40, 30 20)),((15 5, 40 10, 10 20, 5 10, 15 5)))'),
('MULTIPOINT (10 40, 40 30, 20 20, 30 10)')
)
SELECT generate_custom_geojson(g) FROM j;
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
{"type" : "LineString", "coordinates" : [{"x": 77.29, "y": 29.07},{"x": 77.42, "y": 29.26},{"x": 77.27, "y": 29.31},{"x": 77.29, "y": 29.07}]}
{"type" : "Polygon", "coordinates" : [{"x": 30, "y": 10},{"x": 40, "y": 40},{"x": 20, "y": 40},{"x": 10, "y": 20},{"x": 30, "y": 10}]}
{"type" : "MultiLineString", "coordinates" : [[[{"x": 10, "y": 10}, {"x": 20, "y": 20}, {"x": 10, "y": 40}]],[[{"x": 10, "y": 10}, {"x": 20, "y": 20}, {"x": 10, "y": 40}, {"x": 40, "y": 40}, {"x": 30, "y": 30}, {"x": 40, "y": 20}, {"x": 30, "y": 10}]]]}
{"type" : "MultiPolygon", "coordinates" : [[[{"x": 30, "y": 20}, {"x": 45, "y": 40}, {"x": 10, "y": 40}, {"x": 30, "y": 20}]],[[{"x": 30, "y": 20}, {"x": 45, "y": 40}, {"x": 10, "y": 40}, {"x": 30, "y": 20}, {"x": 15, "y": 5}, {"x": 40, "y": 10}, {"x": 10, "y": 20}, {"x": 5, "y": 10}, {"x": 15, "y": 5}]]]}
{"type" : "MultiPoint", "coordinates" : [[[{"x": 10, "y": 40}]],[[{"x": 10, "y": 40}, {"x": 40, "y": 30}]],[[{"x": 10, "y": 40}, {"x": 40, "y": 30}, {"x": 20, "y": 20}]],[[{"x": 10, "y": 40}, {"x": 40, "y": 30}, {"x": 20, "y": 20}, {"x": 30, "y": 10}]]]}
(5 Zeilen)
Related
I am rendering a gauge component in the following way, within my Streamlit app:
# ECharts option for a half-circle gauge (sweep 180 -> 0 degrees), rendered
# in Streamlit via streamlit-echarts.
# NOTE(review): min_range_val, max_range_val, value and caption are assumed to
# be defined earlier in the app -- confirm against the surrounding script.
option = {
"series": [
{
"type": "gauge",
"startAngle": 180,  # half circle: start at the left ...
"endAngle": 0,  # ... and end at the right
"min": min_range_val,
"max": max_range_val,
"center": ["40%", "40%"],  # gauge centre as a fraction of the canvas
"splitNumber": 5,
"axisLine": {
"lineStyle": {
"width": 6,
# colour stops: (fraction of range, band colour)
"color": [
[0.25, "#FF403F"],
[0.5, "#ffa500"],
[0.75, "#FDDD60"],
[1, "#64C88A"],
],
}
},
"pointer": {
"icon": "path://M12.8,0.7l12,40.1H0.7L12.8,0.7z",  # custom SVG-path needle
"length": "12%",
"width": 30,
"offsetCenter": [0, "-60%"],
"itemStyle": {"color": "auto"},  # "auto": colour of the band under the needle
},
"axisTick": {"length": 10, "lineStyle": {"color": "auto", "width": 2}},
"splitLine": {"length": 15, "lineStyle": {"color": "auto", "width": 5}},
"axisLabel": {
"color": "#464646",
"fontSize": 12,
"distance": -60,  # negative distance places labels outside the arc
},
"title": {"offsetCenter": [0, "-20%"], "fontSize": 20},
"detail": {
"fontSize": 30,
"offsetCenter": [0, "0%"],
"valueAnimation": True,
"color": "auto",
"formatter": "{value}%",  # display the reading as a percentage
},
"data": [{"value": value, "name": caption}],
}
]
}
st_echarts(option, width="450px", height="350px", key="gauge")
However, it seems like an additional empty extra white space is added at the bottom of the component (as from the following image).
How can I effectively remove that and keep only a tiny margin all around the gauge?
The following parameters must be added:
radius: '120%',
center: ['50%', '80%']
The latter one should be adjusted according to specific use cases.
I'm looking at using mongodb and so far most things that I've tried work. But I don't know why this find doesn't work.
# Create a throw-away collection and seed it with sample documents.
col = db.create_collection("test")
x = col.insert_many([
{"item": "journal", "qty": 25, "size": {"h": 14, "w": 21, "uom": "cm"}, "status": "A"},
{"item": "notebook", "qty": 50, "size": {"h": 8.5, "w": 11, "uom": "in"}, "status": "A"},
{"item": "paper", "qty": 100, "size": {"h": 8.5, "w": 11, "uom": "in"}, "status": "D"},
{"item": "planner", "qty": 75, "size": {"h": 22.85, "w": 30, "uom": "cm"}, "status": "D"},
{"item": "postcard", "qty": 45, "size": {"h": 10, "w": 15.25, "uom": "cm"}, "status": "A"}
])
# Whole-document equality match on the embedded "size" document: this only
# matches when the stored field order is identical too (see the answer below).
cursor = col.find({"size": {"h": 14, "w": 21, "uom": "cm"}})
# cursor.retrieved counts documents ALREADY fetched from the server; it stays
# 0 until the cursor is iterated, so this branch fires even when the query
# would have matched documents.
if cursor.retrieved == 0:
print("found nothing") # <<<<<<<<< prints this
As explained into docs into section Match an Embedded/Nested Document:
Equality matches on the whole embedded document require an exact match of the specified document, including the field order.
So, you have to set the object into find stage in the same order that exists into DB.
I am not sure whether keys inside objects follow a strict order (alphabetical or otherwise), but with this query almost every document yields the expected result. Not always, though, so the stored key order seems effectively arbitrary — at least on mongo playground.
By the way, the correct way to ensure results is to use dot notation, so this query will always work:
# Dot notation matches individual fields inside the embedded document, so the
# result does not depend on the field order stored in the database.
coll.find({
"size.h": 14,
"size.w": 21,
"size.uom": "cm"
})
I was thinking that cursor.retrieved was non zero if it found something. I guess not. I found that this works:
# Materialise the cursor into a list; len() on the list is a reliable
# emptiness check, unlike cursor.retrieved before any iteration.
lst = list(cursor)
print(lst)
# A fully consumed cursor must be rewound before it can be read again.
cursor.rewind()
print(list(cursor))
if len(lst) != 0:
for d in lst:
print(d)
I need to receive some information from Weather Company Data For IBM Bluemix APIs about a specific period (from jan 2012 to jan 2015).
The documentation includes this example API:
https://twcservice.mybluemix.net:443/api/weather/v1/geocode/33.40/-83.42/almanac/daily.json?units=e&start=0112&end=0115
But this is the result:
{"metadata":{"transaction_id":"1472145329818:-319071226","status_code":400},"success":false,"errors":[{"error":{"code":"PVE-0003","message":"The field 'start' contains a value '112' which is outside the expected range of [1 to 12]."}}]}
Can you let me know how I can search the historic information?
Thank you
https://twcservice.eu-gb.mybluemix.net/api/weather/v1/geocode/33.40/-83.42/almanac/daily.json?start=0112&end=0115&units=e
It works for me!!
{
"metadata": {
"language": "en-US",
"transaction_id": "1501849033676:-1423101749",
"version": "1",
"latitude": 33.4,
"longitude": -83.42,
"units": "e",
"expire_time_gmt": 1501869684,
"status_code": 200
},
"almanac_summaries": [
{
"class": "almanac",
"station_id": "095988",
"station_name": "MONTICELLO",
"almanac_dt": "0112",
"interval": "D",
"avg_hi": 56,
"avg_lo": 28,
"record_hi": 75,
"record_hi_yr": 1916,
"record_lo": 0,
"record_lo_yr": 1982,
"mean_temp": 42,
"avg_precip": 0.13,
"avg_snow": 0.1,
"record_period": 30
},
{
"class": "almanac",
"station_id": "095988",
"station_name": "MONTICELLO",
"almanac_dt": "0113",
"interval": "D",
"avg_hi": 56,
"avg_lo": 28,
"record_hi": 77,
"record_hi_yr": 1911,
"record_lo": 8,
"record_lo_yr": 1918,
"mean_temp": 42,
"avg_precip": 0.12,
"avg_snow": 0,
"record_period": 30
},
{
"class": "almanac",
"station_id": "095988",
"station_name": "MONTICELLO",
"almanac_dt": "0114",
"interval": "D",
"avg_hi": 56,
"avg_lo": 28,
"record_hi": 78,
"record_hi_yr": 1937,
"record_lo": 10,
"record_lo_yr": 1918,
"mean_temp": 42,
"avg_precip": 0.13,
"avg_snow": 0,
"record_period": 30
},
{
"class": "almanac",
"station_id": "095988",
"station_name": "MONTICELLO",
"almanac_dt": "0115",
"interval": "D",
"avg_hi": 56,
"avg_lo": 28,
"record_hi": 80,
"record_hi_yr": 1932,
"record_lo": 11,
"record_lo_yr": 1964,
"mean_temp": 42,
"avg_precip": 0.12,
"avg_snow": 0,
"record_period": 30
}
]
}
I have the following table:
-- Employees keyed by a surrogate id; per-employee metrics are stored in a
-- schemaless jsonb document.
CREATE TABLE mytable (
id serial PRIMARY KEY
, employee text UNIQUE NOT NULL
, data jsonb -- e.g. {"sales_tv": [...], "sales_radio": [...], "another_key": ...}
);
With the following data:
-- Sample rows: each data document holds arrays of {value, yr, loc} objects
-- under "sales_*" keys, plus unrelated keys that the queries must ignore.
INSERT INTO mytable (employee, data)
VALUES
('Jim', '{"sales_tv": [{"value": 10, "yr": "2010", "loc": "us"}, {"value": 5, "yr": "2011", "loc": "europe"}, {"value": 40, "yr": "2012", "loc": "asia"}], "sales_radio": [{"value": 11, "yr": "2010", "loc": "us"}, {"value": 8, "yr": "2011", "loc": "china"}, {"value": 76, "yr": "2012", "loc": "us"}], "another_key": "another value"}'),
('Rob', '{"sales_radio": [{"value": 7, "yr": "2014", "loc": "japan"}, {"value": 3, "yr": "2009", "loc": "us"}, {"value": 37, "yr": "2011", "loc": "us"}], "sales_tv": [{"value": 4, "yr": "2010", "loc": "us"}, {"value": 18, "yr": "2011", "loc": "europe"}, {"value": 28, "yr": "2012", "loc": "asia"}], "another_key": "another value"}')
Notice that there are other keys in there besides just "sales_tv" and "sales_radio". For the queries below I just need to focus on "sales_tv" and "sales_radio".
I'm trying to return a list of objects for Jim for anything that starts with "sales_". In each object w/in the list I just need to return the value and the yr (ignoring "location" or any other keys) e.g.:
employee | sales_
Jim | {"sales_tv": [{"value": 10, "yr": "2010"}, {"value": 5, "yr": "2011"}, {"value": 40, "yr": "2012"}],
"sales_radio": [{"value": 11, "yr": "2010"}, {"value": 8, "yr": "2011"}, {"value": 76, "yr": "2012"}]}
I am able to get each of the values but without the year nor the list format I'd like:
-- First attempt (shown to be wrong below): json_object_agg pairs each key
-- directly with a single unnested value, so every array element produces its
-- own "sales_tv"/"sales_radio" entry and the yr is dropped entirely.
SELECT t.employee, json_object_agg(a.k, d.value) AS sales
FROM mytable t
, jsonb_each(t.data) a(k,v)
, jsonb_to_recordset(a.v) d(yr text, value float)
WHERE t.employee = 'Jim'
AND a.k LIKE 'sales_%'
GROUP BY 1
Results:
employee | sales
---------- | --------
Jim | { "sales_tv" : 10, "sales_tv" : 5, "sales_tv" : 40, "sales_radio" : 11, "sales_radio" : 8, "sales_radio" : 76 }
The principle is the same as the question you asked yesterday, the first query (even though this question is yesterday's second query): peel away layers of hierarchy in your json data and then re-assemble it with whatever data you are interested in, into whatever new json format.
-- Inner step: one row per (employee, sales key), with the key's array
-- elements re-assembled as [{"value":..,"yr":..}, ...].
WITH per_key AS (
    SELECT t.employee,
           a.k AS sales_key,
           json_agg(json_build_object('value', d.value, 'yr', d.yr)) AS yearly
    FROM mytable t
    CROSS JOIN LATERAL jsonb_each(t.data) a(k, v)
    CROSS JOIN LATERAL jsonb_to_recordset(a.v) d(yr text, value float)
    WHERE t.employee = 'Jim'
      AND a.k like 'sales_%'
    GROUP BY t.employee, a.k
)
-- Outer step: fold the per-key arrays into one object per employee.
SELECT employee, json_object_agg(sales_key, yearly) AS sales
FROM per_key
GROUP BY employee;
In the FROM clause you break down the JSON hierarchy with functions like jsonb_each and jsonb_to_recordset. As the last function's name already implies, each of these produces a set of records that you can work with just like you would with any other table and its columns. In the column selection list you select the required data and the appropriate aggregate functions json_agg and json_object_agg to piece the JSON result back together. For every level of hierarchy you need one aggregate function and therefore one level of sub-query.
When requesting ad stats for an ad that has been running longer than 1 day, I receive unique_impressions = 0.
However when requesting stats for a day the unique_impression is populated.
Is it a bug or intentional? is there a workaround to get unique impressions for more than 1 day?
Querying for one day:
https://graph.facebook.com/<ad_group_id>/stats/1362787200/1362873600
I get:
{
"id": "<ad_group_id>/stats/1362787200/1362873600",
"impressions": 8616,
"clicks": 67,
"spent": 715,
"social_impressions": 20,
"social_clicks": 0,
"social_spent": 0,
"unique_impressions": 3544,
"social_unique_impressions": 11,
"unique_clicks": 67,
"social_unique_clicks": 0,
"actions": null,
"inline_actions": {
"title_clicks": 0,
"like": 9,
"rsvp_yes": 0,
"rsvp_maybe": 0,
"post_like": 0,
"comment": 0,
"photo_view": 0,
"link_click": 0,
"video_play": 0,
"question_vote": 0
},...
Querying for all time:
https://graph.facebook.com/<ad_group_id>/stats
I get:
{
"id": "<ad_group_id>/stats",
"impressions": 8616,
"clicks": 67,
"spent": 715,
"social_impressions": 20,
"social_clicks": 0,
"social_spent": 0,
"unique_impressions": 0,
"social_unique_impressions": 11,
"unique_clicks": 67,
"social_unique_clicks": 0,
"actions": null,
"inline_actions": {
"title_clicks": 0,
"like": 9,
"rsvp_yes": 0,
"rsvp_maybe": 0,
"post_like": 0,
"comment": 0,
"photo_view": 0,
"link_click": 0,
"video_play": 0,
"question_vote": 0
},...
Thanks,
Amit
Are you forgetting to use timestamps which are for a 1, 7 or 28 day period, and adjusted to match the timezone of the ad account itself? This is the most likely explanation here
For example, if your ad account (/act_<ACCOUNT_NUMBER_HERE) is like this:
"account_id": "<SNIP>",
"id": "act_<SNIP",
"name": "<SNIP> default (EUR) account",
"account_status": 1,
"currency": "EUR",
"timezone_id": 69,
"timezone_name": "Europe/Dublin",
"timezone_offset_hours_utc": 1,
You need to adjust the timestamps of your stats calls to UTC +1
So for retrieving unique stats, you need to request 1, 7 or 28 days of data, from midnight-midnight, in UTC +1 , this example should work for you:
<SNIP>/stats/2013-02-01 00:00:00 +0100/2013-03-01 00:00:00 +0100
or
<SNIP>/stats/1359673200/1362092400
Example response (truncated)
{
"id": "<SNIP>/stats/1359673200/1362092400",
"impressions": "166561",
"clicks": "6304",
"spent": "45257",
"social_impressions": "166556",
"social_clicks": "6304",
"social_spent": "45257",
"unique_impressions": 111368,
"social_unique_impressions": 111368,
"unique_clicks": 5992,
"social_unique_clicks": 5992,
"actions": {