I've never used JSONB columns before, so I'm struggling with a simple query.
I need to select all the value fields from this json below. The output should be: value1, value2, value3, value4, value5, value6, value7, value8
That's as far as I got, but I could not find a way to go deeper in the json.
SELECT result -> 'report' ->> 'products' AS values FROM example_table
I appreciate your help.
-- Demo schema: one JSONB column holding a deeply nested report
-- (report -> products[] -> types[] -> metadata -> prices[] -> price.value).
CREATE TABLE example_table(
id SERIAL PRIMARY KEY,
result JSONB NOT NULL);
-- Seed a single row; the eight leaf values value1..value8 are what the
-- extraction query later in this thread must return.
INSERT INTO example_table(result)
VALUES('{
"report": {
"products": [
{
"productName": "Product One",
"types": [
{
"type": "Type One",
"metadata": {
"prices": [
{
"price": {
"value": "value1"
}
},
{
"price": {
"value": "value2"
}
}
]
}
},
{
"type": "Type Two",
"metadata": {
"prices": [
{
"price": {
"value": "value3"
}
},
{
"price": {
"value": "value4"
}
}
]
}
}
]
},
{
"productName": "Product Two",
"types": [
{
"type": "Type One",
"metadata": {
"prices": [
{
"price": {
"value": "value5"
}
},
{
"price": {
"value": "value6"
}
}
]
}
},
{
"type": "Type Two",
"metadata": {
"prices": [
{
"price": {
"value": "value7"
}
},
{
"price": {
"value": "value8"
}
}
]
}
}
]
}
]
}
}');
You should use CROSS JOIN together with the jsonb_array_elements function to unnest each level of the nested arrays, producing one row per element.
Demo
-- Drill through the three nested arrays (products -> types ->
-- metadata.prices) with one jsonb_array_elements per level, then
-- collect every leaf price value into one string per table row.
select
    et.id,
    -- ', ' separator matches the desired output "value1, value2, ...";
    -- the aggregate gets an explicit alias instead of the default name
    string_agg(ptmp.value -> 'price' ->> 'value', ', ') as price_values
from
    example_table et
    -- one row per product
    cross join jsonb_array_elements(et.result -> 'report' -> 'products') p
    -- one row per type within each product
    cross join jsonb_array_elements(p.value -> 'types') pt
    -- one row per price entry within each type
    cross join jsonb_array_elements(pt.value -> 'metadata' -> 'prices') ptmp
group by et.id;
Related
Table public.challenge, column lines JSONB
My initial JSON in lines :
[
{
"line": 1,
"blocs": [
{
"size": 100,
"name": "abc"
},
{
"size": 100,
"name": "def"
},
{
"size": 100,
"name": "ghi"
}
]
},
{
"line": 2,
"blocs": [
{
"size": 100,
"name": "xyz"
}
]
}
]
Desired result (add a new object wrapper for every bloc) :
[
{
"line": 1,
"blocs": [
{
"size": 100,
"name": "abc",
"wrapper": {
"nestedName": "abc",
"type": "regular"
}
},
{
"size": 100,
"name": "def",
"wrapper": {
"nestedName": "def",
"type": "regular"
}
},
{
"size": 100,
"name": "ghi",
"wrapper": {
"nestedName": "ghi",
"type": "regular"
}
}
]
},
{
"line": 2,
"blocs": [
{
"size": 100,
"name": "xyz",
"wrapper": {
"nestedName": "xyz",
"type": "regular"
}
}
]
}
]
I have the following query (from here) :
-- Rebuild each element of "lines": every bloc gets a "wrapper" object
-- derived from its name, and the result is written back to the table.
WITH cte AS (
    SELECT id_lines,
           jsonb_agg(
               jsonb_set(val1, '{blocs}',
                   (
                       -- append the wrapper object to each bloc;
                       -- jsonb_build_object avoids the ::jsonb cast needed
                       -- with json_build_object
                       SELECT jsonb_agg(arr2 ||
                           jsonb_build_object(
                               'wrapper',
                               jsonb_build_object('nestedName', arr2 -> 'name',
                                                  'type', 'regular')
                           )
                       )
                       FROM jsonb_array_elements(arr1.val1 -> 'blocs') arr2
                       -- IS NOT NULL only rejects a missing key (SQL NULL);
                       -- a JSON null value needs an explicit comparison too
                       WHERE arr2 -> 'name' IS NOT NULL
                         AND arr2 -> 'name' <> 'null'::jsonb
                   )
               )
           ) AS new_lines
    FROM public.challenge, jsonb_array_elements(lines) arr1(val1)
    GROUP BY 1
)
UPDATE public.challenge SET lines = cte.new_lines FROM cte
WHERE public.challenge.id_lines = cte.id_lines;
The condition WHERE arr2->'name' IS NOT NULL does not filter out blocs where name is null, and I struggle to find out why. Thanks!
You have to distinguish between SQL NULL and JSON null.
The IS NOT NULL predicate tests for SQL NULL, which would mean that the attribute is not present in the JSON.
To test for JSON null, use
WHERE arr2->'name' <> 'null'::jsonb
The type cast to jsonb is not necessary and would be performed implicitly.
Lets say I have a table and in a "data" NVARCHAR(MAX) column, I have this JSON:
[
{
"room": "kitchen",
"items": [
{
"name": "table",
"price": 100
}
]
},
{
"room": "bedroom",
"items": [
{
"name": "bed",
"price": 250
},
{
"name": "lamp",
"price": 50
}
]
},
{
"room": "bathroom",
"items": [
{
"name": "toilet",
"price": 101
},
{
"name": "shower",
"items": [
{
"name": "shower curtain",
"price": 10
},
{
"name": "shower head",
"price": 40
}
]
}
]
}
]
Using TSQL, can I somehow SUM all prices in the JSON? Please notice that my "price" is in different levels in the JSON file.
And furthermore, can I make a computed column that will SUM all the prices in the JSON column?
In JSON you would have to walk all nodes and check whether another sublevel containing "price" exists. As text it is easier. This example is for one cell.
The idea above to insert the result to another column in the table is a good one.
You can implement a trigger after every INSERT / UPDATE to calculate instead of a computed column.
-- T-SQL local variables must use the @ prefix (# denotes temp tables),
-- so the original's #str/#sub would not even parse.
declare @str varchar(4000)='[ { "room": "kitchen", "items": [ { "name": "table", "price": 100 } ] }, { "room": "bedroom", "items": [ { "name": "bed", "price": 250 }, { "name": "lamp", "price": 50 } ] }, { "room": "bathroom", "items": [ { "name": "toilet", "price": 101 }, { "name": "shower", "items": [ { "name": "shower curtain", "price": 10 }, { "name": "shower head", "price": 40 } ] } ] } ]'
, @sub varchar(15);
drop table if exists #prices;
create table #prices(price int);   -- one extracted price per row
-- Repeatedly locate the next '"price": ' marker, pull out the digits
-- that follow it, and remove the processed fragment from the string.
WHILE patindex('%"price": %',@str) > 0
begin
-- grab 15 characters from the marker; enough to cover the number
SELECT @sub=SUBSTRING(@str, patindex('%"price": %',@str), 15);
-- strip every non-digit character, leaving only the price digits
WHILE PatIndex('%[^0-9]%', @sub) > 0
SET @sub = Stuff(@sub, PatIndex('%[^0-9]%', @sub), 1, '');
insert into #prices select try_cast(@sub as int);
-- delete the fragment we just handled so the loop advances
SET @str = Stuff(@str, patindex('%"price": %',@str), 15, '');
end;
select sum(price) from #prices;
I'm using mongoose, and I have the following data in the user collection:
[{
"_id": "1",
"notes": [
{
"value": "A90",
"text": "math"
},
{
"value": "A80",
"text": "english"
},
{
"value": "A70",
"text": "art"
}
]
},
{
"_id": "2",
"notes": [
{
"value": "A90",
"text": "math"
},
{
"value": "A80",
"text": "english"
}
]
},
{
"_id": "3",
"notes": [
{
"value": "A80",
"text": "art"
}
]
}]
and I have as a parameters the following array: [ "A90", "A80" ]
so I want to make a query that uses this array to return only the records whose notes contain all of the array items in their value fields.
So for the example above it will return:
[{
"_id": "1",
"notes": [
{
"value": "A90",
"text": "math"
},
{
"value": "A80",
"text": "english"
},
{
"value": "A70",
"text": "art"
}
]
},
{
"_id": "2",
"notes": [
{
"value": "A90",
"text": "math"
},
{
"value": "A80",
"text": "english"
}
]
}]
I tried the following find query:
{ "notes": { $elemMatch: { value: { $in: valuesArray } } }}
but it returns a record even if just one element of valuesArray exists.
it turned out to be quite easy:
find({ "notes.value": { $all: arrayValues } })
I have this kind of jsonb data in a column named "FORM" in my table "process", and I want to create a view exposing some of the data inside the objects named field — just the name and value from the field array in this jsonb.
here the jsonb:
{
"column": [
{
"row": {
"id": "ebc7afddad474aee8f82930b6dc328fe",
"name": "Details",
"field": [
{
"name": {
"id": "50a5613e97e04cb5b8d32afa8a9975d1",
"label": "name"
},
"value": {
"stringValue": "yhfghg"
}
}
]
}
},
{
"row": {
"id": "5b7471413cbc44c1a39895020bf2ec58",
"name": "leave details",
"field": [
{
"name": {
"id": "bb127e8284c84692aa217539c4312394",
"label": "date"
},
"value": {
"dateValue": 1549065600
}
},
{
"name": {
"id": "33b2c5d1a968481d9d5e386db487de52",
"label": "days",
"options": {
"allowedValues": [
{
"item": "1"
},
{
"item": "2"
},
{
"item": "3"
},
{
"item": "4"
},
{
"item": "5"
}
]
},
"defaultValue": {
"radioButtonValue": "1"
}
},
"value": {
"radioButtonValue": "3"
}
}
]
}
}
]
}
and I want this kind of jsonb in the view; the data comes from the subarray called field inside the object named row:
[
{
"name": {
"id": "50a5613e97e04cb5b8d32afa8a9975d1"
},
"value": {
"stringValue": "yhfghg"
}
},
{
"name": {
"id": "bb127e8284c84692aa217539c4312394"
},
"value": {
"dateValue": 1549065600
}
},
{
"name": {
"id": "33b2c5d1a968481d9d5e386db487de52"
},
"value": {
"radioButtonValue": "3"
}
}
]
How can I do this?
I used jsonb_array_elements twice to expand the two arrays, then used json_build_object to make the result structure and jsonb_agg combine the several rows generated above into a single JSONB array.
I included a row number in the results so I could later apply GROUP BY, so that results from several "process" rows would not be accidentally combined by the jsonb_agg.
-- Expand FORM -> column[] -> row.field[] and rebuild each field element
-- as {"name": {"id": ...}, "value": {...}}, aggregated back into one
-- JSONB array per "process" row.
with cols as (
    -- one row per element of the top-level "column" array;
    -- row_number() tags each source row so results from different
    -- "process" rows are not merged by the final aggregation
    select jsonb_array_elements("FORM" -> 'column') as r
          ,row_number() over () as n
    from "process"
)
,cols2 as (
    -- one row per element of each row's "field" array
    select jsonb_array_elements(r -> 'row' -> 'field') as v
          ,n
    from cols
)
select jsonb_agg(
           jsonb_build_object(
               -- the id lives inside the "name" object (v has only the
               -- keys "name" and "value", so v->'id' would be NULL), and
               -- the desired output keeps it wrapped as {"id": ...}
               'name', jsonb_build_object('id', v -> 'name' -> 'id'),
               'value', v -> 'value'
           )
       ) as fields
from cols2
group by n;
Here's what I want to do:
1. Concatenate first and last name with name
2. Change id to employeeID and add prefix with employee ID: emp_id
3. If department is equal to sales, then department should be "SL"
4. If department is equal to returns, then department should be "RET"
Here's my input:
{
"employees": [{
"f_name": "tom",
"l_name": "smith",
"id": "100",
"department": "sales",
"company": "ABC Intelligence"
},
{
"f_name": "john",
"l_name": "doe",
"id": "102",
"department": "returns",
"company": "ABC Intelligence"
}, {
"f_name": "jane",
"l_name": "doe",
"id": "103",
"department": "sales",
"company": "ABC Intelligence"
}
]
}
specs:
[{
"operation": "shift",
"spec": {
"employees": {
"*": {
"name": "=concat(#(1,f_name),' ',#(1,l_name))"
}
}
}
},
{
"operation": "remove",
"spec": {
"employees": {
"*": {
"f_name": "",
"l_name": ""
}
}
}
}
]
desired output:
{
"employees": [
{
"name": "tom smith",
"employeeID": "emp_100",
"department": "SL",
"company": "ABC Intelligence"
},
{
"name": "john doe",
"employeeID": "emp_102",
"department": "RET",
"company": "ABC Intelligence"
},
{
"name": "jane doe",
"employeeID": "emp_103",
"department": "SL",
"company": "ABC Intelligence"
}
]
}
I was able to get the first rule but still struggling with the others. Any help would be appreciated
Spec
[
{
"operation": "modify-default-beta",
"spec": {
// add the mapping of department name to code, so we can use it later
"deptMap": {
"sales": "SL",
"returns": "RET"
},
"employees": {
"*": {
// build the fullName from the first and last names
"name": "=concat(#(1,f_name),' ',#(1,l_name))",
// build the employeeID
"employeeID": "=concat(emp_,#(1,id))"
}
}
}
},
{
"operation": "shift",
"spec": {
"employees": {
"*": { // employees arrays
// pass name, company, and employeeID thru
"name": "employees[&1].name",
"company": "employees[&1].company",
"employeeID": "employees[&1].employeeID",
// look up the department code
"department": {
"*": { // value of dept
// go up the tree, then come back down through the deptMap
"#(4,deptMap.&)": "employees[&3].department"
}
}
}
}
}
}
]