How to search and join multiple indexes with SphinxQL? - sphinx

I have 2 indexes, indexA and indexB. These 2 indexes have different columns.
Example:
Index A:
+---+-----+
|id |text |
+---+-----+
|1 |john |
|2 |tom |
|3 |sam |
+---+-----+
Index B:
+---+---------+-----+
|id |parentid |num |
+---+---------+-----+
|1 |1 |64 |
|2 |1 |128 |
|3 |2 |256 |
+---+---------+-----+
Question:
How do I get a result like this?
/*Client search*/
SELECT
A.id, A.text, B.num
FROM
indexa A
INNER JOIN
indexb B ON A.id = B.parentid
WHERE
B.num > 100
Result:
+-----+--------+-------+
|A.id | A.text |B.num |
+-----+--------+-------+
|1 |john |128 |
|2 |tom |256 |
+-----+--------+-------+

After editing the index's source query, the problem was solved.
The solved index query:
SELECT
A.id, A.text, B.num
FROM
tableA A
LEFT JOIN
tableB B ON A.id = B.parentid
Search query:
SELECT * FROM indexA
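For context, Sphinx does not support joining two indexes at search time, so the join has to happen in the source SQL when the index is built. A minimal sketch of what the source definition might look like (the source name, path, and connection settings are assumptions, not taken from the question):

source src_combined
{
    type      = mysql
    # connection settings omitted; assumed to point at the database
    # holding tableA and tableB
    sql_query = \
        SELECT A.id, A.text, B.num \
        FROM tableA A \
        LEFT JOIN tableB B ON A.id = B.parentid
    # num must be declared as an attribute to be filterable in SphinxQL
    sql_attr_uint = num
}

index indexA
{
    source = src_combined
    path   = /var/lib/sphinx/indexA   # assumed path
}

Note that the first column of sql_query becomes the document ID and must be unique; if one tableA row can match several tableB rows (as in the sample data), you may want to select B.id as the document ID instead. With num indexed as an attribute, the client-side search becomes:

SELECT id, text, num FROM indexA WHERE num > 100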

Related

Loop through large dataframe in Pyspark - alternative

df_hrrchy
|lefId |Lineage |
|-------|--------------------------------------|
|36326 |["36326","36465","36976","36091","82"]|
|36121 |["36121","36908","36976","36091","82"]|
|36380 |["36380","36465","36976","36091","82"]|
|36448 |["36448","36465","36976","36091","82"]|
|36683 |["36683","36465","36976","36091","82"]|
|36949 |["36949","36908","36976","36091","82"]|
|37349 |["37349","36908","36976","36091","82"]|
|37026 |["37026","36908","36976","36091","82"]|
|36879 |["36879","36465","36976","36091","82"]|
df_trans
|tranID | T_Id |
|-----------|-------------------------------------------------------------------------|
|1000540 |["36121","36326","37349","36949","36380","37026","36448","36683","36879"]|
df_creds
|T_Id |T_val |T_Goal |Parent_T_Id |Parent_Val |parent_Goal|
|-------|-------|-------|---------------|----------------|-----------|
|36448 |100 |1 |36465 |200 |1 |
|36465 |200 |1 |36976 |300 |2 |
|36326 |90 |1 |36465 |200 |1 |
|36091 |500 |19 |82 |600 |4 |
|36121 |90 |1 |36908 |200 |1 |
|36683 |90 |1 |36465 |200 |1 |
|36908 |200 |1 |36976 |300 |2 |
|36949 |90 |1 |36908 |200 |1 |
|36976 |300 |2 |36091 |500 |19 |
|37026 |90 |1 |36908 |200 |1 |
|37349 |100 |1 |36908 |200 |1 |
|36879 |90 |1 |36465 |200 |1 |
|36380 |90 |1 |36465 |200 |1 |
Desired Result
|T_id  |children                                  |T_Val |T_Goal |parent_T_id |parent_Goal |trans_id |
|------|------------------------------------------|------|-------|------------|------------|---------|
|36091 |["36976"]                                 |500   |19     |82          |4           |1000540  |
|36465 |["36448","36326","36683","36879","36380"] |200   |1      |36976       |2           |1000540  |
|36908 |["36121","36949","37026","37349"]         |200   |1      |36976       |2           |1000540  |
|36976 |["36465","36908"]                         |300   |2      |36091       |19          |1000540  |
|36683 |null                                      |90    |1      |36465       |1           |1000540  |
|37026 |null                                      |90    |1      |36908       |1           |1000540  |
|36448 |null                                      |100   |1      |36465       |1           |1000540  |
|36949 |null                                      |90    |1      |36908       |1           |1000540  |
|36326 |null                                      |90    |1      |36465       |1           |1000540  |
|36380 |null                                      |90    |1      |36465       |1           |1000540  |
|36879 |null                                      |90    |1      |36465       |1           |1000540  |
|36121 |null                                      |90    |1      |36908       |1           |1000540  |
|37349 |null                                      |100   |1      |36908       |1           |1000540  |
Code Tried
from pyspark.sql import functions as F
from pyspark.sql import DataFrame
from pyspark.sql.functions import explode, collect_set, expr, col, collect_list, array_contains, lit
from functools import reduce

for row in df_trans.rdd.toLocalIterator():
    # def find_nodemap(row):
    dfs = []
    df_hy_set = (df_hrrchy.filter(df_hrrchy.lefId.isin(row["T_Id"]))
                 .select(explode("Lineage").alias("Terrs"))
                 .agg(collect_set(col("Terrs")).alias("hierarchy_list"))
                 .select(F.lit(row["tranID"]).alias("trans_id"), "hierarchy_list")
                 )
    df_childrens = (df_creds.join(df_hy_set, expr("array_contains(hierarchy_list, T_Id)"))
                    .select("T_Id", "T_val", "T_Goal", "Parent_T_Id", "parent_Goal", "trans_id")
                    .groupBy("Parent_T_Id").agg(collect_list("T_Id").alias("children"))
                    )
    df_filter_creds = (df_creds.join(df_hy_set, expr("array_contains(hierarchy_list, T_Id)"))
                       .select("T_Id", "T_val", "T_Goal", "Parent_T_Id", "parent_Goal", "trans_id")
                       )
    df_nodemap = (df_filter_creds.alias("A")
                  .join(df_childrens.alias("B"), col("A.T_Id") == col("B.Parent_T_Id"), "left")
                  .select("A.T_Id", "B.children", "A.T_val", "A.T_Goal",
                          "A.Parent_T_Id", "A.parent_Goal", "A.trans_id")
                  )
    display(df_nodemap)
    # dfs.append(df_nodemap)
    # df = reduce(DataFrame.union, dfs)
    # display(df)
My problem: this is a bad design. df_trans has millions of rows, and looping through the dataframe takes forever. Can I do this without looping? I tried a couple of other methods, but was not able to get the desired result.
You certainly need to process the entire DataFrame as a batch, not iterate over it row by row.
The key point is to "reverse" df_hrrchy, i.e. from the parent lineage obtain the list of children for every T_Id:
val df_children = df_hrrchy.withColumn("children", slice($"Lineage", lit(1), size($"Lineage") - 1))
.withColumn("parents", slice($"Lineage", 2, 999999))
.select(explode(arrays_zip($"children", $"parents")).as("rels"))
.distinct
.groupBy($"rels.parents".as("T_Id"))
.agg(collect_set($"rels.children").as("children"))
df_children.show(false)
+-----+-----------------------------------+
|T_Id |children |
+-----+-----------------------------------+
|36091|[36976] |
|36465|[36448, 36380, 36326, 36879, 36683]|
|36976|[36465, 36908] |
|82 |[36091] |
|36908|[36949, 37349, 36121, 37026] |
+-----+-----------------------------------+
Then expand the list of T_Ids in df_trans, also including all T_Ids from the hierarchy:
val df_trans_map = df_trans.withColumn("T_Id", explode($"T_Id"))
.join(df_hrrchy, array_contains($"Lineage", $"T_Id"))
.select($"tranID", explode($"Lineage").as("T_Id"))
.distinct
df_trans_map.show(false)
+-------+-----+
|tranID |T_Id |
+-------+-----+
|1000540|36976|
|1000540|82 |
|1000540|36091|
|1000540|36465|
|1000540|36326|
|1000540|36121|
|1000540|36908|
|1000540|36380|
|1000540|36448|
|1000540|36683|
|1000540|36949|
|1000540|37349|
|1000540|37026|
|1000540|36879|
+-------+-----+
With this, it is just a simple join to obtain the final result:
df_trans_map.join(df_creds, Seq("T_Id"))
.join(df_children, Seq("T_Id"), "left_outer")
.show(false)
+-----+-------+-----+------+-----------+----------+-----------+-----------------------------------+
|T_Id |tranID |T_val|T_Goal|Parent_T_Id|Parent_Val|parent_Goal|children |
+-----+-------+-----+------+-----------+----------+-----------+-----------------------------------+
|36976|1000540|300 |2 |36091 |500 |19 |[36465, 36908] |
|36091|1000540|500 |19 |82 |600 |4 |[36976] |
|36465|1000540|200 |1 |36976 |300 |2 |[36448, 36380, 36326, 36879, 36683]|
|36326|1000540|90 |1 |36465 |200 |1 |null |
|36121|1000540|90 |1 |36908 |200 |1 |null |
|36908|1000540|200 |1 |36976 |300 |2 |[36949, 37349, 36121, 37026] |
|36380|1000540|90 |1 |36465 |200 |1 |null |
|36448|1000540|100 |1 |36465 |200 |1 |null |
|36683|1000540|90 |1 |36465 |200 |1 |null |
|36949|1000540|90 |1 |36908 |200 |1 |null |
|37349|1000540|100 |1 |36908 |200 |1 |null |
|37026|1000540|90 |1 |36908 |200 |1 |null |
|36879|1000540|90 |1 |36465 |200 |1 |null |
+-----+-------+-----+------+-----------+----------+-----------+-----------------------------------+
You need to rewrite this to use the full cluster; using a localIterator means that you aren't fully utilizing the cluster for shared work.
The code below was not run, as you didn't provide a workable data set to test with. If you do, I'll run the code to make sure it's sound.
from pyspark.sql import functions as F
from pyspark.sql import DataFrame
from pyspark.sql.functions import explode, collect_set, expr, col, collect_list, array_contains, lit
from functools import reduce

# Uses explode. I know this will create a lot of short-lived records, but the flip
# side is that it will use the entire cluster to complete the work instead of the driver.
df_trans_expld = df_trans.select(df_trans.tranID, explode(df_trans.T_Id).alias("T_Id"))

# Uses explode.
df_hrrchy_expld = df_hrrchy.select(df_hrrchy.lefId, explode(df_hrrchy.Lineage).alias("Lineage"))

# Uses the exploded data to join, which is the same as a filter.
# Group per transaction so each tranID gets its own hierarchy_list.
df_hy_set = (df_trans_expld.join(df_hrrchy_expld,
                                 df_hrrchy_expld.lefId == df_trans_expld.T_Id, "left")
             .groupBy("tranID")
             .agg(collect_set(col("Lineage")).alias("hierarchy_list"))
             .select(col("tranID").alias("trans_id"), "hierarchy_list"))

# Logic unchanged from here down.
df_childrens = (df_creds.join(df_hy_set, expr("array_contains(hierarchy_list, T_Id)"))
                .select("T_Id", "T_val", "T_Goal", "Parent_T_Id", "parent_Goal", "trans_id")
                .groupBy("Parent_T_Id").agg(collect_list("T_Id").alias("children"))
                )
df_filter_creds = (df_creds.join(df_hy_set, expr("array_contains(hierarchy_list, T_Id)"))
                   .select("T_Id", "T_val", "T_Goal", "Parent_T_Id", "parent_Goal", "trans_id")
                   )
df_nodemap = (df_filter_creds.alias("A")
              .join(df_childrens.alias("B"), col("A.T_Id") == col("B.Parent_T_Id"), "left")
              .select("A.T_Id", "B.children", "A.T_val", "A.T_Goal",
                      "A.Parent_T_Id", "A.parent_Goal", "A.trans_id")
              )
# No need to append/union data, as it's now just one dataframe: df_nodemap
I'd have to look into this more, but I'm pretty sure you are pulling all the data through the driver with your existing code, which will really slow things down; this version will make use of all the executors to complete the work.
There may be another optimization to get rid of the array_contains (and use a join instead). I'd have to look at the explain plan to see if you could get even more performance out of it; I don't remember off the top of my head. You are avoiding a shuffle, so it may be better as is.
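For comparison, here is a rough Spark SQL version of the same batch approach (reverse the lineages into a parent-to-children map, expand the transactions, then join). It is a sketch only, assuming the three DataFrames have been registered as temp views named df_hrrchy, df_trans and df_creds; it has not been run against real data:

WITH rels AS (
    -- pair each lineage element with the one after it: (child, parent)
    SELECT DISTINCT child, Lineage[pos + 1] AS parent
    FROM df_hrrchy
    LATERAL VIEW posexplode(Lineage) lv AS pos, child
    WHERE pos < size(Lineage) - 1
),
children AS (
    SELECT parent AS T_Id, collect_set(child) AS children
    FROM rels
    GROUP BY parent
),
trans_map AS (
    -- expand each transaction's T_Id list to every node of the matching lineages
    SELECT DISTINCT tr.tranID, hl.lin AS T_Id
    FROM (SELECT tranID, explode(T_Id) AS one_id FROM df_trans) tr
    JOIN (SELECT Lineage, lin
          FROM df_hrrchy
          LATERAL VIEW explode(Lineage) lv AS lin) hl
      ON array_contains(hl.Lineage, tr.one_id)
)
SELECT m.T_Id, c.children, cr.T_val, cr.T_Goal,
       cr.Parent_T_Id, cr.parent_Goal, m.tranID AS trans_id
FROM trans_map m
JOIN df_creds cr ON cr.T_Id = m.T_Id
LEFT JOIN children c ON c.T_Id = m.T_Id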

MariaDB - Conjunction-Search in Many-to-Many

I am having problems implementing an "and-concatenated" search with many-to-many tables. I have tried to present a simple example below. I use MariaDB.
I have a table with processes. To a process I can assign persons and tags. There is a table for tags and a table for persons.
There are two many-to-many relationships: tags_to_processes and persons_to_processes.
Example: find all processes with Person 1 and Person 2 and with Tag 1 and Tag 2. Result: Process 1.
Example: find all processes with Person 1 and Person 2 and with Tag 2. Result: Process 1 and Process 2.
Thank you very much!
'processes' Table
+-----------+-------------------+
|process_id |process_name |
+-----------+-------------------+
|1 |Process 1 |
|2 |Process 2 |
|3 |Process 3 |
+-----------+-------------------+
'persons' table
+----------+------------+
|person_id |person_name |
+----------+------------+
|1 |Person 1 |
|2 |Person 2 |
|3 |Person 3 |
|4 |Person 4 |
|5 |Person 5 |
+----------+------------+
'tags' table
+----------+-----------+
|tag_id |tag_name |
+----------+-----------+
|1 |Tag 1 |
|2 |Tag 2 |
|3 |Tag 3 |
|4 |Tag 4 |
|5 |Tag 5 |
|6 |Tag 6 |
+----------+-----------+
'persons_to_processes' table
+----------+-----------+
|person_id |process_id |
+----------+-----------+
|1 |1 |
|2 |1 |
|3 |1 |
|4 |1 |
|5 |1 |
|1 |2 |
|2 |2 |
|4 |3 |
+----------+-----------+
'tags_to_processes' table
+----------+-----------+
|tag_id |process_id |
+----------+-----------+
|1 |1 |
|2 |1 |
|3 |1 |
|6 |1 |
|2 |2 |
|2 |3 |
+----------+-----------+
You can join persons_to_processes to persons, filter the results for the persons that you want, and use aggregation:
SELECT ptp.process_id
FROM persons_to_processes ptp INNER JOIN persons p
ON p.person_id = ptp.person_id
WHERE p.person_name IN ('Person 1', 'Person 2')
GROUP BY ptp.process_id
HAVING COUNT(*) = 2 -- 2 persons
Similarly for the tables tags_to_processes and tags:
SELECT ttp.process_id
FROM tags_to_processes ttp INNER JOIN tags t
ON t.tag_id = ttp.tag_id
WHERE t.tag_name IN ('Tag 1', 'Tag 2')
GROUP BY ttp.process_id
HAVING COUNT(*) = 2 -- 2 tags
Finally, you can combine the 2 queries to get their common results with INTERSECT:
WITH
cte1 AS (
SELECT ptp.process_id
FROM persons_to_processes ptp INNER JOIN persons p
ON p.person_id = ptp.person_id
WHERE p.person_name IN ('Person 1', 'Person 2')
GROUP BY ptp.process_id
HAVING COUNT(*) = 2 -- 2 persons
),
cte2 AS (
SELECT ttp.process_id
FROM tags_to_processes ttp INNER JOIN tags t
ON t.tag_id = ttp.tag_id
WHERE t.tag_name IN ('Tag 1', 'Tag 2')
GROUP BY ttp.process_id
HAVING COUNT(*) = 2 -- 2 tags
)
SELECT process_id FROM cte1
INTERSECT
SELECT process_id FROM cte2;
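One caveat, offered as an assumption about your environment: INTERSECT is only available from MariaDB 10.3 on (and WITH from 10.2). On an older version, the same result can be obtained by joining the two aggregated subqueries on process_id, along these lines:

SELECT p1.process_id
FROM (
    SELECT ptp.process_id
    FROM persons_to_processes ptp INNER JOIN persons p
    ON p.person_id = ptp.person_id
    WHERE p.person_name IN ('Person 1', 'Person 2')
    GROUP BY ptp.process_id
    HAVING COUNT(*) = 2 -- 2 persons
) p1
INNER JOIN (
    SELECT ttp.process_id
    FROM tags_to_processes ttp INNER JOIN tags t
    ON t.tag_id = ttp.tag_id
    WHERE t.tag_name IN ('Tag 1', 'Tag 2')
    GROUP BY ttp.process_id
    HAVING COUNT(*) = 2 -- 2 tags
) t1 ON t1.process_id = p1.process_id;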

Know the values of a field related to each other in a postgres table

I have a postgres table made like this
|id |name | parent_id|
|1 |Giovanni | 1|
|2 |Giovanni | 2|
|3 |Michele | 2|
|4 |Francesca| 3|
|5 |Antonio | 3|
|6 |Michele | 3|
|7 |Angela | 4|
|8 |Francesca| 4|
|9 |Anna | 5|
I need that, once a first name is selected, e.g. "Giovanni", it finds all rows named "Giovanni" and, based on the parent_id value, finds all linked names. It should then iterate, finding all the other names connected with those found previously, e.g.:
|1 |Giovanni | 1|
|2 |Giovanni | 2|
|3 |Michele | 2|<--new parent_id
|6 |Michele | 3|
|4 |Francesca| 3|<--new parent_id
|5 |Antonio | 3|<--new parent_id
|8 |Francesca| 4|
|7 |Angela | 4|
I figured I should probably use recursive queries, but I can't find a correct query.
Thanks
Consider the following recursive query:
with recursive cte as (
select id, name, parent_id from mytable where name = 'Giovanni'
union all
select t.id, t.name, t.parent_id
from cte c
inner join mytable t on t.parent_id = c.id and t.id <> c.id
)
select * from cte
The anchor of the recursive query selects all rows whose name is Giovanni, and then all related children.
It might be useful to keep track of the path to each node, so you can order the results:
with recursive cte as (
select id, name, parent_id, array[id] path from mytable where name = 'Giovanni'
union all
select t.id, t.name, t.parent_id, c.path || t.id
from cte c
inner join mytable t on t.parent_id = c.id and t.id <> c.id
)
select * from cte order by path
Sorry @GMB, you are absolutely right. I tried a table with slightly different values.
The values are like this:
|id |name | parent_id|
|1 |Rossi | 13|
|2 |Paolo | 13|
|3 |Giovanni | 15|
|4 |Giovanni | 16|
|5 |Rossi | 16|
|6 |Mario | 17|
The result should be:
|3 |Giovanni | 15|
|4 |Giovanni | 16|
|5 |Rossi | 16|
|1 |Rossi | 13|
|2 |Paolo | 13|
thank you very much for your help
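The follow-up example links rows through a shared name or a shared parent_id, which the parent/child join above does not cover. A minimal sketch of that variant, assuming the table is still called mytable as in the answer (untested against the real data; the visited array guards against revisiting rows, and the final distinct collapses the duplicate paths this can generate):

with recursive cte as (
    select id, name, parent_id, array[id] as visited
    from mytable
    where name = 'Giovanni'
    union all
    select t.id, t.name, t.parent_id, c.visited || t.id
    from cte c
    inner join mytable t
       on (t.name = c.name or t.parent_id = c.parent_id)
      and t.id <> all(c.visited)
)
select distinct id, name, parent_id from cte;

On the sample above this reaches rows 3, 4, 5, 1 and 2, and never row 6 (Mario). With many interconnected rows it can enumerate many redundant paths before the distinct, so treat it as a starting point rather than a tuned solution.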

How to get number of lines resulted by join in Spark

Consider these two Dataframes:
+---+
|id |
+---+
|1 |
|2 |
|3 |
+---+
+---+-----+
|idz|word |
+---+-----+
|1 |bat |
|1 |mouse|
|2 |horse|
+---+-----+
I am doing a left join on id = idz:
val r = df1.join(df2, (df1("id") === df2("idz")), "left_outer").
withColumn("ID_EMPLOYE_VENDEUR", when(col("word") =!= ("null"), col("word")).otherwise(null)).drop("word")
r.show(false)
+---+----+------------------+
|id |idz |ID_EMPLOYE_VENDEUR|
+---+----+------------------+
|1 |1 |mouse |
|1 |1 |bat |
|2 |2 |horse |
|3 |null|null |
+---+----+------------------+
But what if I only want to keep the lines whose id has exactly one matching idz? Otherwise, I would like to have null in ID_EMPLOYE_VENDEUR. The desired output is:
+---+----+------------------+
|id |idz |ID_EMPLOYE_VENDEUR|
+---+----+------------------+
|1 |1 |null | -- because the join produced two different lines
|2 |2 |horse |
|3 |null|null |
+---+----+------------------+
I should point out that I am working on a large DF, so the solution should not be too expensive in time.
Thank you
As you mentioned, your data is too large, so groupBy is not a good option for grouping the data before the join; use a window function instead, as below:
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._

def windowSpec = Window.partitionBy("idz")
val newDF = df2.withColumn("count", count("idz").over(windowSpec))
  .dropDuplicates("idz")
  .withColumn("word", when(col("count") >= 2, lit(null)).otherwise(col("word")))
  .drop("count")
val r = df1.join(newDF, df1("id") === newDF("idz"), "left_outer")
  .withColumn("ID_EMPLOYE_VENDEUR", when(col("word") =!= "null", col("word")).otherwise(null))
  .drop("word")
r.show
+---+----+------------------+
| id| idz|ID_EMPLOYE_VENDEUR|
+---+----+------------------+
| 1| 1| null|
| 3|null| null|
| 2| 2| horse|
+---+----+------------------+
You can easily retrieve the information that more than one of df2's idz values matched a single df1's id, with a groupBy and a join.
r.join(
    r.groupBy("id").count().as("g"),
    $"g.id" === r("id")
  )
  .withColumn(
    "ID_EMPLOYE_VENDEUR",
    expr("if(count != 1, null, ID_EMPLOYE_VENDEUR)")
  )
  .drop($"g.id").drop("count")
  .distinct()
  .show()
Note: neither the groupBy nor the join triggers an additional exchange step (a shuffle across the network), because the dataframe r is already partitioned on id (it is the result of a join on id).
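If you prefer to stay in SQL, the same idea fits in a single pass with a window function. A sketch, assuming df1 and df2 have been registered as temp views t1 and t2 (hypothetical names, untested):

SELECT DISTINCT
       t1.id,
       t2.idz,
       CASE WHEN COUNT(t2.idz) OVER (PARTITION BY t1.id) = 1
            THEN t2.word
       END AS ID_EMPLOYE_VENDEUR
FROM t1
LEFT JOIN t2 ON t1.id = t2.idz

The DISTINCT collapses the duplicated rows of any id that matched several idz values once their words have been replaced by null, and unmatched ids get a window count of 0, so they also fall through to null.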

Sort hierarchical table CTE query

How can I sort a hierarchical table with a CTE query?
Sample table:
|ID|Name |ParentID|
| 0| |-1 |
| 1|1 |0 |
| 2|2 |0 |
| 3|1-1 |1 |
| 4|1-2 |1 |
| 5|2-1 |2 |
| 6|2-2 |2 |
| 7|2-1-1 |5 |
and my desired result is:
|ID|Name |ParentID|Level
| 0| |-1 |0
| 1|1 |0 |1
| 3|1-1 |1 |2
| 4|1-2 |1 |2
| 2|2 |0 |1
| 5|2-1 |2 |2
| 7|2-1-1 |5 |3
| 6|2-2 |2 |2
Another sample:
|ID|Name |ParentID|
| 0| |-1 |
| 1|Book |0 |
| 2|App |0 |
| 3|C# |1 |
| 4|VB.NET |1 |
| 5|Office |2 |
| 6|PhotoShop |2 |
| 7|Word |5 |
and my desired result is:
|ID|Name |ParentID|Level
| 0| |-1 |0
| 1|Book |0 |1
| 3|C# |1 |2
| 4|VB.NET |1 |2
| 2|App |0 |1
| 5|Office |2 |2
| 7|Word |5 |3
| 6|PhotoShop |2 |2
The hierarchyid datatype is able to represent hierarchical data, and already has the desired sorting order. If you can't replace your ParentID column, then you can convert to it on the fly:
(Most of this script is data setup, the actual answer is quite small)
declare @t table (ID int not null, Name varchar(10) not null, ParentID int not null)
insert into @t(ID,Name,ParentID)
select 0,'' ,-1 union all
select 1,'Book' ,0 union all
select 2,'App' ,0 union all
select 3,'C#' ,1 union all
select 4,'VB.NET' ,1 union all
select 5,'Office' ,2 union all
select 6,'PhotoShop' ,2 union all
select 7,'Word' ,5
;With Sensible as (
select ID,Name,NULLIF(ParentID,-1) as ParentID
from @t
), Paths as (
select ID,CONVERT(hierarchyid,'/' + CONVERT(varchar(10),ID) + '/') as Pth
from Sensible where ParentID is null
union all
select s.ID,CONVERT(hierarchyid,p.Pth.ToString() + CONVERT(varchar(10),s.ID) + '/')
from Sensible s inner join Paths p on s.ParentID = p.ID
)
select
*
from
Sensible s
inner join
Paths p
on
s.ID = p.ID
order by p.Pth
ORDER BY Name works as desired when the names themselves encode the hierarchy path, as in your first sample:
WITH CTE
AS(
SELECT parent.*, 0 AS Level
FROM @table parent
WHERE parent.ID = 0
UNION ALL
SELECT parent.*, Level+1
FROM @table parent
INNER JOIN CTE prev ON parent.ParentID = prev.ID
)
SELECT * FROM CTE
ORDER BY Name
Here's your sample data (add it yourself next time):
declare @table table(ID int, Name varchar(10), ParentID int);
insert into @table values(0,'',-1);
insert into @table values(1,'1',0);
insert into @table values(2,'2',0);
insert into @table values(3,'1-1',1);
insert into @table values(4,'1-2',1);
insert into @table values(5,'2-1',2);
insert into @table values(6,'2-2',2);
insert into @table values(7,'2-1-1',5);
Result:
ID  Name   ParentID  Level
0          -1        0
1   1      0         1
3   1-1    1         2
4   1-2    1         2
2   2      0         1
5   2-1    2         2
7   2-1-1  5         3
6   2-2    2         2
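If the names do not sort like paths (in your second sample, ORDER BY Name would put App before Book, which is not the desired order), a more general variant is to build a sort path from the IDs inside the CTE and order by that. A sketch, reusing the @table variable above (untested; it orders siblings by ID, which matches both desired outputs here):

WITH CTE AS (
    -- zero-pad IDs so string order matches numeric order
    SELECT parent.*, 0 AS Level,
           CAST(RIGHT('0000000000' + CAST(parent.ID AS varchar(10)), 10) AS varchar(4000)) AS SortPath
    FROM @table parent
    WHERE parent.ID = 0
    UNION ALL
    SELECT parent.*, Level + 1,
           CAST(prev.SortPath + '/' + RIGHT('0000000000' + CAST(parent.ID AS varchar(10)), 10) AS varchar(4000))
    FROM @table parent
    INNER JOIN CTE prev ON parent.ParentID = prev.ID
)
SELECT ID, Name, ParentID, Level
FROM CTE
ORDER BY SortPath;

Because each row's SortPath is a prefix of its children's, ordering by it keeps every node directly above its subtree, the same property the hierarchyid answer relies on.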