pyspark get week number of month by starting week on Thursday

I need to get the week number of the month; however, the week should start on Thursday.
The data is given below.
Currently my code gives Sunday as the starting day of the week:
df = df.withColumn("Week_Number", date_format(to_date("inv_dt", "yyyy-MM-dd"), "W"))
However, I want the week to start on a Thursday.

Consider creating a User Defined Function (UDF) to handle the special case.
For example, the following UDF handles the case where a week starts on a day other than Sunday.
The argument start_day_of_week takes an integer as an isoweekday, where Monday is 1 and Sunday is 7:
from pyspark.sql.functions import udf

def week_number(date_str, start_day_of_week=7):

    @udf("long")
    def _week_number(date_str):
        from datetime import datetime, date
        d = datetime.strptime(date_str, '%Y-%m-%d')  # compatible with Python 2
        wd_d1 = date(d.year, d.month, 1).isoweekday()
        offset = (wd_d1 - start_day_of_week + 7) % 7
        return (d.day - 1 + offset) // 7 + 1

    return _week_number(date_str)
Note: the code above is tested with Python 3 (3.7.3) and Python 2 (2.7.15) in PySpark 2.4.4. By default PySpark starts with Python 2.7.15. To use Python 3, start PySpark with PYSPARK_PYTHON=python3 pyspark, or add export PYSPARK_PYTHON=python3 to your startup script.
If the UDF is only used with Python 3, one can simply use d = date.fromisoformat(date_str) and import only date from datetime.
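For reference, a minimal Python-3-only variant might look like this (the name week_number_py3 is just for illustration):
from pyspark.sql.functions import udf

def week_number_py3(date_str, start_day_of_week=7):

    @udf("long")
    def _week_number(date_str):
        from datetime import date
        d = date.fromisoformat(date_str)  # Python 3.7+ only
        wd_d1 = date(d.year, d.month, 1).isoweekday()
        offset = (wd_d1 - start_day_of_week + 7) % 7
        return (d.day - 1 + offset) // 7 + 1

    return _week_number(date_str)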
Demo
Consider input data like:
from pyspark.sql.functions import *
inv_dt = [ '2018-09-{:02d}'.format(x) for x in range(1, 31) ]
# if with Pandas installed
import pandas as pd
df_pd = pd.DataFrame({'inv_dt':inv_dt})
df = spark.createDataFrame(df_pd)
# if without Pandas
from pyspark.sql.types import StringType
df = spark.createDataFrame(inv_dt, StringType()).withColumnRenamed('value', 'inv_dt')
df.show()
# +----------+
# | inv_dt|
# +----------+
# |2018-09-01|
# |2018-09-02|
# |2018-09-03|
# |2018-09-04|
# |2018-09-05|
# |2018-09-06|
# |2018-09-07|
# |2018-09-08|
# |2018-09-09|
# |2018-09-10|
# |2018-09-11|
# |2018-09-12|
# |2018-09-13|
# |2018-09-14|
# |2018-09-15|
# |2018-09-16|
# |2018-09-17|
# |2018-09-18|
# |2018-09-19|
# |2018-09-20|
# +----------+
# only showing top 20 rows
And call our UDF week_number just like other Spark SQL functions such as date_format.
We can compare our UDF output week_number("inv_dt", 7) with Spark builtin date_format(to_date("inv_dt", "yyyy-MM-dd"), "W").
( df
.withColumn("Day_of_Week", date_format(to_date("inv_dt", "yyyy-MM-dd"), "u"))
.withColumn("Week_Number_Sun_ref", date_format(to_date("inv_dt", "yyyy-MM-dd"), "W"))
.withColumn("Week_Number_Sun", week_number("inv_dt", 7))
.withColumn("Day_of_Week_Thu", week_number("inv_dt", 4))
.withColumn("Day_of_Week_Mon", week_number("inv_dt", 1))
).show()
# +----------+-----------+-------------------+---------------+---------------+---------------+
# | inv_dt|Day_of_Week|Week_Number_Sun_ref|Week_Number_Sun|Day_of_Week_Thu|Day_of_Week_Mon|
# +----------+-----------+-------------------+---------------+---------------+---------------+
# |2018-09-01| 6| 1| 1| 1| 1|
# |2018-09-02| 7| 2| 2| 1| 1|
# |2018-09-03| 1| 2| 2| 1| 2|
# |2018-09-04| 2| 2| 2| 1| 2|
# |2018-09-05| 3| 2| 2| 1| 2|
# |2018-09-06| 4| 2| 2| 2| 2|
# |2018-09-07| 5| 2| 2| 2| 2|
# |2018-09-08| 6| 2| 2| 2| 2|
# |2018-09-09| 7| 3| 3| 2| 2|
# |2018-09-10| 1| 3| 3| 2| 3|
# |2018-09-11| 2| 3| 3| 2| 3|
# |2018-09-12| 3| 3| 3| 2| 3|
# |2018-09-13| 4| 3| 3| 3| 3|
# |2018-09-14| 5| 3| 3| 3| 3|
# |2018-09-15| 6| 3| 3| 3| 3|
# |2018-09-16| 7| 4| 4| 3| 3|
# |2018-09-17| 1| 4| 4| 3| 4|
# |2018-09-18| 2| 4| 4| 3| 4|
# |2018-09-19| 3| 4| 4| 3| 4|
# |2018-09-20| 4| 4| 4| 4| 4|
# +----------+-----------+-------------------+---------------+---------------+---------------+
# only showing top 20 rows

The "pure" PySpark way is as follows
from pyspark.sql import functions as F
def week_number(date_str, start_day_of_week=7):
day_of_month = F.dayofmonth(date_str)
first_day = F.date_sub(date_str, day_of_month - 1)
first_day_day_of_week = F.dayofweek(first_day)
offset = (first_day_day_of_week - start_day_of_week + 7) % 7
week_num = F.floor((day_of_month - 1 + offset) / 7) + 1
return week_num
week_num_from_thurs = df.withColumn('week_num_from_thurs', week_number('inv_dt', 4))
It's 2 a.m. and I don't have a local Spark cluster or anything, so this might be buggy. Credit should go to @Quar for the solution; I'm just converting the Python to the PySpark SQL API here. Variable names are deliberately verbose.

Related

Compare sum of values between two specific date ranges over different categories

I'm working in Databricks. I have the following dataframe:
+----------+---+-----+
| date|cat|value|
+----------+---+-----+
|2022-08-11| a| 1|
|2022-08-12| a| 1|
|2022-08-13| a| 1|
|2022-08-14| a| 1|
|2022-08-15| a| 1|
|2022-08-16| a| 1|
|2022-08-17| a| 2|
|2022-08-18| a| 2|
|2022-08-19| a| 2|
|2022-08-20| a| 2|
|2022-08-21| a| 2|
|2022-08-22| a| 2|
|2022-08-11| b| 1|
|2022-08-12| b| 1|
|2022-08-13| b| 1|
|2022-08-14| b| 1|
|2022-08-15| b| 1|
|2022-08-16| b| 1|
|2022-08-17| b| 3|
|2022-08-18| b| 3|
|2022-08-19| b| 3|
|2022-08-20| b| 3|
|2022-08-21| b| 3|
|2022-08-22| b| 3|
+----------+---+-----+
I want to be able to compare the sum of the values between the 17th and the 22nd (week 1) and between the 11th and the 16th (week 2). The start and end dates of each period are predefined.
So far I've tried something like this:
w = Window.partitionBy('cat')
df = (df
      .withColumn('date', f.to_date('date', 'yyyy-MM-dd'))
      .withColumn('value_week_1',
                  f.when(
                      (f.col('date') >= '2022-08-17') &
                      (f.col('date') <= '2022-08-22'),
                      f.sum('value').over(w)
                  ))
      .withColumn('value_week_2',
                  f.when(
                      (f.col('date') >= '2022-08-11') &
                      (f.col('date') <= '2022-08-16'),
                      f.sum('value').over(w)
                  ))
      )
but it doesn't work and I'm not sure I'm going in the right direction.
Ultimately I'd like to have something like this:
+----------+---+-----+----+------+--------+
| date|cat|value| w1| w2| diff|
+----------+---+-----+----+------+--------+
|2022-08-11| a| 1| 6| 12| 6|
|2022-08-12| a| 1| 6| 12| 6|
|2022-08-13| a| 1| 6| 12| 6|
|2022-08-14| a| 1| 6| 12| 6|
|2022-08-15| a| 1| 6| 12| 6|
|2022-08-16| a| 1| 6| 12| 6|
|2022-08-17| a| 2| 6| 12| 6|
|2022-08-18| a| 2| 6| 12| 6|
|2022-08-19| a| 2| 6| 12| 6|
|2022-08-20| a| 2| 6| 12| 6|
|2022-08-21| a| 2| 6| 12| 6|
|2022-08-22| a| 2| 6| 12| 6|
|2022-08-11| b| 3| 18| 30| 12|
|2022-08-12| b| 3| 18| 30| 12|
|2022-08-13| b| 3| 18| 30| 12|
|2022-08-14| b| 3| 18| 30| 12|
|2022-08-15| b| 3| 18| 30| 12|
|2022-08-16| b| 3| 18| 30| 12|
|2022-08-17| b| 5| 18| 30| 12|
|2022-08-18| b| 5| 18| 30| 12|
|2022-08-19| b| 5| 18| 30| 12|
|2022-08-20| b| 5| 18| 30| 12|
|2022-08-21| b| 5| 18| 30| 12|
|2022-08-22| b| 5| 18| 30| 12|
+----------+---+-----+----+------+--------+
I think we don't need to use a window in your case; we can just:
from pyspark.sql import functions as func

df_agg = df\
    .withColumn('week', func.when((func.col('date') >= '2022-08-17') & (func.col('date') <= '2022-08-22'),
                                  func.lit('w1')).otherwise(func.lit('w2')))\
    .groupby('cat').pivot('week')\
    .agg(func.sum('value'))\
    .withColumn('diff', func.col('w2') - func.col('w1'))
We just create a new column called week to indicate which week each date falls in, then create a pivot table.
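If you also need those sums repeated on every source row, as in your expected output, one possible follow-up (not part of the answer above) is to join the aggregated frame back on cat:
# hypothetical follow-up: attach the per-category w1/w2/diff columns to every row
df_result = df.join(df_agg, on='cat', how='left')
df_result.show()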
import sys
from pyspark.sql import functions as F
from pyspark.sql.functions import row_number, explode, col, abs
from pyspark.sql.window import Window

w = Window.partitionBy('cat').orderBy('cat')
df1 = (
    # Create a week column to help partition. Use row_number to build a cumulative
    # day count and flag every 7th day using Python's modulo
    df.withColumn('wk', (~(row_number().over(w) % 7 > 0)).cast('int'))
      .withColumn('wk', F.sum('wk').over(Window.partitionBy('cat').orderBy().rowsBetween(-sys.maxsize, 0)) + 1)
    # Find the cumulative sum per group per week
    .groupby('cat', 'wk').agg(F.collect_list('date').alias('date'), F.sum('value').alias('value'))
    .withColumn('date', explode('date'))
    # Put the total sum in an array in preparation for the pivot
    .withColumn('value_1', F.collect_set('value').over(Window.partitionBy('cat').orderBy('date', 'value').rowsBetween(-sys.maxsize, sys.maxsize)))
    # Pivot and create week columns
    .withColumn('wk', F.array(F.struct(*[F.col('value_1')[i].alias(f"week_{i+1}") for i in range(2)])))
    .selectExpr('*', 'inline(wk)').drop('wk', 'value_1')
    # Find the difference
    .withColumn('diff', abs(col('week_1') - col('week_2')))
).show()
There are some things about this problem that do not make sense; please see the end of this answer for my observations.
First, the dates from 8/11 to 8/16 do not make up a whole week. Second, the labels of week-1 being 8/17 to 8/22 and week-2 being 8/11 to 8/16 are logically backwards.
I am going to solve this problem using PySpark and Spark SQL since it is straightforward.
#
# Create sample data
#
dat1 = [
("2022-08-11","a",1),
("2022-08-12","a",1),
("2022-08-13","a",1),
("2022-08-14","a",1),
("2022-08-15","a",1),
("2022-08-16","a",1),
("2022-08-17","a",2),
("2022-08-18","a",2),
("2022-08-19","a",2),
("2022-08-20","a",2),
("2022-08-21","a",2),
("2022-08-22","a",2),
("2022-08-11","b",1),
("2022-08-12","b",1),
("2022-08-13","b",1),
("2022-08-14","b",1),
("2022-08-15","b",1),
("2022-08-16","b",1),
("2022-08-17","b",3),
("2022-08-18","b",3),
("2022-08-19","b",3),
("2022-08-20","b",3),
("2022-08-21","b",3),
("2022-08-22","b",3)
]
col1 = ["date", "cat", "value"]
df1 = spark.createDataFrame(data=dat1, schema=col1)
df1.createOrReplaceTempView("sample_data")
The above code creates a temporary view with the data set.
#
# Core data - add category w0
#
stmt = """
select
date,
cat,
value,
case
when date >= "2022-08-11" and date <= "2022-08-16" then 2
when date >= "2022-08-17" and date <= "2022-08-22" then 1
else 0
end as w0
from sample_data as q1
"""
df2 = spark.sql(stmt)
df2.createOrReplaceTempView("core_data")
The code above labels the data as week-1 or week-2 and saves this category information as w0. This could have been hard-coded into the dataset above.
#
# Pivot data - sum value by cat, pivot on w0
#
stmt = """
select * from
(
select cat, w0, value from core_data
)
pivot (
cast(sum(value) as DECIMAL(4, 2)) as total
for w0 in (1 w1, 2 w2)
)
"""
df3 = spark.sql(stmt)
df3.createOrReplaceTempView("pivot_data")
The code above creates a column per week category and summarizes the values.
Please note, your expected result set has 3/5 for cat = b while the original data set has 1/3; I am using your original data set.
Last but not least, we join the core_data to the pivot_data and create a calculated column for the difference (w1 - w2).
You can use spark.sql() to create a dataframe and save this result as a file if you want.
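A sketch of that final join might look like the following (the pivoted column names w1/w2 are assumed; depending on the Spark version and the aggregate alias they may come out as w1_total/w2_total, and df4 is just an illustrative name):
stmt = """
select
    c.date,
    c.cat,
    c.value,
    p.w1,
    p.w2,
    (p.w1 - p.w2) as diff
from core_data as c
join pivot_data as p
    on c.cat = p.cat
"""
df4 = spark.sql(stmt)
df4.show()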
To recap: the week categories are not 7 days long, labeling the prior week with a greater number than the current one does not make sense, and the expected result set in your example does not match the input set, which has different numbers.
In short, working with temporary views allows you to leverage your existing T-SQL skills.

Efficient code for imputation of negative values using pyspark

I am working on a data set which contains item-wise, date-wise information about the quantity sold of each item. However, there are some negative values in the 'quantity sold' column which I intend to impute. The logic here would be to replace such negative values with the mode of the quantity sold for each item at the date level. I have already computed the count of each distinct value of the quantity sold and obtained the maximum quantity sold of a particular item on each given date. However, I am unable to find a function that would replace the negative values with the max qty sold for each item * date combination. I am relatively new to PySpark. Which would be the best approach to use in this case?
Based on the limited information you provided, you can try something like this:
from pyspark import SparkContext
from pyspark.sql import SQLContext
from functools import reduce
import pyspark.sql.functions as F
from pyspark.sql import Window
sc = SparkContext.getOrCreate()
sql = SQLContext(sc)
input_list = [
(1,10,"2019-11-07")
,(1,5,"2019-11-07")
,(1,5,"2019-11-07")
,(1,5,"2019-11-08")
,(1,6,"2019-11-08")
,(1,7,"2019-11-09")
,(1,7,"2019-11-09")
,(1,8,"2019-11-09")
,(1,8,"2019-11-09")
,(1,8,"2019-11-09")
,(1,-10,"2019-11-09")
,(2,10,"2019-11-07")
,(2,3,"2019-11-07")
,(2,9,"2019-11-07")
,(2,9,"2019-11-08")
,(2,-10,"2019-11-08")
,(2,5,"2019-11-09")
,(2,5,"2019-11-09")
,(2,2,"2019-11-09")
,(2,2,"2019-11-09")
,(2,2,"2019-11-09")
,(2,-10,"2019-11-09")
]
sparkDF = sql.createDataFrame(input_list,['product_id','sold_qty','date'])
sparkDF = sparkDF.withColumn('date',F.to_date(F.col('date'), 'yyyy-MM-dd'))
Mode Implementation
#### Mode Implementation
modeDF = sparkDF.groupBy('date', 'sold_qty')\
.agg(F.count(F.col('sold_qty')).alias('mode_count'))\
.select(F.col('date'),F.col('sold_qty').alias('mode_sold_qty'),F.col('mode_count'))
window = Window.partitionBy("date").orderBy(F.desc("mode_count"))
#### Filtering out the most occurred value
modeDF = modeDF\
    .withColumn('order', F.row_number().over(window))\
    .where(F.col('order') == 1)
Merging back with Base DataFrame to impute
sparkDF = sparkDF.join(modeDF
,sparkDF['date'] == modeDF['date']
,'inner'
).select(sparkDF['*'],modeDF['mode_sold_qty'],modeDF['mode_count'])
sparkDF = sparkDF.withColumn('imputed_sold_qty',F.when(F.col('sold_qty') < 0,F.col('mode_sold_qty'))\
.otherwise(F.col('sold_qty')))
>>> sparkDF.show(100)
+----------+--------+----------+-------------+----------+----------------+
|product_id|sold_qty| date|mode_sold_qty|mode_count|imputed_sold_qty|
+----------+--------+----------+-------------+----------+----------------+
| 1| 7|2019-11-09| 2| 3| 7|
| 1| 7|2019-11-09| 2| 3| 7|
| 1| 8|2019-11-09| 2| 3| 8|
| 1| 8|2019-11-09| 2| 3| 8|
| 1| 8|2019-11-09| 2| 3| 8|
| 1| -10|2019-11-09| 2| 3| 2|
| 2| 5|2019-11-09| 2| 3| 5|
| 2| 5|2019-11-09| 2| 3| 5|
| 2| 2|2019-11-09| 2| 3| 2|
| 2| 2|2019-11-09| 2| 3| 2|
| 2| 2|2019-11-09| 2| 3| 2|
| 2| -10|2019-11-09| 2| 3| 2|
| 1| 5|2019-11-08| 9| 1| 5|
| 1| 6|2019-11-08| 9| 1| 6|
| 2| 9|2019-11-08| 9| 1| 9|
| 2| -10|2019-11-08| 9| 1| 9|
| 1| 10|2019-11-07| 5| 2| 10|
| 1| 5|2019-11-07| 5| 2| 5|
| 1| 5|2019-11-07| 5| 2| 5|
| 2| 10|2019-11-07| 5| 2| 10|
| 2| 3|2019-11-07| 5| 2| 3|
| 2| 9|2019-11-07| 5| 2| 9|
+----------+--------+----------+-------------+----------+----------------+

pyspark: groupby and aggregate avg and first on multiple columns

I have the following sample PySpark dataframe. After a groupby I want to calculate the mean and the first value of multiple columns; in the real case I have hundreds of columns, so I can't do it individually.
sp = spark.createDataFrame([['a',2,4,'cc','anc'], ['a',4,7,'cd','abc'], ['b',6,0,'as','asd'], ['b', 2, 4, 'ad','acb'],
['c', 4, 4, 'sd','acc']], ['id', 'col1', 'col2','col3', 'col4'])
+---+----+----+----+----+
| id|col1|col2|col3|col4|
+---+----+----+----+----+
| a| 2| 4| cc| anc|
| a| 4| 7| cd| abc|
| b| 6| 0| as| asd|
| b| 2| 4| ad| acb|
| c| 4| 4| sd| acc|
+---+----+----+----+----+
This is what I am trying:
mean_cols = ['col1', 'col2']
first_cols = ['col3', 'col4']
sc.groupby('id').agg(*[ f.mean for col in mean_cols], *[f.first for col in first_cols])
but it's not working. How can I do it like this with PySpark?
The best way for multiple functions on multiple columns is to use the .agg(*expr) format.
import pyspark.sql.functions as F
from pyspark.sql.functions import udf
from pyspark.sql.types import *
import numpy as np
#Test data
tst = sqlContext.createDataFrame([(1,2,3,4),(3,4,5,1),(5,6,7,8),(7,8,9,2)],schema=['col1','col2','col3','col4'])
fn_l = [F.min,F.max,F.mean,F.first]
col_l=['col1','col2','col3']
expr = [fn(coln).alias(str(fn.__name__)+'_'+str(coln)) for fn in fn_l for coln in col_l]
tst_r = tst.groupby('col4').agg(*expr)
The result will be
tst_r.show()
+----+--------+--------+--------+--------+--------+--------+---------+---------+---------+----------+----------+----------+
|col4|min_col1|min_col2|min_col3|max_col1|max_col2|max_col3|mean_col1|mean_col2|mean_col3|first_col1|first_col2|first_col3|
+----+--------+--------+--------+--------+--------+--------+---------+---------+---------+----------+----------+----------+
| 5| 5| 6| 7| 7| 8| 9| 6.0| 7.0| 8.0| 5| 6| 7|
| 4| 1| 2| 3| 3| 4| 5| 2.0| 3.0| 4.0| 1| 2| 3|
+----+--------+--------+--------+--------+--------+--------+---------+---------+---------+----------+----------+----------+
For selectively applying functions on columns, you can have multiple expression arrays and concatenate them in aggregation.
fn_l = [F.min,F.max]
fn_2=[F.mean,F.first]
col_l=['col1','col2']
col_2=['col1','col3','col4']
expr1 = [fn(coln).alias(str(fn.__name__)+'_'+str(coln)) for fn in fn_l for coln in col_l]
expr2 = [fn(coln).alias(str(fn.__name__)+'_'+str(coln)) for fn in fn_2 for coln in col_2]
tst_r = tst.groupby('col4').agg(*(expr1+expr2))
A simpler way to do it:
import pyspark.sql.functions as F

tst_r = (tst.groupby('col4')
            .agg(*[F.mean(col).alias(f"{col}_mean") for col in mean_cols],
                 *[F.first(col).alias(f"{col}_first") for col in first_cols]))

pyspark: Auto filling in implicit missing values

I have a dataframe
user day amount
a 2 10
a 1 14
a 4 5
b 1 4
You can see that the maximum value of day is 4 and the minimum value is 1. I want to fill the amount column with 0 for all missing days of all users, so the above data frame will become:
user day amount
a 2 10
a 1 14
a 4 5
a 3 0
b 1 4
b 2 0
b 3 0
b 4 0
How could I do that in PySpark? Many thanks.
Here is one approach. You can get the min and max values first, then group on the user column and pivot, fill in the missing columns and replace all nulls with 0, and then stack them back:
from pyspark.sql import functions as F

min_max = df.agg(F.min("day"), F.max("day")).collect()[0]
df1 = df.groupBy("user").pivot("day").agg(F.first("amount").alias("amount")).na.fill(0)
missing_cols = [F.lit(0).alias(str(i)) for i in range(min_max[0], min_max[1] + 1)
                if str(i) not in df1.columns]
df1 = df1.select("*", *missing_cols)
#+----+---+---+---+---+
#|user| 1| 2| 4| 3|
#+----+---+---+---+---+
#| b| 4| 0| 0| 0|
#| a| 14| 10| 5| 0|
#+----+---+---+---+---+
# the next step is inspired by https://stackoverflow.com/a/37865645/9840637
arr = F.explode(F.array([F.struct(F.lit(c).alias("day"), F.col(c).alias("amount"))
                         for c in df1.columns[1:]])).alias("kvs")
(df1.select(["user"] + [arr])
    .select(["user"] + ["kvs.day", "kvs.amount"])
    .orderBy("user")).show()
+----+---+------+
|user|day|amount|
+----+---+------+
| a| 1| 14|
| a| 2| 10|
| a| 4| 5|
| a| 3| 0|
| b| 1| 4|
| b| 2| 0|
| b| 4| 0|
| b| 3| 0|
+----+---+------+
Note: since the day column was pivoted, its dtype might have changed, so you may have to cast it back to the original dtype.
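For example, a cast back to integers could look like this (result being whatever name you give the unpivoted frame):
# hypothetical: restore the integer dtype on the unpivoted 'day' column
result = result.withColumn("day", F.col("day").cast("int"))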
Another way to do this is to use sequence, array functions, and explode (Spark 2.4+).
from pyspark.sql import functions as F
from pyspark.sql.window import Window

w = Window.partitionBy(F.lit(0))
(df.withColumn("boundaries", F.sequence(F.min("day").over(w), F.max("day").over(w), F.lit(1)))
   .groupBy("user").agg(F.collect_list("day").alias("day"),
                        F.collect_list("amount").alias("amount"),
                        F.first("boundaries").alias("boundaries"))
   .withColumn("boundaries", F.array_except("boundaries", "day"))
   .withColumn("day", F.flatten(F.array("day", "boundaries")))
   .drop("boundaries")
   .withColumn("zip", F.explode(F.arrays_zip("day", "amount")))
   .select("user", "zip.day",
           F.when(F.col("zip.amount").isNull(), F.lit(0)).otherwise(F.col("zip.amount")).alias("amount"))
 ).show()
#+----+---+------+
#|user|day|amount|
#+----+---+------+
#| a| 2| 10|
#| a| 1| 14|
#| a| 4| 5|
#| a| 3| 0|
#| b| 1| 4|
#| b| 2| 0|
#| b| 3| 0|
#| b| 4| 0|
#+----+---+------+

How to aggregate contiguous rows in pyspark

I have an immense amount of user data (billions of rows) where I need to summarize the amount of time spent in a specific state by each user.
Let's say it's historical web data, and I want to sum the amount of time each user has spent on the site. The data only says if the user is present.
df = spark.createDataFrame([("A", 1), ("A", 2), ("A", 3),("B", 4 ),("B", 5 ),("A", 6 ),("A", 7 ),("A", 8 )], ["user","timestamp"])
+----+---------+
|user|timestamp|
+----+---------+
| A| 1|
| A| 2|
| A| 3|
| B| 4|
| B| 5|
| A| 6|
| A| 7|
| A| 8|
+----+---------+
The correct answer would be this since I'm summing the total per contiguous segment.
+----+---------+
|user| ttl |
+----+---------+
| A| 4|
| B| 1|
+----+---------+
I tried doing a max()-min() and groupby but that resulted in segment A being 8-1 and gave the wrong answer.
In sqlite I was able to get the answer by creating a partition number and then finding the difference and summing. I created the partition with this...
SELECT
COUNT(*) FILTER (WHERE a.user <>
( SELECT b.user
FROM foobar AS b
WHERE a.timestamp > b.timestamp
ORDER BY b.timestamp DESC
LIMIT 1
))
OVER (ORDER BY timestamp) c,
user,
timestamp
FROM foobar a;
which gave me...
+----+---------+---+
|user|timestamp| c |
+----+---------+---+
| A| 1| 1 |
| A| 2| 1 |
| A| 3| 1 |
| B| 4| 2 |
| B| 5| 2 |
| A| 6| 3 |
| A| 7| 3 |
| A| 8| 3 |
+----+---------+---+
Then the LAST() - FIRST() functions in sql made that easy to finish.
Any ideas on how to scale this and do it in PySpark? I can't seem to find an adequate substitute for the "COUNT(*) FILTER (WHERE ...)" that sqlite offered.
We can do this:
Create the DataFrame
from pyspark.sql.window import Window
from pyspark.sql.functions import max, min
from pyspark.sql import functions as F
df = spark.createDataFrame([("A", 1), ("A", 2), ("A", 3),("B", 4 ),("B", 5 ),("A", 6 ),("A", 7 ),("A", 8 )], ["user","timestamp"])
df.show()
+----+---------+
|user|timestamp|
+----+---------+
| A| 1|
| A| 2|
| A| 3|
| B| 4|
| B| 5|
| A| 6|
| A| 7|
| A| 8|
+----+---------+
Assign a row_number to each row, ordered by timestamp. The column dummy is used so that we can use the window function row_number.
df = df.withColumn('dummy', F.lit(1))
w1 = Window.partitionBy('dummy').orderBy('timestamp')
df = df.withColumn('row_number', F.row_number().over(w1))
df.show()
+----+---------+-----+----------+
|user|timestamp|dummy|row_number|
+----+---------+-----+----------+
| A| 1| 1| 1|
| A| 2| 1| 2|
| A| 3| 1| 3|
| B| 4| 1| 4|
| B| 5| 1| 5|
| A| 6| 1| 6|
| A| 7| 1| 7|
| A| 8| 1| 8|
+----+---------+-----+----------+
We want to create a sub group within each user group here.
(1) For each user group, compute the difference between the current row's row_number and the previous row's row_number, so any difference larger than 1 indicates a new contiguous group. This results in column diff; note the first row in each group has a value of -1.
(2) We then assign null to every row with diff == 1. This results in column diff2.
(3) Next, we use the last function to fill the rows where diff2 is null with the last non-null value in column diff2. This results in column subgroupid.
This is the sub group we want to create for each user group.
w2 = Window.partitionBy('user').orderBy('timestamp')
df = df.withColumn('diff', df['row_number'] - F.lag('row_number').over(w2)).fillna(-1)
df = df.withColumn('diff2', F.when(df['diff']==1, None).otherwise(F.abs(df['diff'])))
df = df.withColumn('subgroupid', F.last(F.col('diff2'), True).over(w2))
df.show()
+----+---------+-----+----------+----+-----+----------+
|user|timestamp|dummy|row_number|diff|diff2|subgroupid|
+----+---------+-----+----------+----+-----+----------+
| B| 4| 1| 4| -1| 1| 1|
| B| 5| 1| 5| 1| null| 1|
| A| 1| 1| 1| -1| 1| 1|
| A| 2| 1| 2| 1| null| 1|
| A| 3| 1| 3| 1| null| 1|
| A| 6| 1| 6| 3| 3| 3|
| A| 7| 1| 7| 1| null| 3|
| A| 8| 1| 8| 1| null| 3|
+----+---------+-----+----------+----+-----+----------+
We now group by both user and subgroupid to compute the time each user spent on each contiguous time interval.
Lastly, we group by user only to sum up the total time spent by each user.
s = "(max('timestamp') - min('timestamp'))"
df = df.groupBy(['user', 'subgroupid']).agg(eval(s))
s = s.replace("'","")
df = df.groupBy('user').sum(s).select('user', F.col("sum(" + s + ")").alias('total_time'))
df.show()
+----+----------+
|user|total_time|
+----+----------+
| B| 1|
| A| 4|
+----+----------+