Simulating Ogata's Thinning Algorithm in R

I am trying to implement Ogata's Thinning Algorithm exactly as given in Algorithm 3 in https://www.math.fsu.edu/~ychen/research/Thinning%20algorithm.pdf with the parameters they specify to generate Figure 2.
Here is my code, which follows the algorithm verbatim.
# Simulation of a Univariate Hawkes Poisson with
# Exponential Kernel γ(u) = αe^(−βu), on [0, T].
# Based on: https://www.math.fsu.edu/~ychen/research/Thinning%20algorithm.pdf
library(tidyverse)
# Initialize parameters that remain the same for all realizations
mu <- 1.2
alpha <- 0.6
beta <- 0.8
T <- 10
# Initialize other variables that change through realizations
bigTau <- vector('numeric')
bigTau <- c(bigTau,0)
s <- 0
n <- 0
lambda_vec_accepted <- c(mu)
# Begin loop
while (s < T) {
  # -------------------------------
  # Compute lambda_bar
  # -------------------------------
  sum_over_big_Tau <- 0
  for (i in c(1:length(bigTau))) {
    sum_over_big_Tau <- sum_over_big_Tau + alpha*exp(-beta*(s-bigTau[i]))
  }
  lambda_bar <- mu + sum_over_big_Tau
  u <- runif(1)
  # so that w ∼ exponential(λ_bar)
  w <- -log(u)/lambda_bar
  # so that s is the next candidate point
  s <- s+w
  # Generate D ∼ uniform(0,1)
  D <- runif(1)
  # -------------------------------
  # Compute lambda_s
  # -------------------------------
  sum_over_big_Tau <- 0
  for (i in c(1:length(bigTau))) {
    sum_over_big_Tau <- sum_over_big_Tau + alpha*exp(-beta*(s-bigTau[i]))
  }
  lambda_s <- mu + sum_over_big_Tau
  # -------------------------------
  # Accepting with prob. λ_s/λ_bar
  # -------------------------------
  if (D*lambda_bar <= lambda_s) {
    lambda_vec_accepted <- c(lambda_vec_accepted, lambda_s)
    # updating the number of points accepted
    n <- n+1
    # naming it t_n
    t_n <- s
    # adding t_n to the ordered set bigTau
    bigTau <- c(bigTau, t_n)
  }
}
df <- data.frame(x = bigTau, y = lambda_vec_accepted)
ggplot(df) + geom_line(aes(x = x, y = y))
However, the plot of lambda vs. time that I get (running it several times) looks something like this, and it is nowhere near what they got in Figure 2 (exponentially decreasing between events).
I am not sure what mistake I am making, and it would be great if anyone could help; this is needed for my research. I come from biology, so please excuse me if I am doing something silly. Thanks.
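For comparison with Figure 2, here is a minimal sketch (an addition for illustration, not part of Algorithm 3) that evaluates the intensity λ(t) on a dense time grid rather than only at the accepted event times; it reuses mu, alpha, beta and bigTau from the code above and the same intensity definition as in the loop:
# Sketch: evaluate lambda(t) on a dense grid, using the event times in bigTau
t_grid <- seq(0, T, length.out = 1000)
lambda_grid <- sapply(t_grid, function(t) {
  mu + sum(alpha * exp(-beta * (t - bigTau[bigTau <= t])))
})
ggplot(data.frame(t = t_grid, lambda = lambda_grid)) +
  geom_line(aes(x = t, y = lambda))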

Related

Effect size estimates for tbl_svysummary

I had a quick question. Is there any way to add effect size estimates (i.e., Cohen's D and/or Cramer's V) for tbl_svysummary when comparing demographic factors to one another? I am looking for pretty much the same answer that was provided for this post (How to add the Chi-square effect size Cramer's V in the summary table using R package “gtsummary”?)
library(gtsummary)

my_ES_test <- function(data, variable, by, ...) {
  rstatix::cohens_d(data, as.formula(glue::glue("{variable} ~ {by}")))$effsize
}

my_cramer_v <- function(data, variable, by, ...) {
  table(data[[variable]], data[[by]]) %>%
    rstatix::cramer_v()
}

gtTable <-
  mtcars %>%
  select(hp, vs, am) %>%
  tbl_summary(by = vs) %>%
  add_p() %>%
  add_stat(
    fns = list(all_continuous() ~ my_ES_test,
               all_categorical() ~ my_cramer_v)
  ) %>%
  modify_header(add_stat_1 ~ "**Effect size**")
However, when I tried the methodology suggested there, it did not work for tbl_svysummary. Example below:
library(tidyverse)
library(rstatix)
library(gtsummary)

my_ES_test <- function(data, variable, by, ...) {
  rstatix::cohens_d(data, as.formula(glue::glue("{variable} ~ {by}")))$effsize
}

my_cramer_v <- function(data, variable, by, ...) {
  table(data[[variable]], data[[by]]) %>%
    rstatix::cramer_v()
}

tbl_svysummary_ex1 <-
  survey::svydesign(~1, data = as.data.frame(Titanic), weights = ~Freq) %>%
  tbl_svysummary(by = Survived, percent = "row", include = c(Class, Age)) %>%
  add_p(test = list(all_categorical() ~ "svy.chisq.test")) %>%
  add_stat(
    fns = list(all_continuous() ~ my_ES_test,
               all_categorical() ~ my_cramer_v)
  ) %>%
  modify_header(add_stat_1 ~ "**Effect size**")
Furthermore, on the gtsummary website, there do not seem to be any instructions for how to do this in tbl_svysummary either. Any guidance here would be much appreciated!
The example below shows the test statistic and the degrees of freedom. If you want an effect size that is not returned by default, you'll need to write a custom method for add_difference() that includes the estimate.
library(gtsummary)

# create summary table
tbl_svysummary_ex1 <-
  survey::svydesign(~1, data = as.data.frame(Titanic), weights = ~Freq) %>%
  tbl_svysummary(by = Survived, percent = "row", include = c(Class, Age)) %>%
  add_p(test = list(all_categorical() ~ "svy.chisq.test"))

tbl_svysummary_ex1$table_body |> names()
#>  [1] "variable"    "test_name"   "var_type"    "var_label"   "row_type"
#>  [6] "label"       "stat_1"      "stat_2"      "test_result" "statistic"
#> [11] "p.value"     "ndf"         "ddf"

# unhide the statistic and DF columns by assigning a header
tbl_svysummary_ex1 |>
  modify_header(
    statistic = "**Chi-square**",
    ndf = "**ndf**",
    ddf = "**ddf**"
  ) |>
  modify_fmt_fun(c(statistic, ndf, ddf) ~ style_sigfig) |>
  as_kable()
|Characteristic |No, N = 1,490 |Yes, N = 711 |Chi-square |p-value |ndf |ddf |
|:--------------|:-------------|:------------|:----------|:-------|:---|:---|
|Class          |              |             |0.41       |0.7     |2.6 |81  |
|1st            |122 (38%)     |203 (62%)    |           |        |    |    |
|2nd            |167 (59%)     |118 (41%)    |           |        |    |    |
|3rd            |528 (75%)     |178 (25%)    |           |        |    |    |
|Crew           |673 (76%)     |212 (24%)    |           |        |    |    |
|Age            |              |             |0.63       |0.4     |1.0 |31  |
|Child          |52 (48%)      |57 (52%)     |           |        |    |    |
|Adult          |1,438 (69%)   |654 (31%)    |           |        |    |    |
Created on 2022-11-20 with reprex v2.0.2
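If all that is needed is a design-based Cramér's V to report alongside the table, one option (a sketch computed outside of gtsummary, rather than through add_stat() or a custom add_difference() method) is to derive it from survey::svychisq() and the weighted contingency table:
library(survey)

des <- svydesign(~1, data = as.data.frame(Titanic), weights = ~Freq)
tab <- svytable(~ Class + Survived, design = des)                      # weighted table
chi <- svychisq(~ Class + Survived, design = des, statistic = "Chisq")

# Cramer's V = sqrt(X^2 / (N * (min(rows, cols) - 1))), using the weighted N
cramer_v <- sqrt(unname(chi$statistic) / (sum(tab) * (min(dim(tab)) - 1)))
cramer_v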

stats::step fails inside a function because it can't find the data in the lm object

Hi everyone!
I tried using the step() function inside my own function, but it seems that step() only looks for the data in the global environment, not for variables local to the function.
Here is my example code:
library(tidyverse)

# simple test function
my_step_function <- function(model_data, formula) {
  mod <- lm(formula, model_data, x = TRUE, y = TRUE)
  step_mod <- step(mod, direction = "both", trace = FALSE)
  summary(step_mod)
}

# test data
test <- tibble(
  x1 = 1:100,
  x2 = -49:50 + 9 * rnorm(100),
  x3 = 50 + 5 * rnorm(100),
  x4 = 10 * rnorm(100),
  x5 = sqrt(1:100),
  y = 5 * x1 + 2 * x2 + 10 * x5 + rnorm(100)
) %>% nest(data = everything())

# doesn't work inside map(); this is where I first found the problem
test %>%
  mutate(RW = map(
    data,
    ~ my_step_function(.x, formula = formula(y ~ .))
  ))
# error: can't find object 'model_data'

# doesn't work when called directly
my_step_function(test$data[[1]], formula = (y ~ .))
# error: can't find object 'model_data'

# still doesn't work when given a different global variable name
test_data <- test$data[[1]]
my_step_function(test_data, formula = (y ~ .))
# error: can't find object 'model_data'

# works when the global variable name matches the argument name inside the function
model_data <- test$data[[1]]
my_step_function(model_data, formula = (y ~ .))
# success!
I would appreciate it if someone could solve my puzzle! Thanks, everyone!
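For what it's worth, one workaround I'm aware of (a sketch, not guaranteed to cover every case; my_step_function2 is just a hypothetical name for this variant) is to build the lm() call with do.call(), so that the evaluated data frame is embedded in the call that step()/update() later re-evaluates:
my_step_function2 <- function(model_data, formula) {
  # do.call() records the actual data frame in the model's call,
  # so step() does not need to find 'model_data' by name later
  mod <- do.call("lm", list(formula = formula, data = model_data, x = TRUE, y = TRUE))
  step_mod <- step(mod, direction = "both", trace = FALSE)
  summary(step_mod)
}

my_step_function2(test$data[[1]], formula = y ~ .)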

How do I read mouse data in a non-blocking way?

I'm implementing a fail-safe handover procedure in ROS, and I'm using Python scripts to do so.
I'm using the optical sensor from a mouse to monitor the acceleration of the object so I can detect when it is falling. Everything seems to work fine, but now I want to put a limit on the monitoring procedure (say, 1000 iterations) before declaring the handover successful. The problem is that the read call I use for the mouse gets stuck: if no movement is detected, the next iteration is never performed. How can I read from the device without running into this issue?
Here is the code I'm using to read from the mouse:
def getMouseEvent():
    buf = file.read(3)
    x, y = struct.unpack("bb", buf[1:])  # <--- X and Y deltas.
    return [x, y]
Here is the loop I want to implement:
release_grasp()
rospy.loginfo("Force detected -- Release mode active")
# If the object is falling, regrasp it.
detected = False
trials = 0
while not detected and trials < 1000:
    trials = trials + 1
    rospy.loginfo("Acc monitored for the " + str(trials) + "th time")
    if fall_test():
        cilindrical_grasp()
        rospy.loginfo("Fall detected -- Object regrasped")
        detected = True
    rate.sleep()
The loop blocks at a given iteration and does not continue until the mouse detects some movement.
UPDATE: Here is the full code
#!/usr/bin/env python2
import rospy
import numpy
import struct

from reflex_sf_msgs.msg import SFPose
from matteo.msg import force
from matteo.msg import acc

# Defining force thresholds in each direction (to be completed and tuned)
rospy.init_node('DetectionFail')
xt = 0.5
yt = xt
zt = 0.3

# For the future, try to handle the initialization.
fx = None
fy = None
fz = None
ax = None
ay = None

rate = rospy.Rate(100)  # <--- Rate Hz

#-----------------------------MOUSE-----------------------------------#
# Open the mouse device. To check whether it is "mouse2", type in the terminal:
# cat /proc/bus/input/devices, look for the device whose name is
# "Logitech optical USB mouse" and get the name of its handler.
# If you need root permissions, type: sudo chmod 777 /dev/input/(handler)
file = open("/dev/input/mouse3", "rb")

# Defining the function to read mouse deltas.
def getMouseEvent():
    buf = file.read(3)
    x, y = struct.unpack("bb", buf[1:])  # <--- X and Y deltas.
    return [x, y]

# Defining the function to estimate the acceleration.
def acc_comp():
    vx_old = 0
    vy_old = 0
    vx_new = getMouseEvent()[0]
    vy_new = getMouseEvent()[1]
    x_acc = (vx_old - vx_new) * 100
    y_acc = (vy_old - vy_new) * 100
    vx_old = vx_new
    vy_old = vy_new
    return [x_acc, y_acc]

#---------------------------------------------------------------------#
# Defining the fall test function
def fall_test():
    if acc_comp()[1] >= 3000 or acc_comp()[1] <= -3000:
        return True
    else:
        return False

#---------------------------------------------------------------------#
# Initialize hand publisher.
hand_pub = rospy.Publisher('/reflex_sf/command', SFPose, queue_size=1)
rospy.sleep(0.5)

#---------------------------------------------------------------------#
# Defining the spherical grasp.
def cilindrical_grasp():
    hand_pub.publish(2.5, 2.5, 2.5, 0)

#---------------------------------------------------------------------#
# Define the release position.
def release_grasp():
    hand_pub.publish(2, 2, 2, 0)

#---------------------------------------------------------------------#
# Define the test for the force measure
def force_treshold(fx, fy, fz):
    if fx > xt and fy > yt or fz > zt:
        return True
    else:
        return False

#---------------------------------------------------------------------#
# Callback function to save the data obtained by the force sensor
def callback_force(msg):
    global fx
    global fy
    global fz
    fx = msg.fx
    fy = msg.fy
    fz = msg.fz

# Main loop.
def main():
    # Apply the spherical grasp.
    rospy.loginfo("Applying grasp")
    cilindrical_grasp()
    while not rospy.is_shutdown():
        rospy.Subscriber("/Forces", force, callback_force)
        if force_treshold(fx, fy, fz):
            release_grasp()
            rospy.loginfo("Force detected -- Release mode active")
            # If the object is falling, regrasp it.
            detected = False
            trials = 0
            while not detected and trials < 1000:
                trials = trials + 1
                if fall_test():
                    cilindrical_grasp()
                    rospy.loginfo("Fall detected -- Object regrasped")
                    detected = True
                rate.sleep()
        if rospy.is_shutdown():
            break
Yesterday I came up with this code:
#!/usr/bin/env python
import struct
import rospy
from matteo.msg import acc
import os
import time

i = 0
# Mouse read with a non-blocking structure. The problem is that it does not provide
# the same output as mouse_clean.py; probably there is a problem with the unpacking
# or the reading.
while i < 1000:
    i += 1
    try:
        file = os.open("/dev/input/mouse0", os.O_RDONLY | os.O_NONBLOCK)
        time.sleep(0.1)
        buf = os.read(file, 3)
        x, y = struct.unpack("bb", buf[1:])  # <--- X and Y deltas.
        print("X:" + str(x) + "---" + "Y:" + str(y))
    except OSError as err:
        if err.errno == 11:
            print("No motion detected")
            continue
os.close(file)
It works fine in the sense that the message is printed when there is no motion, but when there is motion the output I get is quite different from the "vanilla" (blocking) version.

How to implement a Slope graph in R for two variables

I'm analyzing how many users have used a particular hashtag and how they have contributed to the total number of tweets. My results are:
Data:
20.68% of the tweets related to #HashtagX are created by just 20 users. These 20 users represent only about 0.14% of the 14,432 users who have ever used the hashtag #HashtagX.
What happens if we take the top 100 users by number of tweets? 44% of tweets are created by the top 100 users.
If we extend to the top 500 users by number of tweets, we see that 72% of the tweets are created by the top 500.
I am wondering how to implement a slope graph, because I think it is a good way to show the relationship between both variables, but it is not a default chart type provided by any library.
One of the ways to show the relationship between both variables ("Users" vs "Tweets") is a Slope Chart.
Visualization obtained (the slope chart produced by the code below): [slope chart figure]
1) Libraries
library(ggplot2)
library(scales)
library(ggrepel)
theme_set(theme_classic())
2) Data example
Country <- c('20 accounts', '50 accounts', '100 accounts', '200 accounts',
             '300 accounts', '500 accounts', '1000 accounts', '14.443 accounts')
January <- c(0.14, 0.34, 0.69, 1.38, 2.07, 3.46, 6.92, 100)
April <- c(20.68, 33.61, 44.94, 57.49, 64.11, 72, 80, 100)
Tweets_N <- c(26797, 43547, 58211, 74472, 83052, 93259, 103898, 129529)
a <- data.frame(Country, January, April)

left_label <- paste(a$Country, paste0(a$January, "%"), sep = " | ")
right_label <- paste(paste0(round(a$April), "%"), paste0(Tweets_N, " tweets"), sep = " | ")
a$color_class <- "green"
3) Plot
p <- ggplot(a) +
  geom_segment(aes(x = 1, xend = 2, y = January, yend = April, col = color_class),
               size = .25, show.legend = FALSE) +
  geom_vline(xintercept = 1, linetype = "dashed", size = .1) +
  geom_vline(xintercept = 2, linetype = "dashed", size = .1) +
  scale_color_manual(labels = c("Up", "Down"),
                     values = c("blue", "red")) +
  labs(x = "", y = "Percentage") +
  xlim(.5, 2.5) + ylim(0, 1.1 * max(a$January, a$April))  # X and Y axis limits

# Add texts
p <- p + geom_text_repel(label = left_label, y = a$January, x = rep(1, NROW(a)),
                         hjust = 1.1, size = 3.5, direction = "y")
p <- p + geom_text(label = right_label, y = a$April, x = rep(2, NROW(a)),
                   hjust = -0.1, size = 3.5)
p <- p + geom_text(label = "Accounts", x = 1, y = 1.1 * max(a$January, a$April),
                   hjust = 1.2, size = 4, check_overlap = TRUE)   # left column title
p <- p + geom_text(label = "Tweets (% of Total)", x = 2, y = 1.1 * max(a$January, a$April),
                   hjust = -0.1, size = 4, check_overlap = TRUE)  # right column title

# Minify theme
p + theme(panel.background = element_blank(),
          panel.grid = element_blank(),
          axis.ticks = element_blank(),
          axis.text.x = element_blank(),
          panel.border = element_blank(),
          plot.margin = unit(c(1, 2, 1, 2), "cm"))
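As an aside, the same relationship can be drawn with less manual positioning by reshaping a to long format and letting ggplot draw one line per account group; a minimal sketch (an addition, not part of the original answer) that assumes dplyr and tidyr are available:
library(dplyr)
library(tidyr)

# Reshape to long format: one row per (account group, side) pair
a_long <- a %>%
  pivot_longer(c(January, April), names_to = "side", values_to = "pct") %>%
  mutate(side = factor(side, levels = c("January", "April")))

ggplot(a_long, aes(x = side, y = pct, group = Country)) +
  geom_line() +
  geom_point()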

How to code a matrix in WinBUGS?

I am trying to code a 2×2 matrix sigma with its 4 elements, but I am not sure how to do this in WinBUGS. My goal is to get the posterior p's, their means and variances, and to create an ellipse region covered by the two posterior p's. Here's my code below:
model{
  # likelihood
  for(j in 1:Nf){
    p1[j, 1:2] ~ dmnorm(gamma[1:2], T[1:2, 1:2])
    for (i in 1:2){
      logit(p[j,i]) <- p1[j,i]
      Y[j,i] ~ dbin(p[j,i], n)
    }
    X_mu[j,1] <- p[j,1] - mean(p[,1])
    X_mu[j,2] <- p[j,2] - mean(p[,2])
    v1 <- sd(p[,1])*sd(p[,1])
    v2 <- sd(p[,2])*sd(p[,2])
    v12 <- (inprod(X_mu[j,1], X_mu[j,2]))/(sd(p[,1])*sd(p[,2]))
    sigma[1,1] <- v1
    sigma[1,2] <- v12
    sigma[2,1] <- v12
    sigma[2,2] <- v2
    sigmaInv[1:2, 1:2] <- inverse(sigma[,])
    T1[j,1] <- inprod(sigmaInv[1,], X_mu[j,1])
    T1[j,2] <- inprod(sigmaInv[2,], X_mu[j,2])
    ell[j,1] <- inprod(X_mu[j,1], T1[j,1])
    ell[j,2] <- inprod(X_mu[j,2], T1[j,2])
  }
  # priors
  gamma[1:2] ~ dmnorm(mn[1:2], prec[1:2, 1:2])
  expit[1] <- exp(gamma[1])/(1+exp(gamma[1]))
  expit[2] <- exp(gamma[2])/(1+exp(gamma[2]))
  T[1:2, 1:2] ~ dwish(R[1:2, 1:2], 2)
  sigma2[1:2, 1:2] <- inverse(T[,])
  rho <- sigma2[1,2]/sqrt(sigma2[1,1]*sigma2[2,2])
}

# Data
list(Nf = 20, mn = c(-0.69, -1.06), n = 60,
     prec = structure(.Data = c(.001, 0,
                                0, .001), .Dim = c(2, 2)),
     R = structure(.Data = c(.001, 0,
                             0, .001), .Dim = c(2, 2)),
     Y = structure(.Data = c(32,13,
                             32,12,
                             10,4,
                             28,11,
                             10,5,
                             25,10,
                             4,1,
                             16,5,
                             28,10,
                             21,7,
                             19,9,
                             18,12,
                             31,12,
                             13,3,
                             10,4,
                             18,7,
                             3,2,
                             27,5,
                             8,1,
                             8,4), .Dim = c(20, 2)))
You have to specify each element in turn. You can use the inverse function (rather than solve) to invert the matrix.
model{
  sigma[1,1] <- v1
  sigma[1,2] <- v12
  sigma[2,1] <- v21
  sigma[2,2] <- v2
  sigmaInv[1:2, 1:2] <- inverse(sigma[,])
}
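As a quick sanity check outside of BUGS, the same 2×2 construction and its inverse can be verified in R; a small sketch with made-up values standing in for v1, v2 and v12 (solve() plays the role of inverse() in WinBUGS):
# Hypothetical values standing in for v1, v2 and v12 from the model
v1 <- 0.9; v2 <- 1.3; v12 <- 0.4
sigma <- matrix(c(v1,  v12,
                  v12, v2), nrow = 2, byrow = TRUE)
sigmaInv <- solve(sigma)        # R's analogue of inverse() in WinBUGS
round(sigma %*% sigmaInv, 10)   # should be the 2x2 identity matrix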