Translating an rgl plot using the mouse

I could not find any way to translate (pan) an rgl plot using the mouse; mouseMode cannot be set to a translation mode. Does anyone know how to do it?
Thanks.

OK, I found the solution in the help page for rgl.setMouseCallbacks:
library(rgl)

pan3d <- function(button) {
  start <- list()
  # Record the scene state when the drag starts
  begin <- function(x, y) {
    start$userMatrix <<- par3d("userMatrix")
    start$viewport <<- par3d("viewport")
    start$scale <<- par3d("scale")
    start$projection <<- rgl.projection()
    start$pos <<- rgl.window2user(x / start$viewport[3],
                                  1 - y / start$viewport[4], 0.5,
                                  projection = start$projection)
  }
  # Translate the scene by the mouse displacement, in user coordinates
  update <- function(x, y) {
    xlat <- (rgl.window2user(x / start$viewport[3],
                             1 - y / start$viewport[4], 0.5,
                             projection = start$projection) - start$pos) * start$scale
    mouseMatrix <- translationMatrix(xlat[1], xlat[2], xlat[3])
    par3d(userMatrix = start$userMatrix %*% t(mouseMatrix))
  }
  rgl.setMouseCallbacks(button, begin, update)
  cat("Callbacks set on button", button, "of rgl device", rgl.cur(), "\n")
}
pan3d(3)
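(In rgl, button 3 is typically the middle mouse button; passing 1 or 2 to pan3d should bind panning to the left or right button instead.)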

Related

R queue simulation: Finding a function with two arguments, a process and a state, which returns the amount of time spent in that state

I have code for a simulated birth/death process and would like to write a function that takes a simulated process and a state of that process and returns the amount of time the process spent in that state.
I think I can use parts of the code I already have, such as the vector "time", but I can't see how. For example, I would like a function time_in_state <- function(s, process), where the process can be, say, process1 <- bd_process(2, 10, 0, 100) and the state s = 2. The function should then return the amount of time process1 spent in state 2.
bd_process <- function(lambda, mu, initial_state = 0, steps = 500) {
  time_now <- 0
  state_now <- initial_state
  time <- 0
  state <- initial_state
  for (i in 1:steps) {
    # no births above state 3, no deaths below state 0
    if (state_now == 3) {
      lambda_now <- 0
    } else {
      lambda_now <- lambda
    }
    if (state_now == 0) {
      mu_now <- 0
    } else {
      mu_now <- mu
    }
    # (assumption: the post uses time_to_transition without defining it;
    # an exponential holding time with the total jump rate is the standard choice)
    time_to_transition <- rexp(1, rate = lambda_now + mu_now)
    # step down with probability mu_now / (mu_now + lambda_now), otherwise up
    if (((mu_now + lambda_now) * runif(1)) < mu_now) {
      state_now <- state_now - 1
    } else {
      state_now <- state_now + 1
    }
    time_now <- time_now + time_to_transition
    time <- c(time, time_now)
    state <- c(state, state_now)
  }
  list(tid = time, state = state, steps = steps)
}
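For what it's worth, here is a minimal sketch of the requested time_in_state() function, assuming the list returned by bd_process() above: the process sits in state[i] during the interval from tid[i] to tid[i+1], so the durations are just diff(tid) (the last recorded state has no duration and is dropped).
time_in_state <- function(s, process) {
  # time spent in each visited state: gaps between successive event times
  durations <- diff(process$tid)
  # state[i] is the state occupied during the interval tid[i] .. tid[i+1]
  visited <- process$state[-length(process$state)]
  sum(durations[visited == s])
}

process1 <- bd_process(2, 10, 0, 100)
time_in_state(2, process1)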

Simulating Ogata's Thinning Algorithm in R

I am trying to implement Ogata's Thinning Algorithm exactly as given in Algorithm 3 in https://www.math.fsu.edu/~ychen/research/Thinning%20algorithm.pdf with the parameters they specify to generate Figure 2.
Here is my code, which replicates it verbatim:
# Simulation of a univariate Hawkes process with
# exponential kernel γ(u) = αe^(−βu), on [0, T].
# Based on: https://www.math.fsu.edu/~ychen/research/Thinning%20algorithm.pdf
library(tidyverse)

# Parameters that remain the same for all realizations
mu <- 1.2
alpha <- 0.6
beta <- 0.8
T <- 10   # time horizon (note: this masks R's T shorthand for TRUE)

# Variables that change through realizations
bigTau <- vector('numeric')
bigTau <- c(bigTau, 0)
s <- 0
n <- 0
lambda_vec_accepted <- c(mu)

# Begin loop
while (s < T) {
  # -------------------------------
  # Compute lambda_bar, the upper bound on the intensity just after s
  # -------------------------------
  sum_over_big_Tau <- 0
  for (i in seq_along(bigTau)) {
    sum_over_big_Tau <- sum_over_big_Tau + alpha * exp(-beta * (s - bigTau[i]))
  }
  lambda_bar <- mu + sum_over_big_Tau
  u <- runif(1)
  # so that w ~ exponential(lambda_bar)
  w <- -log(u) / lambda_bar
  # so that s is the next candidate point
  s <- s + w
  # Generate D ~ uniform(0,1)
  D <- runif(1)
  # -------------------------------
  # Compute lambda_s, the true intensity at the candidate point
  # -------------------------------
  sum_over_big_Tau <- 0
  for (i in seq_along(bigTau)) {
    sum_over_big_Tau <- sum_over_big_Tau + alpha * exp(-beta * (s - bigTau[i]))
  }
  lambda_s <- mu + sum_over_big_Tau
  # -------------------------------
  # Accept with probability lambda_s / lambda_bar
  # -------------------------------
  if (D * lambda_bar <= lambda_s) {
    lambda_vec_accepted <- c(lambda_vec_accepted, lambda_s)
    n <- n + 1                  # update the number of points accepted
    t_n <- s                    # name the accepted point t_n
    bigTau <- c(bigTau, t_n)    # add t_n to the ordered set bigTau
  }
}
df <- data.frame(x = bigTau, y = lambda_vec_accepted)
ggplot(df) + geom_line(aes(x = x, y = y))
However, the plot I get for lambda vs. time (running it several times) is nowhere near what they got in Figure 2 (exponentially decreasing between events).
I am not sure what mistake I am making. It would be great if anyone could help; this is needed for my research. I am from biology, so please excuse me if I am doing something silly. Thanks.
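One plausible reason the plot looks nothing like Figure 2 (offered as a sketch, not a verified fix): the code only records λ at the accepted event times, so geom_line merely connects those points with straight segments, while Figure 2 shows the conditional intensity λ(t) evaluated on a dense time grid, which is where the exponential decay between events becomes visible. Reusing bigTau and the parameters from the run above (lambda_t and grid are illustrative names):
# Evaluate the conditional intensity on a fine grid
lambda_t <- function(t, events, mu, alpha, beta) {
  past <- events[events <= t]
  mu + sum(alpha * exp(-beta * (t - past)))
}
grid <- seq(0, T, length.out = 1000)
intensity <- sapply(grid, lambda_t,
                    events = bigTau, mu = mu, alpha = alpha, beta = beta)
ggplot(data.frame(t = grid, lambda = intensity)) +
  geom_line(aes(x = t, y = lambda))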

Generating a simple algebraic expression in Swift

I'm looking to create a function that returns a solve-for-x math equation that can be performed in one's head (clearly that's a bit subjective, but I'm not sure how else to phrase it).
Example problem: (x - 15)/10 = 6
Note: Only 1 x in the equation
I want to use the operations +, -, *, /, and sqrt (the square root applied only to x, i.e. sqrt(x)).
I know that let mathExpression = NSExpression(format: question) converts strings into math expressions, but I'm not sure how to use this when solving for x.
I previously asked Generating random doable math problems swift for problems that don't involve solving for x, but I'm not sure how to convert that answer into solving for x.
Edit: The goal is to generate an equation and have the user solve for the variable.
Since all you want is a string representing an equation and a value for x, you don't need to do any solving. Just start with x and transform it until you have a nice equation. Here's a sample (copy and paste it into a Playground to try it out):
import UIKit

enum Operation: String {
    case addition = "+"
    case subtraction = "-"
    case multiplication = "*"
    case division = "/"

    static func all() -> [Operation] {
        return [.addition, .subtraction, .multiplication, .division]
    }

    static func random() -> Operation {
        let all = Operation.all()
        let selection = Int(arc4random_uniform(UInt32(all.count)))
        return all[selection]
    }
}

func addNewTerm(formula: String, result: Int) -> (formula: String, result: Int) {
    // choose a random number and operation
    let operation = Operation.random()
    let number = chooseRandomNumberFor(operation: operation, on: result)
    // apply to the left side
    let newFormula = applyTermTo(formula: formula, number: number, operation: operation)
    // apply to the right side
    let newResult = applyTermTo(result: result, number: number, operation: operation)
    return (newFormula, newResult)
}

func applyTermTo(formula: String, number: Int, operation: Operation) -> String {
    return "\(formula) \(operation.rawValue) \(number)"
}

func applyTermTo(result: Int, number: Int, operation: Operation) -> Int {
    switch operation {
    case .addition: return result + number
    case .subtraction: return result - number
    case .multiplication: return result * number
    case .division: return result / number
    }
}

func chooseRandomNumberFor(operation: Operation, on number: Int) -> Int {
    switch operation {
    case .addition, .subtraction, .multiplication:
        return Int(arc4random_uniform(10) + 1)
    case .division:
        // add code here to find integer factors
        return 1
    }
}

func generateFormula(_ numTerms: Int = 1) -> (String, Int) {
    let x = Int(arc4random_uniform(10))
    var leftSide = "x"
    var result = x
    for i in 1...numTerms {
        (leftSide, result) = addNewTerm(formula: leftSide, result: result)
        if i < numTerms {
            leftSide = "(" + leftSide + ")"
        }
    }
    let formula = "\(leftSide) = \(result)"
    return (formula, x)
}

func printFormula(_ numTerms: Int = 1) {
    let (formula, x) = generateFormula(numTerms)
    print(formula, " x = ", x)
}

for _ in 1...30 {
    printFormula(Int(arc4random_uniform(3)) + 1)
}
There are some things missing. The sqrt() function will have to be implemented separately, and for division to be useful you'll have to add a system for finding integer factors (since you presumably want the results to be integers; see the sketch after the sample output below). Depending on what sort of output you want there's a lot more work to do, but this should get you started.
Here's sample output:
(x + 10) - 5 = 11 x = 6
((x + 6) + 6) - 1 = 20 x = 9
x - 2 = 5 x = 7
((x + 3) * 5) - 6 = 39 x = 6
(x / 1) + 6 = 11 x = 5
(x * 6) * 3 = 54 x = 3
x * 9 = 54 x = 6
((x / 1) - 6) + 8 = 11 x = 9
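On the division placeholder in chooseRandomNumberFor above: one minimal way to pick a divisor that divides the running result evenly, sketched in R for brevity (divisors_of is a made-up helper; the port to Swift is mechanical):
divisors_of <- function(n) {
  n <- abs(n)
  if (n == 0) return(1L)  # everything divides 0; just fall back to 1
  # indices i with n %% i == 0 are exactly the divisors of n
  which(n %% seq_len(n) == 0)
}

# pick one divisor uniformly (indexing avoids R's sample(n, 1) scalar gotcha)
d <- divisors_of(54)
d[sample(length(d), 1)]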
Okay, let’s assume from your saying “Note: Only 1 x in the equation” that what you want is a linear equation of the form 0 = β1*x + β0, where β1 and β0 are the slope and intercept coefficients, respectively.
The inverse of (or solution to) any such linear equation is given by x = -β0/β1. So what you really need to do is generate random integers β0 and β1 to create your equation. But since it should be “solvable” in someone’s head, you probably want β0 to be divisible by β1, and furthermore for β1 and β0/β1 to be at most 12, since this is the upper limit of the commonly known multiplication tables. In that case, just generate a random integer β1 ≤ 12, and set β0 equal to β1 times some integer n, 0 ≤ n ≤ 12.
If you want to allow simple fractional solutions like 2/3, just multiply the denominator and the numerator into β0 and β1, respectively, taking care to prevent the numerator or denominator from getting too large (12 is again a good limit).
Since you probably want the right-hand side to be non-zero, just generate a third random integer y between -12 and 12 and present the equation as y = β1*x + (β0 + y); adding y to both sides leaves the solution unchanged.
Since you mentioned √ can occur over the x variable only, that is pretty easy to add: the solution to 0 = β1*sqrt(x) + β0 is just x = (β0/β1)^2.
Here is some very simple (and very problematic) code for generating random integers to get you started:
import func Glibc.srand
import func Glibc.rand
import func Glibc.time
srand(UInt32(time(nil)))
print(rand() % 12)
There are a great many answers on this website that deal with better ways to generate random integers.
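To make the recipe above concrete, here is a minimal sketch (written in R purely because it reads like pseudocode; make_equation and all the bounds are illustrative, not from the question):
make_equation <- function() {
  b1 <- sample(1:12, 1)    # slope, kept within the times tables
  n  <- sample(0:12, 1)    # chosen so that the solution x = -n is simple
  b0 <- b1 * n             # intercept divisible by the slope
  y  <- sample(-12:12, 1)  # shift to make the right-hand side non-zero
  # y = b1*x + (b0 + y) has the same solution as 0 = b1*x + b0
  list(equation = sprintf("%d = %d*x + %d", y, b1, b0 + y),
       x = -n)
}
make_equation()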

How do I "tune my program" to get relative errors down to E-10?

The problem
I wrote a recursive program to calculate the first 26 values of the spherical Bessel functions for a given input x. The program seems to start accumulating relatively large errors at j_8(x). Here is the output of some code that computed the relative errors against known high-precision spherical Bessel function values for the first 25 SBFs (l = 0 to l = 24):
5.27753443514687e-16
9.21595688945492e-16
3.02006896635059e-15
7.62720601251909e-14
4.57824286846449e-12
4.42939097671948e-10
6.23673401270476e-08
1.20281992792456e-05
0.00304187901758528
0.976196008573827
387.486227014647
186360.170424407
106776056.913571
71856241567.0684
56117385601216.0
5.03357079061797e+16
5.13915094291702e+19
5.92532834322625e+22
7.6613338261931e+25
1.10398479254694e+29
1.76304629085447e+32
3.10469930465156e+35
6.00134333269024e+38
1.26807670217943e+42
2.91783063679757e+45
As you can see, after the j_8(x) value the error starts to increase. The problem simply states that I should "tune" my program to get the error down to E-10, but how do I do this? I'm completely lost...
The code:
import Foundation

var x: Double = 0.0
var j_up_val: Double = 0.0

// Upward recurrence: j_l(x) = ((2l - 1)/x) * j_{l-1}(x) - j_{l-2}(x)
func j_up(x_val: Double, l_val: Double) -> Double {
    if l_val == 0 {
        j_up_val = sin(x_val) / x_val
    } else if l_val == 1 {
        j_up_val = (sin(x_val) / pow(x_val, 2.0)) - (cos(x_val) / x_val)
    } else if l_val == 2 {
        j_up_val = ((2 * (l_val - 1) + 1) / x_val) * ((sin(x_val) / pow(x_val, 2.0)) - cos(x_val) / x_val) - sin(x_val) / x_val
    } else {
        let l2: Double = l_val - 1
        let l3: Double = l_val - 2
        // recurse with the same x value that was passed in
        j_up_val = ((2 * (l_val - 1) + 1) / x_val) * j_up(x_val: x_val, l_val: l2) - j_up(x_val: x_val, l_val: l3)
    }
    return j_up_val
}

x = 1
print("x:", x)
for i in 0...25 {
    let i_double = Double(i)
    print("l:", i)
    print(j_up(x_val: x, l_val: i_double))
    print(" \n")
}
print("**********************")

How to code a matrix in WinBUGS?

I am trying to code the 2x2 matrix sigma with its 4 elements, but I am not sure how to do this in WinBUGS. My goal is to get the posterior p's, their means and variances, and to create an ellipse region covered by the two posterior p's. Here's my code below:
model {
  # likelihood
  for (j in 1:Nf) {
    p1[j, 1:2] ~ dmnorm(gamma[1:2], T[1:2, 1:2])
    for (i in 1:2) {
      logit(p[j, i]) <- p1[j, i]
      Y[j, i] ~ dbin(p[j, i], n)
    }
    X_mu[j, 1] <- p[j, 1] - mean(p[, 1])
    X_mu[j, 2] <- p[j, 2] - mean(p[, 2])
    v1 <- sd(p[, 1]) * sd(p[, 1])
    v2 <- sd(p[, 2]) * sd(p[, 2])
    v12 <- (inprod(X_mu[j, 1], X_mu[j, 2])) / (sd(p[, 1]) * sd(p[, 2]))
    sigma[1, 1] <- v1
    sigma[1, 2] <- v12
    sigma[2, 1] <- v12
    sigma[2, 2] <- v2
    sigmaInv[1:2, 1:2] <- inverse(sigma[, ])
    T1[j, 1] <- inprod(sigmaInv[1, ], X_mu[j, 1])
    T1[j, 2] <- inprod(sigmaInv[2, ], X_mu[j, 2])
    ell[j, 1] <- inprod(X_mu[j, 1], T1[j, 1])
    ell[j, 2] <- inprod(X_mu[j, 2], T1[j, 2])
  }
  # priors
  gamma[1:2] ~ dmnorm(mn[1:2], prec[1:2, 1:2])
  expit[1] <- exp(gamma[1]) / (1 + exp(gamma[1]))
  expit[2] <- exp(gamma[2]) / (1 + exp(gamma[2]))
  T[1:2, 1:2] ~ dwish(R[1:2, 1:2], 2)
  sigma2[1:2, 1:2] <- inverse(T[, ])
  rho <- sigma2[1, 2] / sqrt(sigma2[1, 1] * sigma2[2, 2])
}
# Data
list(Nf =20, mn=c(-0.69, -1.06), n=60,
prec = structure(.Data = c(.001, 0,
0, .001),.Dim = c(2, 2)),
R = structure(.Data = c(.001, 0,
0, .001),.Dim = c(2, 2)),
Y= structure(.Data=c(32,13,
32,12,
10,4,
28,11,
10,5,
25,10,
4,1,
16,5,
28,10,
21,7,
19,9,
18,12,
31,12,
13,3,
10,4,
18,7,
3,2,
27,5,
8,1,
8,4),.Dim = c(20, 2))
You have to specify each element in turn, and since sigma is a single fixed matrix these definitions must sit outside the for loop (otherwise WinBUGS will complain about multiple definitions of the same node). You can use the inverse function (rather than solve) to invert the matrix:
model {
  # define each element of the matrix in turn, outside any loop
  sigma[1, 1] <- v1
  sigma[1, 2] <- v12
  sigma[2, 1] <- v21
  sigma[2, 2] <- v2
  # invert with inverse(); BUGS has no solve()
  sigmaInv[1:2, 1:2] <- inverse(sigma[, ])
}