Homemade Python concave hull: why is it a bit off? - convex-hull

I could not find a fast and reliable concave hull implementation, so I wrote my own (though it is a bit heterodox). It is fast and does the job (there is even an adjustment parameter), but the result is a bit off. I got confused between r,c and x,y a couple of times, so maybe it has to do with that, but I couldn't spot it. Any help is appreciated.
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage.measure import label, find_contours
import matplotlib.cm as cm
# points = datapoints
def concaveHull(points, pixel_adjustment=3):
    mins = np.min(points, axis=0)
    maxes = np.max(points, axis=0) - mins
    # so if we make each point 10x10=100 pixels it should mostly fill the whole dummy image
    points = points - mins
    shape = np.array(10*maxes*np.sqrt(len(points))/maxes.min(), dtype='int') + 50
    factor = shape.max()/points.max()
    points_aligned = factor*(points - np.min(points, axis=0))
    points_aligned = points_aligned.astype('int')
    # plt.scatter(points_aligned[:,0], points_aligned[:,1])
    # plt.show()
    nmb_contours = 100
    threshold = 3
    hit = 0
    while (nmb_contours > 1) or (hit < 2):
        dummy_image = np.zeros(shape)
        for i in points_aligned:
            dummy_image[(i[0] - threshold):(i[0] + threshold), (i[1] - threshold):(i[1] + threshold)] = 1
        dummy_image_larger = np.zeros(shape + 2)
        dummy_image_larger[1:-1, 1:-1] = dummy_image
        img_fill_holes = ndimage.morphology.binary_fill_holes(dummy_image_larger)
        label_image = label(img_fill_holes)
        contours = find_contours(label_image, 0.5)
        nmb_contours = len(contours)
        if len(contours) == 1:
            contour = contours[0]
            threshold += pixel_adjustment
            if hit < 1:
                print(f'threshold set to={threshold}')
            hit += 1
        else:
            threshold += 1
            continue
    final_contour = mins + contour/factor
    return final_contour
datapoints = np.random.rand(1000,2)
label_points = np.zeros((1000))
label_points[:] = 3
label_points[datapoints[:, 1] < 0.6]=2
label_points[datapoints[:, 1] < 0.3]=1
plt.scatter(datapoints[:,0],datapoints[:,1], c=label_points)
for subset_label in range(1, 4):
    subset = datapoints[label_points == subset_label]
    outer_line = concaveHull(subset)
    plt.fill(outer_line[:, 0], outer_line[:, 1], facecolor='none', edgecolor=cm.Set1(subset_label), lw=2)
plt.show()
label_points[:] = 3
label_points[datapoints[:, 1] < 0.6]=2
label_points[np.logical_and(datapoints[:, 1] < 0.9, datapoints[:, 0] < 0.15)]=1
label_points[np.logical_and(datapoints[:, 1] < 0.3, datapoints[:, 0] < 0.5)]=1
label_points[np.logical_and(datapoints[:, 1] < 0.3, datapoints[:, 0] > 0.7)]=4
fig, ax = plt.subplots()
ax.scatter(datapoints[:,0],datapoints[:,1], c=label_points)
for subset_label in range(1, 5):
    subset = datapoints[label_points == subset_label]
    outer_line = concaveHull(subset)
    plt.fill(outer_line[:, 0], outer_line[:, 1], facecolor='none', edgecolor=cm.Set1(subset_label), lw=2)
plt.show()
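One observation (not a confirmed fix): the contour is extracted from the padded, dilated dummy image, so its coordinates are shifted by the 1-pixel border added via dummy_image_larger and pushed outwards by roughly threshold pixels by the squares drawn around each point, and neither offset is undone by mins + contour/factor. A minimal sketch that removes at least the constant border (the dilation would still widen the outline by about threshold/factor in data units):

final_contour = mins + (contour - 1)/factor  # subtract the 1-pixel pad introduced by dummy_image_larger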

Related

Find interception of rocket and skeet - TypeError

For an assignment I need to calculate the launch angle beta of a rocket.
The rocket should hit a skeet in the air, and the rocket only accelerates during the first 0.5 s.
In the first part of the exercise I already calculated the trajectory of the skeet. In addition to the given parameters, there is also a wind in the x-direction.
I am trying to calculate the angle beta by minimizing a wrapper function for the distance between the two trajectories.
I always get: TypeError: loop of ufunc does not support argument 0 of type int which has no callable sqrt method
I don't know what my mistake is, and I hope someone can help me.
The experiment looks like this:
Picture - rocket - skeet
from scipy.optimize import minimize
from scipy.optimize import fsolve
from scipy.integrate import solve_ivp
import numpy as np
import matplotlib.pyplot as plt
v_wind = 3.5 #m / s in positive x direction
rho_l = 1.2 # kg/m3
# skeet params
cw_skeet_top = 1.11
cw_skeet_side = 0.5
A_skeet_top = 0.0086
A_skeet_side = 0.00432
m_skeet = 0.105 #kg
v0_skeet = 40
x0_skeet = 60
y0_skeet = 0
# rocket params
cw_rocket_side = 1
cw_rocket_front = 0.5
A_rocket_side = 0.08
A_rocket_front = 0.0312
m_rocket = 2
v0_rocket = 0
x0_rocket = 20
y0_rocket = 0
F_accel_rocket = 120 #in Newton, divide by m_rocket to find accel in m/s^2
t_accel_end = 0.5
def drag(v_rel, rho_l, cw, A):
    Fw = rho_l * cw * A * v_rel**2/2.0
    if (v_rel < 0):
        Fw = -Fw
    return Fw

def skeet(t, y, rho_l, cw_skeet_side, cw_skeet_top, A_skeet_side, A_skeet_top, m_skeet):
    # For clarity:
    # y[0] - x-position
    # y[1] - x-velocity
    # y[2] - y-position
    # y[3] - y-velocity
    d2xdt2 = -drag(y[1]-v_wind, rho_l, cw_skeet_side, A_skeet_side)/m_skeet
    dxdt = -y[1]
    d2ydt2 = -9.81 - drag(y[3], rho_l, cw_skeet_top, A_skeet_top)/m_skeet
    dydt = y[3]
    return dxdt, d2xdt2, dydt, d2ydt2

def hit_ground(t, y, *args):
    return y[2]
hit_ground.terminal = True
hit_ground.direction = -1

final_x = 0  # m
DGLargs = (rho_l, cw_skeet_side, cw_skeet_top, A_skeet_side, A_skeet_top, m_skeet)

def wrapperAlpha(alpha, final_x, DGLargs):
    tspan = [0, 100]
    maxStep = tspan[1]/1000
    start_vec = [x0_skeet, v0_skeet*np.cos(alpha/180*np.pi), y0_skeet, v0_skeet*np.sin(alpha/180*np.pi)]
    sol = solve_ivp(skeet, tspan, start_vec, args=(DGLargs), max_step=maxStep, events=(hit_ground))
    return np.abs(sol.y[0,-1] - final_x)
starting_guess = 10 #deg
res = fsolve(wrapperAlpha, x0 = starting_guess, args = (final_x,DGLargs))
angle = res
print(angle)
tspan = [0, 100]
maxStep=tspan[1]/1000
start_vec = [x0_skeet, v0_skeet*np.cos(angle/180*np.pi), y0_skeet, v0_skeet*np.sin(angle/180*np.pi)]
sol = solve_ivp(skeet,tspan,start_vec,args=(DGLargs),max_step=maxStep,events=(hit_ground))
x_skeet = sol.y[0,:]
y_skeet = sol.y[2,:]
plt.plot(x_skeet,y_skeet)
#important exercise:
RocketArgs = (rho_l,cw_rocket_side, cw_rocket_front,A_rocket_side,A_rocket_front,m_rocket,F_accel_rocket,t_accel_end)
def rocket(t, y, beta, rho_l, cw_rocket_side, cw_rocket_front, A_rocket_side, A_rocket_front, m_rocket, F_accel_rocket, t_accel_end):
    if t_accel_end > t:
        d2xdt2 = (-drag(y[1]-v_wind, rho_l, cw_rocket_side, A_rocket_side) + (F_accel_rocket*np.cos(beta/180*np.pi)))/m_rocket
        dxdt = y[1]
        d2ydt2 = (-9.81 - drag(y[3], rho_l, cw_rocket_front, A_rocket_front) + (F_accel_rocket*np.sin(beta/180*np.pi)))/m_rocket
        dydt = y[3]
    else:
        d2xdt2 = (-drag(y[1]-v_wind, rho_l, cw_rocket_side, A_rocket_side))/m_rocket
        dxdt = y[1]
        d2ydt2 = (-9.81 - drag(y[3], rho_l, cw_rocket_front, A_rocket_front))/m_rocket
        dydt = y[3]
    return dxdt, d2xdt2, dydt, d2ydt2

def wrapperRocket(beta, x_skeet, y_skeet, RocketArgs):
    tspan = [0, 100]
    maxStep = 0.01
    start_vec = [x0_rocket, v0_rocket*np.cos(beta/180*np.pi), y0_rocket, v0_rocket*np.sin(beta/180*np.pi)]
    sol2 = solve_ivp(rocket, tspan, start_vec, args=(beta, rho_l, cw_rocket_side, cw_rocket_front, A_rocket_side, A_rocket_front, m_rocket, F_accel_rocket, t_accel_end), max_step=maxStep, dense_output=True)
    for i in range(len(x_skeet)):
        distance = np.sqrt((sol2.y[0,i]-x_skeet)**2+(sol2.y[2,i]-y_skeet)**2)
    return distance
starting_guess = 20 #deg
res = minimize(wrapperRocket, x0 = starting_guess, args = (x_skeet,y_skeet,RocketArgs))
angle = res
print(angle)
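Not an answer to the reported TypeError itself, but note that the loop at the end of wrapperRocket overwrites distance on every iteration and then returns an array, while scipy.optimize.minimize expects a scalar objective. A minimal sketch of a scalar objective (closest approach, pairing samples by index just like the original loop, so the two time grids are still not matched exactly); the unpacking of beta assumes minimize passes the parameter as a 1-element array:

def wrapperRocket(beta, x_skeet, y_skeet, RocketArgs):
    beta = np.atleast_1d(beta)[0]  # minimize passes a 1-element array
    tspan = [0, 100]
    start_vec = [x0_rocket, v0_rocket*np.cos(beta/180*np.pi), y0_rocket, v0_rocket*np.sin(beta/180*np.pi)]
    sol2 = solve_ivp(rocket, tspan, start_vec, args=(beta, *RocketArgs), max_step=0.01)
    n = min(len(x_skeet), sol2.y.shape[1])  # compare only the overlapping samples
    distance = np.sqrt((sol2.y[0, :n] - x_skeet[:n])**2 + (sol2.y[2, :n] - y_skeet[:n])**2)
    return distance.min()  # single scalar: closest approach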

Modelling membrane evolution over time

I am trying to model the time evolution of a membrane, based on the following code in MATLAB.
The basic outline is that the evolution follows a differential equation where j = 0, 1, x^0 = x, x^1 = y, and x^j(s_i) = x^j_i.
My code is the following.
import numpy as np
from matplotlib import pyplot as plt
R0 = 5 #radius
N = 360 #number of intervals
x0 = 2*np.pi*R0/(N/2) #resting membrane lengths
phi = np.linspace(0,2*np.pi, num=360, dtype=float)
R1 = R0 + 0.5*np.sin(20*phi)
X = R1*np.cos(phi)
Y = R1*np.sin(phi)
L = np.linspace(-1,358, num=360, dtype=int)
R = np.linspace(1,360, num=360,dtype=int) #right and left indexing vectors
R[359] = 0
X = R1*np.cos(phi)
Y = R1*np.sin(phi)
plt.plot(X,Y)
plt.axis("equal")
plt.show()
ds = 1/N
ds2 = ds**2
k = 1/10
w = 10**6
for i in range(0, 20000):
    lengths = np.sqrt((X[R]-X)**2 + (Y[R]-Y)**2)
    Ex = k/ds2*(X[R] - 2*X + X[L] - x0*((X[R]-X)/lengths - (X-X[L])/lengths[L]))
    Ey = k/ds2*(Y[R] - 2*Y + Y[L] - x0*((Y[R]-Y)/lengths - (Y-Y[L])/lengths[L]))
    X = X + 1/w*Ex
    Y = Y + 1/w*Ey
plt.plot(X,Y)
plt.axis("equal")
plt.show()
The model is supposed to evolve into a circular membrane, as below,
but this is what mine does:
Your definition of x0 is wrong.
In the Matlab code, it is equal to
x0 = 2*pi*R/N/2 # which is pi*R/N
while in your Python code it is
x0 = 2*np.pi*R0/(N/2) # which is 4*np.pi*R0/N
Correcting that, the end result is a circular shape, but with a different radius. I'm assuming that this is because of the reduced number of iterations (20000 instead of 1000000).
Edit:
As expected, using the correct number of iterations results in a plot similar to your expected one.
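For reference, the corrected Python line matching the MATLAB definition would be:

x0 = 2*np.pi*R0/N/2  # equals np.pi*R0/N, as in the MATLAB code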

How to zoom in on this Mandelbrot code

I have the following Jupyter notebook code that draws a Mandelbrot fractal 50 times and saves it in 50 different image files. I would love it if someone could help me figure out how to zoom in on the center of each image. I messed around with the numbers a good bit, but I would love to be able to zoom in on a point while keeping the window steady. Thanks!
import numpy
from numba import jit
import matplotlib.pyplot as plt
#jit
def mandlebrot(Re, Im, max_iter, zoom):
    c = complex(Re, Im)
    z = 0.0j
    for i in range(max_iter):
        z = (z*z) + c
        if (z.real*z.real + z.imag*z.imag) >= 4:
            return i
    return max_iter
zoom = 1
for i in range(50):
    columns = 1920
    rows = 1080
    result = numpy.zeros([rows, columns])
    for row_index, Re in enumerate(numpy.linspace(-2, 1, num=rows)):
        for column_index, Im in enumerate(numpy.linspace(-1, 1, num=columns)):
            result[row_index, column_index] = mandlebrot(Re, Im, 50, zoom)
    plt.figure(dpi=100)
    plt.imshow(result.T, cmap='hot', interpolation='bilinear', extent=[-2, 1, -1, 1])
    zoom += .05
    plt.xlabel('Real')
    plt.ylabel('Imaginary')
    filepath = r"C:\Users\Riley\Desktop\mandlebrot\mandlebrot" + str(i) + ".png"
    plt.savefig(filepath)
    plt.close()
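One common way to zoom is to keep the centre fixed and shrink the sampled window by the zoom factor each frame, then pass the same window to imshow so the axes stay consistent. A sketch (center_re, center_im and the initial half-extents are assumed values you would choose; these lines would replace the fixed linspace ranges and extent inside the loop):

center_re, center_im = -0.75, 0.0   # assumed zoom target; pick any point of interest
half_w, half_h = 1.5, 1.0           # initial half-extents of the real/imaginary window
re_min, re_max = center_re - half_w/zoom, center_re + half_w/zoom
im_min, im_max = center_im - half_h/zoom, center_im + half_h/zoom
for row_index, Re in enumerate(numpy.linspace(re_min, re_max, num=rows)):
    for column_index, Im in enumerate(numpy.linspace(im_min, im_max, num=columns)):
        result[row_index, column_index] = mandlebrot(Re, Im, 50, zoom)
plt.imshow(result.T, cmap='hot', interpolation='bilinear', extent=[re_min, re_max, im_min, im_max])

Multiplying the zoom each frame (zoom *= 1.05) instead of adding 0.05 gives a steadier apparent zoom rate.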

Fast CVX solvers in Matlab

I am wondering what the fastest convex optimizer in Matlab is, or whether there is any way to speed up the current solvers. I'm using CVX, but it's taking forever to solve the optimization problem I have.
The optimization I have is to solve
minimize norm(Ax-b, 2)
subject to
x >= 0
and x d <= delta
where A and b are very large.
Is there any way that I can solve this with a least-squares solver and then transfer it to the constrained version to make it faster?
I'm not sure what x.d <= delta means, but I'll just assume it's supposed to be x <= delta.
You can solve this problem using the projected gradient method or an accelerated projected gradient method (which is just a slight modification of the projected gradient method that "magically" converges much faster). Here is some Python code that shows how to minimize .5*||Ax - b||^2 subject to the constraint that 0 <= x <= delta using FISTA, which is an accelerated projected gradient method. More details about the projected gradient method and FISTA can be found, for example, in Boyd's manuscript on proximal algorithms.
import numpy as np
import matplotlib.pyplot as plt
def fista(gradf, proxg, evalf, evalg, x0, params):
    # This code does FISTA with line search
    maxIter = params['maxIter']
    t = params['stepSize']  # Initial step size
    showTrigger = params['showTrigger']
    increaseFactor = 1.25
    decreaseFactor = .5
    costs = np.zeros((maxIter, 1))
    xkm1 = np.copy(x0)
    vkm1 = np.copy(x0)
    for k in range(1, maxIter + 1):
        costs[k-1] = evalf(xkm1) + evalg(xkm1)
        if k % showTrigger == 0:
            print("Iteration: " + str(k) + " cost: " + str(costs[k-1]))
        t = increaseFactor*t
        acceptFlag = False
        while acceptFlag == False:
            if k == 1:
                theta = 1
            else:
                a = tkm1
                b = t*(thetakm1**2)
                c = -t*(thetakm1**2)
                theta = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
            y = (1 - theta)*xkm1 + theta*vkm1
            (gradf_y, fy) = gradf(y)
            x = proxg(y - t*gradf_y, t)
            fx = evalf(x)
            if fx <= fy + np.vdot(gradf_y, x - y) + (.5/t)*np.sum((x - y)**2):
                acceptFlag = True
            else:
                t = decreaseFactor*t
        tkm1 = t
        thetakm1 = theta
        vkm1 = xkm1 + (1/theta)*(x - xkm1)
        xkm1 = x
    return (xkm1, costs)
if __name__ == '__main__':
    delta = 5.0
    numRows = 300
    numCols = 50
    A = np.random.randn(numRows, numCols)
    ATrans = np.transpose(A)
    xTrue = delta*np.random.rand(numCols, 1)
    b = np.dot(A, xTrue)
    noise = .1*np.random.randn(numRows, 1)
    b = b + noise

    def evalf(x):
        AxMinusb = np.dot(A, x) - b
        val = .5 * np.sum(AxMinusb ** 2)
        return val

    def gradf(x):
        AxMinusb = np.dot(A, x) - b
        grad = np.dot(ATrans, AxMinusb)
        val = .5 * np.sum(AxMinusb ** 2)
        return (grad, val)

    def evalg(x):
        return 0.0

    def proxg(x, t):
        return np.maximum(np.minimum(x, delta), 0.0)

    x0 = np.zeros((numCols, 1))
    params = {'maxIter': 500, 'stepSize': 1.0, 'showTrigger': 5}
    (x, costs) = fista(gradf, proxg, evalf, evalg, x0, params)

    plt.figure()
    plt.plot(x)
    plt.plot(xTrue)
    plt.figure()
    plt.semilogy(costs)
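For comparison, the plain (non-accelerated) projected gradient method mentioned above is only a few lines. This is a sketch, not part of the original answer, reusing the A, numCols, gradf and proxg defined in the script, with a fixed step 1/L where L is the largest eigenvalue of A^T A:

L_lip = np.linalg.norm(A, 2)**2  # Lipschitz constant of the gradient of .5*||Ax - b||^2
x_pg = np.zeros((numCols, 1))
for _ in range(500):
    grad, _ = gradf(x_pg)                        # gradient step ...
    x_pg = proxg(x_pg - grad/L_lip, 1.0/L_lip)   # ... followed by projection onto [0, delta]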

Different intensity values for same image in OpenCV and MATLAB

I'm using Python 2.7 and OpenCV 3.x for my project, OMR sheet evaluation using a web camera.
While finding the number of white pixels around the center of a circle, I noticed that the intensity values are wrong, but MATLAB shows the correct values when I use imtool('a1.png').
I'm using a .png image (datatype uint8).
Just run the code, go to the [360:370, 162:172] coordinates in the image, and check the intensity values; they should not be 0.
Find the images here -> a1.png a2.png
Why is this happening?
import numpy as np
import cv2
from matplotlib import pyplot as plt
#select radius of circle
radius = 10;
#function for finding white pixels
def thresh_circle(img, ptx, pty):
    centerX = ptx;
    centerY = pty;
    cntOfWhite = 0;
    for i in range((centerX - radius), (centerX + radius)):
        for j in range((centerY - radius), (centerY + radius)):
            if(j < img.shape[0] and i < img.shape[1]):
                val = img[i][j]
                if (val == 255):
                    cntOfWhite = cntOfWhite + 1;
    return cntOfWhite
MIN_MATCH_COUNT = 10
img1 = cv2.imread('a1.png',0) # queryImage
img2 = cv2.imread('a2.png',0) # trainImage
sift = cv2.SIFT()# Initiate SIFT detector
kp1, des1 = sift.detectAndCompute(img1,None)# find the keypoints and descriptors with SIFT
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
good = []# store all the good matches as per Lowe's ratio test.
for m,n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)
if len(good)>MIN_MATCH_COUNT:
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.LMEDS,5.0)
    #print M
    matchesMask = mask.ravel().tolist()
    h,w = img1.shape
else:
    print "Not enough matches are found - %d/%d" % (len(good),MIN_MATCH_COUNT)
    matchesMask = None
img3 = cv2.warpPerspective(img1, M, (img2.shape[1],img2.shape[0]))
blur = cv2.GaussianBlur(img3,(5,5),0)
ret3,th3 = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
ret,th2 = cv2.threshold(blur,ret3,255,cv2.THRESH_BINARY_INV)
print th2[360:370,162:172]#print a block of image
plt.imshow(th2, 'gray'),plt.show()
cv2.waitKey(0)
cv2.imwrite('th2.png',th2)
ptyc = np.array([170,200,230,260]);#y coordinates of circle center
ptxc = np.array([110,145,180,215,335,370,405,440])#x coordinates of circle center
pts_src = np.zeros(shape = (32,2),dtype=np.int);#x,y coordinates of circle center
ct = 0;
for i in range(0,4):
    for j in range(0,8):
        pts_src[ct][1] = ptyc[i];
        pts_src[ct][0] = ptxc[j];
        ct = ct+1;
boolval = np.zeros(shape=(8,4),dtype=np.bool)
ct = 0;
for j in range(0,8):
    for i in range(0,4):
        a1 = thresh_circle(th2,pts_src[ct][0],pts_src[ct][1])
        ct = ct+1;
        if(a1 > 50):
            boolval[j][i] = 1
        else:
            boolval[j][i] = 0
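One thing that may be relevant (an observation, not a verified fix): NumPy indexes images as img[row, col], i.e. img[y, x], whereas imtool reports pixel positions as (x, y). So th2[360:370, 162:172] reads rows 360-370 and columns 162-172; the block that imtool shows around x = 360..370, y = 162..172 would instead be:

print th2[162:172, 360:370]  # hypothetical check: slice rows by y and columns by x

Similarly, inside thresh_circle, i runs over the x coordinate and j over the y coordinate, but the lookup uses img[i][j]; indexing by (row, column) would be img[j][i].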