How to zoom in on this Mandelbrot code - fractals

I have the following Jupyter notebook code that draws a Mandelbrot fractal 50 times and saves it in 50 different image files. I would love it if someone could help me figure out how to zoom into the center of each image. I messed around with the numbers a good bit, but I would love to be able to zoom in on a point and keep the window steady as well. Thanks!
import numpy
from numba import jit
import matplotlib.pyplot as plt

@jit
def mandlebrot(Re, Im, max_iter, zoom):
    # note: zoom is passed in but never used inside the function
    c = complex(Re, Im)
    z = 0.0j
    for i in range(max_iter):
        z = (z*z) + c
        if (z.real*z.real + z.imag*z.imag) >= 4:
            return i
    return max_iter

zoom = 1
for i in range(50):
    columns = 1920
    rows = 1080
    result = numpy.zeros([rows, columns])
    for row_index, Re in enumerate(numpy.linspace(-2, 1, num=rows)):
        for column_index, Im in enumerate(numpy.linspace(-1, 1, num=columns)):
            result[row_index, column_index] = mandlebrot(Re, Im, 50, zoom)
    plt.figure(dpi=100)
    plt.imshow(result.T, cmap='hot', interpolation='bilinear', extent=[-2, 1, -1, 1])
    zoom += .05
    plt.xlabel('Real')
    plt.ylabel('Imaginary')
    filepath = r"C:\Users\Riley\Desktop\mandlebrot\mandlebrot" + str(i) + ".png"
    plt.savefig(filepath)
    plt.close()
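One way to get the steady zoom (a sketch, not the question's code): hold a fixed center and shrink the half-widths of the viewing window every frame, feeding the same window to both linspace and extent. The center (-0.75, 0) and the 0.9 shrink factor are arbitrary choices; numpy, plt, and mandlebrot are as above.

rows, columns = 1080, 1920
center_re, center_im = -0.75, 0.0     # arbitrary zoom target
half_w, half_h = 1.5, 1.0             # half-widths of the viewing window
for i in range(50):
    re_min, re_max = center_re - half_w, center_re + half_w
    im_min, im_max = center_im - half_h, center_im + half_h
    result = numpy.zeros([rows, columns])
    for row_index, Re in enumerate(numpy.linspace(re_min, re_max, num=rows)):
        for column_index, Im in enumerate(numpy.linspace(im_min, im_max, num=columns)):
            result[row_index, column_index] = mandlebrot(Re, Im, 50, 1)
    plt.imshow(result.T, cmap='hot', interpolation='bilinear',
               extent=[re_min, re_max, im_min, im_max])
    plt.savefig("zoom" + str(i) + ".png")
    plt.close()
    half_w *= 0.9   # shrinking the window 10% per frame zooms in on the center
    half_h *= 0.9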

Related

Homemade Python concave hull, but why is it a bit off?

I could not find a fast and reliable concave hull implementation, so I wrote my own (though a bit heterodox). It is fast and does the job (there is even an adjustment parameter), but it is a bit off. I got confused between r,c and x,y a couple of times; maybe it has to do with that, but I couldn't see it. Any help is appreciated.
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from skimage.measure import label, find_contours
import matplotlib.cm as cm

# points = datapoints
def concaveHull(points, pixel_adjustment=3):
    mins = np.min(points, axis=0)
    maxes = np.max(points, axis=0) - mins
    # so if we make each point 10x10=100 pixels it should mostly fill the whole dummy image
    points = points - mins
    shape = np.array(10*maxes*np.sqrt(len(points))/maxes.min(), dtype='int') + 50
    factor = shape.max()/points.max()
    points_aligned = factor*(points - np.min(points, axis=0))
    points_aligned = points_aligned.astype('int')
    # plt.scatter(points_aligned[:,0], points_aligned[:,1])
    # plt.show()
    nmb_contours = 100
    threshold = 3
    hit = 0
    while (nmb_contours > 1) or (hit < 2):
        dummy_image = np.zeros(shape)
        for i in points_aligned:
            dummy_image[(i[0] - threshold):(i[0] + threshold), (i[1] - threshold):(i[1] + threshold)] = 1
        dummy_image_larger = np.zeros(shape + 2)
        dummy_image_larger[1:-1, 1:-1] = dummy_image
        img_fill_holes = ndimage.binary_fill_holes(dummy_image_larger)
        label_image = label(img_fill_holes)
        contours = find_contours(label_image, 0.5)
        nmb_contours = len(contours)
        if len(contours) == 1:
            contour = contours[0]
            threshold += pixel_adjustment
            if hit < 1:
                print(f'threshold set to={threshold}')
            hit += 1
        else:
            threshold += 1
            continue
    final_contour = mins + contour/factor
    return final_contour

datapoints = np.random.rand(1000, 2)
label_points = np.zeros((1000))
label_points[:] = 3
label_points[datapoints[:, 1] < 0.6] = 2
label_points[datapoints[:, 1] < 0.3] = 1
plt.scatter(datapoints[:, 0], datapoints[:, 1], c=label_points)
for subset_label in range(1, 4):
    subset = datapoints[label_points == subset_label]
    outer_line = concaveHull(subset)
    plt.fill(outer_line[:, 0], outer_line[:, 1], facecolor='none', edgecolor=cm.Set1(subset_label), lw=2)
plt.show()

label_points[:] = 3
label_points[datapoints[:, 1] < 0.6] = 2
label_points[np.logical_and(datapoints[:, 1] < 0.9, datapoints[:, 0] < 0.15)] = 1
label_points[np.logical_and(datapoints[:, 1] < 0.3, datapoints[:, 0] < 0.5)] = 1
label_points[np.logical_and(datapoints[:, 1] < 0.3, datapoints[:, 0] > 0.7)] = 4
fig, ax = plt.subplots()
ax.scatter(datapoints[:, 0], datapoints[:, 1], c=label_points)
for subset_label in range(1, 5):
    subset = datapoints[label_points == subset_label]
    outer_line = concaveHull(subset)
    plt.fill(outer_line[:, 0], outer_line[:, 1], facecolor='none', edgecolor=cm.Set1(subset_label), lw=2)
plt.show()
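Not a verified diagnosis, but one thing to check: the dummy image paints a square of half-width threshold around every point, so the traced contour sits roughly threshold pixels outside the data, and the recovered hull comes back dilated by about threshold/factor in data units. A hedged sketch of a compensation, eroding the filled mask back before tracing (img_fill_holes, threshold, mins, and factor are the names used inside concaveHull):

from scipy import ndimage
from skimage.measure import find_contours

# Sketch: undo the ~threshold-pixel dilation before tracing the contour.
eroded = ndimage.binary_erosion(img_fill_holes, iterations=threshold)
contours = find_contours(eroded, 0.5)        # contours come back as (row, col) pairs
final_contour = mins + contours[0]/factor    # same rescaling as in concaveHull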

I can't figure out why the size of the tensors doesn't match in Pytorch

Some context:
I have been studying AI and ML for the last couple of months and I am finally studying neural nets. Great! The problem is that when I follow a tutorial everything seems to be OK, but when I try to implement a NN by myself I always face issues related to the size of the tensors.
I have seen the answers to other questions (like this one), but they address the exact problem of those posts. I am not looking for code to just copy and paste. I want to understand why I am facing this problem, how to handle it, and how to avoid it.
The error message:
/home/devops/aic/venv/lib/python3.8/site-packages/torch/nn/modules/loss.py:528: UserWarning: Using a target size (torch.Size([16, 2])) that is different to the input size (torch.Size([9, 2])). This will likely lead to incorrect results due to broadcasting. Please ensure they have the same size.
  return F.mse_loss(input, target, reduction=self.reduction)
Traceback (most recent call last):
  File "nn_conv.py", line 195, in <module>
    loss = loss_function(outputs, targets)
  File "/home/devops/aic/venv/lib/python3.8/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/devops/aic/venv/lib/python3.8/site-packages/torch/nn/modules/loss.py", line 528, in forward
    return F.mse_loss(input, target, reduction=self.reduction)
  File "/home/devops/aic/venv/lib/python3.8/site-packages/torch/nn/functional.py", line 2928, in mse_loss
    expanded_input, expanded_target = torch.broadcast_tensors(input, target)
  File "/home/devops/aic/venv/lib/python3.8/site-packages/torch/functional.py", line 74, in broadcast_tensors
    return _VF.broadcast_tensors(tensors)  # type: ignore
RuntimeError: The size of tensor a (9) must match the size of tensor b (16) at non-singleton dimension 0
This is my code:
import os
import cv2
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

class DogsVSCats():
    IMG_SIZE = 50
    CATS = 'PetImages/Cat'
    DOGS = 'PetImages/Dog'
    LABELS = {CATS: 0, DOGS: 1}
    training_data = []
    cats_count = 0
    dogs_count = 0

    def make_training_data(self):
        for label in self.LABELS.keys():
            for f in tqdm(os.listdir(label)):
                try:
                    path = os.path.join(label, f)
                    # convert image to grayscale
                    img = cv2.imread(path)
                    if img is not None:
                        height, width = img.shape[:2]
                        if width > height:
                            height = round((height * self.IMG_SIZE) / width)
                            width = self.IMG_SIZE
                            right = 0
                            bottom = self.IMG_SIZE - height
                        else:
                            width = round((width * self.IMG_SIZE) / height)
                            height = self.IMG_SIZE
                            right = self.IMG_SIZE - width
                            bottom = 0
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
                        img = cv2.resize(img, (width, height))
                        img = cv2.copyMakeBorder(img,
                                                 top=0,
                                                 bottom=bottom,
                                                 left=0,
                                                 right=right,
                                                 borderType=cv2.BORDER_CONSTANT)
                        # Add a one-hot vector of the image's label to self.training_data
                        self.training_data.append([np.array(img), np.eye(len(self.LABELS))[self.LABELS[label]]])
                        if label == self.CATS:
                            self.cats_count += 1
                        elif label == self.DOGS:
                            self.dogs_count += 1
                except cv2.error as e:
                    pass
        np.random.shuffle(self.training_data)
        np.save("PetImages/training_data.npy", self.training_data)
        print("Cats:", self.cats_count)
        print("Dogs:", self.dogs_count)
        training_data = np.load('PetImages/training_data.npy', allow_pickle=True)
        plt.imsave('PetImages/trained_example.png', training_data[1][0])

class RunningMetrics():
    def __init__(self):
        self._sum = 0
        self._count = 0

    def __call__(self):
        return self._sum/float(self._count)

    def update(self, val, size):
        self._sum += val
        self._count += size

class Net(nn.Module):
    def __init__(self, num_channels, conv_kernel_size=3, stride=1, padding=1, max_pool_kernel_size=2):
        super(Net, self).__init__()
        self._num_channels = num_channels
        self._max_pool_kernel_size = max_pool_kernel_size
        self.conv1 = nn.Conv2d(1, self._num_channels, conv_kernel_size, stride, padding)
        self.conv2 = nn.Conv2d(self._num_channels, self._num_channels*2, conv_kernel_size, stride, padding)
        self.conv3 = nn.Conv2d(self._num_channels*2, self._num_channels*4, conv_kernel_size, stride, padding)
        # Calc input of first
        self.fc1 = nn.Linear(self._num_channels*4*8*8, self._num_channels*8)
        self.fc2 = nn.Linear(self._num_channels*8, 2)

    def forward(self, x):
        # Conv
        x = self.conv1(x)
        x = F.relu(F.max_pool2d(x, self._max_pool_kernel_size))
        x = self.conv2(x)
        x = F.relu(F.max_pool2d(x, self._max_pool_kernel_size))
        x = self.conv3(x)
        x = F.relu(F.max_pool2d(x, self._max_pool_kernel_size))
        # Flatten
        x = x.view(-1, self._num_channels*4*8*8)
        # Fully Connected
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        # return F.log_softmax(x, dim=1)
        return F.softmax(x, dim=1)

    def save_model(self, path):
        torch.save(self, path)

    def load_model(self, path):
        model = torch.load(path)
        model.eval()
        return model

if __name__ == '__main__':
    print('Loading dataset')
    if not os.path.exists("PetImages/training_data.npy"):
        dogsvcats = DogsVSCats()
        dogsvcats.make_training_data()
    training_data = np.load('PetImages/training_data.npy', allow_pickle=True)
    print('Loading Net')
    net = Net(num_channels=32)
    # net = net.to(device)
    # optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    optimizer = optim.Adam(net.parameters(), lr=0.001)
    # loss_function = nn.NLLLoss()
    loss_function = nn.MSELoss()
    print('Converting X tensor')
    X = torch.Tensor([i[0] for i in training_data]).view(-1, 50, 50)
    X = X/255.0
    print('Converting Y tensor')
    y = torch.Tensor([i[1] for i in training_data])
    # Validation data
    VAL_PERCENT = 0.1
    val_size = int(len(X)*VAL_PERCENT)
    X_train = X[:-val_size]
    y_train = y[:-val_size]
    X_test = X[-val_size:]
    y_test = y[-val_size:]
    print('Training Set:', len(X_train))
    print('Testing Set:', len(X_test))
    BATCH_SIZE = 16
    EPOCHS = 2
    IMG_SIZE = 50
    for epoch in range(EPOCHS):
        print(f'Epoch {epoch+1}/{EPOCHS}')
        running_loss = RunningMetrics()
        running_acc = RunningMetrics()
        for i in tqdm(range(0, len(X_train), BATCH_SIZE)):
            inputs = X_train[i:i+BATCH_SIZE].view(-1, 1, IMG_SIZE, IMG_SIZE)
            targets = y_train[i:i+BATCH_SIZE]
            # inputs, targets = inputs.to(device), targets.to(device)
            optimizer.zero_grad()
            outputs = net(inputs)
            _, preds = torch.max(outputs, 1)
            loss = loss_function(outputs, targets)
            loss.backward()
            optimizer.step()
            running_loss.update(loss.item()*BATCH_SIZE, BATCH_SIZE)
            running_acc.update(torch.sum(preds == targets).float(), BATCH_SIZE)
        print(f'Loss: {running_loss():.4f}, Acc: {running_acc():.4f}')
        print('-'*10)
Dataset:
I am using Microsoft's dataset of cat and dog images.
EDIT:
The previous error message has been solved by following Anonymous' advice, but now I am getting another error:
Traceback (most recent call last):
  File "nn_conv.py", line 203, in <module>
    running_acc.update(torch.sum(preds == targets).float(),
RuntimeError: The size of tensor a (16) must match the size of tensor b (2) at non-singleton dimension 1
The shapes through your network are:
Input: 16 x 1 x 50 x 50
After conv1/maxpool1: 16 x 32 x 25 x 25
After conv2/maxpool2: 16 x 64 x 12 x 12 (the pooling has no padding, so take the floor)
After conv3/maxpool3: 16 x 128 x 6 x 6 (= 73,728 values in the batch; here is your error)
Flattening: you specified a view like -1 x 32*4*8*8, i.e. 9 x 8192.
The correct flattening is -1 x 32*4*6*6.
A few tips:
as you begin with pytorch, you should look into how to use a Dataset/DataLoader
binary cross-entropy is more commonly used for classification (though MSE is still possible); see the sketch below
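To make those two tips concrete, here is a minimal runnable sketch (mine, not the asker's code): the corrected flatten size for this architecture, and wrapping plain tensors in a TensorDataset/DataLoader instead of slicing batches by hand. The random tensors are dummy stand-ins for the real data.

import torch
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader

num_channels = 32
# Corrected flatten: conv3 outputs num_channels*4 = 128 maps of 6x6, i.e. 4608 values per sample
fc1 = nn.Linear(num_channels*4*6*6, num_channels*8)
x = torch.randn(16, num_channels*4, 6, 6)   # shape coming out of conv3/maxpool3
x = x.view(-1, num_channels*4*6*6)          # -> 16 x 4608, batch dimension preserved
print(fc1(x).shape)                         # torch.Size([16, 256])

# DataLoader tip: wrap the tensors instead of slicing X_train/y_train manually
X_train = torch.randn(100, 1, 50, 50)       # dummy stand-in for the real images
y_train = torch.randint(0, 2, (100,))       # dummy integer class labels
loader = DataLoader(TensorDataset(X_train, y_train), batch_size=16, shuffle=True)
for inputs, targets in loader:
    pass                                    # training step goes here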

Fast CVX solvers in Matlab

I am wondering what the fastest convex optimizer in Matlab is, or whether there is any way to speed up the current solvers. I'm using CVX, but it's taking forever to solve the optimization problem I have.
The optimization I have is to solve
minimize norm(Ax-b, 2)
subject to
x >= 0
and x d <= delta
where A and b are very large.
Is there any way that I can solve this with a least-squares solver and then transfer the result to the constrained version to make it faster?
I'm not sure what x.d <= delta means, but I'll just assume it's supposed to be x <= delta.
You can solve this problem using the projected gradient method or an accelerated projected gradient method (a slight modification of the projected gradient method that "magically" converges much faster). Here is some Python code that shows how to minimize .5||Ax - b||^2 subject to the constraint that 0 <= x <= delta using FISTA, which is an accelerated projected gradient method. More details about the projected gradient method and FISTA can be found, for example, in Boyd's manuscript on proximal algorithms.
import numpy as np
import matplotlib.pyplot as plt

def fista(gradf, proxg, evalf, evalg, x0, params):
    # This code does FISTA with line search
    maxIter = params['maxIter']
    t = params['stepSize']  # Initial step size
    showTrigger = params['showTrigger']
    increaseFactor = 1.25
    decreaseFactor = .5
    costs = np.zeros((maxIter, 1))
    xkm1 = np.copy(x0)
    vkm1 = np.copy(x0)
    for k in range(1, maxIter + 1):
        costs[k-1] = evalf(xkm1) + evalg(xkm1)
        if k % showTrigger == 0:
            print("Iteration: " + str(k) + " cost: " + str(costs[k-1]))
        t = increaseFactor*t
        acceptFlag = False
        while acceptFlag == False:
            if k == 1:
                theta = 1
            else:
                a = tkm1
                b = t*(thetakm1**2)
                c = -t*(thetakm1**2)
                theta = (-b + np.sqrt(b**2 - 4*a*c))/(2*a)
            y = (1 - theta)*xkm1 + theta*vkm1
            (gradf_y, fy) = gradf(y)
            x = proxg(y - t*gradf_y, t)
            fx = evalf(x)
            if fx <= fy + np.vdot(gradf_y, x - y) + (.5/t)*np.sum((x - y)**2):
                acceptFlag = True
            else:
                t = decreaseFactor*t
        tkm1 = t
        thetakm1 = theta
        vkm1 = xkm1 + (1/theta)*(x - xkm1)
        xkm1 = x
    return (xkm1, costs)
if __name__ == '__main__':
    delta = 5.0
    numRows = 300
    numCols = 50
    A = np.random.randn(numRows, numCols)
    ATrans = np.transpose(A)
    xTrue = delta*np.random.rand(numCols, 1)
    b = np.dot(A, xTrue)
    noise = .1*np.random.randn(numRows, 1)
    b = b + noise

    def evalf(x):
        AxMinusb = np.dot(A, x) - b
        val = .5 * np.sum(AxMinusb ** 2)
        return val

    def gradf(x):
        AxMinusb = np.dot(A, x) - b
        grad = np.dot(ATrans, AxMinusb)
        val = .5 * np.sum(AxMinusb ** 2)
        return (grad, val)

    def evalg(x):
        return 0.0

    def proxg(x, t):
        return np.maximum(np.minimum(x, delta), 0.0)

    x0 = np.zeros((numCols, 1))
    params = {'maxIter': 500, 'stepSize': 1.0, 'showTrigger': 5}
    (x, costs) = fista(gradf, proxg, evalf, evalg, x0, params)
    plt.figure()
    plt.plot(x)
    plt.plot(xTrue)
    plt.figure()
    plt.semilogy(costs)
    plt.show()  # display the plots when run as a script
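As an aside (not from the original answer): since this particular problem is just box-constrained least squares, SciPy ships a dedicated solver that can serve as a sanity check against the FISTA result. A minimal sketch reusing A, b, and delta from the script above:

# Sketch: min ||Ax - b||^2 s.t. 0 <= x <= delta via scipy's bounded
# least-squares solver, as a cross-check on the FISTA solution.
from scipy.optimize import lsq_linear
res = lsq_linear(A, b.ravel(), bounds=(0.0, delta))
print(res.cost)    # 0.5*||Ax - b||^2 at the solution
print(res.x[:5])   # first few entries of the solution vector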

Different intensity values for same image in OpenCV and MATLAB

I'm using Python 2.7 and OpenCV 3.x for my project: OMR sheet evaluation using a web camera.
While finding the number of white pixels around the center of a circle, I noticed that the intensity values are wrong, yet MATLAB shows the correct values when I use imtool('a1.png').
I'm using a .png image (datatype uint8).
Just run the code, go to the [360:370, 162:172] coordinates in the image, and look at the intensity values; they should not be 0.
Find the images here -> a1.png a2.png
Why is this happening?
import numpy as np
import cv2
from matplotlib import pyplot as plt

# select radius of circle
radius = 10

# function for finding white pixels
def thresh_circle(img, ptx, pty):
    centerX = ptx
    centerY = pty
    cntOfWhite = 0
    for i in range((centerX - radius), (centerX + radius)):
        for j in range((centerY - radius), (centerY + radius)):
            if (j < img.shape[0] and i < img.shape[1]):
                val = img[i][j]
                if (val == 255):
                    cntOfWhite = cntOfWhite + 1
    return cntOfWhite

MIN_MATCH_COUNT = 10
img1 = cv2.imread('a1.png', 0)  # queryImage
img2 = cv2.imread('a2.png', 0)  # trainImage
sift = cv2.SIFT()  # Initiate SIFT detector
kp1, des1 = sift.detectAndCompute(img1, None)  # find the keypoints and descriptors with SIFT
kp2, des2 = sift.detectAndCompute(img2, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good = []  # store all the good matches as per Lowe's ratio test
for m, n in matches:
    if m.distance < 0.7*n.distance:
        good.append(m)
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.LMEDS, 5.0)
    # print M
    matchesMask = mask.ravel().tolist()
    h, w = img1.shape
else:
    print "Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT)
    matchesMask = None
img3 = cv2.warpPerspective(img1, M, (img2.shape[1], img2.shape[0]))
blur = cv2.GaussianBlur(img3, (5, 5), 0)
ret3, th3 = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
ret, th2 = cv2.threshold(blur, ret3, 255, cv2.THRESH_BINARY_INV)
print th2[360:370, 162:172]  # print a block of the image
plt.imshow(th2, 'gray'), plt.show()
cv2.waitKey(0)
cv2.imwrite('th2.png', th2)
ptyc = np.array([170, 200, 230, 260])  # y coordinates of circle centers
ptxc = np.array([110, 145, 180, 215, 335, 370, 405, 440])  # x coordinates of circle centers
pts_src = np.zeros(shape=(32, 2), dtype=np.int)  # x,y coordinates of circle centers
ct = 0
for i in range(0, 4):
    for j in range(0, 8):
        pts_src[ct][1] = ptyc[i]
        pts_src[ct][0] = ptxc[j]
        ct = ct + 1
boolval = np.zeros(shape=(8, 4), dtype=np.bool)
ct = 0
for j in range(0, 8):
    for i in range(0, 4):
        a1 = thresh_circle(th2, pts_src[ct][0], pts_src[ct][1])
        ct = ct + 1
        if (a1 > 50):
            boolval[j][i] = 1
        else:
            boolval[j][i] = 0
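A possible source of the discrepancy (an observation, not a verified diagnosis): NumPy/OpenCV index images as img[row, col], i.e. img[y, x], whereas MATLAB's imtool reports pixel positions as (x, y). So th2[360:370, 162:172] reads rows 360-369 and columns 162-171, while imtool at (360, 162) is showing column 360, row 162. A quick sketch of the check, assuming th2 from the code above:

# img[row, col] == img[y, x]; imtool's (x, y) readout is the other order.
block_numpy = th2[360:370, 162:172]   # rows 360-369, columns 162-171
block_imtool = th2[162:172, 360:370]  # pixels imtool shows at x=360..369, y=162..171
print(block_imtool)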

Plot a plane based on a normal vector and a point in Matlab or matplotlib

How would one go about plotting a plane in Matlab or matplotlib from a normal vector and a point?
For all the copy/pasters out there, here is similar code for Python using matplotlib:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
point = np.array([1, 2, 3])
normal = np.array([1, 1, 2])
# a plane is a*x+b*y+c*z+d=0
# [a,b,c] is the normal. Thus, we have to calculate
# d and we're set
d = -point.dot(normal)
# create x,y
xx, yy = np.meshgrid(range(10), range(10))
# calculate corresponding z
z = (-normal[0] * xx - normal[1] * yy - d) * 1. /normal[2]
# plot the surface
plt3d = plt.figure().gca(projection='3d')
plt3d.plot_surface(xx, yy, z)
plt.show()
For Matlab:
point = [1,2,3];
normal = [1,1,2];
%# a plane is a*x+b*y+c*z+d=0
%# [a,b,c] is the normal. Thus, we have to calculate
%# d and we're set
d = -point*normal'; %'# dot product for less typing
%# create x,y
[xx,yy]=ndgrid(1:10,1:10);
%# calculate corresponding z
z = (-normal(1)*xx - normal(2)*yy - d)/normal(3);
%# plot the surface
figure
surf(xx,yy,z)
Note: this solution only works as long as normal(3) is not 0. If the plane is parallel to the z-axis, you can rotate the dimensions to keep the same approach:
z = (-normal(3)*xx - normal(1)*yy - d)/normal(2); %% assuming normal(3)==0 and normal(2)~=0
%% plot the surface
figure
surf(xx,yy,z)
%% label the axis to avoid confusion
xlabel('z')
ylabel('x')
zlabel('y')
For copy-pasters wanting a gradient on the surface:
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np
import matplotlib.pyplot as plt
point = np.array([1, 2, 3])
normal = np.array([1, 1, 2])
# a plane is a*x+b*y+c*z+d=0
# [a,b,c] is the normal. Thus, we have to calculate
# d and we're set
d = -point.dot(normal)
# create x,y
xx, yy = np.meshgrid(range(10), range(10))
# calculate corresponding z
z = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]
# plot the surface
plt3d = plt.figure().gca(projection='3d')
Gx, Gy = np.gradient(xx * yy) # gradients with respect to x and y
G = (Gx ** 2 + Gy ** 2) ** .5 # gradient magnitude
N = G / G.max() # normalize 0..1
plt3d.plot_surface(xx, yy, z, rstride=1, cstride=1,
                   facecolors=cm.jet(N),
                   linewidth=0, antialiased=False, shade=False)
plt.show()
The above answers are good enough. One thing to mention is that they all use the same method: calculate the z value for a given (x, y). The drawback is that they meshgrid over (x, y), so only the patch's projection onto the x-y plane keeps the intended shape, and the patch drawn in space can vary. For example, you cannot get a square in 3D space this way, only a distorted one.
To avoid this, there is a different way: use rotation. If you first generate data in the x-y plane (it can be any shape), then rotate it by the rotation that takes [0 0 1] to your normal vector, you will get what you want. Simply run the code below for reference.
point = [1,2,3];
normal = [1,2,2];
t=(0:10:360)';
circle0=[cosd(t) sind(t) zeros(length(t),1)];
r=vrrotvec2mat(vrrotvec([0 0 1],normal));
circle=circle0*r'+repmat(point,length(circle0),1);
patch(circle(:,1),circle(:,2),circle(:,3),.5);
axis square; grid on;
%add line
line=[point;point+normr(normal)]
hold on;plot3(line(:,1),line(:,2),line(:,3),'LineWidth',5)
It gets a circle in 3D:
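For a Python equivalent of this rotation trick (my sketch, not part of the original answer): build the Rodrigues rotation matrix taking [0, 0, 1] onto the unit normal, apply it to a circle generated in the x-y plane, then translate to the point.

import numpy as np

def rotation_to(normal):
    # Rodrigues formula: the rotation taking the z-axis [0, 0, 1] onto `normal`
    n = np.asarray(normal, dtype=float)
    n = n / np.linalg.norm(n)
    z = np.array([0.0, 0.0, 1.0])
    v = np.cross(z, n)                   # rotation axis, with length sin(angle)
    c, s = np.dot(z, n), np.linalg.norm(v)
    if s == 0:                           # parallel or anti-parallel to z
        return np.eye(3) if c > 0 else np.diag([1.0, -1.0, -1.0])
    K = np.array([[0, -v[2], v[1]],
                  [v[2], 0, -v[0]],
                  [-v[1], v[0], 0]])
    return np.eye(3) + K + K @ K * ((1 - c) / s**2)

point = np.array([1.0, 2.0, 3.0])
normal = np.array([1.0, 2.0, 2.0])
t = np.radians(np.arange(0, 361, 10))
circle0 = np.column_stack([np.cos(t), np.sin(t), np.zeros_like(t)])  # circle in the x-y plane
circle = circle0 @ rotation_to(normal).T + point                     # rotate, then translate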
A cleaner Python example that also works for tricky x, y, z situations:
from mpl_toolkits.mplot3d import axes3d
from matplotlib.patches import Circle, PathPatch
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D
from mpl_toolkits.mplot3d import art3d
import numpy as np

def plot_vector(fig, orig, v, color='blue'):
    ax = fig.gca(projection='3d')
    orig = np.array(orig); v = np.array(v)
    ax.quiver(orig[0], orig[1], orig[2], v[0], v[1], v[2], color=color)
    ax.set_xlim(0, 10); ax.set_ylim(0, 10); ax.set_zlim(0, 10)
    ax = fig.gca(projection='3d')
    return fig

def rotation_matrix(d):
    sin_angle = np.linalg.norm(d)
    if sin_angle == 0:
        return np.identity(3)
    d /= sin_angle
    eye = np.eye(3)
    ddt = np.outer(d, d)
    skew = np.array([[0, d[2], -d[1]],
                     [-d[2], 0, d[0]],
                     [d[1], -d[0], 0]], dtype=np.float64)
    M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
    return M

def pathpatch_2d_to_3d(pathpatch, z, normal):
    if type(normal) is str:  # Translate strings to normal vectors
        index = "xyz".index(normal)
        normal = np.roll((1.0, 0, 0), index)
    normal = np.asarray(normal, dtype=float)
    normal /= np.linalg.norm(normal)  # Make sure the vector is normalised
    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()
    path = trans.transform_path(path)  # Apply the transform
    pathpatch.__class__ = art3d.PathPatch3D  # Change the class
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color
    verts = path.vertices  # Get the vertices in 2D
    d = np.cross(normal, (0, 0, 1))  # Obtain the rotation vector
    M = rotation_matrix(d)  # Get the rotation matrix
    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])

def pathpatch_translate(pathpatch, delta):
    pathpatch._segment3d += delta

def plot_plane(ax, point, normal, size=10, color='y'):
    p = Circle((0, 0), size, facecolor=color, alpha=.2)
    ax.add_patch(p)
    pathpatch_2d_to_3d(p, z=0, normal=normal)
    pathpatch_translate(p, (point[0], point[1], point[2]))

o = np.array([5, 5, 5])
v = np.array([3, 3, 3])
n = [0.5, 0.5, 0.5]
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
plot_plane(ax, o, n, size=3)
ax.set_xlim(0, 10); ax.set_ylim(0, 10); ax.set_zlim(0, 10)
plt.show()