Point Interpolation with MetPy and Basemap - matplotlib-basemap

I changed the script and tried to display the result using Basemap.
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
[Point_Interpolation](https://unidata.github.io/MetPy/latest/examples/gridding/Point_Interpolation.html?highlight=basic_map)
"""
Import some libraries:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
from matplotlib import cm as CM
from mpl_toolkits.basemap import Basemap
from matplotlib.patches import Polygon
from metpy.cbook import get_test_data
from metpy.gridding.gridding_functions import (interpolate, remove_nan_observations,
                                               remove_repeat_coordinates)
import pandas as pd
import numpy as np
import numpy.ma as ma
Here are some functions:
def basic_map(proj):
    """Make our basic default map for plotting"""
    fig = plt.figure(figsize=(15, 10))
    # add_metpy_logo(fig, 0, 80, size='large')
    view = fig.add_axes([0, 0, 1, 1], projection=proj)
    view.set_extent([100, 130, 20, 50])
    view.add_feature(cfeature.STATES.with_scale('50m'))
    view.add_feature(cfeature.OCEAN)
    view.add_feature(cfeature.COASTLINE)
    view.add_feature(cfeature.BORDERS, linestyle=':')
    return fig, view
Get the station test data:
def station_test_data(variable_names, proj_from=None, proj_to=None):
    # with get_test_data('station_data.txt') as f:
    all_data = np.loadtxt('2016072713.000', skiprows=1,
                          # all_data = np.loadtxt(f, skiprows=1, delimiter=',',
                          usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
                          dtype=np.dtype([('stid', '5S'), ('lat', 'f'), ('lon', 'f'),
                                          ('aqi', 'f'), ('grade', 'f'),
                                          ('pm25', 'f'), ('pm10', 'f'),
                                          ('co', 'f'), ('no2', 'f'), ('o3', 'f'),
                                          ('o38h', 'f'), ('so2', 'f')]))
    all_stids = [s.decode('ascii') for s in all_data['stid']]
    data = np.concatenate([all_data[all_stids.index(site)].reshape(1, ) for site in all_stids])
    value = data[variable_names]
    lon = data['lon']
    lat = data['lat']
    if proj_from is not None and proj_to is not None:
        try:
            proj_points = proj_to.transform_points(proj_from, lon, lat)
            return proj_points[:, 0], proj_points[:, 1], value
        except Exception as e:
            print(e)
            return None
    return lon, lat, value
def remove_inf_x(x, y, z):
    x_ = x[~np.isinf(x)]
    y_ = y[~np.isinf(x)]
    z_ = z[~np.isinf(x)]
    return x_, y_, z_

def remove_inf_y(x, y, z):
    x_ = x[~np.isinf(y)]
    y_ = y[~np.isinf(y)]
    z_ = z[~np.isinf(y)]
    return x_, y_, z_
Add a polygon plot:
def plot_rec(map, lower_left, upper_left, upper_right, lower_right, text):
    lon_poly = np.array([lower_left[0], upper_left[0], upper_right[0], lower_right[0]])
    lat_poly = np.array([lower_left[1], upper_left[1], upper_right[1], lower_right[1]])
    X, Y = map(lon_poly, lat_poly)
    xy = np.vstack([X, Y]).T
    poly = Polygon(xy, closed=True,
                   facecolor='None', edgecolor='red',
                   linewidth=1.)
    ax, ay = map(lower_left[0], lower_left[1])
    plt.text(ax, ay, text, fontsize=6, fontweight='bold',
             ha='left', va='bottom', color='k')
    plt.gca().add_patch(poly)
Get the data:
from_proj = ccrs.Geodetic()
to_proj = ccrs.AlbersEqualArea(central_longitude=110.0000, central_latitude=32.0000)
x, y, temp = station_test_data('pm25', from_proj, to_proj)
x, y, temp = remove_nan_observations(x, y, temp)
x, y, temp = remove_inf_x(x, y, temp)
x, y, temp = remove_inf_y(x, y, temp)
x, y, temp = remove_repeat_coordinates(x, y, temp)
###########################################
Barnes interpolation: search_radius = 100 km, min_neighbors = 3
gx, gy, img1 = interpolate(x, y, temp, interp_type='barnes', hres=75000, search_radius=100000)
img1 = np.ma.masked_where(np.isnan(img1), img1)
Then I make the plot:
fig = plt.figure(figsize=(8, 8))
#fig, view = basic_map(to_proj)
m = Basemap(width=8000000, height=7000000,
            resolution='l', projection='aea',
            lat_1=0., lat_2=40, lon_0=110, lat_0=20)
#m.shadedrelief()
m.drawparallels(np.arange(20.,40,2.5),linewidth=1, dashes=[4, 2], labels=[1,0,0,0], color= 'gray',zorder=0, fontsize=10)
m.drawmeridians(np.arange(100.,125.,2.),linewidth=1, dashes=[4, 2], labels=[0,0,0,1], color= 'gray',zorder=0, fontsize=10)
m.readshapefile('dijishi_2004','state',color='gray')
levels = list(range(0, 200, 10))
cmap = plt.get_cmap('viridis')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
mmb = m.pcolormesh(gx, gy, img1, cmap=cmap, norm=norm)
plt.colorbar(label=r'$PM_{2.5}(\mu g/m^3 $)')
plt.show()
It seems the locations are wrong!
Maybe it is because Point_Interpolation transforms the coordinates.
Here is the input data I use:
99306 31.1654 121.412 NaN NaN NaN 37.000 0.875 10.000 141.000 NaN NaN
99299 31.2036 121.478 NaN NaN NaN 49.000 0.420 18.000 157.000 NaN 16.000
99302 31.1907 121.703 NaN NaN 75.000 112.000 1.571 54.000 167.000 NaN 34.000
99300 31.3008 121.467 NaN NaN 53.000 NaN 0.414 21.000 128.000 NaN 10.000
99304 31.2071 121.577 NaN NaN 20.000 66.000 NaN 20.000 192.000 NaN 28.000
99305 31.0935 120.978 NaN NaN NaN 5.000 0.717 23.000 140.000 NaN 13.000

My guess is that you need to adjust your grid locations or your projection. Basemap defines (0, 0) as the lower-left corner of the map, whereas it looks like your data assume (0, 0) is the center.
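One way to test that (a minimal sketch, untested and reusing the variables from the question): transform the interpolated grid back to lon/lat with Cartopy, then let Basemap project those coordinates itself.
# Hypothetical fix, not verified against the data above: undo the Cartopy
# AEA projection, then reproject with Basemap's own transform.
lonlat = ccrs.Geodetic().transform_points(to_proj, gx.ravel(), gy.ravel())
glon = lonlat[:, 0].reshape(gx.shape)
glat = lonlat[:, 1].reshape(gx.shape)
mx, my = m(glon, glat)  # Basemap puts (0, 0) at the lower-left corner
mmb = m.pcolormesh(mx, my, img1, cmap=cmap, norm=norm)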

Related

Pytorch linear/affine layer parameters confusing

I'm on the PyTorch documentation (https://pytorch.org/tutorials/beginner/blitz/neural_networks_tutorial.html) and I'm not really understanding why they make the affine layer (16 * 6 * 6, 120). I understand that the last convolution layer has 16 output channels and the output here is 120, but even with their annotation, I don't understand where the 6 * 6 comes from. Can someone please explain?
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        # 1 input image channel, 6 output channels, 3x3 square convolution
        # kernel
        self.conv1 = nn.Conv2d(1, 6, 3)
        self.conv2 = nn.Conv2d(6, 16, 3)
        # an affine operation: y = Wx + b
        self.fc1 = nn.Linear(16 * 6 * 6, 120)  # 6*6 from image dimension
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Max pooling over a (2, 2) window
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        # If the size is a square you can only specify a single number
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features

net = Net()
print(net)
The 6x6 comes from the height and width of x after it has been passed through your convolutions and maxpools.
Here is a simplified version where you can see how the shape changes at each point. It may help to print out the shapes in their example so you can see exactly how everything changes.
import torch
import torch.nn as nn
import torch.nn.functional as F
conv1 = nn.Conv2d(1, 6, 3)
conv2 = nn.Conv2d(6, 16, 3)
# Making a pretend input similar to theirs.
# We define an input with 1 batch, 1 channel, height 32, width 32
x = torch.ones((1,1,32,32))
# Simulating forward()
x = F.max_pool2d(F.relu(conv1(x)), (2, 2))
print(x.shape) # torch.Size([1, 6, 15, 15]) 1 batch, 6 channels, height 15, width 15
x = F.max_pool2d(F.relu(conv2(x)), 2)
print(x.shape) # torch.Size([1, 16, 6, 6]) 1 batch, 16 channels, height 6, width 6
Next they flatten x and pass it through fc1 which accepts 16*6*6 and produces 120 outputs.
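If you prefer to verify the arithmetic by hand, a tiny helper (my own sketch, not from the tutorial) reproduces the 32 → 15 → 6 progression:
def conv_out(size, kernel):   # 'valid' convolution, stride 1
    return size - kernel + 1

def pool_out(size, window):   # max pooling floors the division
    return size // window

s = 32                           # input height/width
s = pool_out(conv_out(s, 3), 2)  # conv1 + pool: (32 - 3 + 1) // 2 = 15
s = pool_out(conv_out(s, 3), 2)  # conv2 + pool: (15 - 3 + 1) // 2 = 6
print(s)  # 6, hence fc1 takes 16 * 6 * 6 inputs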

Difference between Batch Normalization and Self Normalized Neural Network with SELU

I would like to know the difference between batch normalization and self normalized neural network. In other words, would SELU (Scaled Exponential Linear Unit) replace batch normalization and how?
Moreover, after looking into the values of the SELU activations, I found they were in the range [-1, 1]. This is not the case with batch normalization: the values after the BN layer (before the ReLU activation) took values in [-a, a] approximately, not [-1, 1].
Here is how I printed the values after the SELU activation and after batch norm layer:
batch_norm_layer = tf.Print(batch_norm_layer,
                            data=[tf.reduce_max(batch_norm_layer), tf.reduce_min(batch_norm_layer)],
                            message=name_scope + ' min and max')
And similar code for the SELU activations...
Batch norm layer is defined as follows:
def batch_norm(x, n_out, phase_train, in_conv_layer=True):
    with tf.variable_scope('bn'):
        beta = tf.Variable(tf.constant(0.0, shape=n_out),
                           name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=n_out),
                            name='gamma', trainable=True)
        if in_conv_layer:
            batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        else:
            batch_mean, batch_var = tf.nn.moments(x, [0, 1], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=0.9999)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return normed
Therefore, since batch norm outputs higher values, the loss increases dramatically, and thus I got NaNs.
In addition, I tried reducing the learning rate with batch norm, but that didn't help either. So how can I fix this problem?
Here is the code:
import tensorflow as tf
import numpy as np
import os
import cv2
batch_size = 32
num_epoch = 102
latent_dim = 100
def weight_variable(kernal_shape):
    weights = tf.get_variable(name='weights', shape=kernal_shape, dtype=tf.float32, trainable=True,
                              initializer=tf.truncated_normal_initializer(stddev=0.02))
    return weights

def bias_variable(shape):
    initial = tf.constant(0.0, shape=shape)
    return tf.Variable(initial)
def batch_norm(x, n_out, phase_train, convolutional=True):
    with tf.variable_scope('bn'):
        exp_moving_avg = tf.train.ExponentialMovingAverage(decay=0.9999)
        beta = tf.Variable(tf.constant(0.0, shape=n_out),
                           name='beta', trainable=True)
        gamma = tf.Variable(tf.constant(1.0, shape=n_out),
                            name='gamma', trainable=True)
        if convolutional:
            batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        else:
            batch_mean, batch_var = tf.nn.moments(x, [0], name='moments')
        update_moving_averages = exp_moving_avg.apply([batch_mean, batch_var])
        m = tf.cond(phase_train, lambda: exp_moving_avg.average(batch_mean), lambda: batch_mean)
        v = tf.cond(phase_train, lambda: exp_moving_avg.average(batch_var), lambda: batch_var)
        normed = tf.nn.batch_normalization(x, m, v, beta, gamma, 1e-3)
        normed = tf.Print(normed, data=[tf.shape(normed)], message='size of normed?')
    return normed, update_moving_averages  # Note that we should run the update_moving_averages with sess.run...
def conv_layer(x, w_shape, b_shape, padding='SAME'):
    W = weight_variable(w_shape)
    tf.summary.histogram("weights", W)
    b = bias_variable(b_shape)
    tf.summary.histogram("biases", b)
    # Note that I used a stride of 2 on purpose in order not to use max pool layer.
    conv = tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding=padding) + b
    conv_batch_norm, update_moving_averages = batch_norm(conv, b_shape, phase_train=tf.cast(True, tf.bool))
    name_scope = tf.get_variable_scope().name
    conv_batch_norm = tf.Print(conv_batch_norm,
                               data=[tf.reduce_max(conv_batch_norm), tf.reduce_min(conv_batch_norm)],
                               message=name_scope + ' min and max')
    activations = tf.nn.relu(conv_batch_norm)
    tf.summary.histogram("activations", activations)
    return activations, update_moving_averages
def deconv_layer(x, w_shape, b_shape, padding="SAME", activation='selu'):
    W = weight_variable(w_shape)
    tf.summary.histogram("weights", W)
    b = bias_variable(b_shape)
    tf.summary.histogram('biases', b)
    x_shape = tf.shape(x)
    out_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, w_shape[2]])
    if activation == 'selu':
        conv_trans = tf.nn.conv2d_transpose(x, W, out_shape, [1, 2, 2, 1], padding=padding) + b
        conv_trans_batch_norm, update_moving_averages = \
            batch_norm(conv_trans, b_shape, phase_train=tf.cast(True, tf.bool))
        transposed_activations = tf.nn.relu(conv_trans_batch_norm)
    else:
        conv_trans = tf.nn.conv2d_transpose(x, W, out_shape, [1, 2, 2, 1], padding=padding) + b
        conv_trans_batch_norm, update_moving_averages = \
            batch_norm(conv_trans, b_shape, phase_train=tf.cast(True, tf.bool))
        transposed_activations = tf.nn.sigmoid(conv_trans_batch_norm)
    tf.summary.histogram("transpose_activation", transposed_activations)
    return transposed_activations, update_moving_averages
tfrecords_filename_seq = ["C:/Users/user/PycharmProjects/AffectiveComputing/P16_db.tfrecords"]
filename_queue = tf.train.string_input_producer(tfrecords_filename_seq, num_epochs=num_epoch, shuffle=False, name='queue')
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
    serialized_example,
    # Defaults are not specified since both keys are required.
    features={
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'image_raw': tf.FixedLenFeature([], tf.string),
        'annotation_raw': tf.FixedLenFeature([], tf.string)
    })
# This is how we create one example, that is, extract one example from the database.
image = tf.decode_raw(features['image_raw'], tf.uint8)
# The height and the width are used to restore the image shape below.
height = tf.cast(features['height'], tf.int32)
width = tf.cast(features['width'], tf.int32)
# The image is reshaped since, when stored in a binary format, it is flattened. Therefore, we need the
# height and the width to restore the original image.
image = tf.reshape(image, [height, width, 3])
annotation = tf.cast(features['annotation_raw'], tf.string)
min_after_dequeue = 100
num_threads = 1
capacity = min_after_dequeue + num_threads * batch_size
label_batch, images_batch = tf.train.batch([annotation, image],
                                           shapes=[[], [112, 112, 3]],
                                           batch_size=batch_size,
                                           capacity=capacity,
                                           num_threads=num_threads)
label_batch_splitted = tf.string_split(label_batch, delimiter=',')
label_batch_values = tf.reshape(label_batch_splitted.values, [batch_size, -1])
label_batch_numbers = tf.string_to_number(label_batch_values, out_type=tf.float32)
confidences = tf.slice(label_batch_numbers, begin=[0, 2], size=[-1, 1])
images_batch = tf.cast([images_batch], tf.float32)[0] # Note that casting the image will increases its rank.
with tf.name_scope('image_normal'):
    images_batch = tf.map_fn(lambda img: tf.image.per_image_standardization(img), images_batch)
    # images_batch = tf.Print(images_batch, data=[tf.reduce_max(images_batch), tf.reduce_min(images_batch)],
    #                         message='min and max in images_batch')
with tf.variable_scope('conv1'):
    conv1, uma_conv1 = conv_layer(images_batch, [4, 4, 3, 32], [32])  # image size: [56, 56]
with tf.variable_scope('conv2'):
    conv2, uma_conv2 = conv_layer(conv1, [4, 4, 32, 64], [64])  # image size: [28, 28]
with tf.variable_scope('conv3'):
    conv3, uma_conv3 = conv_layer(conv2, [4, 4, 64, 128], [128])  # image size: [14, 14]
with tf.variable_scope('conv4'):
    conv4, uma_conv4 = conv_layer(conv3, [4, 4, 128, 256], [256])  # image size: [7, 7]
conv4_reshaped = tf.reshape(conv4, [-1, 7 * 7 * 256], name='conv4_reshaped')
w_c_mu = tf.Variable(tf.truncated_normal([7 * 7 * 256, latent_dim], stddev=0.1), name='weight_fc_mu')
b_c_mu = tf.Variable(tf.constant(0.1, shape=[latent_dim]), name='biases_fc_mu')
w_c_sig = tf.Variable(tf.truncated_normal([7 * 7 * 256, latent_dim], stddev=0.1), name='weight_fc_sig')
b_c_sig = tf.Variable(tf.constant(0.1, shape=[latent_dim]), name='biases_fc_sig')
epsilon = tf.random_normal([1, latent_dim])
tf.summary.histogram('weights_c_mu', w_c_mu)
tf.summary.histogram('biases_c_mu', b_c_mu)
tf.summary.histogram('weights_c_sig', w_c_sig)
tf.summary.histogram('biases_c_sig', b_c_sig)
with tf.variable_scope('mu'):
    mu = tf.nn.bias_add(tf.matmul(conv4_reshaped, w_c_mu), b_c_mu)
    tf.summary.histogram('mu', mu)
with tf.variable_scope('stddev'):
    stddev = tf.nn.bias_add(tf.matmul(conv4_reshaped, w_c_sig), b_c_sig)
    tf.summary.histogram('stddev', stddev)
with tf.variable_scope('z'):
    latent_var = mu + tf.multiply(tf.sqrt(tf.exp(stddev)), epsilon)
    tf.summary.histogram('features_sig', stddev)
w_dc = tf.Variable(tf.truncated_normal([latent_dim, 7 * 7 * 256], stddev=0.1), name='weights_dc')
b_dc = tf.Variable(tf.constant(0.0, shape=[7 * 7 * 256]), name='biases_dc')
tf.summary.histogram('weights_dc', w_dc)
tf.summary.histogram('biases_dc', b_dc)
with tf.variable_scope('deconv4'):
    deconv4 = tf.nn.bias_add(tf.matmul(latent_var, w_dc), b_dc)
    deconv4_batch_norm, uma_deconv4 = \
        batch_norm(deconv4, [7 * 7 * 256], phase_train=tf.cast(True, tf.bool), convolutional=False)
    deconv4 = tf.nn.relu(deconv4_batch_norm)
    deconv4_reshaped = tf.reshape(deconv4, [-1, 7, 7, 256], name='deconv4_reshaped')
with tf.variable_scope('deconv3'):
    deconv3, uma_deconv3 = deconv_layer(deconv4_reshaped, [3, 3, 128, 256], [128], activation='selu')
with tf.variable_scope('deconv2'):
    deconv2, uma_deconv2 = deconv_layer(deconv3, [3, 3, 64, 128], [64], activation='selu')
with tf.variable_scope('deconv1'):
    deconv1, uma_deconv1 = deconv_layer(deconv2, [3, 3, 32, 64], [32], activation='selu')
with tf.variable_scope('deconv_image'):
    deconv_image_batch, uma_deconv = deconv_layer(deconv1, [3, 3, 3, 32], [3], activation='sigmoid')
# loss function.
with tf.name_scope('loss_likelihood'):
    # temp1 shape: [32, 112, 112, 3]
    temp1 = images_batch * tf.log(deconv_image_batch + 1e-9) + (1 - images_batch) * tf.log(1 - deconv_image_batch + 1e-9)
    # temp1 = temp1 * confidences. This will give an error. Therefore, we should expand the dimension of confidence tensor
    confidences_ = tf.expand_dims(tf.expand_dims(confidences, axis=1), axis=1)  # shape: [32, 1, 1, 1].
    temp1 = temp1 * confidences_
    log_likelihood = -tf.reduce_sum(temp1, reduction_indices=[1, 2, 3])
    log_likelihood_total = tf.reduce_sum(log_likelihood)
    # l2_loss = tf.reduce_mean(tf.abs(tf.subtract(images_batch, deconv_image_batch)))
with tf.name_scope('loss_KL'):
    # temp2 shape: [32, 200]
    temp2 = 1 + tf.log(tf.square(stddev + 1e-9)) - tf.square(mu) - tf.square(stddev)
    temp3 = temp2 * confidences  # confidences shape is [32, 1]
    KL_term = -0.5 * tf.reduce_sum(temp3, reduction_indices=1)
    KL_term_total = tf.reduce_sum(KL_term)
with tf.name_scope('total_loss'):
    variational_lower_bound = tf.reduce_mean(log_likelihood + KL_term)
    tf.summary.scalar('loss', variational_lower_bound)
with tf.name_scope('optimizer'):
    optimizer = tf.train.AdamOptimizer(0.00001).minimize(variational_lower_bound)
init_op = tf.group(tf.local_variables_initializer(),
                   tf.global_variables_initializer())
saver = tf.train.Saver()
model_path = 'C:/Users/user/PycharmProjects/VariationalAutoEncoder/' \
             'VariationalAutoEncoderFaces/tensorboard_logs/Graph_model/ckpt'
# Here is the session...
with tf.Session() as sess:
    train_writer = tf.summary.FileWriter('C:/Users/user/PycharmProjects/VariationalAutoEncoder/'
                                         'VariationalAutoEncoderFaces/tensorboard_logs/Event_files', sess.graph)
    merged = tf.summary.merge_all()
    # Note that init_op should start before the Coordinator and the thread otherwise, this will throw an error.
    sess.run(init_op)
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    step = 0
    to_run_list = [uma_conv1, uma_conv2, uma_conv3, uma_conv4, uma_deconv1, uma_deconv2, uma_deconv3,
                   uma_deconv4, uma_deconv, optimizer, variational_lower_bound, merged,
                   deconv_image_batch, image]
    # Note that the last name "Graph_model" is the name of the saved checkpoints file => the ckpt is saved
    # under tensorboard_logs.
    ckpt = tf.train.get_checkpoint_state(os.path.dirname(model_path))
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('checkpoints are saved!!!')
    else:
        print('No stored checkpoints')
    epoch = 0
    while not coord.should_stop():
        _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, loss, summary, reconstructed_image, original_image = \
            sess.run(to_run_list)
        print('total loss:', loss)
        original_image = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
        reconstructed_image = cv2.cvtColor(np.array(reconstructed_image[0]), cv2.COLOR_RGB2BGR)
        cv2.imshow('original_image', original_image)
        cv2.imshow('reconstructed_image', reconstructed_image)
        cv2.waitKey(1)
        if step % 234 == 0:
            epoch += 1
            print('epoch:', epoch)
        if epoch == num_epoch - 2:
            coord.request_stop()
        if step % 100 == 0:
            train_writer.add_summary(summary, step)
            # print('total loss:', loss)
            # print('log_likelihood_', log_likelihood_)
            # print('KL_term', KL_term_)
        step += 1
        save_path = saver.save(sess, model_path)
    coord.request_stop()
    coord.join(threads)
    train_writer.close()
Any help is much appreciated!!
Here is some sample code to show the trend of the means and variances over 3 SELU layers. The numbers of nodes in the layers (including the input layer) are [15, 30, 30, 8]:
import numbers
import numpy as np
import tensorflow as tf
# imports required by the SNNs helper functions below
from tensorflow.python.framework import ops, tensor_shape, tensor_util
from tensorflow.python.ops import array_ops, math_ops, random_ops
from tensorflow.python.layers import utils
#-----------------------------------------------#
# https://github.com/bioinf-jku/SNNs/blob/master/selu.py
# The SELU activation function
def selu(x):
    with ops.name_scope('elu') as scope:
        alpha = 1.6732632423543772848170429916717
        scale = 1.0507009873554804934193349852946
        return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))
#-----------------------------------------------#
# https://github.com/bioinf-jku/SNNs/blob/master/selu.py
# alpha-dropout
def dropout_selu(x, rate, alpha=-1.7580993408473766, fixedPointMean=0.0, fixedPointVar=1.0,
                 noise_shape=None, seed=None, name=None, training=False):
    """Dropout to a value with rescaling."""

    def dropout_selu_impl(x, rate, alpha, noise_shape, seed, name):
        keep_prob = 1.0 - rate
        x = ops.convert_to_tensor(x, name="x")
        if isinstance(keep_prob, numbers.Real) and not 0 < keep_prob <= 1:
            raise ValueError("keep_prob must be a scalar tensor or a float in the "
                             "range (0, 1], got %g" % keep_prob)
        keep_prob = ops.convert_to_tensor(keep_prob, dtype=x.dtype, name="keep_prob")
        keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
        alpha = ops.convert_to_tensor(alpha, dtype=x.dtype, name="alpha")
        alpha.get_shape().assert_is_compatible_with(tensor_shape.scalar())
        if tensor_util.constant_value(keep_prob) == 1:
            return x
        noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
        random_tensor = keep_prob
        random_tensor += random_ops.random_uniform(noise_shape, seed=seed, dtype=x.dtype)
        binary_tensor = math_ops.floor(random_tensor)
        ret = x * binary_tensor + alpha * (1 - binary_tensor)
        a = math_ops.sqrt(fixedPointVar / (keep_prob * ((1 - keep_prob) * math_ops.pow(alpha - fixedPointMean, 2) + fixedPointVar)))
        b = fixedPointMean - a * (keep_prob * fixedPointMean + (1 - keep_prob) * alpha)
        ret = a * ret + b
        ret.set_shape(x.get_shape())
        return ret

    with ops.name_scope(name, "dropout", [x]) as name:
        return utils.smart_cond(training,
                                lambda: dropout_selu_impl(x, rate, alpha, noise_shape, seed, name),
                                lambda: array_ops.identity(x))
#-----------------------------------------------#
# build a 3-layer dense network with SELU activation and alpha-dropout
sess = tf.InteractiveSession()
w1 = tf.constant(np.random.normal(loc=0.0, scale=np.sqrt(1.0/15.0), size = [15, 30]))
b1 = tf.constant(np.random.normal(loc=0.0, scale=0.5, size = [30]))
x1 = tf.constant(np.random.normal(loc=0.0, scale=1.0, size = [200, 15]))
y1 = tf.add(tf.matmul(x1, w1), b1)
y1_selu = selu(y1)
y1_selu_dropout = dropout_selu(y1_selu, 0.05, training=True)
w2 = tf.constant(np.random.normal(loc=0.0, scale=np.sqrt(1.0/30.0), size = [30, 30]))
b2 = tf.constant(np.random.normal(loc=0.0, scale=0.5, size = [30]))
x2 = y1_selu_dropout
y2 = tf.add(tf.matmul(x2, w2), b2)
y2_selu = selu(y2)
y2_selu_dropout = dropout_selu(y2_selu, 0.05, training=True)
w3 = tf.constant(np.random.normal(loc=0.0, scale=np.sqrt(1.0/30.0), size = [30, 8]))
b3 = tf.constant(np.random.normal(loc=0.0, scale=0.5, size = [8]))
x3 = y2_selu_dropout
y3 = tf.add(tf.matmul(x3, w3), b3)
y3_selu = selu(y3)
y3_selu_dropout = dropout_selu(y3_selu, 0.05, training=True)
#-------------------------#
# evaluate the network
x1_v, y1_selu_dropout_v, \
x2_v, y2_selu_dropout_v, \
x3_v, y3_selu_dropout_v, \
= sess.run([x1, y1_selu_dropout, x2, y2_selu_dropout, x3, y3_selu_dropout])
#-------------------------#
# print each layer's mean and standard deviation (1st line: input; 2nd line: output)
print("Layer 1")
print(np.mean(x1_v), np.std(x1_v))
print(np.mean(y1_selu_dropout_v), np.std(y1_selu_dropout_v))
print("Layer 2")
print(np.mean(x2_v), np.std(x2_v))
print(np.mean(y2_selu_dropout_v), np.std(y2_selu_dropout_v))
print("Layer 3")
print(np.mean(x3_v), np.std(x3_v))
print(np.mean(y3_selu_dropout_v), np.std(y3_selu_dropout_v))
Here is one possible output. Over 3 layers, the mean and standard deviation are still close to 0 and 1, respectively.
Layer 1
-0.0101213033749 1.01375071842
0.0106228883975 1.09375593322
Layer 2
0.0106228883975 1.09375593322
-0.027910206754 1.12216643393
Layer 3
-0.027910206754 1.12216643393
-0.131790078631 1.09698413493

Matlab to Python code conversion: Binary phase-shift keying (BPSK)

I have this MATLAB code:
d=[1 0 1 1 0]; % Data sequence
b=2*d-1; % Convert unipolar to bipolar
T=1; % Bit duration
Eb=T/2; % This will result in unit amplitude waveforms
fc=3/T; % Carrier frequency
t=linspace(0,5,1000); % discrete time sequence between 0 and 5*T (1000 samples)
N=length(t); % Number of samples
Nsb=N/length(d); % Number of samples per bit
dd=repmat(d',1,Nsb); % replicate each bit Nsb times
bb=repmat(b',1,Nsb); dw=dd'; % Transpose the rows and columns
dw=dw(:)';
% Convert dw to a column vector (colum by column) and convert to a row vector
bw=bb';
bw=bw(:)'; % Data sequence samples
w=sqrt(2*Eb/T)*cos(2*pi*fc*t); % carrier waveform
bpsk_w=bw.*w; % modulated waveform
% plotting commands follow
subplot(4,1,1);
plot(t,dw); axis([0 5 -1.5 1.5])
subplot(4,1,2);
plot(t,bw); axis([0 5 -1.5 1.5])
subplot(4,1,3);
plot(t,w); axis([0 5 -1.5 1.5])
subplot(4,1,4);
plot(t,bpsk_w,'.'); axis([0 5 -1.5 1.5])
xlabel('time')
Which gives me the graphs shown below:
Below is my converted Python Code using Numpy / Scipy
import numpy as np
import scipy
import matplotlib.pylab as plt
plt.clf()
plt.close('all')
d = np.array(np.hstack((1, 0, 1, 1, 0)))
b = 2*d-1.
T = 1
Eb = T/2
fc = 3/T
t = np.linspace(0, 5, 1000)
N = t.shape
Nsb = np.divide(N, d.shape)
dd = np.tile(d.conj().T, Nsb)
bb = np.tile(b.conj().T, Nsb)
dw = dd.conj().T
dw = dw.flatten(0).conj()
bw = bb.conj().T
bw = bw.flatten(0).conj()
w = np.dot(np.sqrt(np.divide(2*Eb, T)), np.cos(np.dot(np.dot(2*np.pi, fc), t)))
bpsk_w = bw*w
plt.subplot(4, 1, 1)
plt.plot(t, dw)
plt.axis(np.array(np.hstack((0, 5, -1.5, 1.5))))
plt.subplot(4, 1, 2)
plt.plot(t, bw)
plt.axis(np.array(np.hstack((0, 5, -1.5, 1.5))))
plt.subplot(4, 1, 3)
plt.plot(t, w)
plt.axis(np.array(np.hstack((0, 5, -1.5, 1.5))))
plt.subplot(4, 1, 4)
plt.plot(t, bpsk_w, '.')
plt.axis(np.array(np.hstack((0, 5, -1.5, 1.5))))
plt.xlabel('time')
plt.show()
But I get neither an error nor the proper output.
Please let me know where my error is in migrating this code.
=====UPDATE======
When I change the Python code to use the following lines, I get some better output:
..............
b = 2.*d-1.
T = 1.
Eb = T/2.
fc = 3./T
...............
w = np.dot(np.sqrt(np.divide(2.*Eb, T)), np.cos(np.dot(np.dot(2.*np.pi, fc), t)))
.............
Your problem stems from using np.tile rather than np.repeat.
To give a simple example of the difference between both:
>>> a = np.arange(3)
>>> a
array([0, 1, 2])
>>> np.repeat(a, 4)
array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
>>> np.tile(a, 4)
array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2])
So basically tile takes a "tiling array" and concatenates it, similar to the way you would tile a kitchen floor, whereas repeat repeats each element in the vector a specified number of times before it takes the next element of that vector.
Now, using that knowledge you could rewrite the matlab sample and wind up with the following:
from __future__ import division
import numpy as np
import scipy
import matplotlib.pylab as plt
unipolar_arr = np.array([1, 0, 1, 1, 0])
bipolar = 2*unipolar_arr - 1
bit_duration = 1
amplitude_scaling_factor = bit_duration/2 # This will result in unit amplitude waveforms
freq = 3/bit_duration # carrier frequency
n_samples = 1000
time = np.linspace(0, 5, n_samples)
samples_per_bit = n_samples // unipolar_arr.size # floor division keeps this an integer for np.repeat; use size rather than shape if you want something similar to Matlab's "length"
# 1. Use repeat rather than tile (read the docs)
# 2. No need for conjugate transpose
dd = np.repeat(unipolar_arr, samples_per_bit) # replicate each bit Nsb times
bb = np.repeat(bipolar, samples_per_bit) # Transpose the rows and columns
dw = dd
# no idea why this is here
#dw = dw.flatten(0).conj()
bw = bb # once again, no need for conjugate transpose
# no idea why this is here
#bw = bw.flatten(0).conj()
waveform = np.sqrt(2*amplitude_scaling_factor/bit_duration) * np.cos(2*np.pi * freq * time) # no need for np.dot to perform scalar-scalar multiplication or scalar-array multiplication
bpsk_w = bw*waveform
f, ax = plt.subplots(4,1, sharex=True, sharey=True, squeeze=True)
ax[0].plot(time, dw)
ax[1].plot(time, bw)
ax[2].plot(time, waveform)
ax[3].plot(time, bpsk_w, '.')
ax[0].axis([0, 5, -1.5, 1.5])
ax[0].set_xlabel('time')
plt.show()
I've added more comments to show what is not needed at all (so much clutter; was the code you showed us somehow produced by a conversion program?) and taken the liberty of changing most of your 1-2 character variable names into something more readable; that's just one of my pet peeves.
Also, in Python 2.x integer division is the default, so 5/2 evaluates to 2 rather than 2.5. In Python 3.x this was changed for the better, and by using the line from __future__ import division you can get that behaviour in Python 2.x as well.
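A quick illustration:
# In plain Python 2.x: 5 / 2 == 2 (integer division)
from __future__ import division
print(5 / 2)   # 2.5, matching Python 3.x behaviour
print(5 // 2)  # 2; explicit floor division if the old behaviour is what you want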

In Scipy LeastSq - How to add the penalty term

If the objective function is the usual sum of squared errors plus a quadratic penalty term (the formula image from the original question is not shown here), how do I code it in Python?
I've already coded the normal one:
import numpy as np
import scipy as sp
from scipy.optimize import leastsq
import pylab as pl
m = 9 #the degree of the polynomial

def real_func(x):
    return np.sin(2*np.pi*x) #sin(2 pi x)

def fake_func(p, x):
    f = np.poly1d(p) #polynomial
    return f(x)

def residuals(p, y, x):
    return y - fake_func(p, x)
#randomly choose 9 points as x
x = np.linspace(0, 1, 9)
x_show = np.linspace(0, 1, 1000)
y0 = real_func(x)
#add normalize noise
y1 = [np.random.normal(0, 0.1) + y for y in y0]
p0 = np.random.randn(m)
plsq = leastsq(residuals, p0, args=(y1, x))
print 'Fitting Parameters :', plsq[0]
pl.plot(x_show, real_func(x_show), label='real')
pl.plot(x_show, fake_func(plsq[0], x_show), label='fitted curve')
pl.plot(x, y1, 'bo', label='with noise')
pl.legend()
pl.show()
Since the penalization term is also just quadratic, you could simply stack it together with the squares of the error, using weight 1 for the data rows and lambda for the penalization rows.
scipy.optimize.curve_fit does weighted least squares, if you don't want to code it yourself.
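A minimal sketch of the stacking approach (my own, assuming a penalty of the form lam * sum(p**2); lam and residuals_with_penalty are hypothetical names):
def residuals_with_penalty(p, y, x, lam=1e-4):
    # Appending sqrt(lam) * p makes leastsq minimize
    # sum((y - f(x, p))**2) + lam * sum(p**2).
    return np.concatenate([y - fake_func(p, x), np.sqrt(lam) * p])

plsq_reg = leastsq(residuals_with_penalty, p0, args=(y1, x))
print('Regularized parameters:', plsq_reg[0])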

Plot a plane based on a normal vector and a point in Matlab or matplotlib

How would one go about plotting a plane in MATLAB or matplotlib from a normal vector and a point?
For all the copy/pasters out there, here is similar code for Python using matplotlib:
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
point = np.array([1, 2, 3])
normal = np.array([1, 1, 2])
# a plane is a*x+b*y+c*z+d=0
# [a,b,c] is the normal. Thus, we have to calculate
# d and we're set
d = -point.dot(normal)
# create x,y
xx, yy = np.meshgrid(range(10), range(10))
# calculate corresponding z
z = (-normal[0] * xx - normal[1] * yy - d) * 1. /normal[2]
# plot the surface
plt3d = plt.figure().gca(projection='3d')
plt3d.plot_surface(xx, yy, z)
plt.show()
For Matlab:
point = [1,2,3];
normal = [1,1,2];
%# a plane is a*x+b*y+c*z+d=0
%# [a,b,c] is the normal. Thus, we have to calculate
%# d and we're set
d = -point*normal'; %'# dot product for less typing
%# create x,y
[xx,yy]=ndgrid(1:10,1:10);
%# calculate corresponding z
z = (-normal(1)*xx - normal(2)*yy - d)/normal(3);
%# plot the surface
figure
surf(xx,yy,z)
Note: this solution only works as long as normal(3) is not 0. If the plane is parallel to the z-axis, you can rotate the dimensions to keep the same approach:
z = (-normal(3)*xx - normal(1)*yy - d)/normal(2); %% assuming normal(3)==0 and normal(2)~=0
%% plot the surface
figure
surf(xx,yy,z)
%% label the axis to avoid confusion
xlabel('z')
ylabel('x')
zlabel('y')
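The same dimension-rotation trick in the Python version would look roughly like this (my sketch, reusing normal, d, and plt3d from the Python snippet above, and assuming instead that normal[2] == 0 and normal[1] != 0):
# Solve the plane equation a*x + b*y + c*z + d = 0 for y instead of z,
# then relabel the axes in your head (or with set_xlabel/set_ylabel/set_zlabel).
xx, zz = np.meshgrid(range(10), range(10))
yy = (-normal[0] * xx - normal[2] * zz - d) / normal[1]
plt3d.plot_surface(xx, yy, zz)
plt.show()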
For copy-pasters wanting a gradient on the surface:
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import numpy as np
import matplotlib.pyplot as plt
point = np.array([1, 2, 3])
normal = np.array([1, 1, 2])
# a plane is a*x+b*y+c*z+d=0
# [a,b,c] is the normal. Thus, we have to calculate
# d and we're set
d = -point.dot(normal)
# create x,y
xx, yy = np.meshgrid(range(10), range(10))
# calculate corresponding z
z = (-normal[0] * xx - normal[1] * yy - d) * 1. / normal[2]
# plot the surface
plt3d = plt.figure().gca(projection='3d')
Gx, Gy = np.gradient(xx * yy) # gradients with respect to x and y
G = (Gx ** 2 + Gy ** 2) ** .5 # gradient magnitude
N = G / G.max() # normalize 0..1
plt3d.plot_surface(xx, yy, z, rstride=1, cstride=1,
facecolors=cm.jet(N),
linewidth=0, antialiased=False, shade=False
)
plt.show()
The above answers are good enough. One thing to mention is that they all use the same method: calculate the z value for a given (x, y). The drawback is that they meshgrid the plane, so the patch drawn in 3D is only the projection of the grid onto the plane; for example, you cannot get a true square in 3D space this way, only a distorted one.
To avoid this, there is a different way: use rotation. If you first generate data in the x-y plane (it can be any shape), then rotate it by the right amount (taking [0 0 1] to your normal vector), you will get what you want. Simply run the code below for your reference.
point = [1,2,3];
normal = [1,2,2];
t=(0:10:360)';
circle0=[cosd(t) sind(t) zeros(length(t),1)];
r=vrrotvec2mat(vrrotvec([0 0 1],normal));
circle=circle0*r'+repmat(point,length(circle0),1);
patch(circle(:,1),circle(:,2),circle(:,3),.5);
axis square; grid on;
%add line
line=[point;point+normr(normal)]
hold on;plot3(line(:,1),line(:,2),line(:,3),'LineWidth',5)
This gives a circle in 3D:
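For matplotlib users, the same idea can be sketched in Python with Rodrigues' rotation formula (my own translation, not the answerer's code):
import numpy as np

point = np.array([1.0, 2.0, 3.0])
normal = np.array([1.0, 2.0, 2.0])
n = normal / np.linalg.norm(normal)
# a circle in the x-y plane
t = np.deg2rad(np.arange(0, 361, 10))
circle0 = np.column_stack([np.cos(t), np.sin(t), np.zeros_like(t)])
# rotation taking [0, 0, 1] onto n (Rodrigues' formula)
z = np.array([0.0, 0.0, 1.0])
v = np.cross(z, n)                     # rotation axis, with length sin(angle)
c = np.dot(z, n)                       # cos(angle)
if np.allclose(v, 0):                  # n parallel to z: identity or a 180-degree flip
    R = np.eye(3) if c > 0 else np.diag([1.0, -1.0, -1.0])
else:
    vx = np.array([[0.0, -v[2], v[1]],
                   [v[2], 0.0, -v[0]],
                   [-v[1], v[0], 0.0]])
    R = np.eye(3) + vx + np.dot(vx, vx) * ((1 - c) / np.dot(v, v))
circle = np.dot(circle0, R.T) + point  # rotate, then translate onto the point
# circle can then be drawn, e.g. with mpl_toolkits.mplot3d.art3d.Poly3DCollection([circle])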
A cleaner Python example that also works for tricky x, y, z situations:
from mpl_toolkits.mplot3d import axes3d
from matplotlib.patches import Circle, PathPatch
import matplotlib.pyplot as plt
from matplotlib.transforms import Affine2D
from mpl_toolkits.mplot3d import art3d
import numpy as np
def plot_vector(fig, orig, v, color='blue'):
    ax = fig.gca(projection='3d')
    orig = np.array(orig); v = np.array(v)
    ax.quiver(orig[0], orig[1], orig[2], v[0], v[1], v[2], color=color)
    ax.set_xlim(0, 10); ax.set_ylim(0, 10); ax.set_zlim(0, 10)
    ax = fig.gca(projection='3d')
    return fig

def rotation_matrix(d):
    sin_angle = np.linalg.norm(d)
    if sin_angle == 0:
        return np.identity(3)
    d /= sin_angle
    eye = np.eye(3)
    ddt = np.outer(d, d)
    skew = np.array([[0, d[2], -d[1]],
                     [-d[2], 0, d[0]],
                     [d[1], -d[0], 0]], dtype=np.float64)
    M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
    return M

def pathpatch_2d_to_3d(pathpatch, z, normal):
    if type(normal) is str:  # Translate strings to normal vectors
        index = "xyz".index(normal)
        normal = np.roll((1.0, 0, 0), index)
    normal /= np.linalg.norm(normal)  # Make sure the vector is normalised
    path = pathpatch.get_path()  # Get the path and the associated transform
    trans = pathpatch.get_patch_transform()
    path = trans.transform_path(path)  # Apply the transform
    pathpatch.__class__ = art3d.PathPatch3D  # Change the class
    pathpatch._code3d = path.codes  # Copy the codes
    pathpatch._facecolor3d = pathpatch.get_facecolor()  # Get the face color (note: this must be a call)
    verts = path.vertices  # Get the vertices in 2D
    d = np.cross(normal, (0, 0, 1))  # Obtain the rotation vector
    M = rotation_matrix(d)  # Get the rotation matrix
    pathpatch._segment3d = np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])

def pathpatch_translate(pathpatch, delta):
    pathpatch._segment3d += delta

def plot_plane(ax, point, normal, size=10, color='y'):
    p = Circle((0, 0), size, facecolor=color, alpha=.2)
    ax.add_patch(p)
    pathpatch_2d_to_3d(p, z=0, normal=normal)
    pathpatch_translate(p, (point[0], point[1], point[2]))
o = np.array([5,5,5])
v = np.array([3,3,3])
n = [0.5, 0.5, 0.5]
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
plot_plane(ax, o, n, size=3)
ax.set_xlim(0,10);ax.set_ylim(0,10);ax.set_zlim(0,10)
plt.show()