I'm doing a final project for school and need help because I'm not sure how to execute it.
I'm investigating how image distortion affects an ANN's ability to learn. I have no background in coding whatsoever, so any help will be hugely appreciated! I can't use TensorFlow, as part of the requirements.
This is the network classification code my prof used; it's the standard code from the textbook we're using.
import numpy
import scipy.special

# neural network class definition
class neuralNetwork:

    # initialise the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # set number of nodes in each input, hidden, output layer
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # link weight matrices wih (input->hidden) and who (hidden->output),
        # sampled from a normal distribution centred on zero
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
        self.lr = learningrate
        # activation function is the sigmoid
        self.activation_function = lambda x: scipy.special.expit(x)

    # train the neural network
    def train(self, inputs_list, targets_list):
        # convert inputs and targets to 2d column vectors
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # forward pass
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        # backpropagate the errors and update both weight matrices
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)), numpy.transpose(inputs))

    # query the neural network
    def query(self, inputs_list):
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_inputs = numpy.dot(self.wih, inputs)
        hidden_outputs = self.activation_function(hidden_inputs)
        final_inputs = numpy.dot(self.who, hidden_outputs)
        final_outputs = self.activation_function(final_inputs)
        return final_outputs
# number of input, hidden and output nodes
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
learning_rate = 0.1
# create instance of neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# load the mnist training data CSV file into a list
training_data_file = open("mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# train the neural network
epochs = 5
for e in range(epochs):
    for record in training_data_list:
        all_values = record.split(',')
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
# load the mnist test data CSV file into a list
test_data_file = open("mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# test the neural network
# scorecard for how well the network performs, initially empty
scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = n.query(inputs)
    label = numpy.argmax(outputs)
    if label == correct_label:
        scorecard.append(1)
    else:
        scorecard.append(0)
# calculate the performance score, the fraction of correct answers
scorecard_array = numpy.asarray(scorecard)
print ("performance = ", scorecard_array.sum() / scorecard_array.size)
So I came up with a sharpening kernel. What I want to accomplish is to apply this kernel to the training data so that each image in the training dataset is 'sharpened' before the network sees it. (I'm also using the convolution method given by my prof.) Where am I supposed to add this in the code above?
sharpen_kernel = numpy.array([[0, -1, 0],
                              [-1, 5, -1],
                              [0, -1, 0]])
# convolve your image with the kernel
matplotlib.rcParams['figure.figsize'] = 20, 20  # (requires import matplotlib)
conv_image = numpy.ones((28, 28))
print("shape of image_array", numpy.shape(image_array))
step = 3
# a 3x3 kernel fits at positions 0..25 of a 28x28 image; the original loop
# incremented i before using it and so skipped the first row and column
i = 0
while i <= 25:
    j = 0
    while j <= 25:
        sub_image = image_array[i:(i + step), j:(j + step)]  # array[start_y:end_y, start_x:end_x]
        sub_image = numpy.reshape(sub_image, (1, step ** 2))
        kernel = numpy.reshape(sharpen_kernel, (step ** 2, 1))
        conv_scalar = numpy.dot(sub_image, kernel)
        conv_image[i, j] = conv_scalar
        j += 1
    i += 1
Basically I want to measure the relationship between the ANN's performance and the quality of the image (e.g. blur, sharpening). Help!
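To answer the "where": the kernel doesn't go inside the neuralNetwork class at all; it belongs in the training loop, between reading a CSV record and calling n.train(). Below is a minimal sketch of one way to wire it in, assuming the convolution above is wrapped in a helper function (the name sharpen is invented here for illustration; it is not from the course code). The same transform would need to be applied to the test records to measure performance on distorted images.

# Minimal sketch (not from the textbook code): wrap the convolution above
# in a helper and apply it to each training image before n.train().
def sharpen(image_array, kernel, step=3):
    conv_image = numpy.ones(image_array.shape)
    for i in range(image_array.shape[0] - step + 1):
        for j in range(image_array.shape[1] - step + 1):
            patch = image_array[i:i + step, j:j + step]
            conv_image[i, j] = numpy.sum(patch * kernel)
    return conv_image

for e in range(epochs):
    for record in training_data_list:
        all_values = record.split(',')
        # reshape the flat 784-pixel record to 28x28, convolve, flatten back
        image_array = numpy.asfarray(all_values[1:]).reshape((28, 28))
        sharpened = sharpen(image_array, sharpen_kernel).flatten()
        # sharpening can push pixels outside 0-255, so clip before rescaling
        sharpened = numpy.clip(sharpened, 0.0, 255.0)
        inputs = (sharpened / 255.0 * 0.99) + 0.01
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)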
Python version: 3.8
PyTorch version: 1.9.0+cpu
Platform: Anaconda, Spyder 5.0
To reproduce this problem, just copy all of the code below into a single file.
The ILSVRC2012_val_00000293.jpg file used in this code is shown below; you will need to download it and change its path in the code.
Some background on this problem:
I am working on a project that aims to develop a hardware accelerator for the inference pass of the MobileNet V2 network. I used a pretrained quantized PyTorch model to simulate the outcome, and the results came out very well.
In order to implement this on hardware, I need to know every input, output, and intermediate variable produced while running this piece of PyTorch code. I used a package named torchextractor to fetch the output of the first layer, which in this case is a 3x3 convolution layer.
import numpy as np
import torchvision
import torch
from torchvision import transforms, datasets
from PIL import Image
from torchvision import transforms
import torchextractor as tx
import math
#########################################################################################
##### Processing of input image
#########################################################################################
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
test_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])
# image file destination (raw string so the backslashes are not treated as escapes)
filename = r"D:\Project_UM\MobileNet_VC709\MobileNet_pytorch\ILSVRC2012_val_00000293.jpg"
input_image = Image.open(filename)
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0)
#########################################################################################
#########################################################################################
#########################################################################################
#----First verify that the torchextractor wrapper does not influence the inference outcome
# ofmp of layer1 before putting into torchextractor
# (quantize_tensor is defined further down; it must appear before this line
# if you paste everything into a single file)
a, b, c = quantize_tensor(input_batch)  # quantize the input tensor and return an int8 tensor, scale and zero point
input_qa = torch.quantize_per_tensor(input_batch.clone().detach(), b, c, torch.quint8)  # using torch's quantize_per_tensor
# Load a quantized mobilenet_v2 model
model_quantized = torchvision.models.quantization.mobilenet_v2(pretrained=True, quantize=True)
model_quantized.eval()
with torch.no_grad():
    output = model_quantized.features[0][0](input_qa)  # ofmp of layer1, datatype: quantized tensor
    # print("FM of layer1 before tx_extractor:\n", output.int_repr())  # ofmp of layer1, datatype: int8 tensor
    output1_clone = output.int_repr().detach().numpy()  # clone ofmp of layer1, datatype: ndarray
#########################################################################################
#########################################################################################
#########################################################################################
# ofmp of layer1 after adding torchextractor
model_quantized_ex = tx.Extractor(model_quantized, ["features.0.0"])  # capture the module inside the first layer
model_output, features = model_quantized_ex(input_batch)  # forward propagation
# feature_shapes = {name: f.shape for name, f in features.items()}
# print(features['features.0.0'])  # ofmp of layer1, datatype: quantized tensor
out1_clone = features['features.0.0'].int_repr().numpy()  # clone ofmp of layer1, datatype: ndarray
# note: `a.all() == b.all()` only compares two booleans; an element-wise
# comparison is needed to check that the arrays really match
if (out1_clone == output1_clone).all():
    print('Model with torchextractor attached outputs the same values as the original model')
else:
    print('The torchextractor wrapper influences the outcome')
Here I define a numpy quantization scheme based on the scheme proposed in the paper Quantization and Training of Neural Networks for Efficient Integer-Arithmetic-Only Inference:
# Convert a regular tensor to a quantized tensor with scale and zero_point
def quantize_tensor(x, num_bits=8):  # quantize the input tensor and return an int8 tensor, scale and zero point
    qmin = 0.
    qmax = 2.**num_bits - 1.
    min_val, max_val = x.min(), x.max()
    scale = (max_val - min_val) / (qmax - qmin)
    initial_zero_point = qmin - min_val / scale
    zero_point = 0
    if initial_zero_point < qmin:
        zero_point = qmin
    elif initial_zero_point > qmax:
        zero_point = qmax
    else:
        zero_point = initial_zero_point
    # print(zero_point)
    zero_point = int(zero_point)
    q_x = zero_point + x / scale
    q_x.clamp_(qmin, qmax).round_()
    q_x = q_x.round().byte()
    return q_x, scale, zero_point
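As a quick sanity check of the scheme (this snippet is mine, not part of the original code), the mapping q = round(x/scale) + zero_point should agree with torch.quantize_per_tensor when given the same scale and zero point:

# Illustrative check: quantize_tensor vs torch's built-in per-tensor quantization
x = torch.tensor([0.0, 0.5, 1.0, 2.0])
q_x, scale, zero_point = quantize_tensor(x)
print(q_x)                      # expected: 0, 64, 128, 255 (scale = 2/255, zero_point = 0)
q_ref = torch.quantize_per_tensor(x, float(scale), zero_point, torch.quint8)
print(q_ref.int_repr())         # should match q_x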
#%%
# #############################################################################################
# --------- Simulate the inference process of layer0: conv33 using numpy
# #############################################################################################
# get the input_batch quantized buffer data
input_scale = b.item()
input_zero = c
input_quantized = a[0].detach().numpy()
# get the layer0 output scale and zero_point
output_scale = model_quantized.features[0][0].state_dict()['scale'].item()
output_zero = model_quantized.features[0][0].state_dict()['zero_point'].item()
# get the quantized weight with scale and zero_point
weight_scale = model_quantized.features[0][0].state_dict()["weight"].q_scale()
weight_zero = model_quantized.features[0][0].state_dict()["weight"].q_zero_point()
weight_quantized = model_quantized.features[0][0].state_dict()["weight"].int_repr().numpy()
# print(weight_quantized)
# print(weight_quantized.shape)
# bias_quantized,bias_scale,bias_zero= quantize_tensor(model_quantized.features[0][0].state_dict()["bias"])# to quantize the input tensor and return an int8 tensor, scale and zero point
# print(bias_quantized.shape)
bias = model_quantized.features[0][0].state_dict()["bias"].detach().numpy()
# print(input_quantized)
print(type(input_scale))
print(type(output_scale))
print(type(weight_scale))
Then I wrote a quantized 2D convolution using numpy, hoping to figure out every detail of the PyTorch data flow during inference.
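For reference, the computation the functions below implement is the integer-only inference formula from the paper cited above (my restatement, with $s$ for scales, $z$ for zero points, and $b$ for the bias):

$$q_{out} = \operatorname{round}\!\left(\frac{s_{in}\,s_{w}}{s_{out}} \sum_{m} (q_{in,m} - z_{in})(q_{w,m} - z_{w}) + \frac{b}{s_{out}}\right) + z_{out}$$

The factor $M = s_{in} s_{w} / s_{out}$ is the only floating-point constant; in the paper it is the quantity a hardware implementation replaces with a fixed-point multiply and shift.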
#%% numpy simulated layer0 convolution function define
def conv_cal(input_quantized, weight_quantized, kernel_size, stride, out_i, out_j, out_k):
    weight = weight_quantized[out_i]
    input = np.zeros((input_quantized.shape[0], kernel_size, kernel_size))
    for i in range(weight.shape[0]):
        for j in range(weight.shape[1]):
            for k in range(weight.shape[2]):
                input[i][j][k] = input_quantized[i][stride*out_j+j][stride*out_k+k]
    # print(np.dot(weight, input))
    # print(input, "\n")
    # print(weight)
    return np.multiply(weight, input).sum()

def QuantizedConv2D(input_scale, input_zero, input_quantized, output_scale, output_zero, weight_scale, weight_zero, weight_quantized, bias, kernel_size, stride, padding, ofm_size):
    output = np.zeros((weight_quantized.shape[0], ofm_size, ofm_size))
    input_quantized_padding = np.full((input_quantized.shape[0], input_quantized.shape[1]+2*padding, input_quantized.shape[2]+2*padding), 0)
    zero_temp = np.full(input_quantized.shape, input_zero)
    input_quantized = input_quantized - zero_temp
    for i in range(input_quantized.shape[0]):
        for j in range(padding, padding + input_quantized.shape[1]):
            for k in range(padding, padding + input_quantized.shape[2]):
                input_quantized_padding[i][j][k] = input_quantized[i][j-padding][k-padding]
    zero_temp = np.full(weight_quantized.shape, weight_zero)
    weight_quantized = weight_quantized - zero_temp
    for i in range(output.shape[0]):
        for j in range(output.shape[1]):
            for k in range(output.shape[2]):
                # output[i][j][k] = (weight_scale*input_scale)*conv_cal(input_quantized_padding, weight_quantized, kernel_size, stride, i, j, k) + bias[i]  # floating_output
                output[i][j][k] = weight_scale*input_scale/output_scale*conv_cal(input_quantized_padding, weight_quantized, kernel_size, stride, i, j, k) + bias[i]/output_scale + output_zero
                output[i][j][k] = round(output[i][j][k])  # int_output
    return output
Here I feed in the same image, weights, and bias together with their zero points and scales, then compare this "numpy simulated" result to the PyTorch-calculated one.
quantized_model_out1_int8 = np.squeeze(features['features.0.0'].int_repr().numpy())
print(quantized_model_out1_int8.shape)
print(quantized_model_out1_int8)
out1_np = QuantizedConv2D(input_scale, input_zero, input_quantized, output_scale, output_zero, weight_scale, weight_zero, weight_quantized, bias, 3, 2, 1, 112)
np.save("out1_np.npy", out1_np)
for i in range(quantized_model_out1_int8.shape[0]):
    for j in range(quantized_model_out1_int8.shape[1]):
        for k in range(quantized_model_out1_int8.shape[2]):
            if out1_np[i][j][k] < 0:
                out1_np[i][j][k] = 0
print(out1_np)
flag = np.zeros(quantized_model_out1_int8.shape)
for i in range(quantized_model_out1_int8.shape[0]):
    for j in range(quantized_model_out1_int8.shape[1]):
        for k in range(quantized_model_out1_int8.shape[2]):
            if quantized_model_out1_int8[i][j][k] == out1_np[i][j][k]:
                flag[i][j][k] = 1
                out1_np[i][j][k] = 0
                quantized_model_out1_int8[i][j][k] = 0
# Compare the simulated result to the extractor-fetched result, computing the total hit rate
print(flag.sum()/(112*112*32)*100, '%')
If a "numpy simulated" value is the same as the extracted one, I call it a hit. Printing the total hit rate shows that numpy gets 92% of the values right. Now the problem is, I have no idea why the remaining 8% of values come out wrong.
Comparison of the two outcomes:
The picture below shows the differing values between the numpy result and the PyTorch result, for sample channel index [1]. The upper left corner is the numpy result and the upper right corner is the PyTorch one; I have set all values that are the same in both to 0. As you can see, most of the differing values are off by just 1 (which can be viewed as error from the precision loss of fixed-point arithmetic), but some have large differences, e.g. value [1][4]: 121 vs. 76 (I don't know why).
Focus on one strange value:
This code replays the calculation of value [1][4]. Originally I expected that a trial-and-error process could lead me to the wanted number of 76, but no matter what I tried, it never output 76. If you want to try this, I paste the code here for your convenience.
#%% A test code to check the calculation process
weight_quantized_sample = weight_quantized[2]
M_t = input_scale * weight_scale / output_scale
ifmap_t = np.int32(input_quantized[:, 1:4, 7:10])
weight_t = np.int32(weight_quantized_sample)
bias_t = bias[2]
bias_q = bias_t / output_scale
res_t = 0
for ch in range(3):
    ifmap_offset = ifmap_t[ch] - np.int32(input_zero)
    weight_offset = weight_t[ch] - np.int32(weight_zero)
    res_ch = np.multiply(ifmap_offset, weight_offset)
    res_ch = res_ch.sum()
    res_t = res_t + res_ch
res_mul = M_t * res_t
# for n in range(1, 30):
#     res_mul = multiply(n, M_t, res_t)
res_t = round(res_mul + output_zero + bias_q)
print(res_t)
Could you help me out of this? I have been stuck here for a long time.
I implemented my own version of the quantized convolution and got a hit rate between 99.999% and 100% (the occasional mismatched value is off by 1, which I consider a rounding issue). The paper linked in the question helped a lot.
Your formulas look the same as mine, so I am not sure what your issue was. As I understand it, quantization in PyTorch is hardware dependent. One visible difference between our implementations: mine reads per-channel weight scales and zero points (q_per_channel_scales / q_per_channel_zero_points), while yours assumes a single per-tensor weight scale; whether that matters depends on how the pretrained model's weights were quantized.
Here is my code:
def my_Conv2dRelu_b2(input_q, conv_layer, output_shape):
    '''
    Args:
        input_q: quantized input tensor
        conv_layer: quantized convolution module
        output_shape: the pre-computed shape of the result
    Returns:
    '''
    output = np.zeros(output_shape)
    # extract needed float numbers from quantized operations
    weights_scale = conv_layer.weight().q_per_channel_scales()
    input_scale = input_q.q_scale()
    weights_zp = conv_layer.weight().q_per_channel_zero_points()
    input_zp = input_q.q_zero_point()
    # extract needed convolution parameters
    padding = conv_layer.padding
    stride = conv_layer.stride
    # extract float numbers for results
    output_zp = conv_layer.zero_point
    output_scale = conv_layer.scale
    conv_weights_int = conv_layer.weight().int_repr()
    input_int = input_q.int_repr()
    biases = conv_layer.bias().numpy()
    for k in range(input_q.shape[0]):
        for i in range(conv_weights_int.shape[0]):
            output[k][i] = manual_convolution_quant(
                input_int[k].numpy(),
                conv_weights_int[i].numpy(),
                biases[i],
                padding=padding,
                stride=stride,
                image_zp=input_zp, image_scale=input_scale,
                kernel_zp=weights_zp[i].item(), kernel_scale=weights_scale[i].item(),
                result_zp=output_zp, result_scale=output_scale
            )
    return output
def manual_convolution_quant(image, kernel, b, padding, stride, image_zp, image_scale, kernel_zp, kernel_scale,
                             result_zp, result_scale):
    H = image.shape[1]
    W = image.shape[2]
    new_H = H // stride[0]
    new_W = W // stride[1]
    results = np.zeros([new_H, new_W])
    M = image_scale * kernel_scale / result_scale
    bias = b / result_scale
    paddedIm = np.pad(
        image,
        [(0, 0), (padding[0], padding[0]), (padding[1], padding[1])],
        mode="constant",
        constant_values=image_zp,
    )
    s = kernel.shape[1]
    for i in range(new_H):
        for j in range(new_W):
            patch = paddedIm[
                :, i * stride[0]: i * stride[0] + s, j * stride[1]: j * stride[1] + s
            ]
            res = M * ((kernel - kernel_zp) * (patch - image_zp)).sum() + result_zp + bias
            if res < 0:
                res = 0
            results[i, j] = round(res)
    return results
Code to compare pytorch and my own version.
def calc_hit_rate(array1, array2):
    good = (array1 == array2).astype(int).sum()  # note: np.int is deprecated, use the builtin int
    all = array1.size
    return good / all

# during inference
y2 = model.conv1(y1)
y2_int = torch.int_repr(y2)
y2_int_manual = my_Conv2dRelu_b2(y1, model.conv1, y2.shape)
print(f'y2 hit rate= {calc_hit_rate(y2.int_repr().numpy(), y2_int_manual)}')  # hit_rate=1.0
I am trying to set up a simple model with electrical or thermal power flows between sources and sinks.
I seem to have the same problem as treated in this topic, although I used only one pair of flow and potential variables in my connector:
connector PowerPortE
  flow SI.Power P;
  SI.Voltage v "Dummy potential-variable to balance flow-variable P";
end PowerPortE;
A simple example with a signal-driven power sink looks like this:
model PowerSinkE
  SimplePowerSystem.PowerPortE Port;
  Modelica.Blocks.Interfaces.RealInput P(unit = "W");
  SI.Voltage v(start = 230);
equation
  Port.P = P;
  Port.v = v;
end PowerSinkE;

model Test
  SimplePowerSystem.PowerSinkE Verbraucher;
  Modelica.Blocks.Sources.Sine sine1(freqHz = 50);
equation
  connect(sine1.y, Verbraucher.P);
end Test;
Checking PowerSinkE goes well, but when trying to simulate, I get the following errors:
Internal error pre-optimization module removeSimpleEquations failed.
Internal error Found Equation without time dependent variables Verbraucher.Port.P = const.k
An independent subset of the model has imbalanced number of equations (1) and variables (2).
variables:
Verbraucher.v
Verbraucher.Port.v
equations:
1 : Verbraucher.Port.v = Verbraucher.v
An independent subset of the model has imbalanced number of equations (4) and variables (3).
variables:
sine1.y
Verbraucher.P
Verbraucher.Port.P
equations:
1 : Verbraucher.Port.P = Verbraucher.P
2 : sine1.y = sine1.offset + (if time < sine1.startTime then 0.0 else sine1.amplitude * sin(6.283185307179586 * sine1.freqHz * (time - sine1.startTime) + sine1.phase))
3 : Verbraucher.Port.P = 0.0
4 : Verbraucher.P = sine1.y
Initially I wanted to leave the variable v out of the model completely (though I had to keep it in the connector so the connector stays balanced), but this didn't work out either:
Model is structurally singular, error found sorting equations
1: 0.0 = sine1.offset + (if time < sine1.startTime then 0.0 else sine1.amplitude * sin(6.283185307179586 * sine1.freqHz * (time - sine1.startTime) + sine1.phase));
for variables
Verbraucher.Port.v(1)
The problem seems to be that I need the flow variable power but don't have a corresponding potential variable. I am running out of ideas for how to fix this, so thanks for the help.
Why do you try to use a connector in this case? If you don't need the "physical meaning" of flow and potential variables inside the connector, you can just use real inputs and outputs to handle the signals.
package SimplePowerSystem
  model PowerSinkE
    import SI = Modelica.SIunits;
    SI.Power P;
    Modelica.Blocks.Interfaces.RealInput P_in(unit="W");
  equation
    P = P_in;
  end PowerSinkE;

  model Test
    SimplePowerSystem.PowerSinkE Verbraucher;
    Modelica.Blocks.Sources.Sine sine1(freqHz = 50);
  equation
    connect(sine1.y, Verbraucher.P_in);
  end Test;
end SimplePowerSystem;
My initial thought is that the port in the consumer is unconnected. This adds the equation Verbraucher.Port.P = 0.0 (visible in the error output above). But what you really need is an equation for the voltage in the port.
You need to use voltage and current on your electrical connector and you need an electrical ground. I suggest you have a look at Modelica by Example for more about both electrical and thermal component modeling.
When I create a spectrogram in MATLAB/Octave, I can produce a swept signal that looks like the RED plot line below. But how can I create a swept signal like the BLUE line in the 1st plot using the equation below?
Thanks to Daniel and David for getting me this far; the code is below:
startfreq = 200;
fs = 44100;
endfreq = 20;
dursec = 10;  % duration of signal in seconds
t = (0:dursec*fs)/fs;  % time vector
alpha = log(startfreq/endfreq)/dursec;
sig = exp(-j*2*pi*startfreq/alpha*exp(-alpha*t));
sig = sig/max(abs(sig))*.8;  % normalize signal
wavwrite(sig', fs, 32, '/tmp/del.wav');  % export file
specgram(sig, 150, 400);
1st plot
2nd plot
How can I fix the equation in the variable sig to get it to look like the BLUE line in the 1st plot?
3rd plot
This question is almost a month old, so you might have figured this out by now. Here's an answer in case you are still interested.
It appears that your current model for the frequency is
freq(t) = b*exp(-alpha*t)
with
freq(0) = b = startfreq
freq(dursec) = b*exp(-alpha*dursec) = endfreq
There are two free parameters (b and alpha), and two equations. The first equation, b = startfreq, gives us b (trivially).
Solving the last equation for alpha gives
alpha = -log(endfreq/startfreq)/dursec
= log(startfreq/endfreq)/dursec
So
freq(t) = startfreq * exp(-alpha*t)
To use this as the instantaneous frequency of a frequency-swept signal,
we need the integral, which I'll call phase(t):
phase(t) = -(startfreq/alpha) * exp(-alpha*t)
The (complex) frequency-swept signal is then
sig(t) = exp(2*pi*j * phase(t))
The real part of this signal is
sig(t) = cos(2*pi*phase(t))
That explains your current code. To generate a chirp whose frequency varies like the blue curve, you need a different model for the frequency. A more general model than the one used above is
freq(t) = a + b*exp(-alpha*t)
The requirements at t=0 and t=dursec are
freq(0) = a + b = startfreq
freq(dursec) = a + b*exp(-alpha*dursec) = endfreq
That's two equations, but we now have three parameters: a, b, and alpha. I'll use the two equations to determine a and b, and leave alpha as a free parameter. Solving gives
b = (startfreq - endfreq)/(1 - exp(-alpha*dursec))
a = startfreq - b
Integrating the model gives
phase(t) = a*t - (b/alpha)*exp(-alpha*t)
alpha is an arbitrary parameter. Following the formula from the first model, I'll use:
alpha = abs(log(startfreq/endfreq))/dursec
The following is a complete script. Note that I also changed the use of exp(-j*2*pi*...) to cos(2*pi*...). The factor 0.8 is there to match your code.
startfreq = 20;
endfreq = 200;
fs = 44100;
dursec = 10;  % duration of signal in seconds
t = (0:dursec*fs)/fs;  % time vector

if (startfreq == endfreq)
    phase = startfreq * t;
else
    alpha = abs(log(endfreq/startfreq))/dursec;
    b = (startfreq - endfreq)/(1 - exp(-alpha*dursec));
    a = startfreq - b;
    phase = a*t - (b/alpha)*exp(-alpha*t);
endif

sig = 0.8 * cos(2*pi*phase);
wavwrite(sig', fs, 32, 'del.wav');  % export file
specgram(sig, 150, 400);
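If you want to convince yourself that this phase formula really sweeps between the requested endpoints, here is a quick numerical check (written in Python/numpy rather than Octave, purely as an illustrative sketch): differentiate the phase and compare the instantaneous frequency at t = 0 and t = dursec against startfreq and endfreq.

import numpy as np

startfreq, endfreq, dursec, fs = 20.0, 200.0, 10.0, 44100
t = np.arange(0, dursec * fs + 1) / fs

alpha = abs(np.log(endfreq / startfreq)) / dursec
b = (startfreq - endfreq) / (1 - np.exp(-alpha * dursec))
a = startfreq - b
phase = a * t - (b / alpha) * np.exp(-alpha * t)

# instantaneous frequency = d(phase)/dt, approximated by finite differences
inst_freq = np.diff(phase) * fs
print(inst_freq[0], inst_freq[-1])  # should be close to 20 and 200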
What is the best way to calculate the mean value (mean) and standard deviation (StdDev) of a continuous signal in Modelica? The mean and StdDev should be calculated over a fixed period of time T, i.e., from t-T until t.
Below is a discrete solution to the problem. It is coded as a Modelica block with one continuous input and two continuous output signals. Discretization is accomplished using the Modelica built-in operator sample:
block MeanStdDevDiscr
  "Determines the mean value and standard deviation of a signal over a fixed time interval T."
  extends Modelica.Blocks.Interfaces.BlockIcon;
  import SI = Modelica.SIunits;

  parameter SI.Time T = 0.1
    "Time interval used for calculating mean value and standard deviation";
  parameter Integer n = 10 "Number of increments in T";
  Modelica.Blocks.Interfaces.RealInput u "signal input"
    annotation (Placement(transformation(extent={{-140,-20},{-100,20}})));
  Modelica.Blocks.Interfaces.RealOutput[2] y
    "y[1] = average value; y[2] = standard deviation"
    annotation (Placement(transformation(extent={{100,-10},{120,10}})));
protected
  parameter SI.Time dt = T/n "Precision of monitor";
  Real[n] uArray;
initial equation
  uArray = ones(n)*u;
equation
  when sample(0, dt) then
    uArray[1] = u;
    for j in 2:n loop
      uArray[j] = pre(uArray[j-1]);
    end for;
  end when;
  y[1] = sum(uArray)/n; // mean value
  y[2] = sqrt(sum((uArray .- y[1]).^2)/n); // standard deviation
  annotation (Diagram(graphics));
end MeanStdDevDiscr;
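To see what the block computes numerically, here is a small Python/numpy sketch of the same idea (my illustration, not Modelica): keep the last n samples, refreshed every dt = T/n, and take the mean and population standard deviation over that window, exactly as the two output equations above do.

import numpy as np

def windowed_stats(u, n):
    """Mean and population std over the last n samples of u, mirroring the
    Modelica block: y[1] = sum(uArray)/n, y[2] = sqrt(sum((uArray - mean)^2)/n)."""
    window = np.asarray(u[-n:], dtype=float)
    mean = window.sum() / n
    std = np.sqrt(((window - mean) ** 2).sum() / n)
    return mean, std

# example: a 5 Hz sine sampled with T = 0.1 s split into n = 10 increments (dt = 0.01 s)
n, dt = 10, 0.01
t = np.arange(0.0, 1.0, dt)
u = np.sin(2 * np.pi * 5 * t)
print(windowed_stats(u, n))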