Caffe CNN Slice layer: 2nd Slice layer produces unknown bottom blob
I have two Slice layers (see the prototxt file below). The first one seems to be working fine, whereas the bottom of the second one triggers an "unknown bottom blob" error, shown below.
In fact, I am not sure whether the error is related to the Slice or the Flatten layer.
Please note that the second Slice layer is not even printed in the log.
This is the prototxt file:
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label_b4_noise"
include {
phase: TEST
}
hdf5_data_param {
source: "data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_val_list.txt"
batch_size: 25
shuffle: true
}
}
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label_b4_noise"
include {
phase: TRAIN
}
hdf5_data_param {
source: "data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_train_list.txt"
batch_size: 25
shuffle: true
}
}
layer {
name: "slic0"
type: "Slice"
bottom: "data"
top: "data1"
top: "data2"
slice_param {
axis: 1
slice_point: 1
}
}
layer {
name: "conv_u0d-score_New"
type: "Convolution"
bottom: "data1"
top: "conv_last"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "flat"
type: "Flatten"
bottom: "conv_last"
top: "ampl"
}
layer {
name: "slic1"
type: "Slice"
bottom: "label_b4_noise"
top: "label_b4_noise1"
top: "label_b4_noise2"
slice_param {
axis: 1
slice_point: 1
}
}
layer {
name: "flatdata"
type: "Flatten"
bottom: "label_b4_noise1"
top: "flatdata"
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "ampl"
bottom: "flatdata"
top: "loss"
softmax_param {engine: CAFFE}
}
This is the log file:
GL ----------------------------------------------------------------
res/4removal_nAmp3nData2_2e5/unet_bs10/unet data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s .
res/4removal_nAmp3nData2_2e5/unet_bs10/unet data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s .
WARNING: Logging before InitGoogleLogging() is written to STDERR
I1018 11:40:22.930601 104201 upgrade_proto.cpp:67] Attempting to upgrade input file specified using deprecated input fields: res/4removal_nAmp3nData2_2e5/unet_bs10/unet_tmp/unet_deploy.txt
I1018 11:40:22.930654 104201 upgrade_proto.cpp:70] Successfully upgraded file specified using deprecated input fields.
W1018 11:40:22.930658 104201 upgrade_proto.cpp:72] Note that future Caffe releases will only support input layers and not input fields.
I1018 11:40:23.237383 104201 net.cpp:51] Initializing net from parameters:
name: "unet"
state {
phase: TEST
level: 0
}
layer {
name: "input"
type: "Input"
top: "data"
input_param {
shape {
dim: 1
dim: 2
dim: 1
dim: 2048
}
}
}
layer {
name: "slic0"
type: "Slice"
bottom: "data"
top: "data1"
top: "data2"
slice_param {
slice_point: 1
axis: 1
}
}
layer {
name: "conv_u0d-score_New"
type: "Convolution"
bottom: "data1"
top: "conv_last"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "flat"
type: "Flatten"
bottom: "conv_last"
top: "ampl"
}
layer {
name: "flatdata"
type: "Flatten"
bottom: "label_b4_noise1"
top: "flatdata"
}
F1018 11:40:23.237546 104201 insert_splits.cpp:29] Unknown bottom blob 'label_b4_noise1' (layer 'flatdata', bottom index 0)
*** Check failure stack trace: ***
/pbs/home/n/nhatami/sps/spectro/trainAndTest_4removal: line 101: 104201 Aborted $pydir/dumpLayersSize.py ${tmp_root}_deploy.txt ${oroot}
/pbs/home/n/nhatami/sps/spectro/trainAndTest_4removal: line 101: 104201 Aborted $pydir/dumpLayersSize.py ${tmp_root}_deploy.txt ${oroot}
Thu Oct 18 11:40:23 CEST 2018
/usr/bin/time -v caffe -gpu 0 --log_dir=res/4removal_nAmp3nData2_2e5/unet_bs10/unet_tmp train -solver res/4removal_nAmp3nData2_2e5/unet_bs10/unet_tmp/unet_solver.txt
Thu Oct 18 11:41:26 CEST 2018
/pbs/home/n/nhatami/sps/spectro/trainAndTest_4removal: line 206: gnuplot: command not found
/pbs/home/n/nhatami/sps/spectro/trainAndTest_4removal: line 225: gnuplot: command not found
/usr/bin/time -v -o res/4removal_nAmp3nData2_2e5/unet_bs10/unet_time_test.txt python /pbs/home/n/nhatami/sps/spectro/python/test_4removal.py -eg hist -l label -mf=data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_met.txt res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt res/4removal_nAmp3nData2_2e5/unet_bs10/unet.caffemodel data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_test.h5 res/4removal_nAmp3nData2_2e5/unet_bs10/restest/unet_test
WARNING: Logging before InitGoogleLogging() is written to STDERR
W1018 11:41:42.595289 105177 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W1018 11:41:42.595335 105177 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W1018 11:41:42.595338 105177 _caffe.cpp:142] Net('res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt', 1, weights='res/4removal_nAmp3nData2_2e5/unet_bs10/unet.caffemodel')
I1018 11:41:42.597472 105177 upgrade_proto.cpp:67] Attempting to upgrade input file specified using deprecated input fields: res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt
I1018 11:41:42.597497 105177 upgrade_proto.cpp:70] Successfully upgraded file specified using deprecated input fields.
W1018 11:41:42.597501 105177 upgrade_proto.cpp:72] Note that future Caffe releases will only support input layers and not input fields.
I1018 11:41:42.597535 105177 net.cpp:51] Initializing net from parameters:
name: "unet"
state {
phase: TEST
level: 0
}
layer {
name: "input"
type: "Input"
top: "data"
input_param {
shape {
dim: 1
dim: 2
dim: 1
dim: 2048
}
}
}
layer {
name: "slic0"
type: "Slice"
bottom: "data"
top: "data1"
top: "data2"
slice_param {
slice_point: 1
axis: 1
}
}
layer {
name: "conv_u0d-score_New"
type: "Convolution"
bottom: "data1"
top: "conv_last"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "flat"
type: "Flatten"
bottom: "conv_last"
top: "ampl"
}
layer {
name: "flatdata"
type: "Flatten"
bottom: "label_b4_noise1"
top: "flatdata"
}
F1018 11:41:42.597617 105177 insert_splits.cpp:29] Unknown bottom blob 'label_b4_noise1' (layer 'flatdata', bottom index 0)
*** Check failure stack trace: ***
('res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt', 'res/4removal_nAmp3nData2_2e5/unet_bs10/unet.caffemodel', 'data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_test.h5', 'data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_met.txt')
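For reference, a quick way to list which layers actually ended up in the generated deploy file is to parse it with caffe's protobuf bindings (a sketch, assuming pycaffe is installed; the path is the one printed in the log above):

    from caffe.proto import caffe_pb2
    from google.protobuf import text_format

    # Parse the generated deploy prototxt.
    net = caffe_pb2.NetParameter()
    with open('res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt') as f:
        text_format.Merge(f.read(), net)

    # Print every layer with its bottoms and tops; if 'slic1' is absent here,
    # nothing produces 'label_b4_noise1' for the 'flatdata' layer.
    for layer in net.layer:
        print(layer.name, layer.type, list(layer.bottom), '->', list(layer.top))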
Here is the h5disp output for the HDF5 data:
Dataset 'label_b4_noise'
Size: 2048x1x2x10000
MaxSize: 2048x1x2xInf
Datatype: H5T_IEEE_F32LE (single)
ChunkSize: 2048x1x2x100
Filters: none
FillValue: 0.000000
Dataset 'data'
Size: 2048x1x2x10000
MaxSize: 2048x1x2xInf
Datatype: H5T_IEEE_F32LE (single)
ChunkSize: 2048x1x2x100
Filters: none
FillValue: 0.000000
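Note that Caffe's HDF5Data layer reads each dataset as an N x C x H x W blob, and h5disp (MATLAB) reports the dimensions in reverse order, so 2048x1x2x10000 here should correspond to 10000 samples of shape 2x1x2048, which matches the 1x2x1x2048 Input shape in the deploy log. A small sketch to double-check the shapes as h5py/Caffe will see them (assuming h5py; the file name is the test file from the log above):

    import h5py

    with h5py.File('data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_test.h5', 'r') as f:
        print(f['data'].shape)            # e.g. (10000, 2, 1, 2048) for the dump above
        print(f['label_b4_noise'].shape)  # same layout as 'data'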
Is there a way to use d3-axis without directly manipulating the DOM?
D3.js complements Vue.js nicely when used for its helper functions (especially d3-scale).
I'm currently using a simple Vue template to generate an SVG.
To generate the axes, I first create a <g ref="axisX"> element and then call d3.select(this.$refs.axisX).call(d3.axisBottom(this.scale.x)).
This works well, but it conflicts with Vue.js' scoped styles, so I would like to avoid using d3.select and direct DOM manipulation altogether.
Is there a way to access d3-axis without calling it from a DOM element? It would be useful to have access to its path generation function independently instead of via the DOM.
Here is a sample CodePen: https://codepen.io/thibautg/pen/BYRBXW
This is a situation that calls for a custom directive. Custom directives allow you to manipulate the DOM within the element they are attached to.
In this case, I created a directive that takes an argument naming the axis and a value, which is your computed scale. Based on whether the axis is x or y, it calls axisBottom or axisLeft with scale[axis].
No more watching is needed: the directive is called any time anything updates. You could add a check that scale in particular has changed from its previous value, if you wanted.
new Vue({
el: "#app",
data() {
return {
width: 600,
height: 400,
margin: {
top: 20,
right: 20,
bottom: 20,
left: 20
},
items: [
{ name: "a", val: 10 },
{ name: "b", val: 8 },
{ name: "c", val: 1 },
{ name: "d", val: 5 },
{ name: "e", val: 6 },
{ name: "f", val: 3 }
]
};
},
computed: {
outsideWidth() {
return this.width + this.margin.left + this.margin.right;
},
outsideHeight() {
return this.height + this.margin.top + this.margin.bottom;
},
scale() {
const x = d3
.scaleBand()
.domain(this.items.map(x => x.name))
.rangeRound([0, this.width])
.padding(0.15);
const y = d3
.scaleLinear()
.domain([0, Math.max(...this.items.map(x => x.val))])
.rangeRound([this.height, 0]);
return { x, y };
}
},
directives: {
axis(el, binding) {
const axis = binding.arg;
const axisMethod = { x: "axisBottom", y: "axisLeft" }[axis];
const methodArg = binding.value[axis];
d3.select(el).call(d3[axisMethod](methodArg));
}
}
});
rect.bar {
fill: steelblue;
}
<script src="//unpkg.com/vue#2"></script>
<script src="//cdnjs.cloudflare.com/ajax/libs/d3/4.11.0/d3.min.js"></script>
<div id="app">
<svg :width="outsideWidth"
:height="outsideHeight">
<g :transform="`translate(${margin.left},${margin.top})`">
<g class="bars">
<template v-for="item in items">
<rect class="bar"
:x="scale.x(item.name)"
:y="scale.y(item.val)"
:width="scale.x.bandwidth()"
:height="height - scale.y(item.val)"
/>
</template>
<g v-axis:x="scale" :transform="`translate(0,${height})`"></g>
<g v-axis:y="scale"></g>
</g>
</g>
</svg>
</div>
By default, in QML Charts all the points of a LineSeries are labelled. I need to label only specific points; how can I achieve this?
Here is my QML file (part of a bigger project). I have set the corresponding properties on the LineSeries to show point labels, but I only need a couple of labels displayed instead of the complete set of points. Is there a way to control this in QML?
import QtQuick 2.7
import QtCharts 2.2
import QtQuick.Layouts 1.3
import aa.ui.backend 1.0
Item {
property DSPBackend backend: analyzer.dsp
property alias titleV: chartV.title
property alias seriesV: lineSeriesV
property alias titleI: chartI.title
property alias seriesI: lineSeriesI
GridLayout {
id: gridSpectrum
anchors.fill: parent
columns: 2
rows: 1
columnSpacing: 0
flow: GridLayout.LeftToRight
ChartView {
id: chartV
Layout.fillWidth: true
Layout.fillHeight: true
/*title: "Spectrum (V-channel)"
titleFont.pixelSize: 10*/
theme: ChartView.ChartThemeBlueCerulean
antialiasing: false
legend.visible: false
legend.font.pointSize: 10
margins.top: 2
margins.left: 2
margins.right: 2
margins.bottom: 4
ValueAxis {
id: axisXV
titleFont.pointSize: 7
labelsFont.pointSize: 7
}
ValueAxis {
id: axisYV
titleFont.pointSize: 7
labelsFont.pointSize: 7
}
LineSeries {
id: lineSeriesV
axisX: axisXV
axisY: axisYV
color: "#66cc00"
name: "V-channel"
pointLabelsFormat: "(#xPoint Hz, #yPoint dB)"
pointLabelsVisible: true
width: 3
useOpenGL: false
}
}
ChartView {
id: chartI
/*title: "Spectrum (I-channel)"
titleFont.pixelSize: 10*/
antialiasing: true
theme: ChartView.ChartThemeBlueCerulean
legend.visible: false
legend.font.pointSize: 10
margins.top: 2
margins.left: 2
margins.right: 2
margins.bottom: 4
Layout.fillWidth: true
Layout.fillHeight: true
ValueAxis {
id: axisXI
titleFont.pointSize: 7
labelsFont.pointSize: 7
}
ValueAxis {
id: axisYI
titleFont.pointSize: 7
labelsFont.pointSize: 7
}
LineSeries {
id: lineSeriesI
axisX: axisXI
axisY: axisYI
color: "#0099ff"
name: "I-channel"
pointLabelsFormat: "(#xPoint Hz, #yPoint dB)"
pointLabelsVisible: true
width: 3
useOpenGL: false
}
}
}
Timer {
id: timerSpectrum
interval: 500
running: true
repeat: true
onTriggered: {
backend.update_spectrum(seriesV, seriesI);
}
}
Component.onCompleted: {
backend.setup_spectrum(axisXV, axisYV, axisXI, axisYI)
}
}
You can add a bool property, or a function that returns a bool, and evaluate it in the pointLabelsFormat property:
pointLabelsFormat: myBool ? "(#xPoint Hz, #yPoint dB)" : ""
If the boolean is false, the points' labels are set to an empty string.
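A minimal sketch of how that could look on the LineSeries above (showLabels is a hypothetical flag, not part of the original code):

    LineSeries {
        id: lineSeriesV
        // Hypothetical flag: flip it to show or hide every label of this series.
        property bool showLabels: false
        axisX: axisXV
        axisY: axisYV
        color: "#66cc00"
        name: "V-channel"
        pointLabelsVisible: true
        // When showLabels is false the format is an empty string,
        // so no label text is drawn for any point.
        pointLabelsFormat: showLabels ? "(#xPoint Hz, #yPoint dB)" : ""
        width: 3
        useOpenGL: false
    }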
I am relatively new to using caffe and am trying to create minimal working examples that I can (later) tweak. I had no difficulty using caffe's examples with MNIST data. I downloaded image-net data (ILSVRC12) and used caffe's tool to convert it to an lmdb database using:
$CAFFE_ROOT/build/install/bin/convert_imageset -shuffle -encoded=true top_level_data_dir/ fileNames.txt lmdb_name
This creates an lmdb containing encoded (JPEG) image data. The reason for encoding is size: the encoded lmdb is about 64 GB, versus about 240 GB unencoded.
My .prototxt file that describes the net is minimal (a pair of inner product layers, mostly borrowed from the MNIST example--not going for accuracy here, I just want something to work).
name: "example"
layer {
name: "imagenet"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
}
data_param {
source: "train-lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "imagenet"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
data_param {
source: "test-lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "data"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
When train-lmdb is unencoded, this .prototxt file works fine (accuracy is abysmal, but caffe does not crash). However, if train-lmdb is encoded then I get the following error:
data_transformer.cpp:239] Check failed: channels == img_channels (3 vs. 1)
Question: Is there some "flag" I must set in the .prototxt file that indicates that train-lmdb contains encoded images? (The same flag would likely have to be given for the testing data layer, test-lmdb.)
A little research:
Poking around with Google, I found a resolved issue that seemed promising. However, setting 'force_encoded_color' to true did not resolve my problem.
I also found this answer very helpful for creating the lmdb (specifically, the directions for enabling encoding); however, it does not mention what should be done so that caffe is aware that the images are encoded.
The error message you got:
data_transformer.cpp:239] Check failed: channels == img_channels (3 vs. 1)
means that caffe's data transformer is expecting input with 3 channels (i.e., a color image), but is getting an image with only 1 channel (img_channels, i.e., a grayscale image).
Looking at caffe.proto, it seems you should set the parameter in transform_param:
layer {
name: "imagenet"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
force_color: true ## try this
}
data_param {
source: "train-lmdb"
batch_size: 100
backend: LMDB
force_encoded_color: true ## cannot hurt...
}
}
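If those flags do not help, it can be worth checking what is actually stored in the lmdb. A quick sketch (assuming the lmdb and opencv-python packages plus pycaffe's protobuf bindings; 'train-lmdb' is the source path from the prototxt above) that decodes the first record and prints its shape:

    import lmdb
    import numpy as np
    import cv2
    from caffe.proto import caffe_pb2

    env = lmdb.open('train-lmdb', readonly=True)
    with env.begin() as txn:
        key, value = next(iter(txn.cursor()))

    datum = caffe_pb2.Datum()
    datum.ParseFromString(value)
    print('encoded:', datum.encoded, 'channels field:', datum.channels)

    if datum.encoded:
        # Decode the stored JPEG bytes as-is; a 2-D result means a grayscale
        # image, which is exactly what triggers the 3-vs-1 channel check.
        img = cv2.imdecode(np.frombuffer(datum.data, dtype=np.uint8), cv2.IMREAD_UNCHANGED)
        print('decoded shape:', img.shape)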
Since I have a classifier based on single patch scores, I would like to sum together the predictions a network produces for different images.
From
https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto, the Reduction layer does not support operating over an axis other than the last one.
A pooling operation would also produce an average of its input, but obviously without touching the full batch.
I have implemented a Python layer, but it is not fast enough for large-scale experiments.
Is there a way to "sum" or, more generally, operate over the first axis with the tools already available?
Yes, you can. If you have an N x p x q x r blob of predictions, first use Slice (SliceLayer) to create N blobs, each of shape 1 x p x q x r. Then use these N blobs as the N bottoms of an Eltwise (EltwiseLayer) layer to produce a single top.
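A minimal sketch of this Slice + Eltwise approach, assuming a hypothetical batch size of N = 4 and a prediction blob named "pred" (with no slice_point given, Slice splits the blob evenly across its tops):

    layer {
      name: "slice_batch"
      type: "Slice"
      bottom: "pred"
      top: "pred_0"
      top: "pred_1"
      top: "pred_2"
      top: "pred_3"
      # axis 0 is the batch axis
      slice_param { axis: 0 }
    }
    layer {
      name: "sum_batch"
      type: "Eltwise"
      bottom: "pred_0"
      bottom: "pred_1"
      bottom: "pred_2"
      bottom: "pred_3"
      top: "pred_sum"
      # element-wise SUM of the four 1 x p x q x r blobs into a single top
      eltwise_param { operation: SUM }
    }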
If your predictions have dimensions N x c (for a mini-batch size of N and c channels), then you can slice this into c blobs of dimension N and feed each of them into a Reduction layer.
For example, you could write the following as a Jinja2 template:
layer {
name: "pred-slice"
type: "Slice"
bottom: "pred"
{%- for num in range(10) %}
top: "pred-{{ num }}-vector"
{%- endfor %}
slice_param {
slice_dim: 1
{%- for num in range(1, 10) %}
slice_point: {{ num }}
{%- endfor %}
}
include {
phase: TEST
}
}
{%- for num in range(10) %}
layer {
name: "pred-{{num}}"
type: "Reduction"
bottom: "pred-{{ num }}-vector"
top: "pred-{{ num }}"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
{%- endfor %}
which expands to:
layer {
name: "pred-slice"
type: "Slice"
bottom: "pred"
top: "pred-0-vector"
top: "pred-1-vector"
top: "pred-2-vector"
top: "pred-3-vector"
top: "pred-4-vector"
top: "pred-5-vector"
top: "pred-6-vector"
top: "pred-7-vector"
top: "pred-8-vector"
top: "pred-9-vector"
slice_param {
slice_dim: 1
slice_point: 1
slice_point: 2
slice_point: 3
slice_point: 4
slice_point: 5
slice_point: 6
slice_point: 7
slice_point: 8
slice_point: 9
}
include {
phase: TEST
}
}
layer {
name: "pred-0"
type: "Reduction"
bottom: "pred-0-vector"
top: "pred-0"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-1"
type: "Reduction"
bottom: "pred-1-vector"
top: "pred-1"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-2"
type: "Reduction"
bottom: "pred-2-vector"
top: "pred-2"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-3"
type: "Reduction"
bottom: "pred-3-vector"
top: "pred-3"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-4"
type: "Reduction"
bottom: "pred-4-vector"
top: "pred-4"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-5"
type: "Reduction"
bottom: "pred-5-vector"
top: "pred-5"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-6"
type: "Reduction"
bottom: "pred-6-vector"
top: "pred-6"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-7"
type: "Reduction"
bottom: "pred-7-vector"
top: "pred-7"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-8"
type: "Reduction"
bottom: "pred-8-vector"
top: "pred-8"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-9"
type: "Reduction"
bottom: "pred-9-vector"
top: "pred-9"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
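For completeness, a small sketch of how such a template could be rendered into the expanded prototxt above (file names are hypothetical; assumes the jinja2 package):

    from jinja2 import Template

    # Render the template with Jinja2's default environment and write the result.
    with open('pred_reduction.prototxt.j2') as f:
        template = Template(f.read())

    with open('pred_reduction.prototxt', 'w') as f:
        f.write(template.render())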