Reading encoded image data from lmdb database in caffe - neural-network

I am relatively new to using caffe and am trying to create minimal working examples that I can (later) tweak. I had no difficulty using caffe's examples with MNIST data. I downloaded ImageNet data (ILSVRC12) and used caffe's tool to convert it to an lmdb database using:
$CAFFE_ROOT/build/install/bin/convert_imageset -shuffle -encoded=true top_level_data_dir/ fileNames.txt lmdb_name
to create an lmdb containing encoded (JPEG) image data. The reason for this is that the encoded lmdb is about 64GB, versus about 240GB unencoded.
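For reference, whether the records are actually stored encoded can be verified by reading one entry back. A quick sketch, assuming pycaffe and the Python lmdb package are installed ('lmdb_name' is the output path from the command above):
import lmdb
from caffe.proto import caffe_pb2

# Open the database created by convert_imageset and inspect the first record.
env = lmdb.open('lmdb_name', readonly=True)
with env.begin() as txn:
    key, value = next(txn.cursor().iternext())
    datum = caffe_pb2.Datum()
    datum.ParseFromString(value)
    # For an encoded database, datum.encoded is True and datum.data holds
    # the raw JPEG bytes rather than an unpacked pixel buffer.
    print(key, datum.encoded, datum.channels, datum.height, datum.width, len(datum.data))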
My .prototxt file that describes the net is minimal (a pair of inner product layers, mostly borrowed from the MNIST example--not going for accuracy here, I just want something to work).
name: "example"
layer {
name: "imagenet"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
}
data_param {
source: "train-lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "imagenet"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
data_param {
source: "test-lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "ip1"
type: "InnerProduct"
bottom: "data"
top: "ip1"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "relu1"
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layer {
name: "ip2"
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 1000
weight_filler {
type: "xavier"
}
bias_filler {
type: "constant"
}
}
}
layer {
name: "accuracy"
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include {
phase: TEST
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"
}
When train-lmdb is unencoded, this .prototxt file works fine (accuracy is abysmal, but caffe does not crash). However, if train-lmdb is encoded then I get the following error:
data_transformer.cpp:239] Check failed: channels == img_channels (3 vs. 1)
Question: Is there some "flag" I must set in the .prototxt file that indicates that train-lmdb contains encoded images? (The same flag would likely have to be given for the testing data layer, test-lmdb.)
A little research:
Poking around with Google, I found a resolved issue which seemed promising. However, setting 'force_encoded_color' to true did not resolve my problem.
I also found this answer very helpful for creating the lmdb (specifically, the directions for enabling the encoding); however, no mention was made of what should be done so that caffe is aware that the images are encoded.

The error message you got:
data_transformer.cpp:239] Check failed: channels == img_channels (3 vs. 1)
means Caffe's data transformer is expecting input with 3 channels (i.e., a color image), but is getting an image with only 1 channel (i.e., a grayscale image).
Looking at caffe.proto, it seems you should set the parameter in the transform_param:
layer {
name: "imagenet"
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
force_color: true ## try this
}
data_param {
source: "train-lmdb"
batch_size: 100
backend: LMDB
force_encoded_color: true ## cannot hurt...
}
}
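If neither flag helps, it may be worth checking what is actually stored in the LMDB: if some of the encoded images decode to a single channel (ILSVRC12 is known to contain some grayscale JPEGs), the transformer check can still trip. A rough sketch, assuming pycaffe, OpenCV, and the Python lmdb package, with 'train-lmdb' being the source path from the prototxt above:
import lmdb
import numpy as np
import cv2
from caffe.proto import caffe_pb2

env = lmdb.open('train-lmdb', readonly=True)
with env.begin() as txn:
    for key, value in txn.cursor():
        datum = caffe_pb2.Datum()
        datum.ParseFromString(value)
        if not datum.encoded:
            continue
        # Decode the stored JPEG roughly as Caffe would and check its channels.
        img = cv2.imdecode(np.frombuffer(datum.data, dtype=np.uint8),
                           cv2.IMREAD_UNCHANGED)
        if img is None or img.ndim == 2 or img.shape[2] != 3:
            print('non-3-channel image at key', key)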

Related

Specify Dynamic keys in openapi yaml

I want to create a response structure like this where either fieldProfile (profile type) or summaryProfile is optional, i.e., the outer key is dynamic and optional:
{
"fieldProfile": [
"metric1",
"metric2",
"metric3"
],
"summaryProfile": [
"metric1",
"metric2",
"metric3"
]
}
Or
{
"fieldProfile": [
"metric1",
"metric2",
"metric3"
]
} // Here summaryProfile is removed; similarly, the response could contain only summaryProfile, or both
I am creating the response schema like this, but here I want the metricType (fieldProfile/summaryProfile) to be the key for metrics (metric list).
ProfileMetrics:
description: List of metrics available in Field Profile
type: object
required:
- metricType
- metrics
properties:
metrics:
type: array
items:
type: object
required:
- metricName
properties:
metricName:
type: string
Any help is appreciated
Your schema can be defined as follows:
MySchema:
type: object
properties:
fieldProfile:
type: array
items:
type: string
summaryProfile:
type: array
items:
type: string
# At least fieldProfile or summaryProfile (or both) must be present
minProperties: 1
Instead of minProperties: 1, you can use this anyOf + required construct; it will achieve the same effect in this example:
anyOf:
- required: [fieldProfile]
- required: [summaryProfile]
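As a rough sanity check, these keywords behave the same in plain JSON Schema, so the example payloads can be validated with Python's jsonschema package (the schema below is a hand transcription of the YAML above):
from jsonschema import ValidationError, validate

schema = {
    "type": "object",
    "properties": {
        "fieldProfile": {"type": "array", "items": {"type": "string"}},
        "summaryProfile": {"type": "array", "items": {"type": "string"}},
    },
    "anyOf": [
        {"required": ["fieldProfile"]},
        {"required": ["summaryProfile"]},
    ],
}

validate({"fieldProfile": ["metric1", "metric2"]}, schema)   # ok
validate({"summaryProfile": ["metric1"]}, schema)            # ok
try:
    validate({}, schema)  # neither key present -> rejected
except ValidationError as e:
    print(e.message)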

MongoDB query with 300k documents takes more than 30 seconds

OK, as said in the title, I have a "performance issue" where I need to get all documents from a collection, but it takes too long. The players collection contains around 300k small documents, and the query in the service goes like this:
async getAllPlayers() {
const players = await this.playersCollection.find({}, {projection: { playerId: 1, name: 1, surname: 1, shirtNumber: 1, position: 1 }}).toArray();
return players;
}
The overall size is 6.4 MB. I'm using the Fastify adapter, fastify-compress, and the MongoDB native driver. If I remove the projection, it takes almost a minute.
Any idea how to improve this?
The best time I get is 8 seconds, where fast-json-stringify gives me a more than 10-second boost over 300k records:
'use strict'
// run fresh mongo
// docker run --name temp --rm -p 27017:27017 mongo
const fastify = require('fastify')({ logger: true })
const fjs = require('fast-json-stringify')
const toString = fjs({
type: 'object',
properties: {
playerId: { type: 'integer' },
name: { type: 'string' },
surname: { type: 'string' },
shirtNumber: { type: 'integer' },
}
})
fastify.register(require('fastify-mongodb'), {
forceClose: true,
url: 'mongodb://localhost/mydb'
})
fastify.get('/', (request, reply) => {
const dataStream = fastify.mongo.db.collection('foo')
.find({}, {
limit: 300000,
projection: { playerId: 1, name: 1, surname: 1, shirtNumber: 1, position: 1 }
})
.stream({
transform(doc) {
return toString(doc) + '\n'
}
})
reply.type('application/jsonl')
reply.send(dataStream)
})
fastify.get('/insert', async (request, reply) => {
const collection = fastify.mongo.db.collection('foo')
const batch = collection.initializeOrderedBulkOp();
for (let i = 0; i < 300000; i++) {
const player = {
playerId: i,
name: `Name ${i}`,
surname: `surname ${i}`,
shirtNumber: i
}
batch.insert(player);
}
const { result } = await batch.execute()
return result
})
fastify.listen(8080)
In any case, you should consider:
paginating your output, or
pushing the data into a bucket (like S3) and returning to the client a URL to download the file directly; this will speed up the process a lot and will spare your Node.js process from this data streaming.
Note that compression in Node.js is a heavy process, so it slows down the response a lot. An nginx proxy adds it by default without the need to implement it in your business-logic server.
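To illustrate the pagination option, here is a keyset-style sketch using pymongo (Python, just to show the idea; the same approach works with the Node.js driver, and the collection and field names are taken from the question):
from pymongo import ASCENDING, MongoClient

client = MongoClient("mongodb://localhost/mydb")
players = client.get_default_database()["players"]

def get_players_page(last_id=None, page_size=1000):
    # Keyset pagination: resume after the last _id seen instead of using skip().
    query = {"_id": {"$gt": last_id}} if last_id is not None else {}
    projection = {"playerId": 1, "name": 1, "surname": 1,
                  "shirtNumber": 1, "position": 1}
    return list(players.find(query, projection)
                .sort("_id", ASCENDING)
                .limit(page_size))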

ApexCharts: Hide every nth label in chart

I would like to hide some of the labels from my chart made with ApexCharts.js. I am coming from Frappé Charts, which has a feature called "continuity." It allows you to hide labels if they do not comfortably fit, because the chart is a timeseries chart.
My ApexChart looks like this:
I would like to remove many of the dates, but still have them appear in the tooltip. I was able to do this in Frappé Charts and it looked like this:
Here's my code for the Apex chart:
var options = {
chart: {
animations: { enabled: false },
toolbar: { show: false },
zoom: { enabled: false },
type: 'line',
height: 400,
fontFamily: 'PT Sans'
},
stroke: {
width: 2
},
theme: {
monochrome: {
enabled: true,
color: '#800000',
shadeTo: 'light',
shadeIntensity: 0.65
}
},
series: [{
name: 'New Daily Cases',
data: [2,0,0,0,0,0,0,1,0,1,0,7,1,1,1,8,0,11,2,9,8,21,17,28,24,20,38,39,36,21,10,49,45,44,52,74,31,29,43,28,39,58,30,47,50,31,28,79,39,54,55,33,42,39,41,52,25,30,37,26,30,35,42,64,46,25,35,45,56,45,64,34,34,32,40,65,56,64,55,37,61,51,70,81,76,64,71,61,56,52,106,108,104,33,57,82,71,67,68,63,71,32,70,65,98,52,72,87,66,85,90,47,164,123,180,119,85,66,122,65,155,191,129,144,175,224,234,240,128,99,141,131,215,228,198,152,126,201,92,137,286,139,236,238,153,170,106,61]
}],
labels: ['February 28','February 29','March 1','March 2','March 3','March 4','March 5','March 6','March 7','March 8','March 9','March 10','March 11','March 12','March 13','March 14','March 15','March 16','March 17','March 18','March 19','March 20','March 21','March 22','March 23','March 24','March 25','March 26','March 27','March 28','March 29','March 30','March 31','April 1','April 2','April 3','April 4','April 5','April 6','April 7','April 8','April 9','April 10','April 11','April 12','April 13','April 14','April 15','April 16','April 17','April 18','April 19','April 20','April 21','April 22','April 23','April 24','April 25','April 26','April 27','April 28','April 29','April 30','May 1','May 2','May 3','May 4','May 5','May 6','May 7','May 8','May 9','May 10','May 11','May 12','May 13','May 14','May 15','May 16','May 17','May 18','May 19','May 20','May 21','May 22','May 23','May 24','May 25','May 26','May 27','May 28','May 29','May 30','May 31','June 1','June 2','June 3','June 4','June 5','June 6','June 7','June 8','June 9','June 10','June 11','June 12','June 13','June 14','June 15','June 16','June 17','June 18','June 19','June 20','June 21','June 22','June 23','June 24','June 25','June 26','June 27','June 28','June 29','June 30','July 1','July 2','July 3','July 4','July 5','July 6','July 7','July 8','July 9','July 10','July 11','July 12','July 13','July 14','July 15','July 16','July 17','July 18','July 19','July 20','July 21','July 22','July 23','July 24'],
xaxis: {
tooltip: { enabled: false }
},
}
var chart = new ApexCharts(document.querySelector("#chart"), options);
chart.render();
<script src="https://cdn.jsdelivr.net/npm/apexcharts"></script>
<div id="chart"></div>
And here's my code for the Frappé Chart if it helps:
const data = {
labels: ['February 28','February 29','March 1','March 2','March 3','March 4','March 5','March 6','March 7','March 8','March 9','March 10','March 11','March 12','March 13','March 14','March 15','March 16','March 17','March 18','March 19','March 20','March 21','March 22','March 23','March 24','March 25','March 26','March 27','March 28','March 29','March 30','March 31','April 1','April 2','April 3','April 4','April 5','April 6','April 7','April 8','April 9','April 10','April 11','April 12','April 13','April 14','April 15','April 16','April 17','April 18','April 19','April 20','April 21','April 22','April 23','April 24','April 25','April 26','April 27','April 28','April 29','April 30','May 1','May 2','May 3','May 4','May 5','May 6','May 7','May 8','May 9','May 10','May 11','May 12','May 13','May 14','May 15','May 16','May 17','May 18','May 19','May 20','May 21','May 22','May 23','May 24','May 25','May 26','May 27','May 28','May 29','May 30','May 31','June 1','June 2','June 3','June 4','June 5','June 6','June 7','June 8','June 9','June 10','June 11','June 12','June 13','June 14','June 15','June 16','June 17','June 18','June 19','June 20','June 21','June 22','June 23','June 24','June 25','June 26','June 27','June 28','June 29','June 30','July 1','July 2','July 3','July 4','July 5','July 6','July 7','July 8','July 9','July 10','July 11','July 12','July 13','July 14','July 15','July 16','July 17','July 18','July 19','July 20','July 21','July 22','July 23','July 24'],
datasets: [{
name: 'Cumulative Cases',
values: [2,0,0,0,0,0,0,1,0,1,0,7,1,1,1,8,0,11,2,9,8,21,17,28,24,20,38,39,36,21,10,49,45,44,52,74,31,29,43,28,39,58,30,47,50,31,28,79,39,54,55,33,42,39,41,52,25,30,37,26,30,35,42,64,46,25,35,45,56,45,64,34,34,32,40,65,56,64,55,37,61,51,70,81,76,64,71,61,56,52,106,108,104,33,57,82,71,67,68,63,71,32,70,65,98,52,72,87,66,85,90,47,164,123,180,119,85,66,122,65,155,191,129,144,175,224,234,240,128,99,141,131,215,228,198,152,126,201,92,137,286,139,236,238,153,170,106,61],
chartType: 'line'
}]
}
const chart = new frappe.Chart('#chart', {
data: data,
type: 'line',
height: 250,
animate: false,
barOptions: {
spaceRatio: 0.25
},
colors: ['#800000'],
tooltipOptions: {
formatTooltipY: d => d.toLocaleString()
},
axisOptions: {
xAxisMode: 'tick',
xIsSeries: true
},
lineOptions: {
hideDots: true,
regionFill: true
}
})
<script src="https://cdn.jsdelivr.net/npm/frappe-charts#1.5.2/dist/frappe-charts.min.iife.min.js"></script>
<div id="chart"></div>
I've tried using the formatter callback function to return only every 10th value, but things get all out of position and the tooltips don't work. I get similar problems returning an empty string or a space for the values I wish to exclude (but still include in the tooltip).
What I do is calculate the ratio between the chart area's width and the number of ticks, and if that ratio is above a certain number, I add a class name to the chart or its wrapper and there I write:
.apexcharts-xaxis-label{
display: none;
&:nth-child(5n){ display:revert; }
}
So every 5th label is shown and the rest are hidden.
You can also set up a resizeObserver to add/remove the special class.
This requires the config below to be given to the chart:
xaxis: {
labels: {
rotate: 0, // no need to rotate since hiding labels gives plenty of room
hideOverlappingLabels: false // all labels must be rendered
}
}
You can try 2 things.
xaxis: {
type: 'datetime',
}
You can convert the x-axis to datetime and the labels will align as shown below.
Or
You can stop rotation of the x-axis labels using
xaxis: {
labels: {
rotate: 0
}
}
which produces the following result.
Vsync's answer did not work for me. It needed a little modification:
.apexcharts-xaxis-texts-g text[id^='SvgjsText'] {
display: none;
}
.apexcharts-xaxis-texts-g text[id^='SvgjsText']:nth-of-type(5n) {
display: revert;
}
labels: ['', this.props.itemNames], // (labels: [the label, the label below])

Caffe CNN Slice layer: 2nd Slice layer produces unknown bottom blob

I have 2 Slice layers (see the proto file below). The 1st one seems to be working well, whereas the 2nd one's bottom gives an "unknown bottom blob" error, as shown further down.
In fact, I am not sure whether the error is related to Slice or Flatten.
Please note that the 2nd Slice is not even printed in the log!
This is the proto file:
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label_b4_noise"
include {
phase: TEST
}
hdf5_data_param {
source: "data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_val_list.txt"
batch_size: 25
shuffle: true
}
}
layer {
name: "data"
type: "HDF5Data"
top: "data"
top: "label_b4_noise"
include {
phase: TRAIN
}
hdf5_data_param {
source: "data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_train_list.txt"
batch_size: 25
shuffle: true
}
}
layer {
name: "slic0"
type: "Slice"
bottom: "data"
top: "data1"
top: "data2"
slice_param {
axis: 1
slice_point: 1
}
}
layer {
name: "conv_u0d-score_New"
type: "Convolution"
bottom: "data1"
top: "conv_last"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "flat"
type: "Flatten"
bottom: "conv_last"
top: "ampl"
}
layer {
name: "slic1"
type: "Slice"
bottom: "label_b4_noise"
top: "label_b4_noise1"
top: "label_b4_noise2"
slice_param {
axis: 1
slice_point: 1
}
}
layer {
name: "flatdata"
type: "Flatten"
bottom: "label_b4_noise1"
top: "flatdata"
}
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "ampl"
bottom: "flatdata"
top: "loss"
softmax_param {engine: CAFFE}
}
This is the log file:
GL ----------------------------------------------------------------
res/4removal_nAmp3nData2_2e5/unet_bs10/unet data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s .
WARNING: Logging before InitGoogleLogging() is written to STDERR
I1018 11:40:22.930601 104201 upgrade_proto.cpp:67] Attempting to upgrade input file specified using deprecated input fields: res/4removal_nAmp3nData2_2e5/unet_bs10/unet_tmp/unet_deploy.txt
I1018 11:40:22.930654 104201 upgrade_proto.cpp:70] Successfully upgraded file specified using deprecated input fields.
W1018 11:40:22.930658 104201 upgrade_proto.cpp:72] Note that future Caffe releases will only support input layers and not input fields.
I1018 11:40:23.237383 104201 net.cpp:51] Initializing net from parameters:
name: "unet"
state {
phase: TEST
level: 0
}
layer {
name: "input"
type: "Input"
top: "data"
input_param {
shape {
dim: 1
dim: 2
dim: 1
dim: 2048
}
}
}
layer {
name: "slic0"
type: "Slice"
bottom: "data"
top: "data1"
top: "data2"
slice_param {
slice_point: 1
axis: 1
}
}
layer {
name: "conv_u0d-score_New"
type: "Convolution"
bottom: "data1"
top: "conv_last"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "flat"
type: "Flatten"
bottom: "conv_last"
top: "ampl"
}
layer {
name: "flatdata"
type: "Flatten"
bottom: "label_b4_noise1"
top: "flatdata"
}
F1018 11:40:23.237546 104201 insert_splits.cpp:29] Unknown bottom blob 'label_b4_noise1' (layer 'flatdata', bottom index 0)
*** Check failure stack trace: ***
/pbs/home/n/nhatami/sps/spectro/trainAndTest_4removal: line 101: 104201 Aborted $pydir/dumpLayersSize.py ${tmp_root}_deploy.txt ${oroot}
Thu Oct 18 11:40:23 CEST 2018
/usr/bin/time -v caffe -gpu 0 --log_dir=res/4removal_nAmp3nData2_2e5/unet_bs10/unet_tmp train -solver res/4removal_nAmp3nData2_2e5/unet_bs10/unet_tmp/unet_solver.txt
Thu Oct 18 11:41:26 CEST 2018
/pbs/home/n/nhatami/sps/spectro/trainAndTest_4removal: line 206: gnuplot: command not found
/pbs/home/n/nhatami/sps/spectro/trainAndTest_4removal: line 225: gnuplot: command not found
/usr/bin/time -v -o res/4removal_nAmp3nData2_2e5/unet_bs10/unet_time_test.txt python /pbs/home/n/nhatami/sps/spectro/python/test_4removal.py -eg hist -l label -mf=data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_met.txt res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt res/4removal_nAmp3nData2_2e5/unet_bs10/unet.caffemodel data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_test.h5 res/4removal_nAmp3nData2_2e5/unet_bs10/restest/unet_test
WARNING: Logging before InitGoogleLogging() is written to STDERR
W1018 11:41:42.595289 105177 _caffe.cpp:139] DEPRECATION WARNING - deprecated use of Python interface
W1018 11:41:42.595335 105177 _caffe.cpp:140] Use this instead (with the named "weights" parameter):
W1018 11:41:42.595338 105177 _caffe.cpp:142] Net('res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt', 1, weights='res/4removal_nAmp3nData2_2e5/unet_bs10/unet.caffemodel')
I1018 11:41:42.597472 105177 upgrade_proto.cpp:67] Attempting to upgrade input file specified using deprecated input fields: res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt
I1018 11:41:42.597497 105177 upgrade_proto.cpp:70] Successfully upgraded file specified using deprecated input fields.
W1018 11:41:42.597501 105177 upgrade_proto.cpp:72] Note that future Caffe releases will only support input layers and not input fields.
I1018 11:41:42.597535 105177 net.cpp:51] Initializing net from parameters:
name: "unet"
state {
phase: TEST
level: 0
}
layer {
name: "input"
type: "Input"
top: "data"
input_param {
shape {
dim: 1
dim: 2
dim: 1
dim: 2048
}
}
}
layer {
name: "slic0"
type: "Slice"
bottom: "data"
top: "data1"
top: "data2"
slice_param {
slice_point: 1
axis: 1
}
}
layer {
name: "conv_u0d-score_New"
type: "Convolution"
bottom: "data1"
top: "conv_last"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 1
pad: 0
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "flat"
type: "Flatten"
bottom: "conv_last"
top: "ampl"
}
layer {
name: "flatdata"
type: "Flatten"
bottom: "label_b4_noise1"
top: "flatdata"
}
F1018 11:41:42.597617 105177 insert_splits.cpp:29] Unknown bottom blob 'label_b4_noise1' (layer 'flatdata', bottom index 0)
*** Check failure stack trace: ***
('res/4removal_nAmp3nData2_2e5/unet_bs10/unet_deploy.txt', 'res/4removal_nAmp3nData2_2e5/unet_bs10/unet.caffemodel', 'data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_test.h5', 'data/4removal_nAmp3nData2_2e5/2048_2e5_0.01_s_met.txt')
Here is the h5disp output:
Dataset 'label_b4_noise'
Size: 2048x1x2x10000
MaxSize: 2048x1x2xInf
Datatype: H5T_IEEE_F32LE (single)
ChunkSize: 2048x1x2x100
Filters: none
FillValue: 0.000000
Dataset 'data'
Size: 2048x1x2x10000
MaxSize: 2048x1x2xInf
Datatype: H5T_IEEE_F32LE (single)
ChunkSize: 2048x1x2x100
Filters: none
FillValue: 0.000000

Caffe, operations among batches

Since I have a classifier based on single patch scores, I would like to sum together the predictions a network produces for different images.
According to
https://github.com/BVLC/caffe/blob/master/src/caffe/proto/caffe.proto, Reduction does not support operating over an axis other than the last one.
Also, a pooling operation would produce an average of its input but, obviously, without touching the full batch.
I have implemented a python layer, but this is not fast enough for large scale experiments.
Is there a way to "sum" or, more generally, operate over the first axis with the tools already available?
Yes, you can. If you have an N x p x q x r blob of predictions, first use Slice (SliceLayer) to create N blobs, each of shape 1 x p x q x r. Then use these N blobs as the N bottoms of an Eltwise (EltwiseLayer) layer to produce a single top.
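A minimal, untested sketch of that Slice + Eltwise wiring, generated with pycaffe's NetSpec (the batch size N=4, the channel count 10, and the blob name "pred" are assumptions):
import caffe
from caffe import layers as L, params as P

N = 4  # mini-batch size
n = caffe.NetSpec()
n.pred = L.Input(shape=dict(dim=[N, 10, 1, 1]))
# Slice along axis 0 (the batch axis) into N blobs of shape 1 x 10 x 1 x 1.
slices = L.Slice(n.pred, ntop=N, axis=0, slice_point=list(range(1, N)))
for i, s in enumerate(slices):
    setattr(n, 'pred_slice_%d' % i, s)
# Element-wise sum of the N slices gives a single 1 x 10 x 1 x 1 top.
n.pred_sum = L.Eltwise(*slices, operation=P.Eltwise.SUM)
print(n.to_proto())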
If your predictions have the dimensions N x c (for a mini-batch size of N and c channels), then you can slice this into c blobs of dimension N. You can feed these into a Reduction layer.
For example, you could write the following as a Jinja2 template:
layer {
name: "pred-slice"
type: "Slice"
bottom: "pred"
{%- for num in range(10) %}
top: "pred-{{ num }}-vector"
{%- endfor %}
slice_param {
slice_dim: 1
{%- for num in range(1, 10) %}
slice_point: {{ num }}
{%- endfor %}
}
include {
phase: TEST
}
}
{%- for num in range(10) %}
layer {
name: "pred-{{num}}"
type: "Reduction"
bottom: "pred-{{ num }}-vector"
top: "pred-{{ num }}"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
{%- endfor %}
which expands to:
layer {
name: "pred-slice"
type: "Slice"
bottom: "pred"
top: "pred-0-vector"
top: "pred-1-vector"
top: "pred-2-vector"
top: "pred-3-vector"
top: "pred-4-vector"
top: "pred-5-vector"
top: "pred-6-vector"
top: "pred-7-vector"
top: "pred-8-vector"
top: "pred-9-vector"
slice_param {
slice_dim: 1
slice_point: 1
slice_point: 2
slice_point: 3
slice_point: 4
slice_point: 5
slice_point: 6
slice_point: 7
slice_point: 8
slice_point: 9
}
include {
phase: TEST
}
}
layer {
name: "pred-0"
type: "Reduction"
bottom: "pred-0-vector"
top: "pred-0"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-1"
type: "Reduction"
bottom: "pred-1-vector"
top: "pred-1"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-2"
type: "Reduction"
bottom: "pred-2-vector"
top: "pred-2"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-3"
type: "Reduction"
bottom: "pred-3-vector"
top: "pred-3"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-4"
type: "Reduction"
bottom: "pred-4-vector"
top: "pred-4"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-5"
type: "Reduction"
bottom: "pred-5-vector"
top: "pred-5"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-6"
type: "Reduction"
bottom: "pred-6-vector"
top: "pred-6"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-7"
type: "Reduction"
bottom: "pred-7-vector"
top: "pred-7"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-8"
type: "Reduction"
bottom: "pred-8-vector"
top: "pred-8"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}
layer {
name: "pred-9"
type: "Reduction"
bottom: "pred-9-vector"
top: "pred-9"
include {
phase: TEST
}
reduction_param {
operation: MEAN
}
}