Upgrade existing nets using upgrade_net_proto_text tool

Restore comments afterwards
Jeff Donahue 2015-02-05 15:17:24 -08:00
Parent 11a4c16558
Commit e6c80dac40
23 changed files with 3693 additions and 2764 deletions
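
For orientation, every file below follows the same mechanical mapping from the deprecated V1 net schema to the current one: "layers" blocks become "layer", enum layer types (CONVOLUTION, RELU, SOFTMAX_LOSS, ...) become registered type strings ("Convolution", "ReLU", "SoftmaxWithLoss", ...), the repeated blobs_lr and weight_decay scalars become one param { lr_mult: ... decay_mult: ... } block per blob, preprocessing fields (scale, mean_file, crop_size, mirror) move into transform_param, and include: { phase: TRAIN } becomes a regular include { phase: TRAIN } block. A minimal before/after sketch of a single layer (illustrative, not copied verbatim from any one file below):

# V1 (deprecated) definition, before the upgrade:
layers {
  name: "conv1"
  type: CONVOLUTION
  bottom: "data"
  top: "conv1"
  blobs_lr: 1   # weight learning-rate multiplier
  blobs_lr: 2   # bias learning-rate multiplier
  convolution_param {
    num_output: 32
    kernel_size: 5
  }
}
# Equivalent V2 definition, as emitted by upgrade_net_proto_text:
layer {
  name: "conv1"
  type: "Convolution"
  bottom: "data"
  top: "conv1"
  param { lr_mult: 1 }   # weights
  param { lr_mult: 2 }   # bias
  convolution_param {
    num_output: 32
    kernel_size: 5
  }
}

Assuming a standard Caffe build, the conversion is run once per net definition, roughly as build/tools/upgrade_net_proto_text old.prototxt new.prototxt (the exact path depends on the build layout). The tool drops comments, which is why, as the commit message notes, they had to be restored by hand afterwards.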

View file

@ -6,13 +6,17 @@ input_dim: 1
input_dim: 3
input_dim: 32
input_dim: 32
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
pad: 2
@ -20,9 +24,9 @@ layers {
stride: 1
}
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -31,31 +35,35 @@ layers {
stride: 2
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "pool1"
top: "pool1"
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
norm_region: WITHIN_CHANNEL
local_size: 3
alpha: 5e-05
beta: 0.75
norm_region: WITHIN_CHANNEL
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
pad: 2
@ -63,15 +71,15 @@ layers {
stride: 1
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -80,21 +88,21 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
norm_region: WITHIN_CHANNEL
local_size: 3
alpha: 5e-05
beta: 0.75
norm_region: WITHIN_CHANNEL
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
convolution_param {
@ -104,15 +112,15 @@ layers {
stride: 1
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "pool3"
type: POOLING
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
@ -121,22 +129,26 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 250
weight_decay: 0
param {
lr_mult: 1
decay_mult: 250
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 10
}
}
layers {
layer {
name: "prob"
type: SOFTMAX
type: "Softmax"
bottom: "ip1"
top: "prob"
}

View file

@ -1,41 +1,49 @@
name: "CIFAR10_full"
layers {
layer {
name: "cifar"
type: DATA
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_train_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
include: { phase: TRAIN }
}
layers {
name: "cifar"
type: DATA
top: "data"
top: "label"
data_param {
source: "examples/cifar10/cifar10_test_lmdb"
batch_size: 100
backend: LMDB
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
include: { phase: TEST }
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
pad: 2
@ -50,9 +58,9 @@ layers {
}
}
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -61,31 +69,35 @@ layers {
stride: 2
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "pool1"
top: "pool1"
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
norm_region: WITHIN_CHANNEL
local_size: 3
alpha: 5e-05
beta: 0.75
norm_region: WITHIN_CHANNEL
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
pad: 2
@ -100,15 +112,15 @@ layers {
}
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -117,21 +129,21 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
norm_region: WITHIN_CHANNEL
local_size: 3
alpha: 5e-05
beta: 0.75
norm_region: WITHIN_CHANNEL
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
convolution_param {
@ -148,15 +160,15 @@ layers {
}
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "pool3"
type: POOLING
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
@ -165,15 +177,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 250
weight_decay: 0
param {
lr_mult: 1
decay_mult: 250
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 10
weight_filler {
@ -185,17 +201,19 @@ layers {
}
}
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "ip1"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
include {
phase: TEST
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "ip1"
bottom: "label"
top: "loss"

View file

@ -4,13 +4,17 @@ input_dim: 1
input_dim: 3
input_dim: 32
input_dim: 32
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
pad: 2
@ -18,9 +22,9 @@ layers {
stride: 1
}
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -29,19 +33,23 @@ layers {
stride: 2
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "pool1"
top: "pool1"
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "pool1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
pad: 2
@ -49,15 +57,15 @@ layers {
stride: 1
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -66,13 +74,17 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "pool2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 64
pad: 2
@ -80,15 +92,15 @@ layers {
stride: 1
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "pool3"
type: POOLING
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
@ -97,31 +109,39 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 64
}
}
layers {
layer {
name: "ip2"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
}
}
layers {
layer {
name: "prob"
type: SOFTMAX
type: "Softmax"
bottom: "ip2"
top: "prob"
}

View file

@ -1,41 +1,49 @@
name: "CIFAR10_quick"
layers {
layer {
name: "cifar"
type: DATA
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
data_param {
source: "examples/cifar10/cifar10_train_lmdb"
batch_size: 100
backend: LMDB
}
}
layer {
name: "cifar"
type: "Data"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
include: { phase: TRAIN }
}
layers {
name: "cifar"
type: DATA
top: "data"
top: "label"
data_param {
source: "examples/cifar10/cifar10_test_lmdb"
batch_size: 100
backend: LMDB
}
transform_param {
mean_file: "examples/cifar10/mean.binaryproto"
}
include: { phase: TEST }
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
pad: 2
@ -50,9 +58,9 @@ layers {
}
}
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -61,19 +69,23 @@ layers {
stride: 2
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "pool1"
top: "pool1"
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "pool1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 32
pad: 2
@ -88,15 +100,15 @@ layers {
}
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -105,13 +117,17 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "pool2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 64
pad: 2
@ -126,15 +142,15 @@ layers {
}
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "pool3"
type: POOLING
type: "Pooling"
bottom: "conv3"
top: "pool3"
pooling_param {
@ -143,13 +159,17 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool3"
top: "ip1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 64
weight_filler {
@ -161,13 +181,17 @@ layers {
}
}
}
layers {
layer {
name: "ip2"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
@ -179,17 +203,19 @@ layers {
}
}
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
include {
phase: TEST
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"

View file

@ -1,24 +1,24 @@
name: "CaffeNet"
layers {
layer {
name: "data"
type: IMAGE_DATA
type: "ImageData"
top: "data"
top: "label"
transform_param {
mirror: false
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
image_data_param {
source: "examples/_temp/file_list.txt"
batch_size: 50
new_height: 256
new_width: 256
}
transform_param {
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: false
}
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
@ -27,15 +27,15 @@ layers {
stride: 4
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -44,9 +44,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
@ -55,9 +55,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
convolution_param {
@ -67,15 +67,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -84,9 +84,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
@ -95,9 +95,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
convolution_param {
@ -106,15 +106,15 @@ layers {
kernel_size: 3
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
convolution_param {
@ -124,15 +124,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
convolution_param {
@ -142,15 +142,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -159,79 +159,79 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
inner_product_param {
num_output: 4096
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
inner_product_param {
num_output: 4096
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc8"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
inner_product_param {
num_output: 1000
}
}
layers {
layer {
name: "prob"
type: SOFTMAX
type: "Softmax"
bottom: "fc8"
top: "prob"
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "prob"
bottom: "label"
top: "accuracy"
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"

View file

@ -1,9 +1,17 @@
name: "CaffeNet"
layers {
layer {
name: "data"
type: WINDOW_DATA
type: "WindowData"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
window_data_param {
source: "examples/finetune_pascal_detection/window_file_2007_trainval.txt"
batch_size: 128
@ -13,18 +21,20 @@ layers {
context_pad: 16
crop_mode: "warp"
}
}
layer {
name: "data"
type: "WindowData"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: true
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
include: { phase: TRAIN }
}
layers {
name: "data"
type: WINDOW_DATA
top: "data"
top: "label"
window_data_param {
source: "examples/finetune_pascal_detection/window_file_2007_test.txt"
batch_size: 128
@ -34,22 +44,20 @@ layers {
context_pad: 16
crop_mode: "warp"
}
transform_param {
mirror: true
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
include: { phase: TEST }
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
@ -64,15 +72,15 @@ layers {
}
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -81,9 +89,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
@ -92,15 +100,19 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
@ -116,15 +128,15 @@ layers {
}
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -133,9 +145,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
@ -144,15 +156,19 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -167,21 +183,25 @@ layers {
}
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -197,21 +217,25 @@ layers {
}
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
@ -227,15 +251,15 @@ layers {
}
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -244,15 +268,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -265,30 +293,34 @@ layers {
}
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -301,30 +333,34 @@ layers {
}
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc8_pascal"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc7"
top: "fc8_pascal"
blobs_lr: 10
blobs_lr: 20
weight_decay: 1
weight_decay: 0
param {
lr_mult: 10
decay_mult: 1
}
param {
lr_mult: 20
decay_mult: 0
}
inner_product_param {
num_output: 21
weight_filler {
@ -337,17 +373,19 @@ layers {
}
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "fc8_pascal"
bottom: "label"
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "fc8_pascal"
bottom: "label"
top: "accuracy"
include { phase: TEST }
include {
phase: TEST
}
}

View file

@ -1,35 +1,43 @@
name: "LogisticRegressionNet"
layers {
layer {
name: "data"
type: HDF5_DATA
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
hdf5_data_param {
source: "examples/hdf5_classification/data/train.txt"
batch_size: 10
}
include: { phase: TRAIN }
}
layers {
layer {
name: "data"
type: HDF5_DATA
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TEST
}
hdf5_data_param {
source: "examples/hdf5_classification/data/test.txt"
batch_size: 10
}
include: { phase: TEST }
}
layers {
layer {
name: "fc1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "data"
top: "fc1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 2
weight_filler {
@ -42,18 +50,20 @@ layers {
}
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "fc1"
bottom: "label"
top: "loss"
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "fc1"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
include {
phase: TEST
}
}

View file

@ -1,35 +1,43 @@
name: "LogisticRegressionNet"
layers {
layer {
name: "data"
type: HDF5_DATA
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
hdf5_data_param {
source: "examples/hdf5_classification/data/train.txt"
batch_size: 10
}
include: { phase: TRAIN }
}
layers {
layer {
name: "data"
type: HDF5_DATA
type: "HDF5Data"
top: "data"
top: "label"
include {
phase: TEST
}
hdf5_data_param {
source: "examples/hdf5_classification/data/test.txt"
batch_size: 10
}
include: { phase: TEST }
}
layers {
layer {
name: "fc1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "data"
top: "fc1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 40
weight_filler {
@ -42,21 +50,25 @@ layers {
}
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "fc1"
top: "fc1"
}
layers {
layer {
name: "fc2"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc1"
top: "fc2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 2
weight_filler {
@ -69,18 +81,20 @@ layers {
}
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "fc2"
bottom: "label"
top: "loss"
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "fc2"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
include {
phase: TEST
}
}

View file

@ -5,9 +5,9 @@ input_dim: 1
input_dim: 3
input_dim: 451
input_dim: 451
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
@ -16,15 +16,15 @@ layers {
stride: 4
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -33,9 +33,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
@ -44,9 +44,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
convolution_param {
@ -56,15 +56,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -73,9 +73,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
@ -84,9 +84,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
convolution_param {
@ -95,15 +95,15 @@ layers {
kernel_size: 3
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
convolution_param {
@ -113,15 +113,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
convolution_param {
@ -131,15 +131,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -148,9 +148,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6-conv"
type: CONVOLUTION
type: "Convolution"
bottom: "pool5"
top: "fc6-conv"
convolution_param {
@ -158,24 +158,24 @@ layers {
kernel_size: 6
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6-conv"
top: "fc6-conv"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6-conv"
top: "fc6-conv"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7-conv"
type: CONVOLUTION
type: "Convolution"
bottom: "fc6-conv"
top: "fc7-conv"
convolution_param {
@ -183,24 +183,24 @@ layers {
kernel_size: 1
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7-conv"
top: "fc7-conv"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7-conv"
top: "fc7-conv"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc8-conv"
type: CONVOLUTION
type: "Convolution"
bottom: "fc7-conv"
top: "fc8-conv"
convolution_param {
@ -208,9 +208,9 @@ layers {
kernel_size: 1
}
}
layers {
layer {
name: "prob"
type: SOFTMAX
type: "Softmax"
bottom: "fc8-conv"
top: "prob"
}

View file

@ -4,13 +4,17 @@ input_dim: 64
input_dim: 1
input_dim: 28
input_dim: 28
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
@ -23,9 +27,9 @@ layers {
}
}
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -34,13 +38,17 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "pool1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
@ -53,9 +61,9 @@ layers {
}
}
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -64,13 +72,17 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
@ -81,19 +93,23 @@ layers {
}
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layers {
layer {
name: "ip2"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
@ -104,9 +120,9 @@ layers {
}
}
}
layers {
layer {
name: "prob"
type: SOFTMAX
type: "Softmax"
bottom: "ip2"
top: "prob"
}

View file

@ -1,42 +1,49 @@
name: "LeNet"
layers {
layer {
name: "mnist"
type: DATA
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
}
data_param {
source: "examples/mnist/mnist_train_lmdb"
backend: LMDB
batch_size: 64
backend: LMDB
}
transform_param {
scale: 0.00390625
}
include: { phase: TRAIN }
}
layers {
layer {
name: "mnist"
type: DATA
type: "Data"
top: "data"
top: "label"
data_param {
source: "examples/mnist/mnist_test_lmdb"
backend: LMDB
batch_size: 100
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
include: { phase: TEST }
data_param {
source: "examples/mnist/mnist_test_lmdb"
batch_size: 100
backend: LMDB
}
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
@ -49,9 +56,9 @@ layers {
}
}
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -60,13 +67,17 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "pool1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
@ -79,9 +90,9 @@ layers {
}
}
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -90,13 +101,17 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
@ -107,19 +122,23 @@ layers {
}
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layers {
layer {
name: "ip2"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
@ -130,17 +149,19 @@ layers {
}
}
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "ip2"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
include {
phase: TEST
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "ip2"
bottom: "label"
top: "loss"

View file

@ -1,67 +1,73 @@
name: "MNISTAutoencoder"
layers {
top: "data"
layer {
name: "data"
type: DATA
data_param {
source: "examples/mnist/mnist_train_lmdb"
backend: LMDB
batch_size: 100
type: "Data"
top: "data"
include {
phase: TRAIN
}
transform_param {
scale: 0.0039215684
}
include: { phase: TRAIN }
data_param {
source: "examples/mnist/mnist_train_lmdb"
batch_size: 100
backend: LMDB
}
}
layers {
top: "data"
layer {
name: "data"
type: DATA
data_param {
source: "examples/mnist/mnist_train_lmdb"
backend: LMDB
batch_size: 100
}
transform_param {
scale: 0.0039215684
}
include: {
type: "Data"
top: "data"
include {
phase: TEST
stage: 'test-on-train'
stage: "test-on-train"
}
transform_param {
scale: 0.0039215684
}
data_param {
source: "examples/mnist/mnist_train_lmdb"
batch_size: 100
backend: LMDB
}
}
layers {
top: "data"
layer {
name: "data"
type: DATA
type: "Data"
top: "data"
include {
phase: TEST
stage: "test-on-test"
}
transform_param {
scale: 0.0039215684
}
data_param {
source: "examples/mnist/mnist_test_lmdb"
backend: LMDB
batch_size: 100
}
transform_param {
scale: 0.0039215684
}
include: {
phase: TEST
stage: 'test-on-test'
backend: LMDB
}
}
layers {
layer {
name: "flatdata"
type: "Flatten"
bottom: "data"
top: "flatdata"
name: "flatdata"
type: FLATTEN
}
layers {
layer {
name: "encode1"
type: "InnerProduct"
bottom: "data"
top: "encode1"
name: "encode1"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 1
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 1000
weight_filler {
@ -75,21 +81,25 @@ layers {
}
}
}
layers {
layer {
name: "encode1neuron"
type: "Sigmoid"
bottom: "encode1"
top: "encode1neuron"
name: "encode1neuron"
type: SIGMOID
}
layers {
layer {
name: "encode2"
type: "InnerProduct"
bottom: "encode1neuron"
top: "encode2"
name: "encode2"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 1
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 500
weight_filler {
@ -103,21 +113,25 @@ layers {
}
}
}
layers {
layer {
name: "encode2neuron"
type: "Sigmoid"
bottom: "encode2"
top: "encode2neuron"
name: "encode2neuron"
type: SIGMOID
}
layers {
layer {
name: "encode3"
type: "InnerProduct"
bottom: "encode2neuron"
top: "encode3"
name: "encode3"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 1
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 250
weight_filler {
@ -131,21 +145,25 @@ layers {
}
}
}
layers {
layer {
name: "encode3neuron"
type: "Sigmoid"
bottom: "encode3"
top: "encode3neuron"
name: "encode3neuron"
type: SIGMOID
}
layers {
layer {
name: "encode4"
type: "InnerProduct"
bottom: "encode3neuron"
top: "encode4"
name: "encode4"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 1
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 30
weight_filler {
@ -159,15 +177,19 @@ layers {
}
}
}
layers {
layer {
name: "decode4"
type: "InnerProduct"
bottom: "encode4"
top: "decode4"
name: "decode4"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 1
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 250
weight_filler {
@ -181,21 +203,25 @@ layers {
}
}
}
layers {
layer {
name: "decode4neuron"
type: "Sigmoid"
bottom: "decode4"
top: "decode4neuron"
name: "decode4neuron"
type: SIGMOID
}
layers {
layer {
name: "decode3"
type: "InnerProduct"
bottom: "decode4neuron"
top: "decode3"
name: "decode3"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 1
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 500
weight_filler {
@ -209,21 +235,25 @@ layers {
}
}
}
layers {
layer {
name: "decode3neuron"
type: "Sigmoid"
bottom: "decode3"
top: "decode3neuron"
name: "decode3neuron"
type: SIGMOID
}
layers {
layer {
name: "decode2"
type: "InnerProduct"
bottom: "decode3neuron"
top: "decode2"
name: "decode2"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 1
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 1000
weight_filler {
@ -237,21 +267,25 @@ layers {
}
}
}
layers {
layer {
name: "decode2neuron"
type: "Sigmoid"
bottom: "decode2"
top: "decode2neuron"
name: "decode2neuron"
type: SIGMOID
}
layers {
layer {
name: "decode1"
type: "InnerProduct"
bottom: "decode2neuron"
top: "decode1"
name: "decode1"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 1
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 1
decay_mult: 0
}
inner_product_param {
num_output: 784
weight_filler {
@ -265,25 +299,25 @@ layers {
}
}
}
layers {
layer {
name: "loss"
type: "SigmoidCrossEntropyLoss"
bottom: "decode1"
bottom: "flatdata"
top: "cross_entropy_loss"
name: "loss"
type: SIGMOID_CROSS_ENTROPY_LOSS
loss_weight: 1
}
layers {
layer {
name: "decode1neuron"
type: "Sigmoid"
bottom: "decode1"
top: "decode1neuron"
name: "decode1neuron"
type: SIGMOID
}
layers {
layer {
name: "loss"
type: "EuclideanLoss"
bottom: "decode1neuron"
bottom: "flatdata"
top: "l2_error"
name: "loss"
type: EUCLIDEAN_LOSS
loss_weight: 0
}

View file

@ -4,23 +4,26 @@ input_dim: 10000
input_dim: 1
input_dim: 28
input_dim: 28
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
stride: 1
}
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -29,22 +32,26 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "pool1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
stride: 1
}
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -53,42 +60,53 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 500
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layers {
layer {
name: "ip2"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 10
}
}
layers {
layer {
name: "feat"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip2"
top: "feat"
blobs_lr: 1
blobs_lr: 2
param {
lr_mult: 1
}
param {
lr_mult: 2
}
inner_product_param {
num_output: 2
}

View file

@ -1,50 +1,60 @@
name: "mnist_siamese_train_test"
layers {
layer {
name: "pair_data"
type: DATA
type: "Data"
top: "pair_data"
top: "sim"
include {
phase: TRAIN
}
transform_param {
scale: 0.00390625
}
data_param {
source: "examples/siamese/mnist_siamese_train_leveldb"
scale: 0.00390625
batch_size: 64
}
include: { phase: TRAIN }
}
layers {
layer {
name: "pair_data"
type: DATA
type: "Data"
top: "pair_data"
top: "sim"
include {
phase: TEST
}
transform_param {
scale: 0.00390625
}
data_param {
source: "examples/siamese/mnist_siamese_test_leveldb"
scale: 0.00390625
batch_size: 100
}
include: { phase: TEST }
}
layers {
name: "slice_pair"
type: SLICE
bottom: "pair_data"
top: "data"
top: "data_p"
slice_param {
slice_dim: 1
slice_point: 1
}
layer {
name: "slice_pair"
type: "Slice"
bottom: "pair_data"
top: "data"
top: "data_p"
slice_param {
slice_dim: 1
slice_point: 1
}
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
param {
name: "conv1_w"
lr_mult: 1
}
param {
name: "conv1_b"
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
@ -56,12 +66,10 @@ layers {
type: "constant"
}
}
param: "conv1_w"
param: "conv1_b"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -70,13 +78,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "pool1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
param {
name: "conv2_w"
lr_mult: 1
}
param {
name: "conv2_b"
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
@ -88,12 +102,10 @@ layers {
type: "constant"
}
}
param: "conv2_w"
param: "conv2_b"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -102,13 +114,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool2"
top: "ip1"
blobs_lr: 1
blobs_lr: 2
param {
name: "ip1_w"
lr_mult: 1
}
param {
name: "ip1_b"
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
@ -118,22 +136,26 @@ layers {
type: "constant"
}
}
param: "ip1_w"
param: "ip1_b"
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "ip1"
top: "ip1"
}
layers {
layer {
name: "ip2"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip1"
top: "ip2"
blobs_lr: 1
blobs_lr: 2
param {
name: "ip2_w"
lr_mult: 1
}
param {
name: "ip2_b"
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
@ -143,17 +165,20 @@ layers {
type: "constant"
}
}
param: "ip2_w"
param: "ip2_b"
}
layers {
layer {
name: "feat"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip2"
top: "feat"
blobs_lr: 1
blobs_lr: 2
param {
name: "feat_w"
lr_mult: 1
}
param {
name: "feat_b"
lr_mult: 2
}
inner_product_param {
num_output: 2
weight_filler {
@ -163,19 +188,20 @@ layers {
type: "constant"
}
}
param: "feat_w"
param: "feat_b"
}
layers {
layer {
name: "conv1_p"
type: CONVOLUTION
type: "Convolution"
bottom: "data_p"
top: "conv1_p"
blobs_lr: 1
blobs_lr: 2
param {
name: "conv1_w"
lr_mult: 1
}
param {
name: "conv1_b"
lr_mult: 2
}
convolution_param {
num_output: 20
kernel_size: 5
@ -187,12 +213,10 @@ layers {
type: "constant"
}
}
param: "conv1_w"
param: "conv1_b"
}
layers {
layer {
name: "pool1_p"
type: POOLING
type: "Pooling"
bottom: "conv1_p"
top: "pool1_p"
pooling_param {
@ -201,13 +225,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv2_p"
type: CONVOLUTION
type: "Convolution"
bottom: "pool1_p"
top: "conv2_p"
blobs_lr: 1
blobs_lr: 2
param {
name: "conv2_w"
lr_mult: 1
}
param {
name: "conv2_b"
lr_mult: 2
}
convolution_param {
num_output: 50
kernel_size: 5
@ -219,12 +249,10 @@ layers {
type: "constant"
}
}
param: "conv2_w"
param: "conv2_b"
}
layers {
layer {
name: "pool2_p"
type: POOLING
type: "Pooling"
bottom: "conv2_p"
top: "pool2_p"
pooling_param {
@ -233,13 +261,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "ip1_p"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool2_p"
top: "ip1_p"
blobs_lr: 1
blobs_lr: 2
param {
name: "ip1_w"
lr_mult: 1
}
param {
name: "ip1_b"
lr_mult: 2
}
inner_product_param {
num_output: 500
weight_filler {
@ -249,22 +283,26 @@ layers {
type: "constant"
}
}
param: "ip1_w"
param: "ip1_b"
}
layers {
layer {
name: "relu1_p"
type: RELU
type: "ReLU"
bottom: "ip1_p"
top: "ip1_p"
}
layers {
layer {
name: "ip2_p"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip1_p"
top: "ip2_p"
blobs_lr: 1
blobs_lr: 2
param {
name: "ip2_w"
lr_mult: 1
}
param {
name: "ip2_b"
lr_mult: 2
}
inner_product_param {
num_output: 10
weight_filler {
@ -274,17 +312,20 @@ layers {
type: "constant"
}
}
param: "ip2_w"
param: "ip2_b"
}
layers {
layer {
name: "feat_p"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "ip2_p"
top: "feat_p"
blobs_lr: 1
blobs_lr: 2
param {
name: "feat_w"
lr_mult: 1
}
param {
name: "feat_b"
lr_mult: 2
}
inner_product_param {
num_output: 2
weight_filler {
@ -294,20 +335,15 @@ layers {
type: "constant"
}
}
param: "feat_w"
param: "feat_b"
}
layers {
name: "loss"
type: CONTRASTIVE_LOSS
contrastive_loss_param {
margin: 1.0
}
bottom: "feat"
bottom: "feat_p"
bottom: "sim"
top: "loss"
layer {
name: "loss"
type: "ContrastiveLoss"
bottom: "feat"
bottom: "feat_p"
bottom: "sim"
top: "loss"
contrastive_loss_param {
margin: 1
}
}

View file

@ -4,241 +4,273 @@ input_dim: 10
input_dim: 3
input_dim: 227
input_dim: 227
layers {
layer {
name: "conv1"
type: CONVOLUTION
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
type: "Convolution"
bottom: "data"
top: "conv1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
stride: 4
}
bottom: "data"
top: "conv1"
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
bottom: "conv1"
top: "norm1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
bottom: "norm1"
top: "pool1"
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
type: "Convolution"
bottom: "pool1"
top: "conv2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
kernel_size: 5
group: 2
}
bottom: "pool1"
top: "conv2"
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
local_size: 5
alpha: 0.0001
beta: 0.75
}
bottom: "conv2"
top: "norm2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
bottom: "norm2"
top: "pool2"
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
type: "Convolution"
bottom: "pool2"
top: "conv3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
}
bottom: "pool2"
top: "conv3"
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
type: "Convolution"
bottom: "conv3"
top: "conv4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
kernel_size: 3
group: 2
}
bottom: "conv3"
top: "conv4"
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
type: "Convolution"
bottom: "conv4"
top: "conv5"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
group: 2
}
bottom: "conv4"
top: "conv5"
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 3
stride: 2
}
bottom: "conv5"
top: "pool5"
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 4096
}
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
}
layers {
name: "relu6"
type: RELU
bottom: "fc6"
top: "fc6"
}
layers {
name: "drop6"
type: DROPOUT
dropout_param {
dropout_ratio: 0.5
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
bottom: "fc6"
top: "fc6"
}
layers {
name: "fc7"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
inner_product_param {
num_output: 4096
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc7"
top: "fc6"
}
layers {
name: "relu7"
type: RELU
bottom: "fc7"
top: "fc7"
}
layers {
name: "drop7"
type: DROPOUT
layer {
name: "drop6"
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc7"
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layer {
name: "fc8"
type: INNER_PRODUCT
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000
}
bottom: "fc7"
top: "fc8"
}
layers {
layer {
name: "prob"
type: SOFTMAX
type: "Softmax"
bottom: "fc8"
top: "prob"
}

View file

@ -1,47 +1,55 @@
name: "AlexNet"
layers {
layer {
name: "data"
type: DATA
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
data_param {
source: "examples/imagenet/ilsvrc12_train_lmdb"
backend: LMDB
batch_size: 256
backend: LMDB
}
transform_param {
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: true
}
include: { phase: TRAIN }
}
layers {
layer {
name: "data"
type: DATA
type: "Data"
top: "data"
top: "label"
data_param {
source: "examples/imagenet/ilsvrc12_val_lmdb"
backend: LMDB
batch_size: 50
include {
phase: TEST
}
transform_param {
mirror: false
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: false
}
include: { phase: TEST }
data_param {
source: "examples/imagenet/ilsvrc12_val_lmdb"
batch_size: 50
backend: LMDB
}
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
@ -56,15 +64,15 @@ layers {
}
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "conv1"
top: "norm1"
lrn_param {
@ -73,9 +81,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "norm1"
top: "pool1"
pooling_param {
@ -84,15 +92,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "pool1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
@ -108,15 +120,15 @@ layers {
}
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "conv2"
top: "norm2"
lrn_param {
@ -125,9 +137,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "norm2"
top: "pool2"
pooling_param {
@ -136,15 +148,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "pool2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -159,21 +175,25 @@ layers {
}
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -189,21 +209,25 @@ layers {
}
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
@ -219,15 +243,15 @@ layers {
}
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -236,15 +260,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -257,30 +285,34 @@ layers {
}
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -293,30 +325,34 @@ layers {
}
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc8"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000
weight_filler {
@ -329,17 +365,19 @@ layers {
}
}
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
include {
phase: TEST
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -4,9 +4,9 @@ input_dim: 10
input_dim: 3
input_dim: 227
input_dim: 227
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
@ -15,15 +15,15 @@ layers {
stride: 4
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -32,9 +32,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
@ -43,9 +43,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
convolution_param {
@ -55,15 +55,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -72,9 +72,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
@ -83,9 +83,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
convolution_param {
@ -94,15 +94,15 @@ layers {
kernel_size: 3
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
convolution_param {
@ -112,15 +112,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
convolution_param {
@ -130,15 +130,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -147,66 +147,66 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
inner_product_param {
num_output: 4096
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
inner_product_param {
num_output: 4096
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc8"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
inner_product_param {
num_output: 1000
}
}
layers {
layer {
name: "prob"
type: SOFTMAX
type: "Softmax"
bottom: "fc8"
top: "prob"
}

View file

@ -1,43 +1,43 @@
name: "CaffeNet"
layers {
layer {
name: "data"
type: DATA
type: "Data"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
# mean pixel / channel-wise mean instead of mean image
# transform_param {
# crop_size: 227
# mean_value: 104
# mean_value: 117
# mean_value: 123
# mirror: true
# }
data_param {
source: "examples/imagenet/ilsvrc12_train_lmdb"
backend: LMDB
batch_size: 256
backend: LMDB
}
transform_param {
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: true
}
# mean pixel / channel-wise mean instead of mean image
# transform_param {
# crop_size: 227
# mean_value: 104
# mean_value: 117
# mean_value: 123
# mirror: true
# }
include: { phase: TRAIN }
}
layers {
layer {
name: "data"
type: DATA
type: "Data"
top: "data"
top: "label"
data_param {
source: "examples/imagenet/ilsvrc12_val_lmdb"
backend: LMDB
batch_size: 50
include {
phase: TEST
}
transform_param {
mirror: false
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: false
}
# mean pixel / channel-wise mean instead of mean image
# transform_param {
@ -47,17 +47,25 @@ layers {
# mean_value: 123
# mirror: true
# }
include: { phase: TEST }
data_param {
source: "examples/imagenet/ilsvrc12_val_lmdb"
batch_size: 50
backend: LMDB
}
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
@ -72,15 +80,15 @@ layers {
}
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -89,9 +97,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
@ -100,15 +108,19 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
@ -124,15 +136,15 @@ layers {
}
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -141,9 +153,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
@ -152,15 +164,19 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -175,21 +191,25 @@ layers {
}
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -205,21 +225,25 @@ layers {
}
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
@ -235,15 +259,15 @@ layers {
}
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -252,15 +276,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -273,30 +301,34 @@ layers {
}
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -309,30 +341,34 @@ layers {
}
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc8"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc7"
top: "fc8"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 1000
weight_filler {
@ -345,17 +381,19 @@ layers {
}
}
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "fc8"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
include {
phase: TEST
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "fc8"
bottom: "label"
top: "loss"
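
The per-blob learning-rate and decay scalars are restructured the same way throughout this file: the repeated V1 fields "blobs_lr" and "weight_decay" become one "param" block per parameter blob, in the same order. A sketch of the correspondence, assuming the usual Caffe convention that the first blob holds the weights and the second the bias:

  # V1: scalars listed in blob order
  blobs_lr: 1        # weights
  blobs_lr: 2        # bias
  weight_decay: 1    # weights
  weight_decay: 0    # bias

  # V2: one param block per blob
  param {
    lr_mult: 1       # weights
    decay_mult: 1
  }
  param {
    lr_mult: 2       # bias
    decay_mult: 0
  }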

View file

@ -4,9 +4,9 @@ input_dim: 10
input_dim: 3
input_dim: 227
input_dim: 227
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
convolution_param {
@ -15,15 +15,15 @@ layers {
stride: 4
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -32,9 +32,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
@ -43,9 +43,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
convolution_param {
@ -55,15 +55,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -72,9 +72,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
@ -83,9 +83,9 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
convolution_param {
@ -94,15 +94,15 @@ layers {
kernel_size: 3
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
convolution_param {
@ -112,15 +112,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
convolution_param {
@ -130,15 +130,15 @@ layers {
group: 2
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -147,48 +147,48 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
inner_product_param {
num_output: 4096
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
inner_product_param {
num_output: 4096
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
@ -196,9 +196,9 @@ layers {
}
}
# R-CNN classification layer made from R-CNN ILSVRC13 SVMs.
layers {
layer {
name: "fc-rcnn"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc7"
top: "fc-rcnn"
inner_product_param {
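
Note that the deploy net above keeps its Dropout layers ("drop6", "drop7") with dropout_ratio: 0.5 even though it is inference-only. This is safe because Caffe's Dropout uses inverted scaling: it masks and rescales activations during TRAIN and acts as an identity during TEST, so the block below (copied from the hunks above, with an explanatory comment added) changes nothing at deploy time:

  layer {
    name: "drop7"
    type: "Dropout"
    bottom: "fc7"
    top: "fc7"
    dropout_param {
      dropout_ratio: 0.5  # applied only in the TRAIN phase; identity at TEST
    }
  }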

View file

@ -4,15 +4,19 @@ input_dim: 10
input_dim: 3
input_dim: 227
input_dim: 227
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
@ -27,15 +31,15 @@ layers {
}
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -44,9 +48,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
@ -55,15 +59,19 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
@ -79,15 +87,15 @@ layers {
}
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -96,9 +104,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
@ -107,15 +115,19 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -130,21 +142,25 @@ layers {
}
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -160,21 +176,25 @@ layers {
}
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
@ -190,15 +210,15 @@ layers {
}
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -207,15 +227,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -228,31 +252,35 @@ layers {
}
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
# Note that blobs_lr can be set to 0 to disable any fine-tuning of this, and any other, layer
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
# Note that lr_mult can be set to 0 to disable any fine-tuning of this, and any other, layer
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -265,31 +293,35 @@ layers {
}
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc8_flickr"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc7"
top: "fc8_flickr"
# blobs_lr is set to higher than for other layers, because this layer is starting from random while the others are already trained
blobs_lr: 10
blobs_lr: 20
weight_decay: 1
weight_decay: 0
# lr_mult is set to higher than for other layers, because this layer is starting from random while the others are already trained
param {
lr_mult: 10
decay_mult: 1
}
param {
lr_mult: 20
decay_mult: 0
}
inner_product_param {
num_output: 20
weight_filler {
@ -302,9 +334,9 @@ layers {
}
}
}
layers {
layer {
name: "prob"
type: SOFTMAX
type: "Softmax"
bottom: "fc8_flickr"
top: "prob"
}
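
The comments in this file describe the two ends of the fine-tuning dial: lr_mult: 0 freezes a layer entirely, while the freshly initialized "fc8_flickr" gets lr_mult 10/20 because it starts from random weights. A hypothetical sketch of the freezing idiom the comments refer to (layer body borrowed from "fc7" above; the zeroed multipliers are the illustrative part):

  layer {
    name: "fc7"
    type: "InnerProduct"
    bottom: "fc6"
    top: "fc7"
    param {
      lr_mult: 0      # weights: no updates during fine-tuning
      decay_mult: 0
    }
    param {
      lr_mult: 0      # bias: no updates during fine-tuning
      decay_mult: 0
    }
    inner_product_param {
      num_output: 4096
    }
  }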

View file

@ -1,49 +1,57 @@
name: "FlickrStyleCaffeNet"
layers {
layer {
name: "data"
type: IMAGE_DATA
type: "ImageData"
top: "data"
top: "label"
include {
phase: TRAIN
}
transform_param {
mirror: true
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
image_data_param {
source: "data/flickr_style/train.txt"
batch_size: 50
new_height: 256
new_width: 256
}
transform_param {
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: true
}
include: { phase: TRAIN }
}
layers {
layer {
name: "data"
type: IMAGE_DATA
type: "ImageData"
top: "data"
top: "label"
include {
phase: TEST
}
transform_param {
mirror: false
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
}
image_data_param {
source: "data/flickr_style/test.txt"
batch_size: 50
new_height: 256
new_width: 256
}
transform_param {
crop_size: 227
mean_file: "data/ilsvrc12/imagenet_mean.binaryproto"
mirror: false
}
include: { phase: TEST }
}
layers {
layer {
name: "conv1"
type: CONVOLUTION
type: "Convolution"
bottom: "data"
top: "conv1"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 96
kernel_size: 11
@ -58,15 +66,15 @@ layers {
}
}
}
layers {
layer {
name: "relu1"
type: RELU
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layers {
layer {
name: "pool1"
type: POOLING
type: "Pooling"
bottom: "conv1"
top: "pool1"
pooling_param {
@ -75,9 +83,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm1"
type: LRN
type: "LRN"
bottom: "pool1"
top: "norm1"
lrn_param {
@ -86,15 +94,19 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv2"
type: CONVOLUTION
type: "Convolution"
bottom: "norm1"
top: "conv2"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 2
@ -110,15 +122,15 @@ layers {
}
}
}
layers {
layer {
name: "relu2"
type: RELU
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layers {
layer {
name: "pool2"
type: POOLING
type: "Pooling"
bottom: "conv2"
top: "pool2"
pooling_param {
@ -127,9 +139,9 @@ layers {
stride: 2
}
}
layers {
layer {
name: "norm2"
type: LRN
type: "LRN"
bottom: "pool2"
top: "norm2"
lrn_param {
@ -138,15 +150,19 @@ layers {
beta: 0.75
}
}
layers {
layer {
name: "conv3"
type: CONVOLUTION
type: "Convolution"
bottom: "norm2"
top: "conv3"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -161,21 +177,25 @@ layers {
}
}
}
layers {
layer {
name: "relu3"
type: RELU
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layers {
layer {
name: "conv4"
type: CONVOLUTION
type: "Convolution"
bottom: "conv3"
top: "conv4"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 384
pad: 1
@ -191,21 +211,25 @@ layers {
}
}
}
layers {
layer {
name: "relu4"
type: RELU
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layers {
layer {
name: "conv5"
type: CONVOLUTION
type: "Convolution"
bottom: "conv4"
top: "conv5"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
@ -221,15 +245,15 @@ layers {
}
}
}
layers {
layer {
name: "relu5"
type: RELU
type: "ReLU"
bottom: "conv5"
top: "conv5"
}
layers {
layer {
name: "pool5"
type: POOLING
type: "Pooling"
bottom: "conv5"
top: "pool5"
pooling_param {
@ -238,15 +262,19 @@ layers {
stride: 2
}
}
layers {
layer {
name: "fc6"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "pool5"
top: "fc6"
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -259,31 +287,35 @@ layers {
}
}
}
layers {
layer {
name: "relu6"
type: RELU
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layers {
layer {
name: "drop6"
type: DROPOUT
type: "Dropout"
bottom: "fc6"
top: "fc6"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc7"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc6"
top: "fc7"
# Note that blobs_lr can be set to 0 to disable any fine-tuning of this, and any other, layer
blobs_lr: 1
blobs_lr: 2
weight_decay: 1
weight_decay: 0
# Note that lr_mult can be set to 0 to disable any fine-tuning of this, and any other, layer
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
inner_product_param {
num_output: 4096
weight_filler {
@ -296,31 +328,35 @@ layers {
}
}
}
layers {
layer {
name: "relu7"
type: RELU
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layers {
layer {
name: "drop7"
type: DROPOUT
type: "Dropout"
bottom: "fc7"
top: "fc7"
dropout_param {
dropout_ratio: 0.5
}
}
layers {
layer {
name: "fc8_flickr"
type: INNER_PRODUCT
type: "InnerProduct"
bottom: "fc7"
top: "fc8_flickr"
# blobs_lr is set to higher than for other layers, because this layer is starting from random while the others are already trained
blobs_lr: 10
blobs_lr: 20
weight_decay: 1
weight_decay: 0
# lr_mult is set to higher than for other layers, because this layer is starting from random while the others are already trained
param {
lr_mult: 10
decay_mult: 1
}
param {
lr_mult: 20
decay_mult: 0
}
inner_product_param {
num_output: 20
weight_filler {
@ -333,17 +369,19 @@ layers {
}
}
}
layers {
layer {
name: "loss"
type: SOFTMAX_LOSS
type: "SoftmaxWithLoss"
bottom: "fc8_flickr"
bottom: "label"
}
layers {
layer {
name: "accuracy"
type: ACCURACY
type: "Accuracy"
bottom: "fc8_flickr"
bottom: "label"
top: "accuracy"
include: { phase: TEST }
include {
phase: TEST
}
}
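
Collected from the hunks above, the full set of layer-type renames visible in this section (V1 enum on the left, V2 string on the right):

  DATA           ->  "Data"
  IMAGE_DATA     ->  "ImageData"
  CONVOLUTION    ->  "Convolution"
  POOLING        ->  "Pooling"
  RELU           ->  "ReLU"
  LRN            ->  "LRN"
  INNER_PRODUCT  ->  "InnerProduct"
  DROPOUT        ->  "Dropout"
  SOFTMAX        ->  "Softmax"
  SOFTMAX_LOSS   ->  "SoftmaxWithLoss"
  ACCURACY       ->  "Accuracy"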