2015-08-16 1 views
0

J'ai entraîné le modèle AlexNet de Caffe afin de le tester avec un modèle plus efficace. Puisque mon entraînement porte sur des piétons, ma taille d'image est de 64 x 80 pixels. J'ai modifié les fichiers prototxt pour qu'ils correspondent à la taille de mes images d'entraînement. Selon ce tutoriel, il vaut mieux régler la taille du filtre de convolution pour qu'elle corresponde à la taille de l'image d'entrée. Mes tailles de filtres présentent donc de légères modifications par rapport aux fichiers prototxt fournis avec l'AlexNet d'origine (je les ai entraînés et testés avec les fichiers prototxt originaux d'AlexNet et j'ai obtenu la même erreur, sur la même ligne, que celle mentionnée ci-dessous) : erreur lors du test du modèle AlexNet de Caffe.

Selon mon calcul, la taille d'image après le passage de chaque couche sera

80x64x3 -> CONV1 -> 38x30x96
38x30x96 -> Pool1 -> 18x14x96
18x14x96 -> CONV2 -> 19x15x256
19x15x256 -> Pool2 -> 9x7x256
9x7x256 -> Conv3 -> 9x7x384
9x7x384 -> Conv4 -> 9x7x384
9x7x384 -> Conv5 -> 9x7x256
9x7x256 -> Pool5 -> 4x3x256

L'erreur est sur la couche fc6 et le numéro de ligne 714 de test_predict_imagenet.cpp. J'utilise le fichier test_predict_imagenet.cpp pour tester le modèle.

CHECK_EQ(target_blobs[j]->width(), source_layer.blobs(j).width()); 

L'erreur est

F0816 22:58:28.328047 3432 net.cpp:714] Check failed: target_blobs[j]->width() 
== source_layer.blobs(j).width() (5120 vs. 1024) 

Je ne comprends pas pourquoi il en est ainsi.

Mes deux fichiers prototxt sont présentés ci-dessous.

train_val.prototxt 
name: "AlexNet" 
layers { 
    name: "data" 
    type: DATA 
    top: "data" 
    top: "label" 
    data_param { 
    source: "../../examples/Alexnet/Alexnet_train_leveldb" 
    batch_size: 200 
    } 
    transform_param { 
    crop_size: 48 
    mean_file: "../../examples/Alexnet/mean.binaryproto" 
    mirror: true 
    } 
    include: { phase: TRAIN } 
} 
layers { 
    name: "data" 
    type: DATA 
    top: "data" 
    top: "label" 
    data_param { 
    source: "../../examples/Alexnet/Alexnet_test_leveldb" 
    batch_size: 200 
    } 
    transform_param { 
    crop_size: 48 
    mean_file: "../../examples/Alexnet/mean.binaryproto" 
    mirror: false 
    } 
    include: { phase: TEST } 
} 
layers { 
    name: "conv1" 
    type: CONVOLUTION 
    bottom: "data" 
    top: "conv1" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 96 
    kernel_size: 6 
    stride: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0 
    } 
    } 
} 
layers { 
    name: "relu1" 
    type: RELU 
    bottom: "conv1" 
    top: "conv1" 
} 
layers { 
    name: "norm1" 
    type: LRN 
    bottom: "conv1" 
    top: "norm1" 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
} 
layers { 
    name: "pool1" 
    type: POOLING 
    bottom: "norm1" 
    top: "pool1" 
    pooling_param { 
    pool: MAX 
    kernel_size: 4 
    stride: 2 
    } 
} 
layers { 
    name: "conv2" 
    type: CONVOLUTION 
    bottom: "pool1" 
    top: "conv2" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 256 
    pad: 2 
    kernel_size: 4 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layers { 
    name: "relu2" 
    type: RELU 
    bottom: "conv2" 
    top: "conv2" 
} 
layers { 
    name: "norm2" 
    type: LRN 
    bottom: "conv2" 
    top: "norm2" 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
} 
layers { 
    name: "pool2" 
    type: POOLING 
    bottom: "norm2" 
    top: "pool2" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layers { 
    name: "conv3" 
    type: CONVOLUTION 
    bottom: "pool2" 
    top: "conv3" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0 
    } 
    } 
} 
layers { 
    name: "relu3" 
    type: RELU 
    bottom: "conv3" 
    top: "conv3" 
} 
layers { 
    name: "conv4" 
    type: CONVOLUTION 
    bottom: "conv3" 
    top: "conv4" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layers { 
    name: "relu4" 
    type: RELU 
    bottom: "conv4" 
    top: "conv4" 
} 
layers { 
    name: "conv5" 
    type: CONVOLUTION 
    bottom: "conv4" 
    top: "conv5" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layers { 
    name: "relu5" 
    type: RELU 
    bottom: "conv5" 
    top: "conv5" 
} 
layers { 
    name: "pool5" 
    type: POOLING 
    bottom: "conv5" 
    top: "pool5" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layers { 
    name: "fc6" 
    type: INNER_PRODUCT 
    bottom: "pool5" 
    top: "fc6" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 4096 
    weight_filler { 
     type: "gaussian" 
     std: 0.005 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layers { 
    name: "relu6" 
    type: RELU 
    bottom: "fc6" 
    top: "fc6" 
} 
layers { 
    name: "drop6" 
    type: DROPOUT 
    bottom: "fc6" 
    top: "fc6" 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
} 
layers { 
    name: "fc7" 
    type: INNER_PRODUCT 
    bottom: "fc6" 
    top: "fc7" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 4096 
    weight_filler { 
     type: "gaussian" 
     std: 0.005 
    } 
    bias_filler { 
     type: "constant" 
     value: 0.1 
    } 
    } 
} 
layers { 
    name: "relu7" 
    type: RELU 
    bottom: "fc7" 
    top: "fc7" 
} 
layers { 
    name: "drop7" 
    type: DROPOUT 
    bottom: "fc7" 
    top: "fc7" 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
} 
layers { 
    name: "fc8" 
    type: INNER_PRODUCT 
    bottom: "fc7" 
    top: "fc8" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0 
    } 
    } 
} 
layers { 
    name: "accuracy" 
    type: ACCURACY 
    bottom: "fc8" 
    bottom: "label" 
    top: "accuracy" 
    include: { phase: TEST } 
} 
layers { 
    name: "loss" 
    type: SOFTMAX_LOSS 
    bottom: "fc8" 
    bottom: "label" 
    top: "loss" 
} 

Ceci est le fichier de test pour le modèle.

deploy.txt 
name: "AlexNet" 
layers 
{ 
    name: "data" 
    type: MEMORY_DATA 
    top: "data" 
    top: "label" 
    memory_data_param 
    { 
    batch_size: 1 
    channels: 3 
    height: 80 
    width: 64 
    } 
    transform_param 
    { 
    crop_size: 64 
    mirror: false 
    mean_file: "../../examples/Alexnet/mean.binaryproto" 
    } 
} 

layers { 
    name: "conv1" 
    type: CONVOLUTION 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 96 
    kernel_size: 6 
    stride: 2 
    } 
    bottom: "data" 
    top: "conv1" 
} 
layers { 
    name: "relu1" 
    type: RELU 
    bottom: "conv1" 
    top: "conv1" 
} 
layers { 
    name: "norm1" 
    type: LRN 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
    bottom: "conv1" 
    top: "norm1" 
} 
layers { 
    name: "pool1" 
    type: POOLING 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
    bottom: "norm1" 
    top: "pool1" 
} 
layers { 
    name: "conv2" 
    type: CONVOLUTION 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 256 
    pad: 2 
    kernel_size: 4 
    group: 2 
    } 
    bottom: "pool1" 
    top: "conv2" 
} 
layers { 
    name: "relu2" 
    type: RELU 
    bottom: "conv2" 
    top: "conv2" 
} 
layers { 
    name: "norm2" 
    type: LRN 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
    bottom: "conv2" 
    top: "norm2" 
} 
layers { 
    name: "pool2" 
    type: POOLING 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
    bottom: "norm2" 
    top: "pool2" 
} 
layers { 
    name: "conv3" 
    type: CONVOLUTION 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    } 
    bottom: "pool2" 
    top: "conv3" 
} 
layers { 
    name: "relu3" 
    type: RELU 
    bottom: "conv3" 
    top: "conv3" 
} 
layers { 
    name: "conv4" 
    type: CONVOLUTION 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    group: 2 
    } 
    bottom: "conv3" 
    top: "conv4" 
} 
layers { 
    name: "relu4" 
    type: RELU 
    bottom: "conv4" 
    top: "conv4" 
} 
layers { 
    name: "conv5" 
    type: CONVOLUTION 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    group: 2 
    } 
    bottom: "conv4" 
    top: "conv5" 
} 
layers { 
    name: "relu5" 
    type: RELU 
    bottom: "conv5" 
    top: "conv5" 
} 
layers { 
    name: "pool5" 
    type: POOLING 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
    bottom: "conv5" 
    top: "pool5" 
} 
layers { 
    name: "fc6" 
    type: INNER_PRODUCT 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 4096 
    } 
    bottom: "pool5" 
    top: "fc6" 
} 
layers { 
    name: "relu6" 
    type: RELU 
    bottom: "fc6" 
    top: "fc6" 
} 
layers { 
    name: "drop6" 
    type: DROPOUT 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
    bottom: "fc6" 
    top: "fc6" 
} 
layers { 
    name: "fc7" 
    type: INNER_PRODUCT 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 4096 
    } 
    bottom: "fc6" 
    top: "fc7" 
} 
layers { 
    name: "relu7" 
    type: RELU 
    bottom: "fc7" 
    top: "fc7" 
} 
layers { 
    name: "drop7" 
    type: DROPOUT 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
    bottom: "fc7" 
    top: "fc7" 
} 
layers { 
    name: "fc8" 
    type: INNER_PRODUCT 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 2 
    } 
    bottom: "fc7" 
    top: "fc8" 
} 
layers { 
    name: "prob" 
    type: SOFTMAX 
    bottom: "fc8" 
    top: "prob" 
} 

Quel est le problème avec cette erreur?

Répondre

0

Pour ceux qui rencontrent le même problème que moi, veuillez regarder les fichiers prototxt ci-dessous. Certaines modifications ont été apportées par rapport aux fichiers prototxt d'origine fournis dans les dossiers téléchargés. J'ai utilisé des images de taille 80x64 en entrée pour l'entraînement et les tests.

Train_val.prototxt 
name: "AlexNet" 
layers { 
    name: "data" 
    type: DATA 
    top: "data" 
    top: "label" 
    data_param { 
    source: "../../examples/Alexnet_2/Alexnet_train_leveldb" 
    batch_size: 100 
    } 
    transform_param { 
    mean_file: "../../examples/Alexnet_2/mean.binaryproto" 

    } 
    include: { phase: TRAIN } 
} 
layers { 
    name: "data" 
    type: DATA 
    top: "data" 
    top: "label" 
    data_param { 
    source: "../../examples/Alexnet_2/Alexnet_test_leveldb" 
    batch_size: 100 
    } 
    transform_param { 
     mean_file: "../../examples/Alexnet_2/mean.binaryproto" 
     } 
    include: { phase: TEST } 
} 
layers { 
    name: "conv1" 
    type: CONVOLUTION 
    bottom: "data" 
    top: "conv1" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 96 
    kernel_size: 11 
    stride: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0 
    } 
    } 
} 
layers { 
    name: "relu1" 
    type: RELU 
    bottom: "conv1" 
    top: "conv1" 
} 
layers { 
    name: "pool1" 
    type: POOLING 
    bottom: "conv1" 
    top: "pool1" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layers { 
    name: "norm1" 
    type: LRN 
    bottom: "pool1" 
    top: "norm1" 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
} 
layers { 
    name: "conv2" 
    type: CONVOLUTION 
    bottom: "norm1" 
    top: "conv2" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 256 
    pad: 2 
    kernel_size: 5 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 1 
    } 
    } 
} 
layers { 
    name: "relu2" 
    type: RELU 
    bottom: "conv2" 
    top: "conv2" 
} 
layers { 
    name: "pool2" 
    type: POOLING 
    bottom: "conv2" 
    top: "pool2" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layers { 
    name: "norm2" 
    type: LRN 
    bottom: "pool2" 
    top: "norm2" 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
} 
layers { 
    name: "conv3" 
    type: CONVOLUTION 
    bottom: "norm2" 
    top: "conv3" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0 
    } 
    } 
} 
layers { 
    name: "relu3" 
    type: RELU 
    bottom: "conv3" 
    top: "conv3" 
} 
layers { 
    name: "conv4" 
    type: CONVOLUTION 
    bottom: "conv3" 
    top: "conv4" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 1 
    } 
    } 
} 
layers { 
    name: "relu4" 
    type: RELU 
    bottom: "conv4" 
    top: "conv4" 
} 
layers { 
    name: "conv5" 
    type: CONVOLUTION 
    bottom: "conv4" 
    top: "conv5" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    convolution_param { 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    group: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 1 
    } 
    } 
} 
layers { 
    name: "relu5" 
    type: RELU 
    bottom: "conv5" 
    top: "conv5" 
} 
layers { 
    name: "pool5" 
    type: POOLING 
    bottom: "conv5" 
    top: "pool5" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layers { 
    name: "fc6" 
    type: INNER_PRODUCT 
    bottom: "pool5" 
    top: "fc6" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 4096 
    weight_filler { 
     type: "gaussian" 
     std: 0.005 
    } 
    bias_filler { 
     type: "constant" 
     value: 1 
    } 
    } 
} 
layers { 
    name: "relu6" 
    type: RELU 
    bottom: "fc6" 
    top: "fc6" 
} 
layers { 
    name: "drop6" 
    type: DROPOUT 
    bottom: "fc6" 
    top: "fc6" 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
} 
layers { 
    name: "fc7" 
    type: INNER_PRODUCT 
    bottom: "fc6" 
    top: "fc7" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 4096 
    weight_filler { 
     type: "gaussian" 
     std: 0.005 
    } 
    bias_filler { 
     type: "constant" 
     value: 1 
    } 
    } 
} 
layers { 
    name: "relu7" 
    type: RELU 
    bottom: "fc7" 
    top: "fc7" 
} 
layers { 
    name: "drop7" 
    type: DROPOUT 
    bottom: "fc7" 
    top: "fc7" 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
} 
layers { 
    name: "fc8" 
    type: INNER_PRODUCT 
    bottom: "fc7" 
    top: "fc8" 
    blobs_lr: 1 
    blobs_lr: 2 
    weight_decay: 1 
    weight_decay: 0 
    inner_product_param { 
    num_output: 2 
    weight_filler { 
     type: "gaussian" 
     std: 0.01 
    } 
    bias_filler { 
     type: "constant" 
     value: 0 
    } 
    } 
} 
layers { 
    name: "accuracy" 
    type: ACCURACY 
    bottom: "fc8" 
    bottom: "label" 
    top: "accuracy" 
    include: { phase: TEST } 
} 
layers { 
    name: "loss" 
    type: SOFTMAX_LOSS 
    bottom: "fc8" 
    bottom: "label" 
    top: "loss" 
} 

test.prototxt 
name: "CaffeNet" 
layers 
{ 
    name: "data" 
    type: MEMORY_DATA 
    top: "data" 
    top: "label" 
    memory_data_param 
    { 
    batch_size: 1 
    channels: 3 
    height: 80 
    width: 64 
    } 
    transform_param 
    { 
    crop_size: 64 
    mirror: false 
    mean_file: "../../examples/Alexnet_2/mean.binaryproto" 
    } 
} 
layers { 
    name: "conv1" 
    type: CONVOLUTION 
    bottom: "data" 
    top: "conv1" 
    convolution_param { 
    num_output: 96 
    kernel_size: 11 
    stride: 2 
    } 
} 
layers { 
    name: "relu1" 
    type: RELU 
    bottom: "conv1" 
    top: "conv1" 
} 
layers { 
    name: "pool1" 
    type: POOLING 
    bottom: "conv1" 
    top: "pool1" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layers { 
    name: "norm1" 
    type: LRN 
    bottom: "pool1" 
    top: "norm1" 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
} 
layers { 
    name: "conv2" 
    type: CONVOLUTION 
    bottom: "norm1" 
    top: "conv2" 
    convolution_param { 
    num_output: 256 
    pad: 2 
    kernel_size: 5 
    group: 2 
    } 
} 
layers { 
    name: "relu2" 
    type: RELU 
    bottom: "conv2" 
    top: "conv2" 
} 
layers { 
    name: "pool2" 
    type: POOLING 
    bottom: "conv2" 
    top: "pool2" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layers { 
    name: "norm2" 
    type: LRN 
    bottom: "pool2" 
    top: "norm2" 
    lrn_param { 
    local_size: 5 
    alpha: 0.0001 
    beta: 0.75 
    } 
} 
layers { 
    name: "conv3" 
    type: CONVOLUTION 
    bottom: "norm2" 
    top: "conv3" 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    } 
} 
layers { 
    name: "relu3" 
    type: RELU 
    bottom: "conv3" 
    top: "conv3" 
} 
layers { 
    name: "conv4" 
    type: CONVOLUTION 
    bottom: "conv3" 
    top: "conv4" 
    convolution_param { 
    num_output: 384 
    pad: 1 
    kernel_size: 3 
    group: 2 
    } 
} 
layers { 
    name: "relu4" 
    type: RELU 
    bottom: "conv4" 
    top: "conv4" 
} 
layers { 
    name: "conv5" 
    type: CONVOLUTION 
    bottom: "conv4" 
    top: "conv5" 
    convolution_param { 
    num_output: 256 
    pad: 1 
    kernel_size: 3 
    group: 2 
    } 
} 
layers { 
    name: "relu5" 
    type: RELU 
    bottom: "conv5" 
    top: "conv5" 
} 
layers { 
    name: "pool5" 
    type: POOLING 
    bottom: "conv5" 
    top: "pool5" 
    pooling_param { 
    pool: MAX 
    kernel_size: 3 
    stride: 2 
    } 
} 
layers { 
    name: "fc6" 
    type: INNER_PRODUCT 
    bottom: "pool5" 
    top: "fc6" 
    inner_product_param { 
    num_output: 4096 
    } 
} 
layers { 
    name: "relu6" 
    type: RELU 
    bottom: "fc6" 
    top: "fc6" 
} 
layers { 
    name: "drop6" 
    type: DROPOUT 
    bottom: "fc6" 
    top: "fc6" 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
} 
layers { 
    name: "fc7" 
    type: INNER_PRODUCT 
    bottom: "fc6" 
    top: "fc7" 
    inner_product_param { 
    num_output: 4096 
    } 
} 
layers { 
    name: "relu7" 
    type: RELU 
    bottom: "fc7" 
    top: "fc7" 
} 
layers { 
    name: "drop7" 
    type: DROPOUT 
    bottom: "fc7" 
    top: "fc7" 
    dropout_param { 
    dropout_ratio: 0.5 
    } 
} 
layers { 
    name: "fc8" 
    type: INNER_PRODUCT 
    bottom: "fc7" 
    top: "fc8" 
    inner_product_param { 
    num_output: 2 
    } 
} 
layers { 
    name: "prob" 
    type: SOFTMAX 
    bottom: "fc8" 
    top: "prob" 
} 
layers { 
    name: "output" 
    type: ARGMAX 
    bottom: "prob" 
    top: "output" 
}