1. OpenCV: obtaining the information of each layer of an imported DNN model

I. code

1. C++ code

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>

using namespace cv;
using namespace cv::dnn;
using namespace std;

int main(int argc, char** argv) {
	string bin_model = "E:/workOpencv/JZG_opencv/opencv_tutorial/data/models/googlenet/bvlc_googlenet.caffemodel";
	string protxt = "E:/workOpencv/JZG_opencv/opencv_tutorial/data/models/googlenet/bvlc_googlenet.prototxt";

	// load CNN model
	Net net = dnn::readNet(bin_model, protxt);

	// Get information of each layer   # Vector is a container, which is a dynamic array vector < type > identifier.
	vector<String> layer_names = net.getLayerNames();
	for (int i = 0; i < layer_names.size(); i++) {
		int id = net.getLayerId(layer_names[i]);
		auto layer = net.getLayer(id);
		printf("layer id:%d, type: %s, name:%s \n", id, layer->type.c_str(), layer->name.c_str());
	}

	system("pause");   // Use double quotes, not single quotes
	return 0;
}

Output results:

Attempting to upgrade input file specified using deprecated V1LayerParameter: E:/workOpencv/JZG_opencv/opencv_tutorial/data/models/googlenet/bvlc_googlenet.caffemodel
Successfully upgraded file specified using deprecated V1LayerParameter
layer id:1, type: Convolution, name:conv1/7x7_s2
layer id:2, type: ReLU, name:conv1/relu_7x7
layer id:3, type: Pooling, name:pool1/3x3_s2
layer id:4, type: LRN, name:pool1/norm1
layer id:5, type: Convolution, name:conv2/3x3_reduce
layer id:6, type: ReLU, name:conv2/relu_3x3_reduce
layer id:7, type: Convolution, name:conv2/3x3
layer id:8, type: ReLU, name:conv2/relu_3x3
layer id:9, type: LRN, name:conv2/norm2
layer id:10, type: Pooling, name:pool2/3x3_s2
layer id:11, type: Convolution, name:inception_3a/1x1
layer id:12, type: ReLU, name:inception_3a/relu_1x1
layer id:13, type: Convolution, name:inception_3a/3x3_reduce
layer id:14, type: ReLU, name:inception_3a/relu_3x3_reduce
layer id:15, type: Convolution, name:inception_3a/3x3
layer id:16, type: ReLU, name:inception_3a/relu_3x3
layer id:17, type: Convolution, name:inception_3a/5x5_reduce
layer id:18, type: ReLU, name:inception_3a/relu_5x5_reduce
layer id:19, type: Convolution, name:inception_3a/5x5
layer id:20, type: ReLU, name:inception_3a/relu_5x5
layer id:21, type: Pooling, name:inception_3a/pool
layer id:22, type: Convolution, name:inception_3a/pool_proj
layer id:23, type: ReLU, name:inception_3a/relu_pool_proj
layer id:24, type: Concat, name:inception_3a/output
layer id:25, type: Convolution, name:inception_3b/1x1
layer id:26, type: ReLU, name:inception_3b/relu_1x1
layer id:27, type: Convolution, name:inception_3b/3x3_reduce
layer id:28, type: ReLU, name:inception_3b/relu_3x3_reduce
layer id:29, type: Convolution, name:inception_3b/3x3
layer id:30, type: ReLU, name:inception_3b/relu_3x3
layer id:31, type: Convolution, name:inception_3b/5x5_reduce
layer id:32, type: ReLU, name:inception_3b/relu_5x5_reduce
layer id:33, type: Convolution, name:inception_3b/5x5
layer id:34, type: ReLU, name:inception_3b/relu_5x5
layer id:35, type: Pooling, name:inception_3b/pool
layer id:36, type: Convolution, name:inception_3b/pool_proj
layer id:37, type: ReLU, name:inception_3b/relu_pool_proj
layer id:38, type: Concat, name:inception_3b/output
layer id:39, type: Pooling, name:pool3/3x3_s2
layer id:40, type: Convolution, name:inception_4a/1x1
layer id:41, type: ReLU, name:inception_4a/relu_1x1
layer id:42, type: Convolution, name:inception_4a/3x3_reduce
layer id:43, type: ReLU, name:inception_4a/relu_3x3_reduce
layer id:44, type: Convolution, name:inception_4a/3x3
layer id:45, type: ReLU, name:inception_4a/relu_3x3
layer id:46, type: Convolution, name:inception_4a/5x5_reduce
layer id:47, type: ReLU, name:inception_4a/relu_5x5_reduce
layer id:48, type: Convolution, name:inception_4a/5x5
layer id:49, type: ReLU, name:inception_4a/relu_5x5
layer id:50, type: Pooling, name:inception_4a/pool
layer id:51, type: Convolution, name:inception_4a/pool_proj
layer id:52, type: ReLU, name:inception_4a/relu_pool_proj
layer id:53, type: Concat, name:inception_4a/output
layer id:54, type: Convolution, name:inception_4b/1x1
layer id:55, type: ReLU, name:inception_4b/relu_1x1
layer id:56, type: Convolution, name:inception_4b/3x3_reduce
layer id:57, type: ReLU, name:inception_4b/relu_3x3_reduce
layer id:58, type: Convolution, name:inception_4b/3x3
layer id:59, type: ReLU, name:inception_4b/relu_3x3
layer id:60, type: Convolution, name:inception_4b/5x5_reduce
layer id:61, type: ReLU, name:inception_4b/relu_5x5_reduce
layer id:62, type: Convolution, name:inception_4b/5x5
layer id:63, type: ReLU, name:inception_4b/relu_5x5
layer id:64, type: Pooling, name:inception_4b/pool
layer id:65, type: Convolution, name:inception_4b/pool_proj
layer id:66, type: ReLU, name:inception_4b/relu_pool_proj
layer id:67, type: Concat, name:inception_4b/output
layer id:68, type: Convolution, name:inception_4c/1x1
layer id:69, type: ReLU, name:inception_4c/relu_1x1
layer id:70, type: Convolution, name:inception_4c/3x3_reduce
layer id:71, type: ReLU, name:inception_4c/relu_3x3_reduce
layer id:72, type: Convolution, name:inception_4c/3x3
layer id:73, type: ReLU, name:inception_4c/relu_3x3
layer id:74, type: Convolution, name:inception_4c/5x5_reduce
layer id:75, type: ReLU, name:inception_4c/relu_5x5_reduce
layer id:76, type: Convolution, name:inception_4c/5x5
layer id:77, type: ReLU, name:inception_4c/relu_5x5
layer id:78, type: Pooling, name:inception_4c/pool
layer id:79, type: Convolution, name:inception_4c/pool_proj
layer id:80, type: ReLU, name:inception_4c/relu_pool_proj
layer id:81, type: Concat, name:inception_4c/output
layer id:82, type: Convolution, name:inception_4d/1x1
layer id:83, type: ReLU, name:inception_4d/relu_1x1
layer id:84, type: Convolution, name:inception_4d/3x3_reduce
layer id:85, type: ReLU, name:inception_4d/relu_3x3_reduce
layer id:86, type: Convolution, name:inception_4d/3x3
layer id:87, type: ReLU, name:inception_4d/relu_3x3
layer id:88, type: Convolution, name:inception_4d/5x5_reduce
layer id:89, type: ReLU, name:inception_4d/relu_5x5_reduce
layer id:90, type: Convolution, name:inception_4d/5x5
layer id:91, type: ReLU, name:inception_4d/relu_5x5
layer id:92, type: Pooling, name:inception_4d/pool
layer id:93, type: Convolution, name:inception_4d/pool_proj
layer id:94, type: ReLU, name:inception_4d/relu_pool_proj
layer id:95, type: Concat, name:inception_4d/output
layer id:96, type: Convolution, name:inception_4e/1x1
layer id:97, type: ReLU, name:inception_4e/relu_1x1
layer id:98, type: Convolution, name:inception_4e/3x3_reduce
layer id:99, type: ReLU, name:inception_4e/relu_3x3_reduce
layer id:100, type: Convolution, name:inception_4e/3x3
layer id:101, type: ReLU, name:inception_4e/relu_3x3
layer id:102, type: Convolution, name:inception_4e/5x5_reduce
layer id:103, type: ReLU, name:inception_4e/relu_5x5_reduce
layer id:104, type: Convolution, name:inception_4e/5x5
layer id:105, type: ReLU, name:inception_4e/relu_5x5
layer id:106, type: Pooling, name:inception_4e/pool
layer id:107, type: Convolution, name:inception_4e/pool_proj
layer id:108, type: ReLU, name:inception_4e/relu_pool_proj
layer id:109, type: Concat, name:inception_4e/output
layer id:110, type: Pooling, name:pool4/3x3_s2
layer id:111, type: Convolution, name:inception_5a/1x1
layer id:112, type: ReLU, name:inception_5a/relu_1x1
layer id:113, type: Convolution, name:inception_5a/3x3_reduce
layer id:114, type: ReLU, name:inception_5a/relu_3x3_reduce
layer id:115, type: Convolution, name:inception_5a/3x3
layer id:116, type: ReLU, name:inception_5a/relu_3x3
layer id:117, type: Convolution, name:inception_5a/5x5_reduce
layer id:118, type: ReLU, name:inception_5a/relu_5x5_reduce
layer id:119, type: Convolution, name:inception_5a/5x5
layer id:120, type: ReLU, name:inception_5a/relu_5x5
layer id:121, type: Pooling, name:inception_5a/pool
layer id:122, type: Convolution, name:inception_5a/pool_proj
layer id:123, type: ReLU, name:inception_5a/relu_pool_proj
layer id:124, type: Concat, name:inception_5a/output
layer id:125, type: Convolution, name:inception_5b/1x1
layer id:126, type: ReLU, name:inception_5b/relu_1x1
layer id:127, type: Convolution, name:inception_5b/3x3_reduce
layer id:128, type: ReLU, name:inception_5b/relu_3x3_reduce
layer id:129, type: Convolution, name:inception_5b/3x3
layer id:130, type: ReLU, name:inception_5b/relu_3x3
layer id:131, type: Convolution, name:inception_5b/5x5_reduce
layer id:132, type: ReLU, name:inception_5b/relu_5x5_reduce
layer id:133, type: Convolution, name:inception_5b/5x5
layer id:134, type: ReLU, name:inception_5b/relu_5x5
layer id:135, type: Pooling, name:inception_5b/pool
layer id:136, type: Convolution, name:inception_5b/pool_proj
layer id:137, type: ReLU, name:inception_5b/relu_pool_proj
layer id:138, type: Concat, name:inception_5b/output
layer id:139, type: Pooling, name:pool5/7x7_s1
layer id:140, type: Dropout, name:pool5/drop_7x7_s1
layer id:141, type: InnerProduct, name:loss3/classifier
layer id:142, type: Softmax, name:prob

OpenCV DNN obtains the information of each layer of the imported model

2. Python code

import cv2 as cv
import numpy as np

# Paths to the GoogLeNet Caffe weights and the network description file.
bin_model = "googlenet/bvlc_googlenet.caffemodel"
protxt = "googlenet/bvlc_googlenet.prototxt"

# Load names of classes
with open("googlenet/classification_classes_ILSVRC2012.txt", 'rt') as f:
    classes = f.read().rstrip('\n').split('\n')

# load CNN model
net = cv.dnn.readNet(bin_model, protxt)

# Get information of each layer
layer_names = net.getLayerNames()
print(layer_names)
for layer_name in layer_names:
    # Resolve the name to a numeric id, then fetch the layer object itself.
    layer_id = net.getLayerId(layer_name)
    layer = net.getLayer(layer_id)
    print("layer id : %d, type : %s, name: %s" % (layer_id, layer.type, layer.name))

print("successfully loaded model...")

II. Relevant instructions

1. Supported deep learning frameworks

The model supports image classification over 1000 categories. The OpenCV DNN module can run the feed-forward (inference) pass of pre-trained models from the following frameworks:

  • Caffe
  • Tensorflow
  • Torch
  • DLDT
  • Darknet

2. There are three parameters to import the model

It also supports custom layer parsing, non-maximum suppression, querying the information of each layer, and more. The general OpenCV API for loading a model is:
Net cv::dnn::readNet(
const String & model,
const String & config = "",
const String & framework = ""
)

1. First parameter model
Model is a trained binary network weight file, which may come from the supported network framework. The binary model extensions corresponding to different deep learning frameworks are as follows:

*.caffemodel (Caffe, http://caffe.berkeleyvision.org/)
*.pb (TensorFlow, https://www.tensorflow.org/)
*.t7 | *.net (Torch, http://torch.ch/)
*.weights (Darknet, https://pjreddie.com/darknet/)
*.bin (DLDT, https://software.intel.com/openvino-toolkit)

2. The second parameter config
config is the text description file for the binary model; different frameworks use different configuration-file extensions, as follows:

*.prototxt (Caffe, http://caffe.berkeleyvision.org/)
*.pbtxt (TensorFlow, https://www.tensorflow.org/)
*.cfg (Darknet, https://pjreddie.com/darknet/)
*.xml (DLDT, https://software.intel.com/openvino-toolkit)

3. The third parameter framework
framework is an explicit declaration parameter indicating which deep learning framework was used to train the model.

The last two parameters (config and framework) are optional; the first parameter (model) is required.