While running YOLOv4 in C++ with OpenCV 3.4.11, I had some questions about the first parameter of the cv::dnn::Net::forward() function. This post records how I worked them out.
Code sources:
https://www.learnopencv.com/deep-learning-based-object-detection-using-yolov3-with-opencv-python-c/
https://blog.csdn.net/guyuealian/article/details/84098461
The network model was trained with: https://github.com/AlexeyAB/darknet
Code for feeding input to the dnn network and reading its output:
// File paths
string imgPath = imgDir + "test1.jpg";
string cfgPath = netDir + "yolo-obj.cfg";
string wtPath  = netDir + "yolo-obj_last.weights";

// Load the network
Net net = dnn::readNetFromDarknet(cfgPath, wtPath);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(DNN_TARGET_CPU);

// Read the image
Mat img = imread(imgPath);

// Create a 4D blob (iw and ih are the network's input width and height)
Mat blob;
blobFromImage(img, blob, 1 / 255.0, cvSize(iw, ih), Scalar(0, 0, 0), true, false);

// Set the network input
net.setInput(blob);

// Run the forward pass to get the output of the output layers
vector<Mat> outs;
net.forward(outs, getOutputsNames(net));

Here, getOutputsNames() returns the names of the network's three output layers (its code is at the end of this post). For those layer names, net.forward() produces the output shown in the debugger: outs is an array of Mat, and each Mat is a set of 6-dimensional vectors, i.e. an n-row, 6-column matrix.
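That n-row, 6-column shape is easy to verify. The following is a minimal sketch of my own (continuing the snippet above, reusing its outs vector and the usual includes) that prints the dimensions of each output Mat:

// Minimal sketch, assuming `outs` was just filled by net.forward() as above.
for (size_t i = 0; i < outs.size(); i++)
{
    // For this single-class model, each output layer should yield an n x 6 float matrix,
    // one candidate detection per row.
    cout << "output layer " << i << ": " << outs[i].rows << " x " << outs[i].cols << endl;
}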
Official documentation: https://docs.opencv.org/3.4.11/
As seen in the debug information above, each element of the outs array is a matrix of 6-dimensional vectors. According to the OpenCV documentation, each element is the output of one output layer in the form of a blob, a data structure whose layout depends on the network architecture.
In Sunita Nayak's article I found an explanation of the forward() output for this YOLO network structure: the network outputs bounding boxes, each represented by a vector, and all of the boxes together form a set of vectors. Each vector has a length of (number of classes + 5). The first four of those five extra parameters give the box's position in the image: center_x, center_y, width, height (all ratios in the range 0-1); the fifth is the confidence that the box contains an object.
The elements from the sixth onward give, for each class, the confidence that the object in the box belongs to that class.
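To make the layout concrete, here is a small sketch of my own (again assuming the outs vector from the code above) that picks apart one detection row:

// Minimal sketch, assuming `outs` was filled by net.forward() as above.
const Mat &out = outs[0];                 // output of the first output layer
int numClasses = out.cols - 5;            // columns = 4 box parameters + 1 objectness + class scores
const float *row = out.ptr<float>(0);     // first detection row of this layer
float cx = row[0], cy = row[1];           // box center, as a fraction of image width/height
float w  = row[2], h  = row[3];           // box size, as a fraction of image width/height
float objectness = row[4];                // confidence that the box contains an object
// row[5] .. row[5 + numClasses - 1] hold the per-class confidences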
In this post's example, the self-trained network contains only one class, so each vector has length 5 + 1 = 6. Here are a few sample rows:
center_x | center_y | width | height | confidence of containing an object | confidence of class 1
0.503206133842 | 0.0607067160308 | 0.0554036833346 | 0.0370224379003 | 0.956498086452 | 0.947924435139
0.26918810606 | 0.0922166779637 | 0.0577514693141 | 0.034457128495 | 0.797747373581 | 0.791282773018
0.029834818095 | 0.00789603963494 | 0.0494357012212 | 0.0207939371467 | 0.000170804531081 | 0.0

The detailed, commented code for processing the forward() output outs is at the end of this post.
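Purely as an illustration, this is the scaling arithmetic that postprocess() applies to the first sample row above; the 1920x1080 image size is my assumption, since the post does not state the test image's resolution:

// Hypothetical image size, used only to show the scaling arithmetic.
int imgW = 1920, imgH = 1080;
int centerX = (int)(0.503206133842 * imgW);    // ≈ 966
int centerY = (int)(0.0607067160308 * imgH);   // ≈ 65
int width   = (int)(0.0554036833346 * imgW);   // ≈ 106
int height  = (int)(0.0370224379003 * imgH);   // ≈ 39
int left    = centerX - width / 2;             // ≈ 913
int top     = centerY - height / 2;            // ≈ 46
// With objectness ≈ 0.956 and class confidence ≈ 0.948, this detection survives any
// typical confidence threshold; the third sample row (objectness ≈ 0.0002) does not.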
Appendix
getOutputsNames() function

vector<String> getOutputsNames(Net &net)
{
    static vector<String> names;   // cached so the lookup only runs once
    if (names.empty())
    {
        // Get the indices of the output layers, i.e. the layers with unconnected outputs
        vector<int> outLayers = net.getUnconnectedOutLayers();
        // Get the names of all the layers in the network
        vector<String> layersNames = net.getLayerNames();
        // Map the 1-based output-layer indices to their names
        names.resize(outLayers.size());
        for (size_t i = 0; i < outLayers.size(); i++)
        {
            names[i] = layersNames[outLayers[i] - 1];
        }
    }
    return names;
}

postprocess() function

void postprocess(Mat &frame, const vector<Mat> &outs)
{
    vector<int> classIds;
    vector<float> confidences;
    vector<Rect> boxes;

    for (size_t i = 0; i < outs.size(); ++i)
    {
        // Scan through all the bounding boxes output from the network and keep only the
        // ones with high confidence scores. Assign the box's class label as the class
        // with the highest score for the box.
        float *data = (float *)outs[i].data;   // raw float data; one detection per row
        for (int j = 0; j < outs[i].rows; ++j, data += outs[i].cols)
        {
            // Class scores of the j-th detection in the i-th output Mat
            Mat scores = outs[i].row(j).colRange(5, outs[i].cols);
            Point classIdPoint;
            double confidence;
            // Get the value and location of the maximum score
            // minMaxLoc(InputArray src, double* minVal, double* maxVal, Point* minLoc, Point* maxLoc)
            minMaxLoc(scores, 0, &confidence, 0, &classIdPoint);
            if (confidence > cfdThr)
            {
                // data layout:
                // |centerX|centerY|width|height|confidence of containing object|confidence1|confidence2|...
                int centerX = (int)(data[0] * frame.cols);
                int centerY = (int)(data[1] * frame.rows);
                int width   = (int)(data[2] * frame.cols);
                int height  = (int)(data[3] * frame.rows);
                int left = centerX - width / 2;
                int top  = centerY - height / 2;

                classIds.push_back(classIdPoint.x);
                confidences.push_back((float)confidence);
                boxes.push_back(Rect(left, top, width, height));
            }
        }
    }

    // Perform non maximum suppression to eliminate redundant overlapping boxes with
    // lower confidences
    vector<int> indices;
    NMSBoxes(boxes, confidences, cfdThr, nmsThr, indices);
    for (size_t i = 0; i < indices.size(); ++i)
    {
        int idx = indices[i];
        Rect box = boxes[idx];
        drawPred(classIds[idx], confidences[idx], box.x, box.y,
                 box.x + box.width, box.y + box.height, frame);
    }
}

// Draw the predicted bounding box
void drawPred(int classId, float conf, int left, int top, int right, int bottom, Mat &frame)
{
    // Draw a rectangle displaying the bounding box
    rectangle(frame, Point(left, top), Point(right, bottom), Scalar(255, 178, 50), 3);

    // Get the label for the class name and its confidence
    string label = format("%.2f", conf);
    if (!classes.empty())
    {
        CV_Assert(classId < (int)classes.size());
        label = classes[classId] + ":" + label;
    }

    // Display the label at the top of the bounding box
    int baseLine;
    Size labelSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
    top = max(top, labelSize.height);
    rectangle(frame, Point(left, top - round(1.5 * labelSize.height)),
              Point(left + round(1.5 * labelSize.width), top + baseLine),
              Scalar(255, 255, 255), FILLED);
    putText(frame, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0, 0, 0), 1);
}
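For completeness, here is a minimal sketch of how these helpers could be wired together. It is my own addition rather than the post's full program: the threshold values, the 416x416 input size, and the file names (obj.names plus the model and image files from the beginning of the post) are placeholders/assumptions, and the helper functions above are assumed to be in the same file, with drawPred and the globals declared before postprocess.

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <iostream>
using namespace cv;
using namespace cv::dnn;
using namespace std;

// Globals referenced by postprocess() and drawPred(); in the real file these
// declarations must appear before those functions.
float cfdThr = 0.5f;     // confidence threshold (assumed value)
float nmsThr = 0.4f;     // non-maximum suppression threshold (assumed value)
vector<string> classes;  // class names, one per line in the names file

int main()
{
    // Hypothetical paths; replace with your own model, names file, and image.
    Net net = readNetFromDarknet("yolo-obj.cfg", "yolo-obj_last.weights");
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_CPU);

    ifstream ifs("obj.names");
    for (string line; getline(ifs, line); )
        classes.push_back(line);

    Mat img = imread("test1.jpg");
    Mat blob;
    // 416x416 is a common YOLO input size; the post does not state its iw/ih values.
    blobFromImage(img, blob, 1 / 255.0, Size(416, 416), Scalar(0, 0, 0), true, false);
    net.setInput(blob);

    vector<Mat> outs;
    net.forward(outs, getOutputsNames(net));
    postprocess(img, outs);   // draws the surviving boxes onto img

    imshow("detections", img);
    waitKey(0);
    return 0;
}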