
Answer Sheet Recognition in Java with OpenCV

Download OpenCV

https://github.com/opencv/opencv/releases

Download the release from the page above (the original post includes a screenshot showing which file to pick).


The .exe installer also works; it is a self-extracting archive.

When the installer asks for a target directory, there is no need to create an opencv folder yourself; one is created automatically during extraction.

Load the dependency

Add the jar to the project.

The jar is located in the build\java directory under the installation directory.

Either of the two approaches below works.

Approach 1 is recommended; Approach 2 stops working on Linux.
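
For reference, here is a minimal sketch (my own addition, not from the original article) of why Approach 1 stays portable: System.loadLibrary maps the logical name "opencv_java455" to opencv_java455.dll on Windows and libopencv_java455.so on Linux, looking it up on java.library.path in both cases.

// Minimal cross-platform loader sketch; the Linux path below is illustrative only
public class NativeLoader {
    static {
        // Windows: -Djava.library.path=D:\Tools\opencv\build\java\x64
        // Linux (example path): -Djava.library.path=/usr/local/share/java/opencv4
        System.loadLibrary(org.opencv.core.Core.NATIVE_LIBRARY_NAME);
    }
}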

Approach 1

Add the test code:

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;

public class Test01 {
    static {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    public static void main(String[] args) {
        System.out.println("Welcome to OpenCV " + Core.VERSION);
        Mat m = new Mat(5, 10, CvType.CV_8UC1, new Scalar(0));
        System.out.println("OpenCV Mat: " + m);
        Mat mr1 = m.row(1);
        mr1.setTo(new Scalar(1));
        Mat mc5 = m.col(5);
        mc5.setTo(new Scalar(5));
        System.out.println("OpenCV Mat data:\n" + m.dump());
    }
}

Configure the run-time options.

Open the Run/Debug Configurations dialog via the menu Run -> Edit Configurations....

On the right side of the dialog, locate the text field labeled VM options.

Enter the following parameter in that field:

-Djava.library.path=D:\Tools\opencv\build\java\x64

Check your machine's architecture: choose x64 for a 64-bit JVM, x86 for a 32-bit one.
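
If you are not sure which JVM you are running, the following illustrative sketch (my own example, not from the original article) prints the relevant system properties:

// Print the JVM architecture so you know whether to use the x64 or x86 folder
public class ArchCheck {
    public static void main(String[] args) {
        // "os.arch" is e.g. "amd64" on a 64-bit JVM;
        // "sun.arch.data.model" is "64" or "32" on HotSpot JVMs
        System.out.println("os.arch = " + System.getProperty("os.arch"));
        System.out.println("data model = " + System.getProperty("sun.arch.data.model"));
    }
}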

Alternatively, you can put opencv_java455.dll directly into the Java bin directory; then there is no need to configure java.library.path at all:

D:\Tools\Java\jdk1.8.0_102\bin

Approach 2

With this approach there is no need to specify java.library.path.

Put the jar and the dll into the project.

(Screenshot of the project layout omitted; the dll sits under lib/opencv on the classpath, as referenced in the code below.)

In the code:

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import java.net.URL;

public class Test01 {
    static {
        // Load the native library from the bundled resource,
        // so java.library.path does not have to be configured
        URL url = ClassLoader.getSystemResource("lib/opencv/opencv_java455.dll");
        System.load(url.getPath());
    }

    public static void main(String[] args) {
        System.out.println("Welcome to OpenCV " + Core.VERSION);
        Mat m = new Mat(5, 10, CvType.CV_8UC1, new Scalar(0));
        System.out.println("OpenCV Mat: " + m);
        Mat mr1 = m.row(1);
        mr1.setTo(new Scalar(1));
        Mat mc5 = m.col(5);
        mc5.setTo(new Scalar(5));
        System.out.println("OpenCV Mat data:\n" + m.dump());
    }
}

Common operations

import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import utils.opencv.OpenCVUtil;

public class Test01 {
    static {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    public static void main(String[] args) {
        // Read the image in grayscale mode
        Mat img = Imgcodecs.imread("D:\\Pic\\0.png", Imgcodecs.IMREAD_GRAYSCALE);
        Imgcodecs.imwrite("D:\\Pic\\1.png", img);

        // Convert to a binary image (Otsu thresholding)
        Mat img2 = new Mat();
        Imgproc.threshold(img, img2, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
        Imgcodecs.imwrite("D:\\Pic\\2.png", img2);

        // Dilate the black regions (erode the white regions)
        Mat img3 = OpenCVUtil.eroding(img2);
        Imgcodecs.imwrite("D:\\Pic\\3.png", img3);
    }
}

The code above, together with the standalone snippets below, demonstrates:

  1. Converting an image to grayscale
  2. Binarizing the grayscale image
  3. Dilating the black regions of the binary image
  4. Cropping the image

Grayscale

// Read the image in grayscale mode
Mat img = Imgcodecs.imread("D:\\Pic\\0.png", Imgcodecs.IMREAD_GRAYSCALE);
Imgcodecs.imwrite("D:\\Pic\\1.png", img);

Binarization

// Convert to a binary image (Otsu thresholding)
Mat img2 = new Mat();
Imgproc.threshold(img, img2, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
Imgcodecs.imwrite("D:\\Pic\\2.png", img2);

Dilation

// Dilate the black regions (erode the white regions)
Mat img3 = OpenCVUtil.eroding(img2);
Imgcodecs.imwrite("D:\\Pic\\3.png", img3);
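
The eroding call erodes the white regions, which makes the black marks grow. For the opposite effect (growing the white regions), the utility class listed further below also provides dilation; a minimal usage sketch (the output file name is just an example):

// Grow the white regions instead; uses OpenCVUtil.dilation from the utility class below
Mat img3b = OpenCVUtil.dilation(img2);
Imgcodecs.imwrite("D:\\Pic\\3b.png", img3b);   // example output path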

Gaussian blur

Mat img01 = new Mat();
Imgproc.GaussianBlur(img, img01, new Size(1, 1), 10, 10);
Imgcodecs.imwrite("D:\\Pic\\img01.png", img01);
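
Note that a 1x1 kernel leaves the image essentially unchanged, so the sigma values above have no visible effect. For an actual blur you would normally pass a larger odd kernel size; the variant below is my own illustration (output path is an example):

// A 5x5 Gaussian kernel produces a visible blur; with sigma 0, OpenCV derives sigma from the kernel size
Mat img02 = new Mat();
Imgproc.GaussianBlur(img, img02, new Size(5, 5), 0, 0);
Imgcodecs.imwrite("D:\\Pic\\img02.png", img02);   // example output path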

Cropping

// Crop the top-left quarter of the image
Rect rect = new Rect(0, 0, img2.cols() / 2, img2.rows() / 2);
Mat img4 = new Mat(img2, rect);
Imgcodecs.imwrite("D:\\Pic\\4.png", img4);

Utility classes

General

package utils.opencv;

import java.awt.image.BufferedImage;
import java.awt.image.DataBufferByte;
import java.io.IOException;
import java.util.*;

import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;

public class OpenCVUtil {
    public static BufferedImage covertMat2Buffer(Mat mat) throws IOException {
        long time1 = new Date().getTime();
        // Convert the Mat to a BufferedImage
        BufferedImage originalB = toBufferedImage(mat);
        long time3 = new Date().getTime();
        System.out.println("Mat -> BufferedImage conversion took = " + (time3 - time1));
        return originalB;
        // ImageIO.write(originalB, "jpg", new File("D:\\test\\testImg\\ews2.jpg"));
    }

    public static byte[] covertMat2Byte(Mat mat) throws IOException {
        long time1 = new Date().getTime();
        // Copy the Mat pixels into a byte array
        byte[] return_buff = new byte[(int) (mat.total() * mat.channels())];
        mat.get(0, 0, return_buff);
        long time3 = new Date().getTime();
        System.out.println(mat.total() * mat.channels());
        System.out.println("Mat -> byte[] conversion took = " + (time3 - time1));
        return return_buff;
    }

    public static byte[] covertMat2Byte1(Mat mat) throws IOException {
        long time1 = new Date().getTime();
        MatOfByte mob = new MatOfByte();

        Imgcodecs.imencode(".jpg", mat, mob);

        long time3 = new Date().getTime();
        // System.out.println(mat.total() * mat.channels());
        System.out.println("Mat -> byte[] (imencode) took = " + (time3 - time1));
        return mob.toArray();
    }

    public static BufferedImage toBufferedImage(Mat m) {
        int type = BufferedImage.TYPE_BYTE_GRAY;
        if (m.channels() > 1) {
            type = BufferedImage.TYPE_3BYTE_BGR;
        }
        int bufferSize = m.channels() * m.cols() * m.rows();
        byte[] b = new byte[bufferSize];
        m.get(0, 0, b); // get all the pixels
        BufferedImage image = new BufferedImage(m.cols(), m.rows(), type);
        final byte[] targetPixels = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
        System.arraycopy(b, 0, targetPixels, 0, b.length);
        return image;
    }

    /**
     * Erosion and dilation are defined with respect to the white regions:
     * erosion shrinks the white regions, so the black regions grow.
     *
     * @param source
     * @return
     */
    public static Mat eroding(Mat source) {
        return eroding(source, 1);
    }

    public static Mat eroding(Mat source, double erosion_size) {
        Mat resultMat = new Mat(source.rows(), source.cols(), source.type());
        Mat element = Imgproc.getStructuringElement(
                Imgproc.MORPH_RECT,
                new Size(erosion_size + 1, erosion_size + 1)
        );
        Imgproc.erode(source, resultMat, element);
        return resultMat;
    }

    /**
     * Dilation grows the white regions.
     *
     * @param source
     * @return
     */
    public static Mat dilation(Mat source) {
        return dilation(source, 1);
    }

    /**
     * Dilation grows the white regions.
     *
     * @param source
     * @param dilation_size the x in the kernel size 2*x+1
     * @return Mat
     */
    public static Mat dilation(Mat source, double dilation_size) {
        Mat resultMat = new Mat(source.rows(), source.cols(), source.type());
        Mat element = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2 * dilation_size + 1,
                2 * dilation_size + 1));
        Imgproc.dilate(source, resultMat, element);
        return resultMat;
    }
}

Perspective transform

package utils.opencv;

import org.opencv.core.*;
import org.opencv.imgproc.Imgproc;
import org.opencv.utils.Converters;

import java.util.Arrays;
import java.util.List;

/**
 * Perspective-transform utility class.
 * My perspective correction is not perfect either, so this only provides a rough helper...
 */
public class WarpPerspectiveUtils {

    /**
     * Perspective transform
     *
     * @param src
     * @param points
     * @return
     */
    public static Mat warpPerspective(Mat src, Point[] points) {
        // Point order: [top-left, top-right, bottom-right, bottom-left]
        List<Point> listSrcs = Arrays.asList(
                points[0],
                points[1],
                points[2],
                points[3]
        );
        Mat srcPoints = Converters.vector_Point_to_Mat(listSrcs, CvType.CV_32F);

        List<Point> listDsts = Arrays.asList(
                new Point(0, 0),
                new Point(src.width(), 0),
                new Point(src.width(), src.height()),
                new Point(0, src.height())
        );
        Mat dstPoints = Converters.vector_Point_to_Mat(listDsts, CvType.CV_32F);

        // The transform is computed from dst to src and applied with WARP_INVERSE_MAP
        Mat perspectiveMmat = Imgproc.getPerspectiveTransform(dstPoints, srcPoints);

        Mat dst = new Mat();

        Imgproc.warpPerspective(
                src,
                dst,
                perspectiveMmat,
                src.size(),
                Imgproc.INTER_LINEAR + Imgproc.WARP_INVERSE_MAP,
                1,
                new Scalar(0)
        );

        return dst;
    }
}

Contours

package utils.opencv;

import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.imgproc.Imgproc;

import java.util.Vector;

/**
 * Contour utility class
 */
public class ContoursUtils {

    /**
     * Find the four corner points of the image: the black pixel closest to each
     * of the four image corners, skipping the central block of the image.
     *
     * @param img
     * @return
     */
    public static Point[] getAllPoints(Mat img) {
        Point[] potArr = new Point[4];
        for (int i = 0; i < 4; i++) {
            potArr[i] = new Point(-1, -1);
        }

        int[] spaceArr = new int[]{-1, -1, -1, -1};
        int cols = img.cols();
        int rows = img.rows();
        int x1 = cols / 3;
        int x2 = cols * 2 / 3;
        int y1 = rows / 3;
        int y2 = rows * 2 / 3;
        for (int x = 0; x < cols; x++) {
            for (int y = 0; y < rows; y++) {
                if (x > x1 && x < x2 && y > y1 && y < y2) {
                    continue;
                }
                double[] darr = img.get(y, x);
                if (darr != null && darr.length >= 1 && darr[0] == 0) {
                    if (spaceArr[0] == -1) {
                        potArr[0].x = x;
                        potArr[0].y = y;
                        potArr[1].x = x;
                        potArr[1].y = y;
                        potArr[2].x = x;
                        potArr[2].y = y;
                        potArr[3].x = x;
                        potArr[3].y = y;
                        spaceArr[0] = getSpace(0, 0, x, y);
                        spaceArr[1] = getSpace(cols, 0, x, y);
                        spaceArr[2] = getSpace(cols, rows, x, y);
                        spaceArr[3] = getSpace(0, rows, x, y);
                    } else {
                        int s0 = getSpace(0, 0, x, y);
                        int s1 = getSpace(cols, 0, x, y);
                        int s2 = getSpace(cols, rows, x, y);
                        int s3 = getSpace(0, rows, x, y);
                        if (s0 < spaceArr[0]) {
                            spaceArr[0] = s0;
                            potArr[0].x = x;
                            potArr[0].y = y;
                        }
                        if (s1 < spaceArr[1]) {
                            spaceArr[1] = s1;
                            potArr[1].x = x;
                            potArr[1].y = y;
                        }
                        if (s2 < spaceArr[2]) {
                            spaceArr[2] = s2;
                            potArr[2].x = x;
                            potArr[2].y = y;
                        }
                        if (s3 < spaceArr[3]) {
                            spaceArr[3] = s3;
                            potArr[3].x = x;
                            potArr[3].y = y;
                        }
                    }
                }
            }
        }
        return potArr;
    }

    /**
     * Contour detection, using the RETR_LIST retrieval mode and the
     * CHAIN_APPROX_SIMPLE approximation method.
     *
     * @param source the input image Mat
     * @return the detected contours
     */
    public static Vector<MatOfPoint> findContours(Mat source) {
        Mat rs = new Mat();
        /*
         * Contour retrieval modes:
         * RETR_EXTERNAL: retrieve only the outermost contours;
         * RETR_LIST: retrieve all contours into a flat list;
         * RETR_CCOMP: retrieve all contours and organize them into two levels:
         *             outer boundaries on top, hole boundaries on the second level;
         * RETR_TREE: retrieve all contours and reconstruct the full nesting hierarchy.
         */
        /*
         * Contour approximation methods:
         * CHAIN_APPROX_NONE: store every contour point;
         * CHAIN_APPROX_SIMPLE: compress horizontal, vertical, and diagonal segments,
         *                      keeping only their end points;
         * CHAIN_APPROX_TC89_L1 / CHAIN_APPROX_TC89_KCOS: variants of the Teh-Chin
         *                      chain approximation algorithm.
         */
        Vector<MatOfPoint> contours = new Vector<MatOfPoint>();
        Imgproc.findContours(
                source,
                contours,
                rs,
                Imgproc.RETR_LIST,
                Imgproc.CHAIN_APPROX_SIMPLE
        );
        return contours;
    }

    /**
     * Distance between two points
     *
     * @param x1
     * @param y1
     * @param x2
     * @param y2
     * @return
     */
    private static int getSpace(int x1, int y1, int x2, int y2) {
        int xspace = Math.abs(x1 - x2);
        int yspace = Math.abs(y1 - y2);
        return (int) Math.sqrt(Math.pow(xspace, 2) + Math.pow(yspace, 2));
    }
}

Full processing pipeline

import org.opencv.core.*;
import org.opencv.imgcodecs.Imgcodecs;
import org.opencv.imgproc.Imgproc;
import utils.opencv.ContoursUtils;
import utils.opencv.OpenCVUtil;
import utils.opencv.WarpPerspectiveUtils;

import java.util.Vector;

public class Test01 {
    static {
        System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
    }

    public static void main(String[] args) {
        // Read the image in grayscale mode
        Mat img = Imgcodecs.imread("D:\\Project\\Java\\opencv-demo01\\pic\\0.jpg", Imgcodecs.IMREAD_GRAYSCALE);
        Imgcodecs.imwrite("D:\\Project\\Java\\opencv-demo01\\pic\\1.png", img);

        // Convert to a binary image (Otsu thresholding)
        Mat img2 = new Mat();
        Imgproc.threshold(img, img2, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
        Imgcodecs.imwrite("D:\\Project\\Java\\opencv-demo01\\pic\\2.png", img2);

        // Perspective correction using the four detected corner points
        Mat img3 = WarpPerspectiveUtils.warpPerspective(img2, ContoursUtils.getAllPoints(img2));
        Imgcodecs.imwrite("D:\\Project\\Java\\opencv-demo01\\pic\\3.png", img3);

        // Dilate the black regions (erode the white regions)
        Mat img4 = OpenCVUtil.eroding(img3);
        Imgcodecs.imwrite("D:\\Project\\Java\\opencv-demo01\\pic\\4.png", img4);

        // Crop the multiple-choice answer area
        Rect rect = new Rect(68, 834, 1536, 220);
        Mat img5 = new Mat(img4, rect);
        Imgcodecs.imwrite("D:\\Project\\Java\\opencv-demo01\\pic\\5.png", img5);

        // Find contours and keep only those whose bounding box matches the size of a filled mark
        Vector<MatOfPoint> rectVec = ContoursUtils.findContours(img5);
        Vector<MatOfPoint> rectVec2 = new Vector<>();
        for (MatOfPoint matOfPoint : rectVec) {
            Rect rect2 = Imgproc.boundingRect(matOfPoint);
            if (rect2.width > 36 && rect2.height > 20 && rect2.width < 50 && rect2.height < 40) {
                rectVec2.add(matOfPoint);
            }
        }
        Mat img6 = new Mat(img5.rows(), img5.cols(), CvType.CV_8UC3, new Scalar(255, 255, 255));
        Imgproc.drawContours(img6, rectVec2, -1, new Scalar(0, 0, 255), 1);
        Imgcodecs.imwrite("D:\\Project\\Java\\opencv-demo01\\pic\\6.png", img6);
    }
}
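
The pipeline above stops after drawing the filtered contours. To actually read answers off the sheet, a common follow-up step is to sort the bounding rectangles of the marks into rows and columns, so each row maps to a question and each column to an option. The sketch below is my own illustration, not part of the original code; the class name AnswerGrouping, the groupIntoRows method, and the rowTolerance parameter are all assumptions.

import org.opencv.core.MatOfPoint;
import org.opencv.core.Rect;
import org.opencv.imgproc.Imgproc;

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Vector;

public class AnswerGrouping {
    // Hypothetical helper: groups the filtered mark contours into rows by their vertical
    // position, then sorts each row left to right. Row/column indices can then be mapped
    // to question numbers and options (A, B, C, ...) according to the sheet layout.
    public static List<List<Rect>> groupIntoRows(Vector<MatOfPoint> marks, int rowTolerance) {
        List<Rect> rects = new ArrayList<>();
        for (MatOfPoint p : marks) {
            rects.add(Imgproc.boundingRect(p));
        }
        // Sort top to bottom first
        rects.sort(Comparator.comparingInt((Rect r) -> r.y));

        List<List<Rect>> rows = new ArrayList<>();
        for (Rect r : rects) {
            // Start a new row when the vertical gap to the current row exceeds the tolerance
            if (rows.isEmpty() || r.y - rows.get(rows.size() - 1).get(0).y > rowTolerance) {
                rows.add(new ArrayList<>());
            }
            rows.get(rows.size() - 1).add(r);
        }
        // Within each row, sort left to right
        for (List<Rect> row : rows) {
            row.sort(Comparator.comparingInt((Rect r) -> r.x));
        }
        return rows;
    }
}

Each inner list then corresponds to one row of marks; comparing a rectangle's horizontal position against the known sheet layout gives the selected option for that question.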