From 2d80346e6b12bc272e9b022aab3b0367d775ad08 Mon Sep 17 00:00:00 2001
From: Michael Kroneev
Date: Thu, 29 Feb 2024 09:40:49 +0200
Subject: [PATCH 1/2] Upgrade versions of all Maven dependencies and plugins
 (pull #2159)

---
 platform/pom.xml | 12 ++++++------
 pom.xml          | 16 ++++++++--------
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/platform/pom.xml b/platform/pom.xml
index 7bab9656..0dae5f27 100644
--- a/platform/pom.xml
+++ b/platform/pom.xml
@@ -126,7 +126,7 @@
       <plugin>
         <artifactId>maven-jar-plugin</artifactId>
-        <version>3.2.0</version>
+        <version>3.3.0</version>
         <executions>
           <execution>
             <id>default-jar</id>
@@ -162,7 +162,7 @@
       <plugin>
         <groupId>org.moditect</groupId>
         <artifactId>moditect-maven-plugin</artifactId>
-        <version>1.0.0.RC2</version>
+        <version>1.1.0</version>
         <configuration>
           <jvmVersion>9</jvmVersion>
           <overwriteExistingFiles>true</overwriteExistingFiles>
@@ -188,7 +188,7 @@
       <plugin>
         <artifactId>maven-dependency-plugin</artifactId>
-        <version>3.1.2</version>
+        <version>3.6.1</version>
         <executions>
           <execution>
             <id>properties</id>
@@ -210,14 +210,14 @@
       <plugin>
         <artifactId>maven-surefire-plugin</artifactId>
-        <version>2.22.2</version>
+        <version>3.2.5</version>
         <configuration>
           <argLine>-Xmx2g</argLine>
         </configuration>
       </plugin>
       <plugin>
         <artifactId>maven-assembly-plugin</artifactId>
-        <version>3.3.0</version>
+        <version>3.6.0</version>
         <configuration>
           <attach>false</attach>
@@ -263,7 +263,7 @@
           <plugin>
             <artifactId>maven-gpg-plugin</artifactId>
-            <version>3.0.1</version>
+            <version>3.1.0</version>
             <executions>
               <execution>
                 <id>sign-artifacts</id>
diff --git a/pom.xml b/pom.xml
index d456d0c6..eb3030b5 100644
--- a/pom.xml
+++ b/pom.xml
@@ -190,7 +190,7 @@
       <plugin>
         <artifactId>maven-compiler-plugin</artifactId>
-        <version>3.8.1</version>
+        <version>3.12.1</version>
         <configuration>
           <source>1.7</source>
           <target>1.7</target>
@@ -202,7 +202,7 @@
       <plugin>
         <artifactId>maven-jar-plugin</artifactId>
-        <version>3.2.0</version>
+        <version>3.3.0</version>
       </plugin>
@@ -226,7 +226,7 @@
       <plugin>
         <groupId>org.moditect</groupId>
         <artifactId>moditect-maven-plugin</artifactId>
-        <version>1.0.0.RC2</version>
+        <version>1.1.0</version>
         <configuration>
           <jvmVersion>9</jvmVersion>
           <overwriteExistingFiles>true</overwriteExistingFiles>
@@ -252,14 +252,14 @@
       <plugin>
         <artifactId>maven-install-plugin</artifactId>
-        <version>3.0.0-M1</version>
+        <version>3.1.1</version>
         <configuration>
           <createChecksum>true</createChecksum>
         </configuration>
       </plugin>
       <plugin>
         <artifactId>maven-source-plugin</artifactId>
-        <version>3.2.1</version>
+        <version>3.3.0</version>
         <executions>
           <execution>
             <id>attach-source</id>
@@ -271,7 +271,7 @@
       <plugin>
         <artifactId>maven-javadoc-plugin</artifactId>
-        <version>3.3.1</version>
+        <version>3.6.3</version>
         <executions>
           <execution>
             <id>attach-javadocs</id>
@@ -312,7 +312,7 @@
       <plugin>
         <groupId>org.sonatype.plugins</groupId>
         <artifactId>nexus-staging-maven-plugin</artifactId>
-        <version>1.6.8</version>
+        <version>1.6.13</version>
         <extensions>true</extensions>
         <configuration>
           <serverId>sonatype-nexus-staging</serverId>
@@ -409,7 +409,7 @@
           <plugin>
             <artifactId>maven-gpg-plugin</artifactId>
-            <version>3.0.1</version>
+            <version>3.1.0</version>
             <executions>
               <execution>
                 <id>sign-artifacts</id>

From b45158501dd49248788209aaa54e753ad4e9ca82 Mon Sep 17 00:00:00 2001
From: Michael Kroneev
Date: Thu, 29 Feb 2024 09:41:37 +0200
Subject: [PATCH 2/2] Replace swscale with avfilter

---
 platform/pom.xml                              |   572 +-
 platform/src/main/assembly/bin.xml            |    86 +-
 platform/src/main/assembly/src.xml            |    42 +-
 platform/src/main/java9/module-info.java      |    32 +-
 .../bytedeco/javacv/FrameConverterTest.java   |   748 +-
 .../org/bytedeco/javacv/FrameFilterTest.java  |   456 +-
 .../FrameGrabberChangingResolutionTest.java   |   554 +-
 .../org/bytedeco/javacv/FrameGrabberTest.java |   974 +-
 .../SeekableByteArrayOutputStreamTest.java    |   492 +-
 samples/AudioSplitMergeHelper.java            |   304 +-
 samples/BioInspiredRetina.java                |   166 +-
 samples/BlobDemo.java                         |   546 +-
 samples/CaffeGooglenet.java                   |   210 +-
 samples/ColoredObjectTrack.java               |   240 +-
 samples/DeepLearningFaceDetection.java        |   214 +-
 samples/DeinterlacedVideoPlayer.java          |   166 +-
 samples/Demo.java                             |   308 +-
 samples/FFmpegStreamingTimeout.java           |   296 +-
 samples/FaceApplet.html                       |    34 +-
 samples/FaceApplet.java                       |   304 +-
 samples/FaceApplet.jnlp                       |    32 +-
 samples/FacePreview.java                      |   580 +-
 samples/FaceRecognizerInVideo.java            |   228 +-
 samples/HoughLines.java                       |   240 +-
 samples/ImageSegmentation.java                |   316 +-
 samples/JavaFxPlayVideoAndAudio.java          |   399 +-
 samples/KazemiFacemarkExample.java            |   144 +-
 samples/LBFFacemarkExampleWithVideo.java      |   178 +-
 samples/MotionDetector.java                   |   202 +-
 samples/OpenCVFaceRecognizer.java             |   176 +-
 samples/OpenCVFeatures2dSerialization.java    |   188 +-
 samples/OpticalFlowDense.java                 |    94 +-
 samples/OpticalFlowTracker.java               |   170 +-
 samples/PacketRecorderTest.java               |   152 +-
 samples/PerspectiveWarpDemo.java              |   170 +-
 samples/PrincipalComponentAnalysis.java       |   366 +-
 samples/RLSA.java                             |   224 +-
 samples/RealSense2DepthMeasuring.java         |   208 +-
 samples/RecordActivity.java                   |  1182 +-
 samples/Similarity.java                       |   202 +-
 samples/Smoother.java                         |    74 +-
 samples/Square.java                           |   614 +-
 samples/TemplateMatching.java                 |   282 +-
 samples/WebcamAndMicrophoneCapture.java       |   470 +-
 samples/YOLONet.java                          |   576 +-
 samples/haarcascade_frontalface_alt2.xml     | 41438 ++++++++++--------
 samples/pom.xml                              |    92 +-
 .../javacv/AndroidFrameConverter.java        |   424 +-
 .../bytedeco/javacv/BaseChildSettings.java   |   138 +-
 .../org/bytedeco/javacv/BaseSettings.java    |   148 +-
 src/main/java/org/bytedeco/javacv/Blobs.java |  1304 +-
 .../java/org/bytedeco/javacv/BufferRing.java |   156 +-
 .../org/bytedeco/javacv/CameraDevice.java    |  1076 +-
 .../org/bytedeco/javacv/CameraSettings.java  |   168 +-
 .../java/org/bytedeco/javacv/CanvasFrame.java |  898 +-
 .../org/bytedeco/javacv/ColorCalibrator.java |   234 +-
 .../bytedeco/javacv/DC1394FrameGrabber.java  |  1232 +-
 .../bytedeco/javacv/FFmpegFrameFilter.java   |  1643 +-
 .../bytedeco/javacv/FFmpegFrameGrabber.java  |  3217 +-
 .../bytedeco/javacv/FFmpegFrameRecorder.java |  2899 +-
 .../bytedeco/javacv/FFmpegLockCallback.java  |   142 +-
 .../bytedeco/javacv/FFmpegLogCallback.java   |   172 +-
 .../javacv/FlyCapture2FrameGrabber.java      |  1224 +-
 .../javacv/FlyCaptureFrameGrabber.java       |   960 +-
 src/main/java/org/bytedeco/javacv/Frame.java |   760 +-
 .../org/bytedeco/javacv/FrameConverter.java  |    98 +-
 .../java/org/bytedeco/javacv/FrameFilter.java |  306 +-
 .../org/bytedeco/javacv/FrameGrabber.java    |  1620 +-
 .../org/bytedeco/javacv/FrameRecorder.java   |   884 +-
 .../org/bytedeco/javacv/GLCanvasFrame.java   |   696 +-
 .../org/bytedeco/javacv/GNImageAligner.java  |  1532 +-
 .../org/bytedeco/javacv/GNImageAlignerCL.java |  870 +-
 .../bytedeco/javacv/GeometricCalibrator.java |  1104 +-
 .../java/org/bytedeco/javacv/HandMouse.java  |   784 +-
 .../bytedeco/javacv/IPCameraFrameGrabber.java |  560 +-
 .../org/bytedeco/javacv/ImageAligner.java    |   232 +-
 .../org/bytedeco/javacv/ImageAlignerCL.java  |    88 +-
 .../org/bytedeco/javacv/ImageTransformer.java |  176 +-
 .../bytedeco/javacv/ImageTransformerCL.java  |   208 +-
 .../bytedeco/javacv/Java2DFrameConverter.java | 1478 +-
 .../org/bytedeco/javacv/Java2DFrameUtils.java |  208 +-
 src/main/java/org/bytedeco/javacv/JavaCV.java | 2004 +-
 .../java/org/bytedeco/javacv/JavaCVCL.java   |  1414 +-
 .../bytedeco/javacv/JavaCvErrorCallback.java |   178 +-
 .../bytedeco/javacv/JavaFXFrameConverter.java |  224 +-
 .../javacv/LeptonicaFrameConverter.java      |   340 +-
 .../java/org/bytedeco/javacv/MarkedPlane.java |  470 +-
 src/main/java/org/bytedeco/javacv/Marker.java |  556 +-
 .../org/bytedeco/javacv/MarkerDetector.java  |   642 +-
 .../org/bytedeco/javacv/ObjectFinder.java    |   824 +-
 .../bytedeco/javacv/OpenCVFrameConverter.java |  502 +-
 .../bytedeco/javacv/OpenCVFrameGrabber.java  |   636 +-
 .../bytedeco/javacv/OpenCVFrameRecorder.java |   284 +-
 .../javacv/OpenKinect2FrameGrabber.java      |   738 +-
 .../javacv/OpenKinectFrameGrabber.java       |   586 +-
 .../bytedeco/javacv/PS3EyeFrameGrabber.java  |   766 +-
 .../java/org/bytedeco/javacv/Parallel.java   |   216 +-
 .../javacv/ProCamColorCalibrator.java        |   670 +-
 .../javacv/ProCamGeometricCalibrator.java    |   998 +-
 .../bytedeco/javacv/ProCamTransformer.java   |  1172 +-
 .../bytedeco/javacv/ProCamTransformerCL.java |   446 +-
 .../javacv/ProjectiveColorTransformer.java   |   762 +-
 .../javacv/ProjectiveColorTransformerCL.java |   302 +-
 .../org/bytedeco/javacv/ProjectiveDevice.java | 1894 +-
 .../javacv/ProjectiveTransformer.java        |  1104 +-
 .../javacv/ProjectiveTransformerCL.java      |   288 +-
 .../org/bytedeco/javacv/ProjectorDevice.java |   786 +-
 .../bytedeco/javacv/ProjectorSettings.java   |   138 +-
 .../javacv/RealSense2FrameGrabber.java       |  1534 +-
 .../javacv/RealSenseFrameGrabber.java        |  1962 +-
 .../javacv/ReflectanceInitializer.java       |   404 +-
 .../java/org/bytedeco/javacv/Seekable.java   |    56 +-
.../javacv/SeekableByteArrayOutputStream.java | 112 +- .../javacv/VideoInputFrameGrabber.java | 376 +- .../java/org/bytedeco/javacv/cvkernels.java | 262 +- src/main/java9/module-info.java | 38 +- .../org/bytedeco/javacv/ImageTransformer.cl | 556 +- .../resources/org/bytedeco/javacv/JavaCV.cl | 256 +- .../org/bytedeco/javacv/ProCamTransformer.cl | 118 +- .../javacv/ProjectiveColorTransformer.cl | 118 +- .../bytedeco/javacv/ProjectiveTransformer.cl | 118 +- 121 files changed, 54159 insertions(+), 54071 deletions(-) diff --git a/platform/pom.xml b/platform/pom.xml index 0dae5f27..293b0c52 100644 --- a/platform/pom.xml +++ b/platform/pom.xml @@ -1,286 +1,286 @@ - - - 4.0.0 - - - org.bytedeco - javacpp-presets - 1.5.11-SNAPSHOT - - - - org.bytedeco - javacv-platform - JavaCV Platform - - - ${project.version} - - - - - org.bytedeco - javacv - ${project.version} - - - - org.bytedeco - javacpp-platform - ${javacpp.version} - - - org.bytedeco - openblas-platform - 0.3.26-${javacpp.version} - - - org.bytedeco - opencv-platform - 4.9.0-${javacpp.version} - - - org.bytedeco - ffmpeg-platform - 6.1.1-${javacpp.version} - - - org.bytedeco - flycapture-platform - 2.13.3.31-1.5.9 - - - org.bytedeco - libdc1394-platform - 2.2.6-1.5.9 - - - org.bytedeco - libfreenect-platform - 0.5.7-1.5.9 - - - org.bytedeco - libfreenect2-platform - 0.2.0-1.5.9 - - - org.bytedeco - librealsense-platform - 1.12.4-1.5.9 - - - org.bytedeco - librealsense2-platform - 2.53.1-1.5.9 - - - org.bytedeco - videoinput-platform - 0.200-1.5.9 - - - org.bytedeco - artoolkitplus-platform - 2.3.1-1.5.9 - - - - - - - - org.bytedeco - leptonica-platform - 1.84.1-${javacpp.version} - - - org.bytedeco - tesseract-platform - 5.3.4-${javacpp.version} - - - - junit - junit - 4.13.2 - test - - - org.bytedeco - opencv-platform-gpu - 4.9.0-${javacpp.version} - test - - - org.bytedeco - ffmpeg-platform-gpl - 6.1.1-${javacpp.version} - test - - - - - - - - maven-jar-plugin - 3.3.0 - - - default-jar - - - - javacv.jar javacpp-platform.jar openblas-platform.jar opencv-platform.jar ffmpeg-platform.jar flycapture-platform.jar libdc1394-platform.jar libfreenect-platform.jar libfreenect2-platform.jar librealsense-platform.jar librealsense2-platform.jar videoinput-platform.jar artoolkitplus-platform.jar flandmark-platform.jar leptonica-platform.jar tesseract-platform.jar - org/bytedeco/javacv/ - - - - - - empty-javadoc-jar - - jar - - - javadoc - - - - empty-sources-jar - - jar - - - sources - - - - - - org.moditect - moditect-maven-plugin - 1.1.0 - - 9 - true - ${project.build.directory} - - - - add-module-infos - package - - add-module-info - - - - - ${project.build.directory}/${project.artifactId}.jar - ${project.basedir}/src/main/java9/module-info.java - - - - - - - - maven-dependency-plugin - 3.6.1 - - - properties - - properties - - - - copy-dependencies - - copy-dependencies - - - ${project.build.directory} - true - - - - - - maven-surefire-plugin - 3.2.5 - - -Xmx2g - - - - maven-assembly-plugin - 3.6.0 - - false - - src/main/assembly/bin.xml - src/main/assembly/src.xml - - - - - package - - single - - - - - - - - - - sign-artifacts - - - performRelease - true - - - - - sonatype-nexus-staging - Sonatype Nexus Staging - https://oss.sonatype.org/service/local/staging/deployByRepositoryId/${stagingRepositoryId}/ - - true - - - false - - - - - - - maven-gpg-plugin - 3.1.0 - - - sign-artifacts - verify - - sign - - - - - ${env.GPG_PASSPHRASE} - false - - - - - - - - + + + 4.0.0 + + + org.bytedeco + javacpp-presets + 1.5.10 + + + + org.bytedeco + 
javacv-platform + JavaCV Platform + + + ${project.version} + + + + + org.bytedeco + javacv + ${project.version} + + + + org.bytedeco + javacpp-platform + ${javacpp.version} + + + org.bytedeco + openblas-platform + 0.3.26-${javacpp.version} + + + org.bytedeco + opencv-platform + 4.9.0-${javacpp.version} + + + org.bytedeco + ffmpeg-platform + 6.1.1-${javacpp.version} + + + org.bytedeco + flycapture-platform + 2.13.3.31-1.5.9 + + + org.bytedeco + libdc1394-platform + 2.2.6-1.5.9 + + + org.bytedeco + libfreenect-platform + 0.5.7-1.5.9 + + + org.bytedeco + libfreenect2-platform + 0.2.0-1.5.9 + + + org.bytedeco + librealsense-platform + 1.12.4-1.5.9 + + + org.bytedeco + librealsense2-platform + 2.53.1-1.5.9 + + + org.bytedeco + videoinput-platform + 0.200-1.5.9 + + + org.bytedeco + artoolkitplus-platform + 2.3.1-1.5.9 + + + + + + + + org.bytedeco + leptonica-platform + 1.84.1-${javacpp.version} + + + org.bytedeco + tesseract-platform + 5.3.4-${javacpp.version} + + + + junit + junit + 4.13.2 + test + + + org.bytedeco + opencv-platform-gpu + 4.9.0-${javacpp.version} + test + + + org.bytedeco + ffmpeg-platform-gpl + 6.1.1-${javacpp.version} + test + + + + + + + + maven-jar-plugin + 3.2.0 + + + default-jar + + + + javacv.jar javacpp-platform.jar openblas-platform.jar opencv-platform.jar ffmpeg-platform.jar flycapture-platform.jar libdc1394-platform.jar libfreenect-platform.jar libfreenect2-platform.jar librealsense-platform.jar librealsense2-platform.jar videoinput-platform.jar artoolkitplus-platform.jar flandmark-platform.jar leptonica-platform.jar tesseract-platform.jar + org/bytedeco/javacv/ + + + + + + empty-javadoc-jar + + jar + + + javadoc + + + + empty-sources-jar + + jar + + + sources + + + + + + org.moditect + moditect-maven-plugin + 1.0.0.RC2 + + 9 + true + ${project.build.directory} + + + + add-module-infos + package + + add-module-info + + + + + ${project.build.directory}/${project.artifactId}.jar + ${project.basedir}/src/main/java9/module-info.java + + + + + + + + maven-dependency-plugin + 3.1.2 + + + properties + + properties + + + + copy-dependencies + + copy-dependencies + + + ${project.build.directory} + true + + + + + + maven-surefire-plugin + 2.22.2 + + -Xmx2g + + + + maven-assembly-plugin + 3.3.0 + + false + + src/main/assembly/bin.xml + src/main/assembly/src.xml + + + + + package + + single + + + + + + + + + + sign-artifacts + + + performRelease + true + + + + + sonatype-nexus-staging + Sonatype Nexus Staging + https://oss.sonatype.org/service/local/staging/deployByRepositoryId/${stagingRepositoryId}/ + + true + + + false + + + + + + + maven-gpg-plugin + 3.0.1 + + + sign-artifacts + verify + + sign + + + + + ${env.GPG_PASSPHRASE} + false + + + + + + + + diff --git a/platform/src/main/assembly/bin.xml b/platform/src/main/assembly/bin.xml index dc7edd08..856eaa7d 100644 --- a/platform/src/main/assembly/bin.xml +++ b/platform/src/main/assembly/bin.xml @@ -1,43 +1,43 @@ - - ${project.version}-bin - - zip - - ${project.artifactId}-${project.version}-bin - - - ${project.basedir}/.. - / - - samples/* - CHANGELOG* - README* - LICENSE* - NOTICE* - - - - ${project.build.directory} - / - - *.jar - - - android*.jar - gluegen*.jar - hamcrest*.jar - junit*.jar - jogl*.jar - jocl*.jar - *-javadoc.jar - *-sources.jar - - 0644 - - - ${project.build.directory}/site - docs - - - + + ${project.version}-bin + + zip + + ${project.artifactId}-${project.version}-bin + + + ${project.basedir}/.. 
+ / + + samples/* + CHANGELOG* + README* + LICENSE* + NOTICE* + + + + ${project.build.directory} + / + + *.jar + + + android*.jar + gluegen*.jar + hamcrest*.jar + junit*.jar + jogl*.jar + jocl*.jar + *-javadoc.jar + *-sources.jar + + 0644 + + + ${project.build.directory}/site + docs + + + diff --git a/platform/src/main/assembly/src.xml b/platform/src/main/assembly/src.xml index 5da118c0..9ada65f7 100644 --- a/platform/src/main/assembly/src.xml +++ b/platform/src/main/assembly/src.xml @@ -1,21 +1,21 @@ - - ${project.version}-src - - zip - - ${project.artifactId}-${project.version} - - - ${project.basedir}/.. - / - true - - - **/target/** - **/cppbuild/** - - - - - + + ${project.version}-src + + zip + + ${project.artifactId}-${project.version} + + + ${project.basedir}/.. + / + true + + + **/target/** + **/cppbuild/** + + + + + diff --git a/platform/src/main/java9/module-info.java b/platform/src/main/java9/module-info.java index d14c3ae3..ba7830e6 100644 --- a/platform/src/main/java9/module-info.java +++ b/platform/src/main/java9/module-info.java @@ -1,16 +1,16 @@ -module org.bytedeco.javacv.platform { - requires transitive org.bytedeco.javacv; - requires org.bytedeco.opencv.platform; - requires org.bytedeco.ffmpeg.platform; - requires org.bytedeco.flycapture.platform; - requires org.bytedeco.libdc1394.platform; - requires org.bytedeco.libfreenect.platform; - requires org.bytedeco.libfreenect2.platform; - requires org.bytedeco.librealsense.platform; - requires org.bytedeco.librealsense2.platform; - requires org.bytedeco.videoinput.platform; - requires org.bytedeco.artoolkitplus.platform; - requires org.bytedeco.flandmark.platform; - requires org.bytedeco.leptonica.platform; - requires org.bytedeco.tesseract.platform; -} +module org.bytedeco.javacv.platform { + requires transitive org.bytedeco.javacv; + requires org.bytedeco.opencv.platform; + requires org.bytedeco.ffmpeg.platform; + requires org.bytedeco.flycapture.platform; + requires org.bytedeco.libdc1394.platform; + requires org.bytedeco.libfreenect.platform; + requires org.bytedeco.libfreenect2.platform; + requires org.bytedeco.librealsense.platform; + requires org.bytedeco.librealsense2.platform; + requires org.bytedeco.videoinput.platform; + requires org.bytedeco.artoolkitplus.platform; + requires org.bytedeco.flandmark.platform; + requires org.bytedeco.leptonica.platform; + requires org.bytedeco.tesseract.platform; +} diff --git a/platform/src/test/java/org/bytedeco/javacv/FrameConverterTest.java b/platform/src/test/java/org/bytedeco/javacv/FrameConverterTest.java index caedd582..539bad6b 100644 --- a/platform/src/test/java/org/bytedeco/javacv/FrameConverterTest.java +++ b/platform/src/test/java/org/bytedeco/javacv/FrameConverterTest.java @@ -1,374 +1,374 @@ -/* - * Copyright (C) 2015-2016 Samuel Audet - * - * Licensed either under the Apache License, Version 2.0, or (at your option) - * under the terms of the GNU General Public License as published by - * the Free Software Foundation (subject to the "Classpath" exception), - * either version 2, or any later version (collectively, the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * http://www.gnu.org/licenses/ - * http://www.gnu.org/software/classpath/license.html - * - * or as provided in the LICENSE.txt file that accompanied this code. 
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bytedeco.javacv; - -import java.awt.image.BufferedImage; -import java.awt.image.DataBufferByte; -import java.awt.image.DataBufferInt; -import java.awt.image.WritableRaster; -import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.IntBuffer; -import org.bytedeco.javacpp.BytePointer; -import org.bytedeco.javacpp.Loader; -import org.bytedeco.javacpp.indexer.Indexer; -import org.bytedeco.javacpp.indexer.UByteIndexer; -import org.junit.Test; - -import org.bytedeco.leptonica.PIX; -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; -import static org.junit.Assert.*; - -/** - * Test cases for FrameConverter classes. Also uses other classes from JavaCV. - * - * @author Samuel Audet - */ -public class FrameConverterTest { - - @Test public void testAndroidFrameConverter() { - System.out.println("AndroidFrameConverter"); - - AndroidFrameConverter converter = new AndroidFrameConverter(); - - int width = 512; - int height = 1024; - byte[] yuvData = new byte[3 * width * height / 2]; - for (int i = 0; i < yuvData.length; i++) { - yuvData[i] = (byte)i; - } - Mat yuvImage = new Mat(3 * height / 2, width, CV_8UC1, new BytePointer(yuvData)); - Mat bgrImage = new Mat(height, width, CV_8UC3); - cvtColor(yuvImage, bgrImage, CV_YUV2BGR_NV21); - Frame bgrFrame = converter.convert(yuvData, width, height); - - UByteIndexer bgrImageIdx = bgrImage.createIndexer(); - UByteIndexer bgrFrameIdx = bgrFrame.createIndexer(); - assertEquals(bgrImageIdx.rows(), bgrFrameIdx.rows()); - assertEquals(bgrImageIdx.cols(), bgrFrameIdx.cols()); - assertEquals(bgrImageIdx.channels(), bgrFrameIdx.channels()); - for (int i = 0; i < bgrImageIdx.rows(); i++) { - for (int j = 0; j < bgrImageIdx.cols(); j++) { - for (int k = 0; k < bgrImageIdx.channels(); k++) { - assertEquals((float)bgrImageIdx.get(i, j, k), (float)bgrFrameIdx.get(i, j, k), 1.0f); - } - } - } - bgrImageIdx.release(); - bgrFrameIdx.release(); - - Frame grayFrame = new Frame(1024 + 1, 768, Frame.DEPTH_UBYTE, 1); - Frame colorFrame = new Frame(640 + 1, 480, Frame.DEPTH_UBYTE, 3); - - UByteIndexer grayFrameIdx = grayFrame.createIndexer(); - for (int i = 0; i < grayFrameIdx.rows(); i++) { - for (int j = 0; j < grayFrameIdx.cols(); j++) { - grayFrameIdx.put(i, j, i + j); - } - } - - UByteIndexer colorFrameIdx = colorFrame.createIndexer(); - for (int i = 0; i < colorFrameIdx.rows(); i++) { - for (int j = 0; j < colorFrameIdx.cols(); j++) { - for (int k = 0; k < colorFrameIdx.channels(); k++) { - colorFrameIdx.put(i, j, k, i + j + k); - } - } - } - - width = grayFrame.imageWidth; - height = grayFrame.imageHeight; - int stride = grayFrame.imageStride; - int rowBytes = width * 4; - ByteBuffer in = (ByteBuffer)grayFrame.image[0]; - ByteBuffer buffer = converter.gray2rgba(in, width, height, stride, rowBytes); - for (int y = 0; y < height; y++) { - for (int x = 0; x < width; x++) { - // GRAY -> RGBA - byte B = in.get(y * stride + x); - assertEquals(buffer.get(y * rowBytes + 4 * x ), B); - assertEquals(buffer.get(y * rowBytes + 4 * x + 1), B); - assertEquals(buffer.get(y * 
rowBytes + 4 * x + 2), B); - assertEquals(buffer.get(y * rowBytes + 4 * x + 3), (byte)0xFF); - } - } - - width = colorFrame.imageWidth; - height = colorFrame.imageHeight; - stride = colorFrame.imageStride; - rowBytes = width * 4; - in = (ByteBuffer)colorFrame.image[0]; - buffer = converter.bgr2rgba(in, width, height, stride, rowBytes); - for (int y = 0; y < height; y++) { - for (int x = 0; x < width; x++) { - // BGR -> RGBA - byte B = in.get(y * stride + 3 * x ); - byte G = in.get(y * stride + 3 * x + 1); - byte R = in.get(y * stride + 3 * x + 2); - assertEquals(buffer.get(y * rowBytes + 4 * x ), R); - assertEquals(buffer.get(y * rowBytes + 4 * x + 1), G); - assertEquals(buffer.get(y * rowBytes + 4 * x + 2), B); - assertEquals(buffer.get(y * rowBytes + 4 * x + 3), (byte)0xFF); - } - } - - colorFrameIdx.release(); - grayFrameIdx.release(); - converter.close(); - colorFrame.close(); - grayFrame.close(); - } - - @Test public void testJava2DFrameConverter() { - System.out.println("Java2DFrameConverter"); - - int[] depths = {Frame.DEPTH_UBYTE, Frame.DEPTH_SHORT, Frame.DEPTH_FLOAT}; - int[] channels = {1, 3, 4}; - for (int i = 0; i < depths.length; i++) { - for (int j = 0; j < channels.length; j++) { - Frame frame = new Frame(640 + 1, 480, depths[i], channels[j]); - Java2DFrameConverter converter = new Java2DFrameConverter(); - - Indexer frameIdx = frame.createIndexer(); - for (int y = 0; y < frameIdx.rows(); y++) { - for (int x = 0; x < frameIdx.cols(); x++) { - for (int z = 0; z < frameIdx.channels(); z++) { - frameIdx.putDouble(new long[] {y, x, z}, y + x + z); - } - } - } - - BufferedImage image = converter.convert(frame); - converter.frame = null; - Frame frame2 = converter.convert(image); - - Indexer frame2Idx = frame2.createIndexer(); - for (int y = 0; y < frameIdx.rows(); y++) { - for (int x = 0; x < frameIdx.cols(); x++) { - for (int z = 0; z < frameIdx.channels(); z++) { - double value = frameIdx.getDouble(y, x, z); - assertEquals(value, frame2Idx.getDouble(y, x, z), 0); - } - } - } - - try { - frame2Idx.getDouble(frameIdx.rows() + 1, frameIdx.cols() + 1); - fail("IndexOutOfBoundsException should have been thrown."); - } catch (IndexOutOfBoundsException e) { } - - frameIdx.release(); - frame2Idx.release(); - converter.close(); - frame.close(); - } - } - - int[] types = {BufferedImage.TYPE_INT_RGB, BufferedImage.TYPE_INT_ARGB, - BufferedImage.TYPE_INT_ARGB_PRE, BufferedImage.TYPE_INT_BGR}; - for (int i = 0; i < types.length; i++) { - BufferedImage image = new BufferedImage(640 + 1, 480, types[i]); - Java2DFrameConverter converter = new Java2DFrameConverter(); - - WritableRaster raster = image.getRaster(); - int[] array = ((DataBufferInt)raster.getDataBuffer()).getData(); - for (int j = 0; j < array.length; j++) { - array[j] = j; - } - - Frame frame = converter.convert(image); - converter.bufferedImage = null; - BufferedImage image2 = converter.convert(frame); - - WritableRaster raster2 = image2.getRaster(); - byte[] array2 = ((DataBufferByte)raster2.getDataBuffer()).getData(); - for (int j = 0; j < array.length; j++) { - int n = ((array2[4 * j ] & 0xFF) << 24) | ((array2[4 * j + 1] & 0xFF) << 16) - | ((array2[4 * j + 2] & 0xFF) << 8) | (array2[4 * j + 3] & 0xFF); - assertEquals(array[j], n); - } - converter.close(); - } - } - - @Test public void testOpenCVFrameConverter() { - System.out.println("OpenCVFrameConverter"); - Loader.load(org.bytedeco.opencv.opencv_java.class); - - for (int depth = 8; depth <= 64; depth *= 2) { - assertEquals(depth, 
OpenCVFrameConverter.getFrameDepth(OpenCVFrameConverter.getIplImageDepth(depth))); - assertEquals(depth, OpenCVFrameConverter.getFrameDepth(OpenCVFrameConverter.getMatDepth(depth))); - if (depth < 64) { - assertEquals(-depth, OpenCVFrameConverter.getFrameDepth(OpenCVFrameConverter.getIplImageDepth(-depth))); - assertEquals(-depth, OpenCVFrameConverter.getFrameDepth(OpenCVFrameConverter.getMatDepth(-depth))); - } - } - - Frame frame = new Frame(640 + 1, 480, Frame.DEPTH_UBYTE, 3); - OpenCVFrameConverter.ToIplImage converter1 = new OpenCVFrameConverter.ToIplImage(); - OpenCVFrameConverter.ToMat converter2 = new OpenCVFrameConverter.ToMat(); - OpenCVFrameConverter.ToOrgOpenCvCoreMat converter3 = new OpenCVFrameConverter.ToOrgOpenCvCoreMat(); - - UByteIndexer frameIdx = frame.createIndexer(); - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - frameIdx.put(i, j, k, i + j + k); - } - } - } - - IplImage image = converter1.convert(frame); - Mat mat = converter2.convert(frame); - final org.opencv.core.Mat cvmat = converter3.convert(frame); - - converter1.frame = null; - converter2.frame = null; - converter3.frame = null; - Frame frame1 = converter1.convert(image); - Frame frame2 = converter2.convert(mat); - Frame frame3 = converter3.convert(cvmat); - assertEquals(frame2.opaque, mat); - assertEquals(frame3.opaque, cvmat); - - Mat mat2 = new Mat(mat.rows(), mat.cols(), mat.type(), mat.data(), mat.step()); - org.opencv.core.Mat cvmat2 = new org.opencv.core.Mat(cvmat.rows(), cvmat.cols(), cvmat.type(), - new BytePointer() { { address = cvmat.dataAddr(); } }.capacity(cvmat.rows() * cvmat.cols() * cvmat.elemSize()).asByteBuffer(), - cvmat.step1() * cvmat.elemSize1()); - assertNotEquals(mat, mat2); - assertNotEquals(cvmat, cvmat2); - - frame2 = converter2.convert(mat2); - frame3 = converter3.convert(cvmat2); - assertEquals(frame2.opaque, mat2); - assertEquals(frame3.opaque, cvmat2); - assertEquals(frame3.imageStride, cvmat2.step1() * cvmat2.elemSize1()); - - UByteIndexer frame1Idx = frame1.createIndexer(); - UByteIndexer frame2Idx = frame2.createIndexer(); - UByteIndexer frame3Idx = frame3.createIndexer(); - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - int b = frameIdx.get(i, j, k); - assertEquals(b, frame1Idx.get(i, j, k)); - assertEquals(b, frame2Idx.get(i, j, k)); - assertEquals(b, frame3Idx.get(i, j, k)); - } - } - } - - try { - frame1Idx.get(frameIdx.rows() + 1, frameIdx.cols() + 1); - fail("IndexOutOfBoundsException should have been thrown."); - } catch (IndexOutOfBoundsException e) { } - - try { - frame2Idx.get(frameIdx.rows() + 1, frameIdx.cols() + 1); - fail("IndexOutOfBoundsException should have been thrown."); - } catch (IndexOutOfBoundsException e) { } - - try { - frame3Idx.get(frameIdx.rows() + 1, frameIdx.cols() + 1); - fail("IndexOutOfBoundsException should have been thrown."); - } catch (IndexOutOfBoundsException e) { } - - frameIdx.release(); - frame1Idx.release(); - frame2Idx.release(); - frame3Idx.release(); - converter1.close(); - converter2.close(); - converter3.close(); - frame.close(); - } - - @Test public void testLeptonicaFrameConverter() { - System.out.println("LeptonicaFrameConverter"); - - Frame frame = new Frame(640 + 1, 480, Frame.DEPTH_UBYTE, 3); - LeptonicaFrameConverter converter = new LeptonicaFrameConverter(); - - UByteIndexer frameIdx = frame.createIndexer(); - for (int i = 0; i < 
frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - frameIdx.put(i, j, k, i + j + k); - } - } - } - - PIX pix = converter.convert(frame); - - converter.frame = null; - Frame frame1 = converter.convert(pix); -// assertEquals(frame1.opaque, pix); - - PIX pix2 = PIX.createHeader(pix.w(), pix.h(), pix.d()).data(pix.data()).wpl(pix.wpl()); - assertNotEquals(pix, pix2); - - Frame frame2 = converter.convert(pix2); -// assertEquals(frame2.opaque, pix2); - - IntBuffer frameBuf = ((ByteBuffer)frame.image[0].position(0)).asIntBuffer(); - IntBuffer frame1Buf = ((ByteBuffer)frame1.image[0].position(0)).asIntBuffer(); - IntBuffer frame2Buf = ((ByteBuffer)frame2.image[0].position(0)).asIntBuffer(); - IntBuffer pixBuf = pix.createBuffer().order(ByteOrder.BIG_ENDIAN).asIntBuffer(); - IntBuffer pix2Buf = pix2.createBuffer().order(ByteOrder.BIG_ENDIAN).asIntBuffer(); - for (int i = 0; i < frameBuf.capacity(); i++) { - int j = frameBuf.get(i); - assertEquals(j, frame1Buf.get(i)); - assertEquals(j, frame2Buf.get(i)); - assertEquals(j, pixBuf.get(i)); - assertEquals(j, pix2Buf.get(i)); - } - - try { - frame1Buf.get(frameBuf.capacity() + 1); - fail("IndexOutOfBoundsException should have been thrown."); - } catch (IndexOutOfBoundsException e) { } - - try { - frame2Buf.get(frameBuf.capacity() + 1); - fail("IndexOutOfBoundsException should have been thrown."); - } catch (IndexOutOfBoundsException e) { } - - try { - pixBuf.get(frameBuf.capacity() + 1); - fail("IndexOutOfBoundsException should have been thrown."); - } catch (IndexOutOfBoundsException e) { } - - try { - pix2Buf.get(frameBuf.capacity() + 1); - fail("IndexOutOfBoundsException should have been thrown."); - } catch (IndexOutOfBoundsException e) { } - - pix2.deallocate(); - pix.deallocate(); - converter.close(); - frame.close(); - } -} +/* + * Copyright (C) 2015-2016 Samuel Audet + * + * Licensed either under the Apache License, Version 2.0, or (at your option) + * under the terms of the GNU General Public License as published by + * the Free Software Foundation (subject to the "Classpath" exception), + * either version 2, or any later version (collectively, the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.gnu.org/licenses/ + * http://www.gnu.org/software/classpath/license.html + * + * or as provided in the LICENSE.txt file that accompanied this code. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bytedeco.javacv; + +import java.awt.image.BufferedImage; +import java.awt.image.DataBufferByte; +import java.awt.image.DataBufferInt; +import java.awt.image.WritableRaster; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.IntBuffer; +import org.bytedeco.javacpp.BytePointer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.indexer.Indexer; +import org.bytedeco.javacpp.indexer.UByteIndexer; +import org.junit.Test; + +import org.bytedeco.leptonica.PIX; +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; +import static org.junit.Assert.*; + +/** + * Test cases for FrameConverter classes. Also uses other classes from JavaCV. + * + * @author Samuel Audet + */ +public class FrameConverterTest { + + @Test public void testAndroidFrameConverter() { + System.out.println("AndroidFrameConverter"); + + AndroidFrameConverter converter = new AndroidFrameConverter(); + + int width = 512; + int height = 1024; + byte[] yuvData = new byte[3 * width * height / 2]; + for (int i = 0; i < yuvData.length; i++) { + yuvData[i] = (byte)i; + } + Mat yuvImage = new Mat(3 * height / 2, width, CV_8UC1, new BytePointer(yuvData)); + Mat bgrImage = new Mat(height, width, CV_8UC3); + cvtColor(yuvImage, bgrImage, CV_YUV2BGR_NV21); + Frame bgrFrame = converter.convert(yuvData, width, height); + + UByteIndexer bgrImageIdx = bgrImage.createIndexer(); + UByteIndexer bgrFrameIdx = bgrFrame.createIndexer(); + assertEquals(bgrImageIdx.rows(), bgrFrameIdx.rows()); + assertEquals(bgrImageIdx.cols(), bgrFrameIdx.cols()); + assertEquals(bgrImageIdx.channels(), bgrFrameIdx.channels()); + for (int i = 0; i < bgrImageIdx.rows(); i++) { + for (int j = 0; j < bgrImageIdx.cols(); j++) { + for (int k = 0; k < bgrImageIdx.channels(); k++) { + assertEquals((float)bgrImageIdx.get(i, j, k), (float)bgrFrameIdx.get(i, j, k), 1.0f); + } + } + } + bgrImageIdx.release(); + bgrFrameIdx.release(); + + Frame grayFrame = new Frame(1024 + 1, 768, Frame.DEPTH_UBYTE, 1); + Frame colorFrame = new Frame(640 + 1, 480, Frame.DEPTH_UBYTE, 3); + + UByteIndexer grayFrameIdx = grayFrame.createIndexer(); + for (int i = 0; i < grayFrameIdx.rows(); i++) { + for (int j = 0; j < grayFrameIdx.cols(); j++) { + grayFrameIdx.put(i, j, i + j); + } + } + + UByteIndexer colorFrameIdx = colorFrame.createIndexer(); + for (int i = 0; i < colorFrameIdx.rows(); i++) { + for (int j = 0; j < colorFrameIdx.cols(); j++) { + for (int k = 0; k < colorFrameIdx.channels(); k++) { + colorFrameIdx.put(i, j, k, i + j + k); + } + } + } + + width = grayFrame.imageWidth; + height = grayFrame.imageHeight; + int stride = grayFrame.imageStride; + int rowBytes = width * 4; + ByteBuffer in = (ByteBuffer)grayFrame.image[0]; + ByteBuffer buffer = converter.gray2rgba(in, width, height, stride, rowBytes); + for (int y = 0; y < height; y++) { + for (int x = 0; x < width; x++) { + // GRAY -> RGBA + byte B = in.get(y * stride + x); + assertEquals(buffer.get(y * rowBytes + 4 * x ), B); + assertEquals(buffer.get(y * rowBytes + 4 * x + 1), B); + assertEquals(buffer.get(y * rowBytes + 4 * x + 2), B); + assertEquals(buffer.get(y * rowBytes + 4 * x + 3), (byte)0xFF); + } + } + + width = colorFrame.imageWidth; + height = colorFrame.imageHeight; + stride = colorFrame.imageStride; + rowBytes = width * 4; + in = (ByteBuffer)colorFrame.image[0]; + buffer = converter.bgr2rgba(in, width, height, stride, 
rowBytes); + for (int y = 0; y < height; y++) { + for (int x = 0; x < width; x++) { + // BGR -> RGBA + byte B = in.get(y * stride + 3 * x ); + byte G = in.get(y * stride + 3 * x + 1); + byte R = in.get(y * stride + 3 * x + 2); + assertEquals(buffer.get(y * rowBytes + 4 * x ), R); + assertEquals(buffer.get(y * rowBytes + 4 * x + 1), G); + assertEquals(buffer.get(y * rowBytes + 4 * x + 2), B); + assertEquals(buffer.get(y * rowBytes + 4 * x + 3), (byte)0xFF); + } + } + + colorFrameIdx.release(); + grayFrameIdx.release(); + converter.close(); + colorFrame.close(); + grayFrame.close(); + } + + @Test public void testJava2DFrameConverter() { + System.out.println("Java2DFrameConverter"); + + int[] depths = {Frame.DEPTH_UBYTE, Frame.DEPTH_SHORT, Frame.DEPTH_FLOAT}; + int[] channels = {1, 3, 4}; + for (int i = 0; i < depths.length; i++) { + for (int j = 0; j < channels.length; j++) { + Frame frame = new Frame(640 + 1, 480, depths[i], channels[j]); + Java2DFrameConverter converter = new Java2DFrameConverter(); + + Indexer frameIdx = frame.createIndexer(); + for (int y = 0; y < frameIdx.rows(); y++) { + for (int x = 0; x < frameIdx.cols(); x++) { + for (int z = 0; z < frameIdx.channels(); z++) { + frameIdx.putDouble(new long[] {y, x, z}, y + x + z); + } + } + } + + BufferedImage image = converter.convert(frame); + converter.frame = null; + Frame frame2 = converter.convert(image); + + Indexer frame2Idx = frame2.createIndexer(); + for (int y = 0; y < frameIdx.rows(); y++) { + for (int x = 0; x < frameIdx.cols(); x++) { + for (int z = 0; z < frameIdx.channels(); z++) { + double value = frameIdx.getDouble(y, x, z); + assertEquals(value, frame2Idx.getDouble(y, x, z), 0); + } + } + } + + try { + frame2Idx.getDouble(frameIdx.rows() + 1, frameIdx.cols() + 1); + fail("IndexOutOfBoundsException should have been thrown."); + } catch (IndexOutOfBoundsException e) { } + + frameIdx.release(); + frame2Idx.release(); + converter.close(); + frame.close(); + } + } + + int[] types = {BufferedImage.TYPE_INT_RGB, BufferedImage.TYPE_INT_ARGB, + BufferedImage.TYPE_INT_ARGB_PRE, BufferedImage.TYPE_INT_BGR}; + for (int i = 0; i < types.length; i++) { + BufferedImage image = new BufferedImage(640 + 1, 480, types[i]); + Java2DFrameConverter converter = new Java2DFrameConverter(); + + WritableRaster raster = image.getRaster(); + int[] array = ((DataBufferInt)raster.getDataBuffer()).getData(); + for (int j = 0; j < array.length; j++) { + array[j] = j; + } + + Frame frame = converter.convert(image); + converter.bufferedImage = null; + BufferedImage image2 = converter.convert(frame); + + WritableRaster raster2 = image2.getRaster(); + byte[] array2 = ((DataBufferByte)raster2.getDataBuffer()).getData(); + for (int j = 0; j < array.length; j++) { + int n = ((array2[4 * j ] & 0xFF) << 24) | ((array2[4 * j + 1] & 0xFF) << 16) + | ((array2[4 * j + 2] & 0xFF) << 8) | (array2[4 * j + 3] & 0xFF); + assertEquals(array[j], n); + } + converter.close(); + } + } + + @Test public void testOpenCVFrameConverter() { + System.out.println("OpenCVFrameConverter"); + Loader.load(org.bytedeco.opencv.opencv_java.class); + + for (int depth = 8; depth <= 64; depth *= 2) { + assertEquals(depth, OpenCVFrameConverter.getFrameDepth(OpenCVFrameConverter.getIplImageDepth(depth))); + assertEquals(depth, OpenCVFrameConverter.getFrameDepth(OpenCVFrameConverter.getMatDepth(depth))); + if (depth < 64) { + assertEquals(-depth, OpenCVFrameConverter.getFrameDepth(OpenCVFrameConverter.getIplImageDepth(-depth))); + assertEquals(-depth, 
OpenCVFrameConverter.getFrameDepth(OpenCVFrameConverter.getMatDepth(-depth))); + } + } + + Frame frame = new Frame(640 + 1, 480, Frame.DEPTH_UBYTE, 3); + OpenCVFrameConverter.ToIplImage converter1 = new OpenCVFrameConverter.ToIplImage(); + OpenCVFrameConverter.ToMat converter2 = new OpenCVFrameConverter.ToMat(); + OpenCVFrameConverter.ToOrgOpenCvCoreMat converter3 = new OpenCVFrameConverter.ToOrgOpenCvCoreMat(); + + UByteIndexer frameIdx = frame.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + frameIdx.put(i, j, k, i + j + k); + } + } + } + + IplImage image = converter1.convert(frame); + Mat mat = converter2.convert(frame); + final org.opencv.core.Mat cvmat = converter3.convert(frame); + + converter1.frame = null; + converter2.frame = null; + converter3.frame = null; + Frame frame1 = converter1.convert(image); + Frame frame2 = converter2.convert(mat); + Frame frame3 = converter3.convert(cvmat); + assertEquals(frame2.opaque, mat); + assertEquals(frame3.opaque, cvmat); + + Mat mat2 = new Mat(mat.rows(), mat.cols(), mat.type(), mat.data(), mat.step()); + org.opencv.core.Mat cvmat2 = new org.opencv.core.Mat(cvmat.rows(), cvmat.cols(), cvmat.type(), + new BytePointer() { { address = cvmat.dataAddr(); } }.capacity(cvmat.rows() * cvmat.cols() * cvmat.elemSize()).asByteBuffer(), + cvmat.step1() * cvmat.elemSize1()); + assertNotEquals(mat, mat2); + assertNotEquals(cvmat, cvmat2); + + frame2 = converter2.convert(mat2); + frame3 = converter3.convert(cvmat2); + assertEquals(frame2.opaque, mat2); + assertEquals(frame3.opaque, cvmat2); + assertEquals(frame3.imageStride, cvmat2.step1() * cvmat2.elemSize1()); + + UByteIndexer frame1Idx = frame1.createIndexer(); + UByteIndexer frame2Idx = frame2.createIndexer(); + UByteIndexer frame3Idx = frame3.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + int b = frameIdx.get(i, j, k); + assertEquals(b, frame1Idx.get(i, j, k)); + assertEquals(b, frame2Idx.get(i, j, k)); + assertEquals(b, frame3Idx.get(i, j, k)); + } + } + } + + try { + frame1Idx.get(frameIdx.rows() + 1, frameIdx.cols() + 1); + fail("IndexOutOfBoundsException should have been thrown."); + } catch (IndexOutOfBoundsException e) { } + + try { + frame2Idx.get(frameIdx.rows() + 1, frameIdx.cols() + 1); + fail("IndexOutOfBoundsException should have been thrown."); + } catch (IndexOutOfBoundsException e) { } + + try { + frame3Idx.get(frameIdx.rows() + 1, frameIdx.cols() + 1); + fail("IndexOutOfBoundsException should have been thrown."); + } catch (IndexOutOfBoundsException e) { } + + frameIdx.release(); + frame1Idx.release(); + frame2Idx.release(); + frame3Idx.release(); + converter1.close(); + converter2.close(); + converter3.close(); + frame.close(); + } + + @Test public void testLeptonicaFrameConverter() { + System.out.println("LeptonicaFrameConverter"); + + Frame frame = new Frame(640 + 1, 480, Frame.DEPTH_UBYTE, 3); + LeptonicaFrameConverter converter = new LeptonicaFrameConverter(); + + UByteIndexer frameIdx = frame.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + frameIdx.put(i, j, k, i + j + k); + } + } + } + + PIX pix = converter.convert(frame); + + converter.frame = null; + Frame frame1 = converter.convert(pix); +// assertEquals(frame1.opaque, pix); + + PIX pix2 = 
PIX.createHeader(pix.w(), pix.h(), pix.d()).data(pix.data()).wpl(pix.wpl()); + assertNotEquals(pix, pix2); + + Frame frame2 = converter.convert(pix2); +// assertEquals(frame2.opaque, pix2); + + IntBuffer frameBuf = ((ByteBuffer)frame.image[0].position(0)).asIntBuffer(); + IntBuffer frame1Buf = ((ByteBuffer)frame1.image[0].position(0)).asIntBuffer(); + IntBuffer frame2Buf = ((ByteBuffer)frame2.image[0].position(0)).asIntBuffer(); + IntBuffer pixBuf = pix.createBuffer().order(ByteOrder.BIG_ENDIAN).asIntBuffer(); + IntBuffer pix2Buf = pix2.createBuffer().order(ByteOrder.BIG_ENDIAN).asIntBuffer(); + for (int i = 0; i < frameBuf.capacity(); i++) { + int j = frameBuf.get(i); + assertEquals(j, frame1Buf.get(i)); + assertEquals(j, frame2Buf.get(i)); + assertEquals(j, pixBuf.get(i)); + assertEquals(j, pix2Buf.get(i)); + } + + try { + frame1Buf.get(frameBuf.capacity() + 1); + fail("IndexOutOfBoundsException should have been thrown."); + } catch (IndexOutOfBoundsException e) { } + + try { + frame2Buf.get(frameBuf.capacity() + 1); + fail("IndexOutOfBoundsException should have been thrown."); + } catch (IndexOutOfBoundsException e) { } + + try { + pixBuf.get(frameBuf.capacity() + 1); + fail("IndexOutOfBoundsException should have been thrown."); + } catch (IndexOutOfBoundsException e) { } + + try { + pix2Buf.get(frameBuf.capacity() + 1); + fail("IndexOutOfBoundsException should have been thrown."); + } catch (IndexOutOfBoundsException e) { } + + pix2.deallocate(); + pix.deallocate(); + converter.close(); + frame.close(); + } +} diff --git a/platform/src/test/java/org/bytedeco/javacv/FrameFilterTest.java b/platform/src/test/java/org/bytedeco/javacv/FrameFilterTest.java index 89fbce7c..1c439da9 100644 --- a/platform/src/test/java/org/bytedeco/javacv/FrameFilterTest.java +++ b/platform/src/test/java/org/bytedeco/javacv/FrameFilterTest.java @@ -1,228 +1,228 @@ -/* - * Copyright (C) 2018 Samuel Audet - * - * Licensed either under the Apache License, Version 2.0, or (at your option) - * under the terms of the GNU General Public License as published by - * the Free Software Foundation (subject to the "Classpath" exception), - * either version 2, or any later version (collectively, the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * http://www.gnu.org/licenses/ - * http://www.gnu.org/software/classpath/license.html - * - * or as provided in the LICENSE.txt file that accompanied this code. - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bytedeco.javacv; - -import java.io.File; -import java.nio.ByteBuffer; -import java.nio.ShortBuffer; -import org.bytedeco.javacpp.Loader; -import org.junit.Test; - -import static org.bytedeco.ffmpeg.global.avcodec.*; -import static org.bytedeco.ffmpeg.global.avutil.*; -import static org.junit.Assert.*; - -/** - * Test cases for FrameFilter classes. Also uses other classes from JavaCV. 
- * - * @author Samuel Audet - */ -public class FrameFilterTest { - - @Test - public void testFFmpegFrameFilter() { - System.out.println("FFmpegFrameFilter"); - - File tempFile = new File(Loader.getTempDir(), "test.mov"); - try { - FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(tempFile, 800, 600, 2); - recorder.setFormat("mov"); - recorder.setPixelFormat(AV_PIX_FMT_YUV420P); - recorder.setFrameRate(30); - recorder.setVideoCodec(AV_CODEC_ID_H264); - recorder.setVideoQuality(10); - recorder.setSampleFormat(AV_SAMPLE_FMT_FLTP); - recorder.setSampleRate(48000); - recorder.setAudioCodec(AV_CODEC_ID_AAC); - recorder.setAudioQuality(10); - recorder.start(); - - int n = 1000; - Frame frame = new Frame(800, 600, Frame.DEPTH_UBYTE, 3); - for (int i = 0; i < n; i++) { - recorder.record(frame); - } - Frame audioFrame = new Frame(); - ShortBuffer audioBuffer = ShortBuffer.allocate(48000 * 2 * n / 30); - audioFrame.sampleRate = 48000; - audioFrame.audioChannels = 2; - audioFrame.samples = new ShortBuffer[] {audioBuffer}; - recorder.record(audioFrame); - recorder.stop(); - recorder.release(); - - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(tempFile); - grabber.setSampleMode(FrameGrabber.SampleMode.FLOAT); - grabber.start(); - - FFmpegFrameFilter filter = new FFmpegFrameFilter( - "scale=400x300,transpose=cclock_flip,format=gray", - "volume=0.5,aformat=sample_fmts=u8:channel_layouts=mono", - grabber.getImageWidth(), grabber.getImageHeight(), grabber.getAudioChannels()); - filter.setPixelFormat(grabber.getPixelFormat()); - filter.setSampleFormat(grabber.getSampleFormat()); - filter.setFrameRate(grabber.getFrameRate()); - filter.setSampleRate(grabber.getSampleRate()); - filter.start(); - - FFmpegFrameFilter nullFilter = new FFmpegFrameFilter(null, null, 0, 0, 0); - nullFilter.start(); - - int a = 0, b = 0, c = 0, d = 0; - Frame frame2; - while ((frame2 = grabber.grab()) != null) { - if (frame2.image != null) { - a++; - } - if (frame2.samples != null) { - b++; - } - filter.push(frame2); - Frame frame3; - while ((frame3 = filter.pull()) != null) { - if (frame3.image != null) { - c++; - assertEquals(300, frame3.imageWidth); - assertEquals(400, frame3.imageHeight); - assertEquals(1, frame3.imageChannels); - } - if (frame3.samples != null) { - d++; - assertEquals(1, frame3.audioChannels); - assertEquals(1, frame3.samples.length); - assertTrue(frame3.samples[0] instanceof ByteBuffer); - assertEquals(frame2.samples.length, frame3.samples.length); - assertEquals(frame2.samples[0].limit() / 2, frame3.samples[0].limit()); - } - assertEquals(frame2.timestamp, frame3.timestamp); - } - nullFilter.push(frame2); - assertEquals(frame2, nullFilter.pull()); - } - filter.push(null); - assertEquals(null, filter.pull()); - assertEquals(a, c); - assertEquals(b, d); - assertEquals(null, grabber.grab()); - filter.stop(); - filter.release(); - grabber.restart(); - grabber.stop(); - grabber.release(); - frame.close(); - } catch (Exception e) { - e.printStackTrace(); - fail("Exception should not have been thrown: " + e); - } finally { - tempFile.delete(); - } - } - - @Test - public void testFFmpegFrameFilterMultipleInputs() { - System.out.println("FFmpegFrameFilterMultipleInputs"); - - File tempFile = new File(Loader.getTempDir(), "test.avi"); - try { - FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(tempFile, 320, 200, 2); - recorder.setVideoCodec(AV_CODEC_ID_VP8); - recorder.setAudioCodec(AV_CODEC_ID_VORBIS); - recorder.start(); - - int n = 1000; - Frame frame = new Frame(320, 200, Frame.DEPTH_UBYTE, 3); - 
for (int i = 0; i < n; i++) { - recorder.record(frame); - } - Frame audioFrame = new Frame(); - ShortBuffer audioBuffer = ShortBuffer.allocate(8000 * 2 * n / 30); - audioFrame.sampleRate = 8000; - audioFrame.audioChannels = 2; - audioFrame.samples = new ShortBuffer[] {audioBuffer}; - recorder.record(audioFrame); - recorder.stop(); - recorder.release(); - - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(tempFile); - grabber.start(); - - FFmpegFrameFilter filter = new FFmpegFrameFilter( - "[0:v][1:v]hstack=inputs=2[v]", - "[0:a][1:a]amerge[a]", - grabber.getImageWidth(), grabber.getImageHeight(), grabber.getAudioChannels()); - filter.setPixelFormat(grabber.getPixelFormat()); - filter.setSampleFormat(grabber.getSampleFormat()); - filter.setVideoInputs(2); - filter.setAudioInputs(2); - filter.start(); - - int a = 0, b = 0, c = 0, d = 0; - Frame frame2; - while ((frame2 = grabber.grab()) != null) { - if (frame2.image != null) { - a++; - } - if (frame2.samples != null) { - b++; - } - filter.push(0, frame2); - filter.push(1, frame2); - Frame frame3; - while ((frame3 = filter.pull()) != null) { - if (frame3.image != null) { - c++; - assertEquals(640, frame3.imageWidth); - assertEquals(200, frame3.imageHeight); - assertEquals(3, frame3.imageChannels); - } - if (frame3.samples != null) { - d++; - assertEquals(2, frame3.audioChannels); - assertEquals(1, frame3.samples.length); - assertTrue(frame3.samples[0] instanceof ByteBuffer); - assertEquals(frame2.samples.length, frame3.samples.length); - assertEquals(frame2.samples[0].limit(), frame3.samples[0].limit()); - } - } - } - filter.push(0, null); - filter.push(1, null); - assertEquals(null, filter.pull()); - assertEquals(a, c); - assertEquals(b, d); - assertEquals(null, grabber.grab()); - filter.stop(); - filter.release(); - grabber.restart(); - grabber.stop(); - grabber.release(); - frame.close(); - } catch (Exception e) { - e.printStackTrace(); - fail("Exception should not have been thrown: " + e); - } finally { - tempFile.delete(); - } - } - -} +/* + * Copyright (C) 2018 Samuel Audet + * + * Licensed either under the Apache License, Version 2.0, or (at your option) + * under the terms of the GNU General Public License as published by + * the Free Software Foundation (subject to the "Classpath" exception), + * either version 2, or any later version (collectively, the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.gnu.org/licenses/ + * http://www.gnu.org/software/classpath/license.html + * + * or as provided in the LICENSE.txt file that accompanied this code. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bytedeco.javacv; + +import java.io.File; +import java.nio.ByteBuffer; +import java.nio.ShortBuffer; +import org.bytedeco.javacpp.Loader; +import org.junit.Test; + +import static org.bytedeco.ffmpeg.global.avcodec.*; +import static org.bytedeco.ffmpeg.global.avutil.*; +import static org.junit.Assert.*; + +/** + * Test cases for FrameFilter classes. Also uses other classes from JavaCV. 
+ * + * @author Samuel Audet + */ +public class FrameFilterTest { + + @Test + public void testFFmpegFrameFilter() { + System.out.println("FFmpegFrameFilter"); + + File tempFile = new File(Loader.getTempDir(), "test.mov"); + try { + FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(tempFile, 800, 600, 2); + recorder.setFormat("mov"); + recorder.setPixelFormat(AV_PIX_FMT_YUV420P); + recorder.setFrameRate(30); + recorder.setVideoCodec(AV_CODEC_ID_H264); + recorder.setVideoQuality(10); + recorder.setSampleFormat(AV_SAMPLE_FMT_FLTP); + recorder.setSampleRate(48000); + recorder.setAudioCodec(AV_CODEC_ID_AAC); + recorder.setAudioQuality(10); + recorder.start(); + + int n = 1000; + Frame frame = new Frame(800, 600, Frame.DEPTH_UBYTE, 3); + for (int i = 0; i < n; i++) { + recorder.record(frame); + } + Frame audioFrame = new Frame(); + ShortBuffer audioBuffer = ShortBuffer.allocate(48000 * 2 * n / 30); + audioFrame.sampleRate = 48000; + audioFrame.audioChannels = 2; + audioFrame.samples = new ShortBuffer[] {audioBuffer}; + recorder.record(audioFrame); + recorder.stop(); + recorder.release(); + + FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(tempFile); + grabber.setSampleMode(FrameGrabber.SampleMode.FLOAT); + grabber.start(); + + FFmpegFrameFilter filter = new FFmpegFrameFilter( + "scale=400x300,transpose=cclock_flip,format=gray", + "volume=0.5,aformat=sample_fmts=u8:channel_layouts=mono", + grabber.getImageWidth(), grabber.getImageHeight(), grabber.getAudioChannels()); + filter.setPixelFormat(grabber.getPixelFormat()); + filter.setSampleFormat(grabber.getSampleFormat()); + filter.setFrameRate(grabber.getFrameRate()); + filter.setSampleRate(grabber.getSampleRate()); + filter.start(); + + FFmpegFrameFilter nullFilter = new FFmpegFrameFilter(null, null, 0, 0, 0); + nullFilter.start(); + + int a = 0, b = 0, c = 0, d = 0; + Frame frame2; + while ((frame2 = grabber.grab()) != null) { + if (frame2.image != null) { + a++; + } + if (frame2.samples != null) { + b++; + } + filter.push(frame2); + Frame frame3; + while ((frame3 = filter.pull()) != null) { + if (frame3.image != null) { + c++; + assertEquals(300, frame3.imageWidth); + assertEquals(400, frame3.imageHeight); + assertEquals(1, frame3.imageChannels); + } + if (frame3.samples != null) { + d++; + assertEquals(1, frame3.audioChannels); + assertEquals(1, frame3.samples.length); + assertTrue(frame3.samples[0] instanceof ByteBuffer); + assertEquals(frame2.samples.length, frame3.samples.length); + assertEquals(frame2.samples[0].limit() / 2, frame3.samples[0].limit()); + } + assertEquals(frame2.timestamp, frame3.timestamp); + } + nullFilter.push(frame2); + assertEquals(frame2, nullFilter.pull()); + } + filter.push(null); + assertEquals(null, filter.pull()); + assertEquals(a, c); + assertEquals(b, d); + assertEquals(null, grabber.grab()); + filter.stop(); + filter.release(); + grabber.restart(); + grabber.stop(); + grabber.release(); + frame.close(); + } catch (Exception e) { + e.printStackTrace(); + fail("Exception should not have been thrown: " + e); + } finally { + tempFile.delete(); + } + } + + @Test + public void testFFmpegFrameFilterMultipleInputs() { + System.out.println("FFmpegFrameFilterMultipleInputs"); + + File tempFile = new File(Loader.getTempDir(), "test.avi"); + try { + FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(tempFile, 320, 200, 2); + recorder.setVideoCodec(AV_CODEC_ID_VP8); + recorder.setAudioCodec(AV_CODEC_ID_VORBIS); + recorder.start(); + + int n = 1000; + Frame frame = new Frame(320, 200, Frame.DEPTH_UBYTE, 3); + 
for (int i = 0; i < n; i++) { + recorder.record(frame); + } + Frame audioFrame = new Frame(); + ShortBuffer audioBuffer = ShortBuffer.allocate(8000 * 2 * n / 30); + audioFrame.sampleRate = 8000; + audioFrame.audioChannels = 2; + audioFrame.samples = new ShortBuffer[] {audioBuffer}; + recorder.record(audioFrame); + recorder.stop(); + recorder.release(); + + FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(tempFile); + grabber.start(); + + FFmpegFrameFilter filter = new FFmpegFrameFilter( + "[0:v][1:v]hstack=inputs=2[v]", + "[0:a][1:a]amerge[a]", + grabber.getImageWidth(), grabber.getImageHeight(), grabber.getAudioChannels()); + filter.setPixelFormat(grabber.getPixelFormat()); + filter.setSampleFormat(grabber.getSampleFormat()); + filter.setVideoInputs(2); + filter.setAudioInputs(2); + filter.start(); + + int a = 0, b = 0, c = 0, d = 0; + Frame frame2; + while ((frame2 = grabber.grab()) != null) { + if (frame2.image != null) { + a++; + } + if (frame2.samples != null) { + b++; + } + filter.push(0, frame2); + filter.push(1, frame2); + Frame frame3; + while ((frame3 = filter.pull()) != null) { + if (frame3.image != null) { + c++; + assertEquals(640, frame3.imageWidth); + assertEquals(200, frame3.imageHeight); + assertEquals(3, frame3.imageChannels); + } + if (frame3.samples != null) { + d++; + assertEquals(2, frame3.audioChannels); + assertEquals(1, frame3.samples.length); + assertTrue(frame3.samples[0] instanceof ByteBuffer); + assertEquals(frame2.samples.length, frame3.samples.length); + assertEquals(frame2.samples[0].limit(), frame3.samples[0].limit()); + } + } + } + filter.push(0, null); + filter.push(1, null); + assertEquals(null, filter.pull()); + assertEquals(a, c); + assertEquals(b, d); + assertEquals(null, grabber.grab()); + filter.stop(); + filter.release(); + grabber.restart(); + grabber.stop(); + grabber.release(); + frame.close(); + } catch (Exception e) { + e.printStackTrace(); + fail("Exception should not have been thrown: " + e); + } finally { + tempFile.delete(); + } + } + +} diff --git a/platform/src/test/java/org/bytedeco/javacv/FrameGrabberChangingResolutionTest.java b/platform/src/test/java/org/bytedeco/javacv/FrameGrabberChangingResolutionTest.java index 0af80ec3..e46ae1a8 100644 --- a/platform/src/test/java/org/bytedeco/javacv/FrameGrabberChangingResolutionTest.java +++ b/platform/src/test/java/org/bytedeco/javacv/FrameGrabberChangingResolutionTest.java @@ -1,277 +1,277 @@ -/* - * Copyright (C) 2016-2017 Samuel Audet - * - * Licensed either under the Apache License, Version 2.0, or (at your option) - * under the terms of the GNU General Public License as published by - * the Free Software Foundation (subject to the "Classpath" exception), - * either version 2, or any later version (collectively, the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * http://www.gnu.org/licenses/ - * http://www.gnu.org/software/classpath/license.html - * - * or as provided in the LICENSE.txt file that accompanied this code. - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.bytedeco.javacv; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.FloatBuffer; -import java.nio.ShortBuffer; -import org.bytedeco.javacpp.Loader; -import org.bytedeco.javacpp.indexer.UByteIndexer; -import org.junit.Test; - -import static org.bytedeco.ffmpeg.global.avcodec.*; -import static org.bytedeco.ffmpeg.global.avutil.*; -import static org.junit.Assert.*; - -/** - * Complex Test case for FrameGrabber classes - change the resolution during runtime. - * Also uses other classes from JavaCV. - * - * @author Samuel Audet, Michael Fritscher - */ -public class FrameGrabberChangingResolutionTest { - private File tempFile = new File(Loader.getTempDir(), "test.mkv"); - private File tempTargetFile = new File(Loader.getTempDir(), "target.mkv"); - private boolean endRequested; - - private void makeTestfile() throws Exception { - FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(new FileOutputStream(tempFile), 640, 480, 2); - recorder.setFormat("matroska"); // mp4 doesn't support streaming - recorder.setPixelFormat(AV_PIX_FMT_YUV420P); - recorder.setVideoCodec(AV_CODEC_ID_H264); - recorder.setVideoQuality(0); // lossless - recorder.setFrameRate(30); - recorder.startUnsafe(); - - Frame[] frames = new Frame[60]; - for (int n = 0; n < frames.length; n++) { - Frame frame = new Frame(640, 480, Frame.DEPTH_UBYTE, 3); - UByteIndexer frameIdx = frame.createIndexer(); - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - frameIdx.put(i, j, k, n + i + j + k); - } - } - } - recorder.record(frame); - frames[n] = frame; - } - recorder.stop(); - recorder.release(); - for (int n = 0; n < frames.length; n++) { - frames[n].close(); - } - } - - final public void setupUDPSender(final int x, final int y, final int bandwidth, final int count) throws IOException { - final FFmpegFrameGrabber fg = new FFmpegFrameGrabber(tempFile); - fg.setFrameRate(30); - - final FFmpegFrameRecorder fr = new FFmpegFrameRecorder("udp://127.0.0.1:2345", 0); - fr.setVideoCodecName("mpeg2video"); - fr.setFormat("mpegts"); - - fr.setImageWidth(x); - fr.setImageHeight(y); - fr.setVideoBitrate(bandwidth); - - fr.setFrameRate(30); - - fg.startUnsafe(); - fr.startUnsafe(); - - final boolean[] b = new boolean[1]; - Thread t = new Thread() { - public void run() { - try { - for (int i = 0; i < count; i++) { - /*- System.out.println("S: " + fg.getFrameNumber() + " " + fg.getTimestamp() + " " - + fg.getFrameRate() + " " + fg.getImageWidth() + "x" + fg.getImageHeight() + " " - + fg.getVideoCodec() + " " + fg.getVideoBitrate() + " " + i); */ - Frame source = fg.grabFrame(); - fr.record(source); - } - fg.close(); - fr.close(); - b[0] = true; - } catch (Exception e) { - e.printStackTrace(); - fail("Exception should not have been thrown: " + e); - try { - fg.close(); - fr.close(); - } catch (Exception e1) { - e1.printStackTrace(); - } - b[0] = true; - } - } - }; - t.setName("Sender"); - t.start(); - - while (!b[0]) { - try { - Thread.sleep(100); - } catch (InterruptedException e) { - e.printStackTrace(); - } - } - } - - final public void setupUDPReceiver() throws IOException { - Thread t = new Thread() { - - public void run() { - FFmpegFrameGrabber fg = new FFmpegFrameGrabber("udp://127.0.0.1:2345"); - fg.setFrameRate(30); - - FFmpegFrameRecorder fr = new FFmpegFrameRecorder(tempTargetFile, 0); - fr.setVideoCodecName("mpeg2video"); - fr.setFormat("mpegts"); 
- - fr.setImageWidth(640); - fr.setImageHeight(480); - fr.setVideoBitrate(8000000); - - fr.setFrameRate(30); - - try { - fg.startUnsafe(); - fr.startUnsafe(); - } catch (Exception e) { - e.printStackTrace(); - } - - // Tests whether the width of the picture changes trough all - // qualities and every step has a few pictures. - try { - int n = 0; - int m = 0; // Pictures in this quality - int q = 0; // which quality state? - int[] qualities = { 160, 320, 640, 160, 320, 640, 320, 160 }; - while (!endRequested) { - /*- System.out.println("R: " + fg.getFrameNumber() + " " + fg.getTimestamp() + " " - + fg.getFrameRate() + " " + fg.getImageWidth() + "x" + fg.getImageHeight() + " " - + fg.getVideoCodec() + " " + fg.getVideoBitrate()); */ - Frame source = fg.grabFrame(); - n++; - m++; - // System.out.println("WRITTEN: " + n + " " + m + " " + - // q + " " + source.imageWidth); - if (source.imageWidth != qualities[q]) { - q++; - assertEquals(source.imageWidth, qualities[q]); - assertTrue(m > 5); - assertTrue(m <= 60); - m = 0; - } - fr.record(source); - } - assertEquals(q, qualities.length - 1); - assertTrue(n > 300); - assertTrue(n <= 480); - fr.close(); - } catch (Exception e) { - e.printStackTrace(); - fail("Exception should not have been thrown: " + e); - try { - fg.close(); - fr.close(); - } catch (Exception e1) { - e1.printStackTrace(); - } - } - } - }; - t.setName("Receiver"); - t.start(); - } - - @Test - public void testFFmpegFrameGrabber() { - System.out.println("FFmpegFrameGrabber"); - - try { - makeTestfile(); - - setupUDPReceiver(); - - System.out.println("Changing to 160x120"); - setupUDPSender(160, 120, 50000, 60); - - System.out.println("Changing to 320x240"); - setupUDPSender(320, 240, 100000, 60); - - System.out.println("Changing to 640x480"); - setupUDPSender(640, 480, 200000, 60); - - System.out.println("Changing to 160x120"); - setupUDPSender(160, 120, 50000, 60); - - System.out.println("Changing to 320x240"); - setupUDPSender(320, 240, 100000, 60); - - System.out.println("Changing to 640x480"); - setupUDPSender(640, 480, 200000, 60); - - System.out.println("Changing to 320x240"); - setupUDPSender(320, 240, 100000, 60); - - System.out.println("Changing to 160x120"); - setupUDPSender(160, 120, 50000, 60); - - Thread.sleep(3000); - endRequested = true; - } catch (Exception e) { - tempFile.delete(); - tempTargetFile.delete(); - e.printStackTrace(); - fail("Exception should not have been thrown: " + e); - } - - try { - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new FileInputStream(tempTargetFile)); - grabber.setSampleMode(FrameGrabber.SampleMode.FLOAT); - grabber.startUnsafe(); - - int n = 0; - Frame frame2; - while ((frame2 = grabber.grab()) != null) { - if (frame2.image != null) { - n++; - assertEquals(640, frame2.imageWidth); - } - } - - // It seems that ffmpeg lose some frames while switching (ideal - // value would be 240) - // System.out.println("END NUMBER: " + n); - assertTrue(n > 300); - assertTrue(n <= 480); - assertEquals(null, grabber.grab()); - grabber.stop(); - grabber.release(); - } catch (Exception e) { - e.printStackTrace(); - fail("Exception should not have been thrown: " + e); - } finally { - tempFile.delete(); - tempTargetFile.delete(); - } - } -} +/* + * Copyright (C) 2016-2017 Samuel Audet + * + * Licensed either under the Apache License, Version 2.0, or (at your option) + * under the terms of the GNU General Public License as published by + * the Free Software Foundation (subject to the "Classpath" exception), + * either version 2, or any later version 
(collectively, the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.gnu.org/licenses/ + * http://www.gnu.org/software/classpath/license.html + * + * or as provided in the LICENSE.txt file that accompanied this code. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bytedeco.javacv; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.FloatBuffer; +import java.nio.ShortBuffer; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.indexer.UByteIndexer; +import org.junit.Test; + +import static org.bytedeco.ffmpeg.global.avcodec.*; +import static org.bytedeco.ffmpeg.global.avutil.*; +import static org.junit.Assert.*; + +/** + * Complex Test case for FrameGrabber classes - change the resolution during runtime. + * Also uses other classes from JavaCV. + * + * @author Samuel Audet, Michael Fritscher + */ +public class FrameGrabberChangingResolutionTest { + private File tempFile = new File(Loader.getTempDir(), "test.mkv"); + private File tempTargetFile = new File(Loader.getTempDir(), "target.mkv"); + private boolean endRequested; + + private void makeTestfile() throws Exception { + FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(new FileOutputStream(tempFile), 640, 480, 2); + recorder.setFormat("matroska"); // mp4 doesn't support streaming + recorder.setPixelFormat(AV_PIX_FMT_YUV420P); + recorder.setVideoCodec(AV_CODEC_ID_H264); + recorder.setVideoQuality(0); // lossless + recorder.setFrameRate(30); + recorder.startUnsafe(); + + Frame[] frames = new Frame[60]; + for (int n = 0; n < frames.length; n++) { + Frame frame = new Frame(640, 480, Frame.DEPTH_UBYTE, 3); + UByteIndexer frameIdx = frame.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + frameIdx.put(i, j, k, n + i + j + k); + } + } + } + recorder.record(frame); + frames[n] = frame; + } + recorder.stop(); + recorder.release(); + for (int n = 0; n < frames.length; n++) { + frames[n].close(); + } + } + + final public void setupUDPSender(final int x, final int y, final int bandwidth, final int count) throws IOException { + final FFmpegFrameGrabber fg = new FFmpegFrameGrabber(tempFile); + fg.setFrameRate(30); + + final FFmpegFrameRecorder fr = new FFmpegFrameRecorder("udp://127.0.0.1:2345", 0); + fr.setVideoCodecName("mpeg2video"); + fr.setFormat("mpegts"); + + fr.setImageWidth(x); + fr.setImageHeight(y); + fr.setVideoBitrate(bandwidth); + + fr.setFrameRate(30); + + fg.startUnsafe(); + fr.startUnsafe(); + + final boolean[] b = new boolean[1]; + Thread t = new Thread() { + public void run() { + try { + for (int i = 0; i < count; i++) { + /*- System.out.println("S: " + fg.getFrameNumber() + " " + fg.getTimestamp() + " " + + fg.getFrameRate() + " " + fg.getImageWidth() + "x" + fg.getImageHeight() + " " + + fg.getVideoCodec() + " " + fg.getVideoBitrate() + " " + i); */ + Frame source = fg.grabFrame(); + fr.record(source); + } + fg.close(); + fr.close(); + b[0] = true; + } catch (Exception e) { + 
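// On failure, report the exception, close both endpoints, and unblock the waiting main thread. +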
e.printStackTrace(); + fail("Exception should not have been thrown: " + e); + try { + fg.close(); + fr.close(); + } catch (Exception e1) { + e1.printStackTrace(); + } + b[0] = true; + } + } + }; + t.setName("Sender"); + t.start(); + + while (!b[0]) { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + } + + final public void setupUDPReceiver() throws IOException { + Thread t = new Thread() { + + public void run() { + FFmpegFrameGrabber fg = new FFmpegFrameGrabber("udp://127.0.0.1:2345"); + fg.setFrameRate(30); + + FFmpegFrameRecorder fr = new FFmpegFrameRecorder(tempTargetFile, 0); + fr.setVideoCodecName("mpeg2video"); + fr.setFormat("mpegts"); + + fr.setImageWidth(640); + fr.setImageHeight(480); + fr.setVideoBitrate(8000000); + + fr.setFrameRate(30); + + try { + fg.startUnsafe(); + fr.startUnsafe(); + } catch (Exception e) { + e.printStackTrace(); + } + + // Tests whether the width of the picture changes through all + // qualities and that every step has a few pictures. + try { + int n = 0; + int m = 0; // Pictures in this quality + int q = 0; // which quality state? + int[] qualities = { 160, 320, 640, 160, 320, 640, 320, 160 }; + while (!endRequested) { + /*- System.out.println("R: " + fg.getFrameNumber() + " " + fg.getTimestamp() + " " + + fg.getFrameRate() + " " + fg.getImageWidth() + "x" + fg.getImageHeight() + " " + + fg.getVideoCodec() + " " + fg.getVideoBitrate()); */ + Frame source = fg.grabFrame(); + n++; + m++; + // System.out.println("WRITTEN: " + n + " " + m + " " + + // q + " " + source.imageWidth); + if (source.imageWidth != qualities[q]) { + q++; + assertEquals(source.imageWidth, qualities[q]); + assertTrue(m > 5); + assertTrue(m <= 60); + m = 0; + } + fr.record(source); + } + assertEquals(q, qualities.length - 1); + assertTrue(n > 300); + assertTrue(n <= 480); + fr.close(); + } catch (Exception e) { + e.printStackTrace(); + fail("Exception should not have been thrown: " + e); + try { + fg.close(); + fr.close(); + } catch (Exception e1) { + e1.printStackTrace(); + } + } + } + }; + t.setName("Receiver"); + t.start(); + } + + @Test + public void testFFmpegFrameGrabber() { + System.out.println("FFmpegFrameGrabber"); + + try { + makeTestfile(); + + setupUDPReceiver(); + + System.out.println("Changing to 160x120"); + setupUDPSender(160, 120, 50000, 60); + + System.out.println("Changing to 320x240"); + setupUDPSender(320, 240, 100000, 60); + + System.out.println("Changing to 640x480"); + setupUDPSender(640, 480, 200000, 60); + + System.out.println("Changing to 160x120"); + setupUDPSender(160, 120, 50000, 60); + + System.out.println("Changing to 320x240"); + setupUDPSender(320, 240, 100000, 60); + + System.out.println("Changing to 640x480"); + setupUDPSender(640, 480, 200000, 60); + + System.out.println("Changing to 320x240"); + setupUDPSender(320, 240, 100000, 60); + + System.out.println("Changing to 160x120"); + setupUDPSender(160, 120, 50000, 60); + + Thread.sleep(3000); + endRequested = true; + } catch (Exception e) { + tempFile.delete(); + tempTargetFile.delete(); + e.printStackTrace(); + fail("Exception should not have been thrown: " + e); + } + + try { + FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new FileInputStream(tempTargetFile)); + grabber.setSampleMode(FrameGrabber.SampleMode.FLOAT); + grabber.startUnsafe(); + + int n = 0; + Frame frame2; + while ((frame2 = grabber.grab()) != null) { + if (frame2.image != null) { + n++; + assertEquals(640, frame2.imageWidth); + } + } + + // It seems that ffmpeg loses some frames while
switching (ideal + // value would be 240) + // System.out.println("END NUMBER: " + n); + assertTrue(n > 300); + assertTrue(n <= 480); + assertEquals(null, grabber.grab()); + grabber.stop(); + grabber.release(); + } catch (Exception e) { + e.printStackTrace(); + fail("Exception should not have been thrown: " + e); + } finally { + tempFile.delete(); + tempTargetFile.delete(); + } + } +} diff --git a/platform/src/test/java/org/bytedeco/javacv/FrameGrabberTest.java b/platform/src/test/java/org/bytedeco/javacv/FrameGrabberTest.java index e591f3a3..8229b4ab 100644 --- a/platform/src/test/java/org/bytedeco/javacv/FrameGrabberTest.java +++ b/platform/src/test/java/org/bytedeco/javacv/FrameGrabberTest.java @@ -1,487 +1,487 @@ -/* - * Copyright (C) 2016-2023 Samuel Audet - * - * Licensed either under the Apache License, Version 2.0, or (at your option) - * under the terms of the GNU General Public License as published by - * the Free Software Foundation (subject to the "Classpath" exception), - * either version 2, or any later version (collectively, the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * http://www.gnu.org/licenses/ - * http://www.gnu.org/software/classpath/license.html - * - * or as provided in the LICENSE.txt file that accompanied this code. - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bytedeco.javacv; - -import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.nio.FloatBuffer; -import java.nio.ShortBuffer; -import java.util.Random; -import org.bytedeco.javacpp.Loader; -import org.bytedeco.javacpp.PointerScope; -import org.bytedeco.javacpp.indexer.UByteIndexer; -import org.junit.Test; - -import static org.bytedeco.ffmpeg.global.avcodec.*; -import static org.bytedeco.ffmpeg.global.avutil.*; -import static org.junit.Assert.*; - -/** - * Test cases for FrameGrabber classes. Also uses other classes from JavaCV. 
- * - * @author Samuel Audet - */ -public class FrameGrabberTest { - - @Test - public void testFFmpegFrameGrabber() { - System.out.println("FFmpegFrameGrabber"); - - File tempFile = new File(Loader.getTempDir(), "test.mkv"); - try { - FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(new FileOutputStream(tempFile), 640, 480, 2); - recorder.setFormat("matroska"); // mp4 doesn't support streaming - recorder.setPixelFormat(AV_PIX_FMT_BGR24); - recorder.setVideoCodecName("jpegls"); - recorder.setVideoQuality(0); // lossless - recorder.setSampleFormat(AV_SAMPLE_FMT_S16); - recorder.setSampleRate(44100); - recorder.setAudioCodecName("pcm_s16le"); - recorder.start(); - - Frame[] frames = new Frame[1000]; - for (int n = 0; n < frames.length; n++) { - Frame frame = new Frame(640, 480, Frame.DEPTH_UBYTE, 3); - UByteIndexer frameIdx = frame.createIndexer(); - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - frameIdx.put(i, j, k, n + i + j + k); - } - } - } - recorder.record(frame); - frames[n] = frame; - } - Frame audioFrame = new Frame(); - ShortBuffer audioBuffer = ShortBuffer.allocate(64 * 1024); - audioFrame.sampleRate = 44100; - audioFrame.audioChannels = 2; - audioFrame.samples = new ShortBuffer[] {audioBuffer}; - for (int i = 0; i < audioBuffer.capacity(); i++) { - audioBuffer.put(i, (short)i); - } - recorder.record(audioFrame); - recorder.stop(); - recorder.release(); - - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new FileInputStream(tempFile)); - grabber.setSampleMode(FrameGrabber.SampleMode.FLOAT); - grabber.start(); - - int n = 0, m = 0; - Frame frame2; - long startTime = System.nanoTime(); - while ((frame2 = grabber.grabAtFrameRate()) != null) { - long delay = frame2.timestamp * 1000 - (System.nanoTime() - startTime); - if (delay < -1_000_000_000 / grabber.getFrameRate()) { - // skip to catch up with frame rate - if (frame2.image != null) { - n++; - } else { - m++; - } - continue; - } - Frame clone2 = frame2.clone(); - if (frame2.image != null) { - Frame frame = frames[n++]; - assertEquals(frame.imageWidth, frame2.imageWidth); - assertEquals(frame.imageHeight, frame2.imageHeight); - assertEquals(frame.imageChannels, frame2.imageChannels); - assertEquals(frame.imageWidth, clone2.imageWidth); - assertEquals(frame.imageHeight, clone2.imageHeight); - assertEquals(frame.imageChannels, clone2.imageChannels); - - UByteIndexer frameIdx = frame.createIndexer(); - UByteIndexer frame2Idx = frame2.createIndexer(); - UByteIndexer clone2Idx = clone2.createIndexer(); - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - int b = frameIdx.get(i, j, k); - assertEquals(b, frame2Idx.get(i, j, k)); - assertEquals(b, clone2Idx.get(i, j, k)); - } - } - } - } else { - FloatBuffer audioBuffer2 = (FloatBuffer)frame2.samples[0]; - FloatBuffer cloneBuffer2 = (FloatBuffer)clone2.samples[0]; - while (audioBuffer2.hasRemaining()) { - assertEquals((float)audioBuffer.get(m) / (Short.MAX_VALUE + 1), audioBuffer2.get(), 0); - assertEquals((float)audioBuffer.get(m) / (Short.MAX_VALUE + 1), cloneBuffer2.get(), 0); - m++; - } - } - clone2.close(); - } - long stopTime = System.nanoTime(); - assertEquals(n, (stopTime - startTime) * grabber.getFrameRate() / 1_000_000_000, 3.0); - assertEquals(frames.length, n); - assertEquals(null, grabber.grab()); - grabber.restart(); - grabber.stop(); - grabber.release(); - for (n = 0; n < 
frames.length; n++) { - frames[n].close(); - } - } catch (Exception e) { - e.printStackTrace(); - fail("Exception should not have been thrown: " + e); - } finally { - tempFile.delete(); - } - } - - @Test - public void testFFmpegFrameGrabberLockingTest() { - final boolean[] failed = {false}; - final int numberOfInstances = 20; - System.out.println("FFmpegFrameGrabberLocking"); - - Runnable[] runables = new Runnable[numberOfInstances]; - Thread[] threads = new Thread[numberOfInstances]; - final boolean[] finish = new boolean[numberOfInstances]; - for (int instance = 0; instance < numberOfInstances; instance++) { - final int instance_final = instance; - Runnable r = new Runnable() { - public void run() { - - File tempFile = new File(Loader.getTempDir(), "test" + instance_final + ".mkv"); - try (PointerScope scope = new PointerScope()) { - FFmpegLogCallback.set(); - FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(new FileOutputStream(tempFile), 640, 480, 2); - recorder.setFormat("matroska"); // mp4 doesn't support streaming - recorder.setPixelFormat(AV_PIX_FMT_BGR24); - recorder.setVideoCodecName("jpegls"); - recorder.setVideoQuality(0); // lossless - recorder.setSampleFormat(AV_SAMPLE_FMT_S16); - recorder.setSampleRate(44100); - recorder.setAudioCodecName("pcm_s16le"); - recorder.startUnsafe(); - - Frame[] frames = new Frame[10]; - for (int n = 0; n < frames.length; n++) { - Frame frame = new Frame(640, 480, Frame.DEPTH_UBYTE, 3); - UByteIndexer frameIdx = frame.createIndexer(); - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - frameIdx.put(i, j, k, n + i + j + k); - } - } - } - recorder.record(frame); - frames[n] = frame; - } - Frame audioFrame = new Frame(); - ShortBuffer audioBuffer = ShortBuffer.allocate(64 * 1024); - audioFrame.sampleRate = 44100; - audioFrame.audioChannels = 2; - audioFrame.samples = new ShortBuffer[] { audioBuffer }; - for (int i = 0; i < audioBuffer.capacity(); i++) { - audioBuffer.put(i, (short) i); - } - recorder.record(audioFrame); - recorder.stop(); - recorder.release(); - - Thread.sleep(1000); - - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new FileInputStream(tempFile)); - grabber.setSampleMode(FrameGrabber.SampleMode.FLOAT); - grabber.startUnsafe(); - - int n = 0, m = 0; - Frame frame2; - while ((frame2 = grabber.grab()) != null) { - if (frame2.image != null) { - Frame frame = frames[n++]; - assertEquals(frame.imageWidth, frame2.imageWidth); - assertEquals(frame.imageHeight, frame2.imageHeight); - assertEquals(frame.imageChannels, frame2.imageChannels); - - UByteIndexer frameIdx = frame.createIndexer(); - UByteIndexer frame2Idx = frame2.createIndexer(); - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - int b = frameIdx.get(i, j, k); - assertEquals(b, frame2Idx.get(i, j, k)); - } - } - } - } else { - FloatBuffer audioBuffer2 = (FloatBuffer) frame2.samples[0]; - while (audioBuffer2.hasRemaining()) { - assertEquals((float) audioBuffer.get(m++) / (Short.MAX_VALUE + 1), - audioBuffer2.get(), 0); - } - } - } - assertEquals(frames.length, n); - assertEquals(null, grabber.grab()); - grabber.restart(); - grabber.stop(); - grabber.release(); - for (n = 0; n < frames.length; n++) { - frames[n].close(); - } - } catch (Error | Exception e) { - failed[0] = true; - e.printStackTrace(); - fail("Exception should not have been thrown: " + e); - } finally { - tempFile.delete(); - 
finish[instance_final] = true; - } - } - }; - - runables[instance_final] = r; - } - - for (int instance = 0; instance < numberOfInstances; instance++) { - threads[instance] = new Thread(runables[instance]); - threads[instance].setName("Testthread-" + instance); - } - - for (int instance = 0; instance < numberOfInstances; instance++) { - threads[instance].start(); - } - - while (true) { - boolean finished = true; - for (int instance = 0; instance < numberOfInstances; instance++) { - if (!finish[instance]) { - finished = false; - break; - } - } - - if (!finished) { - System.out.println("Still waiting..."); - try { - Thread.sleep(500); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } else { - break; - } - } - assertFalse(failed[0]); - } - - @Test - public void testFFmpegFrameGrabberSeeking() throws IOException { - System.out.println("FFmpegFrameGrabberSeeking"); - - for(int seektestnum = 0; seektestnum < 3; seektestnum++) try (PointerScope scope = new PointerScope()) { - FFmpegLogCallback.set(); - String fileName = seektestnum==0?"testAV.mp4":seektestnum==1?"testV.mp4":"testA.mp4"; - File tempFile = new File(Loader.getTempDir(), fileName); - tempFile.deleteOnExit(); - FFmpegFrameRecorder recorder = seektestnum == 0? new FFmpegFrameRecorder(tempFile, 640, 480, 2) - : seektestnum == 1? new FFmpegFrameRecorder(tempFile, 640, 480, 0) - : new FFmpegFrameRecorder(tempFile, 0, 0, 2); - recorder.setFormat("mp4"); - recorder.setFrameRate(30); - recorder.setPixelFormat(AV_PIX_FMT_YUV420P); - recorder.setVideoCodec(AV_CODEC_ID_MPEG4); - recorder.setVideoQuality(10); - recorder.setSampleRate(48000); - recorder.setSampleFormat(AV_SAMPLE_FMT_FLTP); - recorder.setAudioCodec(AV_CODEC_ID_AAC); - recorder.setAudioQuality(0); - recorder.setDisplayRotation((seektestnum - 2) * 90.0); - recorder.start(); - if (seektestnum!=2) { - Frame frame = new Frame(640, 480, Frame.DEPTH_UBYTE, 3); - UByteIndexer frameIdx = frame.createIndexer(); - for (int n = 0; n < 10000; n++) { - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - frameIdx.put(i, j, k, n + i + j + k); - } - } - } - recorder.record(frame); - if (n == 5000 && seektestnum!=1){ - Frame audioFrame = new Frame(); - ShortBuffer audioBuffer = ShortBuffer.allocate(48000 * 2 * 10000 / 30); - audioFrame.sampleRate = 48000; - audioFrame.audioChannels = 2; - audioFrame.samples = new ShortBuffer[] {audioBuffer}; - for (int i = 0; i < audioBuffer.capacity(); i++) { - audioBuffer.put(i, (short)i); - } - recorder.record(audioFrame); - } - } - frame.close(); - } else { - Frame audioFrame = new Frame(); - ShortBuffer audioBuffer = ShortBuffer.allocate(48000 * 2 * 10000 / 30); - audioFrame.sampleRate = 48000; - audioFrame.audioChannels = 2; - audioFrame.samples = new ShortBuffer[] {audioBuffer}; - for (int i = 0; i < audioBuffer.capacity(); i++) { - audioBuffer.put(i, (short)i); - } - recorder.record(audioFrame); - } - recorder.stop(); - recorder.release(); - - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(tempFile); - grabber.setVideoOption("threads", "1"); // more precise without threads - grabber.start(); - assertEquals((seektestnum - 2) * 90.0, grabber.getDisplayRotation(), 0); - int length = (int) ( grabber.getLengthInTime() - 1000000L); - - - System.out.println(); - System.out.println("Seek in file containing "+(seektestnum==0?"video and audio":seektestnum==1?"video only":"audio only")); - 
System.out.println("============================================"); - System.out.println("Testing file "+tempFile.getName()); - System.out.println("Length = "+grabber.getLengthInTime()); - System.out.println("Framerate = "+grabber.getFrameRate()); - System.out.println(); - System.out.println("has video stream = "+(grabber.hasVideo()?"YES":"NO")+", has audio stream = "+(grabber.hasAudio()?"YES":"NO")); - long tolerance = 1000000L + (grabber.getFrameRate() > 0.0? (long) (5000000/grabber.getFrameRate()):500000L); - Random random = new Random(29); - - for (int frametypenum = 0; frametypenum < 4; frametypenum++) { - long mindelta = Long.MAX_VALUE; - long maxdelta = Long.MIN_VALUE; - System.out.println(); - System.out.println("Seek by " - + (frametypenum == 0 ? "any" : frametypenum == 1 ? "video" : frametypenum == 2 ? "audio" : "old method") - + (frametypenum == 0 ? " frames" : "")); - - System.out.println("--------------------"); - for (int i = 0; i < 200; i++) { - long timestamp = random.nextInt(length); - switch (frametypenum) { - case 0: - grabber.setTimestamp(timestamp, true); - break; - case 1: - grabber.setVideoTimestamp(timestamp); - break; - case 2: - grabber.setAudioTimestamp(timestamp); - break; - case 3: - grabber.setTimestamp(timestamp); - break; - } - - Frame frame = grabber.grab(); - long timestamp2 = grabber.getTimestamp(); - long delta = timestamp2 - timestamp; - if (delta > maxdelta) maxdelta = delta; - if (delta < mindelta) mindelta = delta; - assertTrue(frame.image != null ^ frame.samples != null); - System.out.println(timestamp2 + " - " + timestamp + " = " + delta + " type: " + frame.getTypes()); - assertTrue(Math.abs(delta) < tolerance); - /* - if (seektestnum==0) { - boolean wasVideo = frame.image != null; - boolean wasAudio = frame.samples != null; - Frame frame2 = grabber.grab(); - while ((wasVideo && frame2.image != null) - || (wasAudio && frame2.samples != null)) { - frame2 = grabber.grab(); - } - assertTrue(wasVideo ^ frame2.image != null); - assertTrue(wasAudio ^ frame2.samples != null); - long timestamp3 = grabber.getTimestamp(); - System.out.println(timestamp3 + " - " + timestamp + " = " + (timestamp3 - timestamp)); - assertTrue(timestamp3 >= timestamp - tolerance && timestamp3 < timestamp + tolerance); - } - */ - } - System.out.println(); - System.out.println("------------------------------------"); - System.out.println("delta from " + mindelta + " to " + maxdelta); - System.out.println(); - } - if (seektestnum==0) { - System.out.println(); - System.out.println("======== Check sequential setVideoFrameNumber (issue #1697) ========"); - for (int i = 0; i < 10; i++) { - grabber.setVideoFrameNumber(i); - long timestamp = grabber.grabImage().timestamp; - System.out.println("frame number:" + i + " timestamp:" + timestamp); - assertTrue(i == Math.round(timestamp * grabber.getFrameRate() / 1000000L)); - } - } - if (seektestnum==2) { - - long count1 = 0; - - long duration = grabber.getLengthInTime(); - - System.out.println(); - System.out.println("======== Check seeking in audio ========"); - System.out.println("FrameRate = "+grabber.getFrameRate()+" AudioFrameRate = "+grabber.getAudioFrameRate()+", duration = "+duration+" audio frames = "+grabber.getLengthInAudioFrames()); - - - - double deltaTimeStamp=0.0; - if (grabber.hasAudio() && grabber.getAudioFrameRate() > 0) { - deltaTimeStamp = 1000000.0/grabber.getAudioFrameRate(); - - } - System.out.println("AudioFrameDuration = "+deltaTimeStamp); - System.out.println(); - System.out.println("======== Check setAudioFrameNumber 
========"); - count1=0; - - while (count1++<1000) { - int audioFrameToSeek = random.nextInt(grabber.getLengthInAudioFrames()-100); - grabber.setAudioFrameNumber(audioFrameToSeek); - Frame setFrame = grabber.grabSamples(); - if (setFrame == null) { - System.out.println("null frame after seek to audio frame"); - } else { - long audioTs = grabber.getTimestamp(); - System.out.println("audioFrame # "+audioFrameToSeek+", timeStamp = "+audioTs+", difference = "+Math.round(audioTs*grabber.getAudioFrameRate()/1000000 - audioFrameToSeek)); - assertTrue(Math.abs(audioTs*grabber.getAudioFrameRate()/1000000 - audioFrameToSeek)<10); - } - } - } - grabber.stop(); - System.out.println(); - System.out.println("======= seek in " +fileName+" is finished ===========" ); - } - - } -} +/* + * Copyright (C) 2016-2023 Samuel Audet + * + * Licensed either under the Apache License, Version 2.0, or (at your option) + * under the terms of the GNU General Public License as published by + * the Free Software Foundation (subject to the "Classpath" exception), + * either version 2, or any later version (collectively, the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.gnu.org/licenses/ + * http://www.gnu.org/software/classpath/license.html + * + * or as provided in the LICENSE.txt file that accompanied this code. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bytedeco.javacv; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.FloatBuffer; +import java.nio.ShortBuffer; +import java.util.Random; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.PointerScope; +import org.bytedeco.javacpp.indexer.UByteIndexer; +import org.junit.Test; + +import static org.bytedeco.ffmpeg.global.avcodec.*; +import static org.bytedeco.ffmpeg.global.avutil.*; +import static org.junit.Assert.*; + +/** + * Test cases for FrameGrabber classes. Also uses other classes from JavaCV. 
+ * + * @author Samuel Audet + */ +public class FrameGrabberTest { + + @Test + public void testFFmpegFrameGrabber() { + System.out.println("FFmpegFrameGrabber"); + + File tempFile = new File(Loader.getTempDir(), "test.mkv"); + try { + FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(new FileOutputStream(tempFile), 640, 480, 2); + recorder.setFormat("matroska"); // mp4 doesn't support streaming + recorder.setPixelFormat(AV_PIX_FMT_BGR24); + recorder.setVideoCodecName("jpegls"); + recorder.setVideoQuality(0); // lossless + recorder.setSampleFormat(AV_SAMPLE_FMT_S16); + recorder.setSampleRate(44100); + recorder.setAudioCodecName("pcm_s16le"); + recorder.start(); + + Frame[] frames = new Frame[1000]; + for (int n = 0; n < frames.length; n++) { + Frame frame = new Frame(640, 480, Frame.DEPTH_UBYTE, 3); + UByteIndexer frameIdx = frame.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + frameIdx.put(i, j, k, n + i + j + k); + } + } + } + recorder.record(frame); + frames[n] = frame; + } + Frame audioFrame = new Frame(); + ShortBuffer audioBuffer = ShortBuffer.allocate(64 * 1024); + audioFrame.sampleRate = 44100; + audioFrame.audioChannels = 2; + audioFrame.samples = new ShortBuffer[] {audioBuffer}; + for (int i = 0; i < audioBuffer.capacity(); i++) { + audioBuffer.put(i, (short)i); + } + recorder.record(audioFrame); + recorder.stop(); + recorder.release(); + + FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new FileInputStream(tempFile)); + grabber.setSampleMode(FrameGrabber.SampleMode.FLOAT); + grabber.start(); + + int n = 0, m = 0; + Frame frame2; + long startTime = System.nanoTime(); + while ((frame2 = grabber.grabAtFrameRate()) != null) { + long delay = frame2.timestamp * 1000 - (System.nanoTime() - startTime); + if (delay < -1_000_000_000 / grabber.getFrameRate()) { + // skip to catch up with frame rate + if (frame2.image != null) { + n++; + } else { + m++; + } + continue; + } + Frame clone2 = frame2.clone(); + if (frame2.image != null) { + Frame frame = frames[n++]; + assertEquals(frame.imageWidth, frame2.imageWidth); + assertEquals(frame.imageHeight, frame2.imageHeight); + assertEquals(frame.imageChannels, frame2.imageChannels); + assertEquals(frame.imageWidth, clone2.imageWidth); + assertEquals(frame.imageHeight, clone2.imageHeight); + assertEquals(frame.imageChannels, clone2.imageChannels); + + UByteIndexer frameIdx = frame.createIndexer(); + UByteIndexer frame2Idx = frame2.createIndexer(); + UByteIndexer clone2Idx = clone2.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + int b = frameIdx.get(i, j, k); + assertEquals(b, frame2Idx.get(i, j, k)); + assertEquals(b, clone2Idx.get(i, j, k)); + } + } + } + } else { + FloatBuffer audioBuffer2 = (FloatBuffer)frame2.samples[0]; + FloatBuffer cloneBuffer2 = (FloatBuffer)clone2.samples[0]; + while (audioBuffer2.hasRemaining()) { + assertEquals((float)audioBuffer.get(m) / (Short.MAX_VALUE + 1), audioBuffer2.get(), 0); + assertEquals((float)audioBuffer.get(m) / (Short.MAX_VALUE + 1), cloneBuffer2.get(), 0); + m++; + } + } + clone2.close(); + } + long stopTime = System.nanoTime(); + assertEquals(n, (stopTime - startTime) * grabber.getFrameRate() / 1_000_000_000, 3.0); + assertEquals(frames.length, n); + assertEquals(null, grabber.grab()); + grabber.restart(); + grabber.stop(); + grabber.release(); + for (n = 0; n < 
frames.length; n++) { + frames[n].close(); + } + } catch (Exception e) { + e.printStackTrace(); + fail("Exception should not have been thrown: " + e); + } finally { + tempFile.delete(); + } + } + + @Test + public void testFFmpegFrameGrabberLockingTest() { + final boolean[] failed = {false}; + final int numberOfInstances = 20; + System.out.println("FFmpegFrameGrabberLocking"); + + Runnable[] runables = new Runnable[numberOfInstances]; + Thread[] threads = new Thread[numberOfInstances]; + final boolean[] finish = new boolean[numberOfInstances]; + for (int instance = 0; instance < numberOfInstances; instance++) { + final int instance_final = instance; + Runnable r = new Runnable() { + public void run() { + + File tempFile = new File(Loader.getTempDir(), "test" + instance_final + ".mkv"); + try (PointerScope scope = new PointerScope()) { + FFmpegLogCallback.set(); + FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(new FileOutputStream(tempFile), 640, 480, 2); + recorder.setFormat("matroska"); // mp4 doesn't support streaming + recorder.setPixelFormat(AV_PIX_FMT_BGR24); + recorder.setVideoCodecName("jpegls"); + recorder.setVideoQuality(0); // lossless + recorder.setSampleFormat(AV_SAMPLE_FMT_S16); + recorder.setSampleRate(44100); + recorder.setAudioCodecName("pcm_s16le"); + recorder.startUnsafe(); + + Frame[] frames = new Frame[10]; + for (int n = 0; n < frames.length; n++) { + Frame frame = new Frame(640, 480, Frame.DEPTH_UBYTE, 3); + UByteIndexer frameIdx = frame.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + frameIdx.put(i, j, k, n + i + j + k); + } + } + } + recorder.record(frame); + frames[n] = frame; + } + Frame audioFrame = new Frame(); + ShortBuffer audioBuffer = ShortBuffer.allocate(64 * 1024); + audioFrame.sampleRate = 44100; + audioFrame.audioChannels = 2; + audioFrame.samples = new ShortBuffer[] { audioBuffer }; + for (int i = 0; i < audioBuffer.capacity(); i++) { + audioBuffer.put(i, (short) i); + } + recorder.record(audioFrame); + recorder.stop(); + recorder.release(); + + Thread.sleep(1000); + + FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new FileInputStream(tempFile)); + grabber.setSampleMode(FrameGrabber.SampleMode.FLOAT); + grabber.startUnsafe(); + + int n = 0, m = 0; + Frame frame2; + while ((frame2 = grabber.grab()) != null) { + if (frame2.image != null) { + Frame frame = frames[n++]; + assertEquals(frame.imageWidth, frame2.imageWidth); + assertEquals(frame.imageHeight, frame2.imageHeight); + assertEquals(frame.imageChannels, frame2.imageChannels); + + UByteIndexer frameIdx = frame.createIndexer(); + UByteIndexer frame2Idx = frame2.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + int b = frameIdx.get(i, j, k); + assertEquals(b, frame2Idx.get(i, j, k)); + } + } + } + } else { + FloatBuffer audioBuffer2 = (FloatBuffer) frame2.samples[0]; + while (audioBuffer2.hasRemaining()) { + assertEquals((float) audioBuffer.get(m++) / (Short.MAX_VALUE + 1), + audioBuffer2.get(), 0); + } + } + } + assertEquals(frames.length, n); + assertEquals(null, grabber.grab()); + grabber.restart(); + grabber.stop(); + grabber.release(); + for (n = 0; n < frames.length; n++) { + frames[n].close(); + } + } catch (Error | Exception e) { + failed[0] = true; + e.printStackTrace(); + fail("Exception should not have been thrown: " + e); + } finally { + tempFile.delete(); + 
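// Always flag this instance as done so the completion poll below can terminate. +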
finish[instance_final] = true; + } + } + }; + + runables[instance_final] = r; + } + + for (int instance = 0; instance < numberOfInstances; instance++) { + threads[instance] = new Thread(runables[instance]); + threads[instance].setName("Testthread-" + instance); + } + + for (int instance = 0; instance < numberOfInstances; instance++) { + threads[instance].start(); + } + + while (true) { + boolean finished = true; + for (int instance = 0; instance < numberOfInstances; instance++) { + if (!finish[instance]) { + finished = false; + break; + } + } + + if (!finished) { + System.out.println("Still waiting..."); + try { + Thread.sleep(500); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + } else { + break; + } + } + assertFalse(failed[0]); + } + + @Test + public void testFFmpegFrameGrabberSeeking() throws IOException { + System.out.println("FFmpegFrameGrabberSeeking"); + + for(int seektestnum = 0; seektestnum < 3; seektestnum++) try (PointerScope scope = new PointerScope()) { + FFmpegLogCallback.set(); + String fileName = seektestnum==0?"testAV.mp4":seektestnum==1?"testV.mp4":"testA.mp4"; + File tempFile = new File(Loader.getTempDir(), fileName); + tempFile.deleteOnExit(); + FFmpegFrameRecorder recorder = seektestnum == 0? new FFmpegFrameRecorder(tempFile, 640, 480, 2) + : seektestnum == 1? new FFmpegFrameRecorder(tempFile, 640, 480, 0) + : new FFmpegFrameRecorder(tempFile, 0, 0, 2); + recorder.setFormat("mp4"); + recorder.setFrameRate(30); + recorder.setPixelFormat(AV_PIX_FMT_YUV420P); + recorder.setVideoCodec(AV_CODEC_ID_MPEG4); + recorder.setVideoQuality(10); + recorder.setSampleRate(48000); + recorder.setSampleFormat(AV_SAMPLE_FMT_FLTP); + recorder.setAudioCodec(AV_CODEC_ID_AAC); + recorder.setAudioQuality(0); + recorder.setDisplayRotation((seektestnum - 2) * 90.0); + recorder.start(); + if (seektestnum!=2) { + Frame frame = new Frame(640, 480, Frame.DEPTH_UBYTE, 3); + UByteIndexer frameIdx = frame.createIndexer(); + for (int n = 0; n < 10000; n++) { + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + frameIdx.put(i, j, k, n + i + j + k); + } + } + } + recorder.record(frame); + if (n == 5000 && seektestnum!=1){ + Frame audioFrame = new Frame(); + ShortBuffer audioBuffer = ShortBuffer.allocate(48000 * 2 * 10000 / 30); + audioFrame.sampleRate = 48000; + audioFrame.audioChannels = 2; + audioFrame.samples = new ShortBuffer[] {audioBuffer}; + for (int i = 0; i < audioBuffer.capacity(); i++) { + audioBuffer.put(i, (short)i); + } + recorder.record(audioFrame); + } + } + frame.close(); + } else { + Frame audioFrame = new Frame(); + ShortBuffer audioBuffer = ShortBuffer.allocate(48000 * 2 * 10000 / 30); + audioFrame.sampleRate = 48000; + audioFrame.audioChannels = 2; + audioFrame.samples = new ShortBuffer[] {audioBuffer}; + for (int i = 0; i < audioBuffer.capacity(); i++) { + audioBuffer.put(i, (short)i); + } + recorder.record(audioFrame); + } + recorder.stop(); + recorder.release(); + + FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(tempFile); + grabber.setVideoOption("threads", "1"); // more precise without threads + grabber.start(); + assertEquals((seektestnum - 2) * 90.0, grabber.getDisplayRotation(), 0); + int length = (int) ( grabber.getLengthInTime() - 1000000L); + + + System.out.println(); + System.out.println("Seek in file containing "+(seektestnum==0?"video and audio":seektestnum==1?"video only":"audio only")); + 
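// The seek assertions below allow an error of up to one second plus five frame durations. +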
System.out.println("============================================"); + System.out.println("Testing file "+tempFile.getName()); + System.out.println("Length = "+grabber.getLengthInTime()); + System.out.println("Framerate = "+grabber.getFrameRate()); + System.out.println(); + System.out.println("has video stream = "+(grabber.hasVideo()?"YES":"NO")+", has audio stream = "+(grabber.hasAudio()?"YES":"NO")); + long tolerance = 1000000L + (grabber.getFrameRate() > 0.0? (long) (5000000/grabber.getFrameRate()):500000L); + Random random = new Random(29); + + for (int frametypenum = 0; frametypenum < 4; frametypenum++) { + long mindelta = Long.MAX_VALUE; + long maxdelta = Long.MIN_VALUE; + System.out.println(); + System.out.println("Seek by " + + (frametypenum == 0 ? "any" : frametypenum == 1 ? "video" : frametypenum == 2 ? "audio" : "old method") + + (frametypenum == 0 ? " frames" : "")); + + System.out.println("--------------------"); + for (int i = 0; i < 200; i++) { + long timestamp = random.nextInt(length); + switch (frametypenum) { + case 0: + grabber.setTimestamp(timestamp, true); + break; + case 1: + grabber.setVideoTimestamp(timestamp); + break; + case 2: + grabber.setAudioTimestamp(timestamp); + break; + case 3: + grabber.setTimestamp(timestamp); + break; + } + + Frame frame = grabber.grab(); + long timestamp2 = grabber.getTimestamp(); + long delta = timestamp2 - timestamp; + if (delta > maxdelta) maxdelta = delta; + if (delta < mindelta) mindelta = delta; + assertTrue(frame.image != null ^ frame.samples != null); + System.out.println(timestamp2 + " - " + timestamp + " = " + delta + " type: " + frame.getTypes()); + assertTrue(Math.abs(delta) < tolerance); + /* + if (seektestnum==0) { + boolean wasVideo = frame.image != null; + boolean wasAudio = frame.samples != null; + Frame frame2 = grabber.grab(); + while ((wasVideo && frame2.image != null) + || (wasAudio && frame2.samples != null)) { + frame2 = grabber.grab(); + } + assertTrue(wasVideo ^ frame2.image != null); + assertTrue(wasAudio ^ frame2.samples != null); + long timestamp3 = grabber.getTimestamp(); + System.out.println(timestamp3 + " - " + timestamp + " = " + (timestamp3 - timestamp)); + assertTrue(timestamp3 >= timestamp - tolerance && timestamp3 < timestamp + tolerance); + } + */ + } + System.out.println(); + System.out.println("------------------------------------"); + System.out.println("delta from " + mindelta + " to " + maxdelta); + System.out.println(); + } + if (seektestnum==0) { + System.out.println(); + System.out.println("======== Check sequential setVideoFrameNumber (issue #1697) ========"); + for (int i = 0; i < 10; i++) { + grabber.setVideoFrameNumber(i); + long timestamp = grabber.grabImage().timestamp; + System.out.println("frame number:" + i + " timestamp:" + timestamp); + assertTrue(i == Math.round(timestamp * grabber.getFrameRate() / 1000000L)); + } + } + if (seektestnum==2) { + + long count1 = 0; + + long duration = grabber.getLengthInTime(); + + System.out.println(); + System.out.println("======== Check seeking in audio ========"); + System.out.println("FrameRate = "+grabber.getFrameRate()+" AudioFrameRate = "+grabber.getAudioFrameRate()+", duration = "+duration+" audio frames = "+grabber.getLengthInAudioFrames()); + + + + double deltaTimeStamp=0.0; + if (grabber.hasAudio() && grabber.getAudioFrameRate() > 0) { + deltaTimeStamp = 1000000.0/grabber.getAudioFrameRate(); + + } + System.out.println("AudioFrameDuration = "+deltaTimeStamp); + System.out.println(); + System.out.println("======== Check setAudioFrameNumber 
========"); + count1=0; + + while (count1++<1000) { + int audioFrameToSeek = random.nextInt(grabber.getLengthInAudioFrames()-100); + grabber.setAudioFrameNumber(audioFrameToSeek); + Frame setFrame = grabber.grabSamples(); + if (setFrame == null) { + System.out.println("null frame after seek to audio frame"); + } else { + long audioTs = grabber.getTimestamp(); + System.out.println("audioFrame # "+audioFrameToSeek+", timeStamp = "+audioTs+", difference = "+Math.round(audioTs*grabber.getAudioFrameRate()/1000000 - audioFrameToSeek)); + assertTrue(Math.abs(audioTs*grabber.getAudioFrameRate()/1000000 - audioFrameToSeek)<10); + } + } + } + grabber.stop(); + System.out.println(); + System.out.println("======= seek in " +fileName+" is finished ===========" ); + } + + } +} diff --git a/platform/src/test/java/org/bytedeco/javacv/SeekableByteArrayOutputStreamTest.java b/platform/src/test/java/org/bytedeco/javacv/SeekableByteArrayOutputStreamTest.java index b3ae6d65..0c1d2e7d 100644 --- a/platform/src/test/java/org/bytedeco/javacv/SeekableByteArrayOutputStreamTest.java +++ b/platform/src/test/java/org/bytedeco/javacv/SeekableByteArrayOutputStreamTest.java @@ -1,246 +1,246 @@ -/* - * Copyright (C) 2019 Sven Vorlauf - * - * Licensed either under the Apache License, Version 2.0, or (at your option) - * under the terms of the GNU General Public License as published by - * the Free Software Foundation (subject to the "Classpath" exception), - * either version 2, or any later version (collectively, the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * http://www.gnu.org/licenses/ - * http://www.gnu.org/software/classpath/license.html - * - * or as provided in the LICENSE.txt file that accompanied this code. - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.bytedeco.javacv; - -import static org.junit.Assert.*; - -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.util.Arrays; -import java.util.Random; - -import org.bytedeco.ffmpeg.global.avcodec; -import org.bytedeco.ffmpeg.global.avutil; -import org.bytedeco.javacpp.Loader; -import org.bytedeco.javacpp.indexer.UByteIndexer; -import org.hamcrest.core.IsEqual; -import org.hamcrest.core.IsNot; -import org.junit.Test; - -public class SeekableByteArrayOutputStreamTest { - - private static final int WIDTH = 640; - private static final int HEIGHT = 360; - private static final int FRAME_COUNT = 100; - - private int writeByte(byte[] originalBytes, int offset, SeekableByteArrayOutputStream byteArrayOutputStream) { - byteArrayOutputStream.write(originalBytes[offset]); - return 1; - } - - private int writePartialBytes(byte[] originalBytes, int offset, Random random, - SeekableByteArrayOutputStream byteArrayOutputStream) throws IOException { - int chunkSize = Math.min(random.nextInt(50), originalBytes.length - offset); - byteArrayOutputStream.write(originalBytes, offset, chunkSize); - return chunkSize; - } - - private int writeBytes(byte[] originalBytes, int offset, Random random, - SeekableByteArrayOutputStream byteArrayOutputStream) throws IOException { - int chunkSize = Math.min(random.nextInt(50), originalBytes.length - offset); - byteArrayOutputStream.write(Arrays.copyOfRange(originalBytes, offset, offset + chunkSize)); - return chunkSize; - } - - private void createVideo(FFmpegFrameRecorder recorder) throws Exception { - recorder.setVideoCodec(avcodec.AV_CODEC_ID_MPEG4); - recorder.setFormat("mp4"); - recorder.setFrameRate(30); - recorder.setPixelFormat(avutil.AV_PIX_FMT_YUV420P); - recorder.start(); - for (int n = 0; n < FRAME_COUNT; n++) { - Frame frame = new Frame(WIDTH, HEIGHT, Frame.DEPTH_UBYTE, 3); - UByteIndexer frameIdx = frame.createIndexer(); - for (int i = 0; i < frameIdx.rows(); i++) { - for (int j = 0; j < frameIdx.cols(); j++) { - for (int k = 0; k < frameIdx.channels(); k++) { - frameIdx.put(i, j, k, n + i + j + k); - } - } - } - recorder.record(frame); - frame.close(); - } - recorder.close(); - } - - @Test - public void serialWriteByteTest() { - System.out.println("SeekableByteArrayOutputStreamSerialWriteByte"); - Random random = new Random(-1); - byte[] originalBytes = new byte[1000]; - random.nextBytes(originalBytes); - - try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { - int offset = 0; - while (offset < originalBytes.length) { - offset += writeByte(originalBytes, offset, byteArrayOutputStream); - } - assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); - } catch (Exception e) { - fail("Exception should not have been thrown: " + e); - } - } - - @Test - public void serialWriteBytesTest() { - System.out.println("SeekableByteArrayOutputStreamSerialWriteBytes"); - Random random = new Random(-1); - byte[] originalBytes = new byte[1000]; - random.nextBytes(originalBytes); - - try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { - int offset = 0; - while (offset < originalBytes.length) { - offset += writeBytes(originalBytes, offset, random, byteArrayOutputStream); - } - assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); - } catch (Exception e) { - fail("Exception should not have been thrown: " + e); - } - } - - @Test - public void serialWritePartialBytesTest() { - 
System.out.println("SeekableByteArrayOutputStreamSerialWritePartialBytes"); - Random random = new Random(-1); - byte[] originalBytes = new byte[1000]; - random.nextBytes(originalBytes); - - try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { - int offset = 0; - while (offset < originalBytes.length) { - offset += writePartialBytes(originalBytes, offset, random, byteArrayOutputStream); - } - assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); - } catch (Exception e) { - fail("Exception should not have been thrown: " + e); - } - } - - @Test - public void serialWriteTest() { - System.out.println("SeekableByteArrayOutputStreamSerialWrite"); - Random random = new Random(-1); - byte[] originalBytes = new byte[1000]; - random.nextBytes(originalBytes); - - try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { - int offset = 0; - while (offset < originalBytes.length) { - switch (random.nextInt(3)) { - case 0: - offset += writeByte(originalBytes, offset, byteArrayOutputStream); - break; - case 1: - offset += writeBytes(originalBytes, offset, random, byteArrayOutputStream); - break; - case 2: - offset += writePartialBytes(originalBytes, offset, random, byteArrayOutputStream); - break; - } - } - assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); - } catch (Exception e) { - fail("Exception should not have been thrown: " + e); - } - } - - public void seekWriteTest() { - System.out.println("SeekableByteArrayOutputStreamSeekWrite"); - Random random = new Random(-1); - byte[] originalBytes = new byte[1000]; - random.nextBytes(originalBytes); - try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { - int offset = 0; - for (int i = 0; i < 10; i++) { - // write 100 bytes - byteArrayOutputStream.write(originalBytes, offset, 100); - - int position = random.nextInt(offset + 20); - int newBytesPosition = position + 500 % 1000; - int length = 10 + random.nextInt(20); - // get current bytes - byte[] writtenOriginalBytes = Arrays.copyOfRange(byteArrayOutputStream.toByteArray(), position, - position + length); - - // bytes to write at the new position - byte[] newBytes = Arrays.copyOfRange(originalBytes, newBytesPosition, newBytesPosition + length); - - // just assert that the new bytes are different to the written ones - assertThat(writtenOriginalBytes, IsNot.not(IsEqual.equalTo(newBytes))); - - // replace bytes - byteArrayOutputStream.seek(position, 0); - byteArrayOutputStream.write(newBytes); - byte[] writtenNewBytes = Arrays.copyOfRange(byteArrayOutputStream.toByteArray(), position, - position + length); - assertThat(newBytes, IsEqual.equalTo(writtenNewBytes)); - - // write back original bytes - byteArrayOutputStream.seek(position, 0); - byteArrayOutputStream.write(originalBytes, position, length); - - // get back to the end of the stream - byteArrayOutputStream.seek(offset, 0); - offset += 100; - } - while (offset < originalBytes.length) { - switch (random.nextInt(3)) { - case 0: - offset += writeByte(originalBytes, offset, byteArrayOutputStream); - break; - case 1: - offset += writeBytes(originalBytes, offset, random, byteArrayOutputStream); - break; - case 2: - offset += writePartialBytes(originalBytes, offset, random, byteArrayOutputStream); - break; - } - } - assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); - } catch (Exception e) { - fail("Exception should not have been thrown: " + e); - } - } - - @Test - public void 
testVideoBytesEqual() { - // if this test fails it might be due to indeterministic multithreaded encoding - System.out.println("SeekableByteArrayOutputStreamVideo"); - File tempFile = new File(Loader.getTempDir(), "test.mp4"); - try { - createVideo(new FFmpegFrameRecorder(tempFile, WIDTH, HEIGHT, 0)); - byte[] fileBytes = Files.readAllBytes(tempFile.toPath()); - - SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream(); - createVideo(new FFmpegFrameRecorder(byteArrayOutputStream, WIDTH, HEIGHT, 0)); - assertArrayEquals(fileBytes, byteArrayOutputStream.toByteArray()); - } catch (Exception e) { - fail("Exception should not have been thrown: " + e); - } finally { - tempFile.delete(); - } - } -} +/* + * Copyright (C) 2019 Sven Vorlauf + * + * Licensed either under the Apache License, Version 2.0, or (at your option) + * under the terms of the GNU General Public License as published by + * the Free Software Foundation (subject to the "Classpath" exception), + * either version 2, or any later version (collectively, the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.gnu.org/licenses/ + * http://www.gnu.org/software/classpath/license.html + * + * or as provided in the LICENSE.txt file that accompanied this code. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bytedeco.javacv; + +import static org.junit.Assert.*; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Random; + +import org.bytedeco.ffmpeg.global.avcodec; +import org.bytedeco.ffmpeg.global.avutil; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacpp.indexer.UByteIndexer; +import org.hamcrest.core.IsEqual; +import org.hamcrest.core.IsNot; +import org.junit.Test; + +public class SeekableByteArrayOutputStreamTest { + + private static final int WIDTH = 640; + private static final int HEIGHT = 360; + private static final int FRAME_COUNT = 100; + + private int writeByte(byte[] originalBytes, int offset, SeekableByteArrayOutputStream byteArrayOutputStream) { + byteArrayOutputStream.write(originalBytes[offset]); + return 1; + } + + private int writePartialBytes(byte[] originalBytes, int offset, Random random, + SeekableByteArrayOutputStream byteArrayOutputStream) throws IOException { + int chunkSize = Math.min(random.nextInt(50), originalBytes.length - offset); + byteArrayOutputStream.write(originalBytes, offset, chunkSize); + return chunkSize; + } + + private int writeBytes(byte[] originalBytes, int offset, Random random, + SeekableByteArrayOutputStream byteArrayOutputStream) throws IOException { + int chunkSize = Math.min(random.nextInt(50), originalBytes.length - offset); + byteArrayOutputStream.write(Arrays.copyOfRange(originalBytes, offset, offset + chunkSize)); + return chunkSize; + } + + private void createVideo(FFmpegFrameRecorder recorder) throws Exception { + recorder.setVideoCodec(avcodec.AV_CODEC_ID_MPEG4); + recorder.setFormat("mp4"); + recorder.setFrameRate(30); + recorder.setPixelFormat(avutil.AV_PIX_FMT_YUV420P); + recorder.start(); + for (int n = 0; n < FRAME_COUNT; n++) { + 
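// Fill every frame with a deterministic pattern so both encoding runs see identical input. +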
Frame frame = new Frame(WIDTH, HEIGHT, Frame.DEPTH_UBYTE, 3); + UByteIndexer frameIdx = frame.createIndexer(); + for (int i = 0; i < frameIdx.rows(); i++) { + for (int j = 0; j < frameIdx.cols(); j++) { + for (int k = 0; k < frameIdx.channels(); k++) { + frameIdx.put(i, j, k, n + i + j + k); + } + } + } + recorder.record(frame); + frame.close(); + } + recorder.close(); + } + + @Test + public void serialWriteByteTest() { + System.out.println("SeekableByteArrayOutputStreamSerialWriteByte"); + Random random = new Random(-1); + byte[] originalBytes = new byte[1000]; + random.nextBytes(originalBytes); + + try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { + int offset = 0; + while (offset < originalBytes.length) { + offset += writeByte(originalBytes, offset, byteArrayOutputStream); + } + assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); + } catch (Exception e) { + fail("Exception should not have been thrown: " + e); + } + } + + @Test + public void serialWriteBytesTest() { + System.out.println("SeekableByteArrayOutputStreamSerialWriteBytes"); + Random random = new Random(-1); + byte[] originalBytes = new byte[1000]; + random.nextBytes(originalBytes); + + try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { + int offset = 0; + while (offset < originalBytes.length) { + offset += writeBytes(originalBytes, offset, random, byteArrayOutputStream); + } + assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); + } catch (Exception e) { + fail("Exception should not have been thrown: " + e); + } + } + + @Test + public void serialWritePartialBytesTest() { + System.out.println("SeekableByteArrayOutputStreamSerialWritePartialBytes"); + Random random = new Random(-1); + byte[] originalBytes = new byte[1000]; + random.nextBytes(originalBytes); + + try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { + int offset = 0; + while (offset < originalBytes.length) { + offset += writePartialBytes(originalBytes, offset, random, byteArrayOutputStream); + } + assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); + } catch (Exception e) { + fail("Exception should not have been thrown: " + e); + } + } + + @Test + public void serialWriteTest() { + System.out.println("SeekableByteArrayOutputStreamSerialWrite"); + Random random = new Random(-1); + byte[] originalBytes = new byte[1000]; + random.nextBytes(originalBytes); + + try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { + int offset = 0; + while (offset < originalBytes.length) { + switch (random.nextInt(3)) { + case 0: + offset += writeByte(originalBytes, offset, byteArrayOutputStream); + break; + case 1: + offset += writeBytes(originalBytes, offset, random, byteArrayOutputStream); + break; + case 2: + offset += writePartialBytes(originalBytes, offset, random, byteArrayOutputStream); + break; + } + } + assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); + } catch (Exception e) { + fail("Exception should not have been thrown: " + e); + } + } + + public void seekWriteTest() { + System.out.println("SeekableByteArrayOutputStreamSeekWrite"); + Random random = new Random(-1); + byte[] originalBytes = new byte[1000]; + random.nextBytes(originalBytes); + try (SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream()) { + int offset = 0; + for (int i = 0; i < 10; i++) { + // write 100 bytes + 
byteArrayOutputStream.write(originalBytes, offset, 100); + + int position = random.nextInt(offset + 20); + int newBytesPosition = position + 500 % 1000; + int length = 10 + random.nextInt(20); + // get current bytes + byte[] writtenOriginalBytes = Arrays.copyOfRange(byteArrayOutputStream.toByteArray(), position, + position + length); + + // bytes to write at the new position + byte[] newBytes = Arrays.copyOfRange(originalBytes, newBytesPosition, newBytesPosition + length); + + // just assert that the new bytes are different to the written ones + assertThat(writtenOriginalBytes, IsNot.not(IsEqual.equalTo(newBytes))); + + // replace bytes + byteArrayOutputStream.seek(position, 0); + byteArrayOutputStream.write(newBytes); + byte[] writtenNewBytes = Arrays.copyOfRange(byteArrayOutputStream.toByteArray(), position, + position + length); + assertThat(newBytes, IsEqual.equalTo(writtenNewBytes)); + + // write back original bytes + byteArrayOutputStream.seek(position, 0); + byteArrayOutputStream.write(originalBytes, position, length); + + // get back to the end of the stream + byteArrayOutputStream.seek(offset, 0); + offset += 100; + } + while (offset < originalBytes.length) { + switch (random.nextInt(3)) { + case 0: + offset += writeByte(originalBytes, offset, byteArrayOutputStream); + break; + case 1: + offset += writeBytes(originalBytes, offset, random, byteArrayOutputStream); + break; + case 2: + offset += writePartialBytes(originalBytes, offset, random, byteArrayOutputStream); + break; + } + } + assertArrayEquals(originalBytes, byteArrayOutputStream.toByteArray()); + } catch (Exception e) { + fail("Exception should not have been thrown: " + e); + } + } + + @Test + public void testVideoBytesEqual() { + // if this test fails it might be due to indeterministic multithreaded encoding + System.out.println("SeekableByteArrayOutputStreamVideo"); + File tempFile = new File(Loader.getTempDir(), "test.mp4"); + try { + createVideo(new FFmpegFrameRecorder(tempFile, WIDTH, HEIGHT, 0)); + byte[] fileBytes = Files.readAllBytes(tempFile.toPath()); + + SeekableByteArrayOutputStream byteArrayOutputStream = new SeekableByteArrayOutputStream(); + createVideo(new FFmpegFrameRecorder(byteArrayOutputStream, WIDTH, HEIGHT, 0)); + assertArrayEquals(fileBytes, byteArrayOutputStream.toByteArray()); + } catch (Exception e) { + fail("Exception should not have been thrown: " + e); + } finally { + tempFile.delete(); + } + } +} diff --git a/samples/AudioSplitMergeHelper.java b/samples/AudioSplitMergeHelper.java index bf7d0ea0..b4b836fb 100644 --- a/samples/AudioSplitMergeHelper.java +++ b/samples/AudioSplitMergeHelper.java @@ -1,152 +1,152 @@ -import org.bytedeco.javacv.*; - -import java.nio.Buffer; -import java.nio.ShortBuffer; - -/** - * This code is a sample which split a 2-channel stereo audio into 2 single-channel mono audios - * or merge 2 single-channel mono audios into a 2-channel stereo. - *

- * The code has been tested on s16le audio. - *

- * s16le means short 16bit little end. For other format, you may need change the ShortBuffer to other Buffer subclass - *

- * For s16lep, s32lep,xxxxxp format, the sample point arrangement format is no longer in ‘LRLRLR'. - * Instead, it is arragement in format 'LLLLLL','RRRRRR'. So you have to change the short copy code. - *

- *

- * /////////////////////////////////////////////////////////////////////////// - * JavaCV is an excellent open-source streaming processing framework in the Java field - *

- * But I see many people, especially in China, making profits for themselves by introducing its usage, - * which is not in line with the concept of open source projects. - * I hope that If this code helped you, you can share your experience and knowledge with others in the world, rather - * than for personal gain. Spread the spirit of open source. - * /////////////////////////////////////////////////////////////////////////// - *

- * Acknowledge: Thanks for my hot girlfriend. - * - * @author steeveen - * @date 2023/7/1 14:32 - */ -public class AudioSplitMergeHelper { - - - /** - * split a 2-channel stereo audio into 2 single-channel mono audios - *

- * If you want to split this 2-channel stereo to 2 single-channel stereo, you should create 2 2-channel stereos - * and fill one channel with 0 data. It is similar in principle, so the code won't go into too much here. - * - * @param input the file path which is to be splited - * @param outputLeft the file path which store the left channel audio file - * @param outputRight the file path which store the right channel audio file - * @throws FrameGrabber.Exception - * @throws FrameRecorder.Exception - */ - public static void split(String input, String outputLeft, String outputRight) throws FrameGrabber.Exception, FrameRecorder.Exception { - //grabber from input - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(input); - grabber.start(); - //two recorders for two channels - FFmpegFrameRecorder leftRecorder = new FFmpegFrameRecorder(outputLeft, 1); - leftRecorder.setSampleRate(grabber.getSampleRate()); - leftRecorder.start(); - FFmpegFrameRecorder rightRecorder = new FFmpegFrameRecorder(outputRight, 1); - rightRecorder.setSampleRate(grabber.getSampleRate()); - rightRecorder.start(); - - Frame frame = null; - while ((frame = grabber.grabSamples()) != null) { - // use s16le for example. so select ShortBuffer to receive the sample - ShortBuffer sb = (ShortBuffer) frame.samples[0]; - short[] shorts = new short[sb.limit()]; - sb.get(shorts); - //Split the LRLRLR to LLL in left channel and RRR int right channel - Frame leftFrame = frame.clone(); - ShortBuffer leftSb = ShortBuffer.allocate(sb.capacity() / 2); - leftFrame.samples = new Buffer[]{leftSb}; - leftFrame.audioChannels = 1; - - Frame rightFrame = frame.clone(); - ShortBuffer rightSb = ShortBuffer.allocate(sb.capacity() / 2); - rightFrame.samples = new Buffer[]{rightSb}; - rightFrame.audioChannels = 1; - - for (int i = 0; i < shorts.length; i++) { - if (i % 2 == 0) { - leftSb.put(shorts[i]); - } else { - rightSb.put(shorts[i]); - } - } - // reset the buffer to read mode - leftSb.rewind(); - rightSb.rewind(); - leftRecorder.record(leftFrame); - rightRecorder.record(rightFrame); - } - //release source - grabber.close(); - leftRecorder.close(); - rightRecorder.close(); - } - - /** - * Merge 2 single-channel mono audios into a 2-channel stereo. - * As usual the two input audios should have the same parameter and length; - * - * @param inputLeft the left channel to be merged in - * @param inputRight the right channel to be merged in - * @param output the merged stereo audio - * @throws FFmpegFrameGrabber.Exception - * @throws FFmpegFrameRecorder.Exception - */ - public static void merge(String inputLeft, String inputRight, String output) throws FrameGrabber.Exception, FrameRecorder.Exception { - FFmpegFrameGrabber leftGrabber = new FFmpegFrameGrabber(inputLeft); - leftGrabber.start(); - FFmpegFrameGrabber rightGrabber = new FFmpegFrameGrabber(inputRight); - rightGrabber.start(); - FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(output, 2); - //you'd better confirm the two input have the same samplerate. 
otherwise, you should control it manually by yourself - recorder.setSampleRate(leftGrabber.getSampleRate()); - recorder.start(); - - Frame leftFrame = null; - Frame rightFrame = null; - int index = 0; - int maxLength = leftGrabber.getLengthInAudioFrames(); - while (index < maxLength) { - // carry the bit data from two input into result frame by frame - leftFrame = leftGrabber.grabSamples(); - rightFrame = rightGrabber.grabSamples(); - ShortBuffer leftSb = (ShortBuffer) leftFrame.samples[0]; - ShortBuffer rightSb = (ShortBuffer) rightFrame.samples[0]; - short[] leftShorts = new short[leftSb.limit()]; - short[] rightShorts = new short[rightSb.limit()]; - leftSb.get(leftShorts); - rightSb.get(rightShorts); - ShortBuffer mergeSb = ShortBuffer.allocate(leftSb.capacity() + rightSb.capacity()); - - // create a template from the existing frame - Frame mergeFrame = leftFrame.clone(); - // replace the frame tempalte by our merged buffer - mergeFrame.samples = new Buffer[]{mergeSb}; - mergeFrame.audioChannels = 2; - - for (int i = 0; i < leftShorts.length; i++) { - mergeSb.put(leftShorts[i]); - mergeSb.put(rightShorts[i]); - } - - //reset buffer to read mode - mergeSb.flip(); - recorder.record(mergeFrame); - index++; - } - //release source - leftGrabber.close(); - rightGrabber.close(); - recorder.close(); - } -} +import org.bytedeco.javacv.*; + +import java.nio.Buffer; +import java.nio.ShortBuffer; + +/** + * This code is a sample which split a 2-channel stereo audio into 2 single-channel mono audios + * or merge 2 single-channel mono audios into a 2-channel stereo. + *

+ * The code has been tested on s16le audio. + *
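+ * If you are not sure which sample format an input actually carries, you can
+ * check at runtime: FFmpegFrameGrabber.getSampleFormat() returns an
+ * avutil.AV_SAMPLE_FMT_* constant (AV_SAMPLE_FMT_S16 corresponds to the s16le case handled here).
+ *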

+ * s16le means signed 16-bit little-endian (a Java short). For other formats, you may need to change the ShortBuffer to another Buffer subclass.
+ *

+ * For planar formats such as s16lep or s32lep (anything ending in 'p'), the sample points are no longer interleaved as 'LRLRLR'.
+ * Instead, each channel is stored in its own plane, as 'LLLLLL' and 'RRRRRR', so you have to change the sample-copying code accordingly.
+ *
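+ * As an untested sketch of the planar case: each element of frame.samples then
+ * holds one channel's plane, so the interleaved copy loop below would be
+ * replaced by direct per-plane copies, e.g.
+ * <pre>{@code
+ * ShortBuffer left  = (ShortBuffer) frame.samples[0]; // all left samples
+ * ShortBuffer right = (ShortBuffer) frame.samples[1]; // all right samples
+ * }</pre>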

+ *
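+ * A minimal usage sketch (the file names are hypothetical placeholders):
+ * <pre>{@code
+ * AudioSplitMergeHelper.split("stereo.wav", "left.wav", "right.wav");
+ * AudioSplitMergeHelper.merge("left.wav", "right.wav", "restored.wav");
+ * }</pre>
+ *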

+ * ///////////////////////////////////////////////////////////////////////////
+ * JavaCV is an excellent open-source stream-processing framework for Java.
+ *

+ * But I see many people, especially in China, making a profit for themselves by teaching its usage,
+ * which is not in line with the spirit of open-source projects.
+ * I hope that if this code helped you, you will share your experience and knowledge with others in the world
+ * rather than use it for personal gain. Spread the spirit of open source.
+ * ///////////////////////////////////////////////////////////////////////////
+ *

+ * Acknowledgement: thanks to my hot girlfriend.
+ *
+ * @author steeveen
+ * @date 2023/7/1 14:32
+ */
+public class AudioSplitMergeHelper {
+
+
+    /**
+     * Split a 2-channel stereo audio file into 2 single-channel mono audio files.
+     *

+ * If you want to split this 2-channel stereo to 2 single-channel stereo, you should create 2 2-channel stereos + * and fill one channel with 0 data. It is similar in principle, so the code won't go into too much here. + * + * @param input the file path which is to be splited + * @param outputLeft the file path which store the left channel audio file + * @param outputRight the file path which store the right channel audio file + * @throws FrameGrabber.Exception + * @throws FrameRecorder.Exception + */ + public static void split(String input, String outputLeft, String outputRight) throws FrameGrabber.Exception, FrameRecorder.Exception { + //grabber from input + FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(input); + grabber.start(); + //two recorders for two channels + FFmpegFrameRecorder leftRecorder = new FFmpegFrameRecorder(outputLeft, 1); + leftRecorder.setSampleRate(grabber.getSampleRate()); + leftRecorder.start(); + FFmpegFrameRecorder rightRecorder = new FFmpegFrameRecorder(outputRight, 1); + rightRecorder.setSampleRate(grabber.getSampleRate()); + rightRecorder.start(); + + Frame frame = null; + while ((frame = grabber.grabSamples()) != null) { + // use s16le for example. so select ShortBuffer to receive the sample + ShortBuffer sb = (ShortBuffer) frame.samples[0]; + short[] shorts = new short[sb.limit()]; + sb.get(shorts); + //Split the LRLRLR to LLL in left channel and RRR int right channel + Frame leftFrame = frame.clone(); + ShortBuffer leftSb = ShortBuffer.allocate(sb.capacity() / 2); + leftFrame.samples = new Buffer[]{leftSb}; + leftFrame.audioChannels = 1; + + Frame rightFrame = frame.clone(); + ShortBuffer rightSb = ShortBuffer.allocate(sb.capacity() / 2); + rightFrame.samples = new Buffer[]{rightSb}; + rightFrame.audioChannels = 1; + + for (int i = 0; i < shorts.length; i++) { + if (i % 2 == 0) { + leftSb.put(shorts[i]); + } else { + rightSb.put(shorts[i]); + } + } + // reset the buffer to read mode + leftSb.rewind(); + rightSb.rewind(); + leftRecorder.record(leftFrame); + rightRecorder.record(rightFrame); + } + //release source + grabber.close(); + leftRecorder.close(); + rightRecorder.close(); + } + + /** + * Merge 2 single-channel mono audios into a 2-channel stereo. + * As usual the two input audios should have the same parameter and length; + * + * @param inputLeft the left channel to be merged in + * @param inputRight the right channel to be merged in + * @param output the merged stereo audio + * @throws FFmpegFrameGrabber.Exception + * @throws FFmpegFrameRecorder.Exception + */ + public static void merge(String inputLeft, String inputRight, String output) throws FrameGrabber.Exception, FrameRecorder.Exception { + FFmpegFrameGrabber leftGrabber = new FFmpegFrameGrabber(inputLeft); + leftGrabber.start(); + FFmpegFrameGrabber rightGrabber = new FFmpegFrameGrabber(inputRight); + rightGrabber.start(); + FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(output, 2); + //you'd better confirm the two input have the same samplerate. 
otherwise, you should control it manually by yourself + recorder.setSampleRate(leftGrabber.getSampleRate()); + recorder.start(); + + Frame leftFrame = null; + Frame rightFrame = null; + int index = 0; + int maxLength = leftGrabber.getLengthInAudioFrames(); + while (index < maxLength) { + // carry the bit data from two input into result frame by frame + leftFrame = leftGrabber.grabSamples(); + rightFrame = rightGrabber.grabSamples(); + ShortBuffer leftSb = (ShortBuffer) leftFrame.samples[0]; + ShortBuffer rightSb = (ShortBuffer) rightFrame.samples[0]; + short[] leftShorts = new short[leftSb.limit()]; + short[] rightShorts = new short[rightSb.limit()]; + leftSb.get(leftShorts); + rightSb.get(rightShorts); + ShortBuffer mergeSb = ShortBuffer.allocate(leftSb.capacity() + rightSb.capacity()); + + // create a template from the existing frame + Frame mergeFrame = leftFrame.clone(); + // replace the frame tempalte by our merged buffer + mergeFrame.samples = new Buffer[]{mergeSb}; + mergeFrame.audioChannels = 2; + + for (int i = 0; i < leftShorts.length; i++) { + mergeSb.put(leftShorts[i]); + mergeSb.put(rightShorts[i]); + } + + //reset buffer to read mode + mergeSb.flip(); + recorder.record(mergeFrame); + index++; + } + //release source + leftGrabber.close(); + rightGrabber.close(); + recorder.close(); + } +} diff --git a/samples/BioInspiredRetina.java b/samples/BioInspiredRetina.java index 547d0322..1b1df4d8 100644 --- a/samples/BioInspiredRetina.java +++ b/samples/BioInspiredRetina.java @@ -1,83 +1,83 @@ -import java.awt.*; -import java.awt.image.BufferedImage; -import java.io.File; -import javax.imageio.ImageIO; - -import org.bytedeco.javacpp.tools.Slf4jLogger; -import org.bytedeco.javacv.CanvasFrame; -import org.bytedeco.javacv.Java2DFrameConverter; -import org.bytedeco.javacv.OpenCVFrameConverter; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import org.bytedeco.opencv.opencv_bioinspired.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_bioinspired.*; - - -/** - * Bioinspired Retina demonstration - * This retina model allows spatio-temporal image processing - * As a summary, these are the retina model properties: - * It applies a spectral whithening (mid-frequency details enhancement) - * high frequency spatio-temporal noise reduction - * low frequency luminance to be reduced (luminance range compression) - * local logarithmic luminance compression allows details to be enhanced in low light conditions - * - * Created by mbetzel on 04.09.2016. - */ -public class BioInspiredRetina { - - static { - System.setProperty("org.bytedeco.javacpp.logger", "slf4jlogger"); - System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "debug"); - } - - private static final Slf4jLogger logger = (Slf4jLogger) org.bytedeco.javacpp.tools.Logger.create(BioInspiredRetina.class); - - public static void main(String[] args) { - try { - logger.info(String.valueOf(logger.isDebugEnabled())); - logger.info("Start"); - new BioInspiredRetina().execute(args); - logger.info("Stop"); - } catch (Exception e) { - e.printStackTrace(); - } - } - - private void execute(String[] args) throws Exception { - BufferedImage bufferedImage = args.length >= 1 ? 
ImageIO.read(new File(args[0])) : ImageIO.read(this.getClass().getResourceAsStream("BlackBalls.jpg")); - System.out.println("Image type: " + bufferedImage.getType()); - Mat matrix = new OpenCVFrameConverter.ToMat().convert(new Java2DFrameConverter().convert(bufferedImage)); - normalize(matrix, matrix, 0, 255, NORM_MINMAX, -1, noArray()); - showImage(matrix); - matrix.convertTo(matrix, CV_32F); - Mat gammaTransformedImage = new Mat(matrix.size(), CV_32F); - pow(matrix, 1. / 5, gammaTransformedImage); - Retina retina = Retina.create(gammaTransformedImage.size()); - Mat retinaOutput_parvo = new Mat(); - Mat retinaOutput_magno = new Mat(); - retina.clearBuffers(); - retina.run(gammaTransformedImage); - retina.getParvo(retinaOutput_parvo); - retina.getMagno(retinaOutput_magno); - showImage(retinaOutput_parvo); - showImage(retinaOutput_magno); - } - - - private void showImage(Mat matrix) { - CanvasFrame canvasFrame = new CanvasFrame("Retina demonstration", 1); - canvasFrame.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); - canvasFrame.setCanvasSize(640, 480); - Canvas canvas = canvasFrame.getCanvas(); - canvasFrame.getContentPane().removeAll(); - ScrollPane scrollPane = new ScrollPane(); - scrollPane.add(canvas); - canvasFrame.add(scrollPane); - canvasFrame.showImage(new OpenCVFrameConverter.ToMat().convert(matrix)); - } - -} +import java.awt.*; +import java.awt.image.BufferedImage; +import java.io.File; +import javax.imageio.ImageIO; + +import org.bytedeco.javacpp.tools.Slf4jLogger; +import org.bytedeco.javacv.CanvasFrame; +import org.bytedeco.javacv.Java2DFrameConverter; +import org.bytedeco.javacv.OpenCVFrameConverter; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import org.bytedeco.opencv.opencv_bioinspired.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_bioinspired.*; + + +/** + * Bioinspired Retina demonstration + * This retina model allows spatio-temporal image processing + * As a summary, these are the retina model properties: + * It applies a spectral whithening (mid-frequency details enhancement) + * high frequency spatio-temporal noise reduction + * low frequency luminance to be reduced (luminance range compression) + * local logarithmic luminance compression allows details to be enhanced in low light conditions + * + * Created by mbetzel on 04.09.2016. + */ +public class BioInspiredRetina { + + static { + System.setProperty("org.bytedeco.javacpp.logger", "slf4jlogger"); + System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "debug"); + } + + private static final Slf4jLogger logger = (Slf4jLogger) org.bytedeco.javacpp.tools.Logger.create(BioInspiredRetina.class); + + public static void main(String[] args) { + try { + logger.info(String.valueOf(logger.isDebugEnabled())); + logger.info("Start"); + new BioInspiredRetina().execute(args); + logger.info("Stop"); + } catch (Exception e) { + e.printStackTrace(); + } + } + + private void execute(String[] args) throws Exception { + BufferedImage bufferedImage = args.length >= 1 ? 
ImageIO.read(new File(args[0])) : ImageIO.read(this.getClass().getResourceAsStream("BlackBalls.jpg")); + System.out.println("Image type: " + bufferedImage.getType()); + Mat matrix = new OpenCVFrameConverter.ToMat().convert(new Java2DFrameConverter().convert(bufferedImage)); + normalize(matrix, matrix, 0, 255, NORM_MINMAX, -1, noArray()); + showImage(matrix); + matrix.convertTo(matrix, CV_32F); + Mat gammaTransformedImage = new Mat(matrix.size(), CV_32F); + pow(matrix, 1. / 5, gammaTransformedImage); + Retina retina = Retina.create(gammaTransformedImage.size()); + Mat retinaOutput_parvo = new Mat(); + Mat retinaOutput_magno = new Mat(); + retina.clearBuffers(); + retina.run(gammaTransformedImage); + retina.getParvo(retinaOutput_parvo); + retina.getMagno(retinaOutput_magno); + showImage(retinaOutput_parvo); + showImage(retinaOutput_magno); + } + + + private void showImage(Mat matrix) { + CanvasFrame canvasFrame = new CanvasFrame("Retina demonstration", 1); + canvasFrame.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); + canvasFrame.setCanvasSize(640, 480); + Canvas canvas = canvasFrame.getCanvas(); + canvasFrame.getContentPane().removeAll(); + ScrollPane scrollPane = new ScrollPane(); + scrollPane.add(canvas); + canvasFrame.add(scrollPane); + canvasFrame.showImage(new OpenCVFrameConverter.ToMat().convert(matrix)); + } + +} diff --git a/samples/BlobDemo.java b/samples/BlobDemo.java index aa25f496..2bd72dc3 100644 --- a/samples/BlobDemo.java +++ b/samples/BlobDemo.java @@ -1,273 +1,273 @@ -import org.bytedeco.javacv.Blobs; -import org.bytedeco.javacv.CanvasFrame; -import org.bytedeco.javacv.OpenCVFrameConverter; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_imgcodecs.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; - -/////////////////////////////////////////////////////////////////// -//* *// -//* As the author of this code, I place all of this code into *// -//* the public domain. Users can use it for any legal purpose. *// -//* *// -//* - Dave Grossman *// -//* *// -/////////////////////////////////////////////////////////////////// -public class BlobDemo -{ - public static void main(String[] args) - { - System.out.println("STARTING...\n"); - demo(); - System.out.println("ALL DONE"); - } - - public static void demo() - { - int MinArea = 6; - int ErodeCount =0; - int DilateCount = 0; - - IplImage RawImage = null; - - // Read an image. 
- for(int k = 0; k < 7; k++) - { - if(k == 0) { RawImage = cvLoadImage("BlackBalls.jpg"); MinArea = 250; ErodeCount = 0; DilateCount = 1; } - else if(k == 1) { RawImage = cvLoadImage("Shapes1.jpg"); MinArea = 6; ErodeCount = 0; DilateCount = 1; } - else if(k == 2) { RawImage = cvLoadImage("Shapes2.jpg"); MinArea = 250; ErodeCount = 0; DilateCount = 1; } - else if(k == 3) { RawImage = cvLoadImage("Blob1.jpg"); MinArea = 2800; ErodeCount = 1; DilateCount = 1; } - else if(k == 4) { RawImage = cvLoadImage("Blob2.jpg"); MinArea = 2800; ErodeCount = 1; DilateCount = 1; } - else if(k == 5) { RawImage = cvLoadImage("Blob3.jpg"); MinArea = 2800; ErodeCount = 1; DilateCount = 1; } - else if(k == 6) { RawImage = cvLoadImage("Rice.jpg"); MinArea = 30; ErodeCount = 2; DilateCount = 1; } - //ShowImage(RawImage, "RawImage", 512); - - IplImage GrayImage = cvCreateImage(cvGetSize(RawImage), IPL_DEPTH_8U, 1); - cvCvtColor(RawImage, GrayImage, CV_BGR2GRAY); - //ShowImage(GrayImage, "GrayImage", 512); - - IplImage BWImage = cvCreateImage(cvGetSize(GrayImage), IPL_DEPTH_8U, 1); - cvThreshold(GrayImage, BWImage, 127, 255, CV_THRESH_BINARY); - //ShowImage(BWImage, "BWImage"); - - IplImage WorkingImage = cvCreateImage(cvGetSize(BWImage), IPL_DEPTH_8U, 1); - cvErode(BWImage, WorkingImage, null, ErodeCount); - cvDilate(WorkingImage, WorkingImage, null, DilateCount); - //ShowImage(WorkingImage, "WorkingImage", 512); - - //cvSaveImage("Working.jpg", WorkingImage); - //PrintGrayImage(WorkingImage, "WorkingImage"); - //BinaryHistogram(WorkingImage); - - Blobs Regions = new Blobs(); - Regions.BlobAnalysis( - WorkingImage, // image - -1, -1, // ROI start col, row - -1, -1, // ROI cols, rows - 1, // border (0 = black; 1 = white) - MinArea); // minarea - Regions.PrintRegionData(); - - for(int i = 1; i <= Blobs.MaxLabel; i++) - { - double [] Region = Blobs.RegionData[i]; - int Parent = (int) Region[Blobs.BLOBPARENT]; - int Color = (int) Region[Blobs.BLOBCOLOR]; - int MinX = (int) Region[Blobs.BLOBMINX]; - int MaxX = (int) Region[Blobs.BLOBMAXX]; - int MinY = (int) Region[Blobs.BLOBMINY]; - int MaxY = (int) Region[Blobs.BLOBMAXY]; - Highlight(RawImage, MinX, MinY, MaxX, MaxY, 1); - } - - ShowImage(RawImage, "RawImage", 512); - - cvReleaseImage(GrayImage); GrayImage = null; - cvReleaseImage(BWImage); BWImage = null; - cvReleaseImage(WorkingImage); WorkingImage = null; - } - cvReleaseImage(RawImage); RawImage = null; - } - - // Versions with 2, 3, and 4 parms respectively - public static void ShowImage(IplImage image, String caption) - { - CvMat mat = image.asCvMat(); - int width = mat.cols(); if(width < 1) width = 1; - int height = mat.rows(); if(height < 1) height = 1; - double aspect = 1.0 * width / height; - if(height < 128) { height = 128; width = (int) ( height * aspect ); } - if(width < 128) width = 128; - height = (int) ( width / aspect ); - ShowImage(image, caption, width, height); - } - public static void ShowImage(IplImage image, String caption, int size) - { - if(size < 128) size = 128; - CvMat mat = image.asCvMat(); - int width = mat.cols(); if(width < 1) width = 1; - int height = mat.rows(); if(height < 1) height = 1; - double aspect = 1.0 * width / height; - if(height != size) { height = size; width = (int) ( height * aspect ); } - if(width != size) width = size; - height = (int) ( width / aspect ); - ShowImage(image, caption, width, height); - } - public static void ShowImage(IplImage image, String caption, int width, int height) - { - CanvasFrame canvas = new CanvasFrame(caption, 1); // gamma=1 - 
canvas.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); - canvas.setCanvasSize(width, height); - OpenCVFrameConverter converter = new OpenCVFrameConverter.ToIplImage(); - canvas.showImage(converter.convert(image)); - } - - public static void Highlight(IplImage image, int [] inVec) - { - Highlight(image, inVec[0], inVec[1], inVec[2], inVec[3], 1); - } - public static void Highlight(IplImage image, int [] inVec, int Thick) - { - Highlight(image, inVec[0], inVec[1], inVec[2], inVec[3], Thick); - } - public static void Highlight(IplImage image, int xMin, int yMin, int xMax, int yMax) - { - Highlight(image, xMin, yMin, xMax, yMax, 1); - } - public static void Highlight(IplImage image, int xMin, int yMin, int xMax, int yMax, int Thick) - { - CvPoint pt1 = cvPoint(xMin,yMin); - CvPoint pt2 = cvPoint(xMax,yMax); - CvScalar color = cvScalar(255,0,0,0); // blue [green] [red] - cvRectangle(image, pt1, pt2, color, Thick, 4, 0); - } - - public static void PrintGrayImage(IplImage image, String caption) - { - int size = 512; // impractical to print anything larger - CvMat mat = image.asCvMat(); - int cols = mat.cols(); if(cols < 1) cols = 1; - int rows = mat.rows(); if(rows < 1) rows = 1; - double aspect = 1.0 * cols / rows; - if(rows > size) { rows = size; cols = (int) ( rows * aspect ); } - if(cols > size) cols = size; - rows = (int) ( cols / aspect ); - PrintGrayImage(image, caption, 0, cols, 0, rows); - } - public static void PrintGrayImage(IplImage image, String caption, int MinX, int MaxX, int MinY, int MaxY) - { - int size = 512; // impractical to print anything larger - CvMat mat = image.asCvMat(); - int cols = mat.cols(); if(cols < 1) cols = 1; - int rows = mat.rows(); if(rows < 1) rows = 1; - - if(MinX < 0) MinX = 0; if(MinX > cols) MinX = cols; - if(MaxX < 0) MaxX = 0; if(MaxX > cols) MaxX = cols; - if(MinY < 0) MinY = 0; if(MinY > rows) MinY = rows; - if(MaxY < 0) MaxY = 0; if(MaxY > rows) MaxY = rows; - - System.out.println("\n" + caption); - System.out.print(" +"); - for(int icol = MinX; icol < MaxX; icol++) System.out.print("-"); - System.out.println("+"); - - for(int irow = MinY; irow < MaxY; irow++) - { - if(irow<10) System.out.print(" "); - if(irow<100) System.out.print(" "); - System.out.print(irow); - System.out.print("|"); - for(int icol = MinX; icol < MaxX; icol++) - { - int val = (int) mat.get(irow,icol); - String C = " "; - if(val == 0) C = "*"; - System.out.print(C); - } - System.out.println("|"); - } - System.out.print(" +"); - for(int icol = MinX; icol < MaxX; icol++) System.out.print("-"); - System.out.println("+"); - } - - public static void PrintImageProperties(IplImage image) - { - CvMat mat = image.asCvMat(); - int cols = mat.cols(); - int rows = mat.rows(); - int depth = mat.depth(); - System.out.println("ImageProperties for " + image + " : cols=" + cols + " rows=" + rows + " depth=" + depth); - } - - public static float BinaryHistogram(IplImage image) - { - CvScalar Sum = cvSum(image); - float WhitePixels = (float) ( Sum.getVal(0) / 255 ); - CvMat mat = image.asCvMat(); - float TotalPixels = mat.cols() * mat.rows(); - //float BlackPixels = TotalPixels - WhitePixels; - return WhitePixels / TotalPixels; - } - - // Counterclockwise small angle rotation by skewing - Does not stretch border pixels - public static IplImage SkewGrayImage(IplImage Src, double angle) // angle is in radians - { - //double radians = - Math.PI * angle / 360.0; // Half because skew is horizontal and vertical - double sin = - Math.sin(angle); - double AbsSin = Math.abs(sin); - - int 
nChannels = Src.nChannels(); - if(nChannels != 1) - { - System.out.println("ERROR: SkewGrayImage: Require 1 channel: nChannels=" + nChannels); - System.exit(1); - } - - CvMat SrcMat = Src.asCvMat(); - int SrcCols = SrcMat.cols(); - int SrcRows = SrcMat.rows(); - - double WidthSkew = AbsSin * SrcRows; - double HeightSkew = AbsSin * SrcCols; - - int DstCols = (int) ( SrcCols + WidthSkew ); - int DstRows = (int) ( SrcRows + HeightSkew ); - - CvMat DstMat = cvCreateMat(DstRows, DstCols, CV_8UC1); // Type matches IPL_DEPTH_8U - cvSetZero(DstMat); - cvNot(DstMat, DstMat); - - for(int irow = 0; irow < DstRows; irow++) - { - int dcol = (int) ( WidthSkew * irow / SrcRows ); - for(int icol = 0; icol < DstCols; icol++) - { - int drow = (int) ( HeightSkew - HeightSkew * icol / SrcCols ); - int jrow = irow - drow; - int jcol = icol - dcol; - if(jrow < 0 || jcol < 0 || jrow >= SrcRows || jcol >= SrcCols) DstMat.put(irow, icol, 255); - else DstMat.put(irow, icol, (int) SrcMat.get(jrow,jcol)); - } - } - - IplImage Dst = cvCreateImage(cvSize(DstCols, DstRows), IPL_DEPTH_8U, 1); - Dst = DstMat.asIplImage(); - return Dst; - } - - public static IplImage TransposeImage(IplImage SrcImage) - { - CvMat mat = SrcImage.asCvMat(); - int cols = mat.cols(); - int rows = mat.rows(); - IplImage DstImage = cvCreateImage(cvSize(rows, cols), IPL_DEPTH_8U, 1); - cvTranspose(SrcImage, DstImage); - cvFlip(DstImage,DstImage,1); - return DstImage; - } -} - +import org.bytedeco.javacv.Blobs; +import org.bytedeco.javacv.CanvasFrame; +import org.bytedeco.javacv.OpenCVFrameConverter; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_imgcodecs.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; + +/////////////////////////////////////////////////////////////////// +//* *// +//* As the author of this code, I place all of this code into *// +//* the public domain. Users can use it for any legal purpose. *// +//* *// +//* - Dave Grossman *// +//* *// +/////////////////////////////////////////////////////////////////// +public class BlobDemo +{ + public static void main(String[] args) + { + System.out.println("STARTING...\n"); + demo(); + System.out.println("ALL DONE"); + } + + public static void demo() + { + int MinArea = 6; + int ErodeCount =0; + int DilateCount = 0; + + IplImage RawImage = null; + + // Read an image. 
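+            // Each test image below gets its own tuning: MinArea is the smallest
+            // blob area (in pixels) that BlobAnalysis keeps, while ErodeCount and
+            // DilateCount control how strongly the binary image is cleaned up first.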
+ for(int k = 0; k < 7; k++) + { + if(k == 0) { RawImage = cvLoadImage("BlackBalls.jpg"); MinArea = 250; ErodeCount = 0; DilateCount = 1; } + else if(k == 1) { RawImage = cvLoadImage("Shapes1.jpg"); MinArea = 6; ErodeCount = 0; DilateCount = 1; } + else if(k == 2) { RawImage = cvLoadImage("Shapes2.jpg"); MinArea = 250; ErodeCount = 0; DilateCount = 1; } + else if(k == 3) { RawImage = cvLoadImage("Blob1.jpg"); MinArea = 2800; ErodeCount = 1; DilateCount = 1; } + else if(k == 4) { RawImage = cvLoadImage("Blob2.jpg"); MinArea = 2800; ErodeCount = 1; DilateCount = 1; } + else if(k == 5) { RawImage = cvLoadImage("Blob3.jpg"); MinArea = 2800; ErodeCount = 1; DilateCount = 1; } + else if(k == 6) { RawImage = cvLoadImage("Rice.jpg"); MinArea = 30; ErodeCount = 2; DilateCount = 1; } + //ShowImage(RawImage, "RawImage", 512); + + IplImage GrayImage = cvCreateImage(cvGetSize(RawImage), IPL_DEPTH_8U, 1); + cvCvtColor(RawImage, GrayImage, CV_BGR2GRAY); + //ShowImage(GrayImage, "GrayImage", 512); + + IplImage BWImage = cvCreateImage(cvGetSize(GrayImage), IPL_DEPTH_8U, 1); + cvThreshold(GrayImage, BWImage, 127, 255, CV_THRESH_BINARY); + //ShowImage(BWImage, "BWImage"); + + IplImage WorkingImage = cvCreateImage(cvGetSize(BWImage), IPL_DEPTH_8U, 1); + cvErode(BWImage, WorkingImage, null, ErodeCount); + cvDilate(WorkingImage, WorkingImage, null, DilateCount); + //ShowImage(WorkingImage, "WorkingImage", 512); + + //cvSaveImage("Working.jpg", WorkingImage); + //PrintGrayImage(WorkingImage, "WorkingImage"); + //BinaryHistogram(WorkingImage); + + Blobs Regions = new Blobs(); + Regions.BlobAnalysis( + WorkingImage, // image + -1, -1, // ROI start col, row + -1, -1, // ROI cols, rows + 1, // border (0 = black; 1 = white) + MinArea); // minarea + Regions.PrintRegionData(); + + for(int i = 1; i <= Blobs.MaxLabel; i++) + { + double [] Region = Blobs.RegionData[i]; + int Parent = (int) Region[Blobs.BLOBPARENT]; + int Color = (int) Region[Blobs.BLOBCOLOR]; + int MinX = (int) Region[Blobs.BLOBMINX]; + int MaxX = (int) Region[Blobs.BLOBMAXX]; + int MinY = (int) Region[Blobs.BLOBMINY]; + int MaxY = (int) Region[Blobs.BLOBMAXY]; + Highlight(RawImage, MinX, MinY, MaxX, MaxY, 1); + } + + ShowImage(RawImage, "RawImage", 512); + + cvReleaseImage(GrayImage); GrayImage = null; + cvReleaseImage(BWImage); BWImage = null; + cvReleaseImage(WorkingImage); WorkingImage = null; + } + cvReleaseImage(RawImage); RawImage = null; + } + + // Versions with 2, 3, and 4 parms respectively + public static void ShowImage(IplImage image, String caption) + { + CvMat mat = image.asCvMat(); + int width = mat.cols(); if(width < 1) width = 1; + int height = mat.rows(); if(height < 1) height = 1; + double aspect = 1.0 * width / height; + if(height < 128) { height = 128; width = (int) ( height * aspect ); } + if(width < 128) width = 128; + height = (int) ( width / aspect ); + ShowImage(image, caption, width, height); + } + public static void ShowImage(IplImage image, String caption, int size) + { + if(size < 128) size = 128; + CvMat mat = image.asCvMat(); + int width = mat.cols(); if(width < 1) width = 1; + int height = mat.rows(); if(height < 1) height = 1; + double aspect = 1.0 * width / height; + if(height != size) { height = size; width = (int) ( height * aspect ); } + if(width != size) width = size; + height = (int) ( width / aspect ); + ShowImage(image, caption, width, height); + } + public static void ShowImage(IplImage image, String caption, int width, int height) + { + CanvasFrame canvas = new CanvasFrame(caption, 1); // gamma=1 + 
canvas.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); + canvas.setCanvasSize(width, height); + OpenCVFrameConverter converter = new OpenCVFrameConverter.ToIplImage(); + canvas.showImage(converter.convert(image)); + } + + public static void Highlight(IplImage image, int [] inVec) + { + Highlight(image, inVec[0], inVec[1], inVec[2], inVec[3], 1); + } + public static void Highlight(IplImage image, int [] inVec, int Thick) + { + Highlight(image, inVec[0], inVec[1], inVec[2], inVec[3], Thick); + } + public static void Highlight(IplImage image, int xMin, int yMin, int xMax, int yMax) + { + Highlight(image, xMin, yMin, xMax, yMax, 1); + } + public static void Highlight(IplImage image, int xMin, int yMin, int xMax, int yMax, int Thick) + { + CvPoint pt1 = cvPoint(xMin,yMin); + CvPoint pt2 = cvPoint(xMax,yMax); + CvScalar color = cvScalar(255,0,0,0); // blue [green] [red] + cvRectangle(image, pt1, pt2, color, Thick, 4, 0); + } + + public static void PrintGrayImage(IplImage image, String caption) + { + int size = 512; // impractical to print anything larger + CvMat mat = image.asCvMat(); + int cols = mat.cols(); if(cols < 1) cols = 1; + int rows = mat.rows(); if(rows < 1) rows = 1; + double aspect = 1.0 * cols / rows; + if(rows > size) { rows = size; cols = (int) ( rows * aspect ); } + if(cols > size) cols = size; + rows = (int) ( cols / aspect ); + PrintGrayImage(image, caption, 0, cols, 0, rows); + } + public static void PrintGrayImage(IplImage image, String caption, int MinX, int MaxX, int MinY, int MaxY) + { + int size = 512; // impractical to print anything larger + CvMat mat = image.asCvMat(); + int cols = mat.cols(); if(cols < 1) cols = 1; + int rows = mat.rows(); if(rows < 1) rows = 1; + + if(MinX < 0) MinX = 0; if(MinX > cols) MinX = cols; + if(MaxX < 0) MaxX = 0; if(MaxX > cols) MaxX = cols; + if(MinY < 0) MinY = 0; if(MinY > rows) MinY = rows; + if(MaxY < 0) MaxY = 0; if(MaxY > rows) MaxY = rows; + + System.out.println("\n" + caption); + System.out.print(" +"); + for(int icol = MinX; icol < MaxX; icol++) System.out.print("-"); + System.out.println("+"); + + for(int irow = MinY; irow < MaxY; irow++) + { + if(irow<10) System.out.print(" "); + if(irow<100) System.out.print(" "); + System.out.print(irow); + System.out.print("|"); + for(int icol = MinX; icol < MaxX; icol++) + { + int val = (int) mat.get(irow,icol); + String C = " "; + if(val == 0) C = "*"; + System.out.print(C); + } + System.out.println("|"); + } + System.out.print(" +"); + for(int icol = MinX; icol < MaxX; icol++) System.out.print("-"); + System.out.println("+"); + } + + public static void PrintImageProperties(IplImage image) + { + CvMat mat = image.asCvMat(); + int cols = mat.cols(); + int rows = mat.rows(); + int depth = mat.depth(); + System.out.println("ImageProperties for " + image + " : cols=" + cols + " rows=" + rows + " depth=" + depth); + } + + public static float BinaryHistogram(IplImage image) + { + CvScalar Sum = cvSum(image); + float WhitePixels = (float) ( Sum.getVal(0) / 255 ); + CvMat mat = image.asCvMat(); + float TotalPixels = mat.cols() * mat.rows(); + //float BlackPixels = TotalPixels - WhitePixels; + return WhitePixels / TotalPixels; + } + + // Counterclockwise small angle rotation by skewing - Does not stretch border pixels + public static IplImage SkewGrayImage(IplImage Src, double angle) // angle is in radians + { + //double radians = - Math.PI * angle / 360.0; // Half because skew is horizontal and vertical + double sin = - Math.sin(angle); + double AbsSin = Math.abs(sin); + + int 
nChannels = Src.nChannels(); + if(nChannels != 1) + { + System.out.println("ERROR: SkewGrayImage: Require 1 channel: nChannels=" + nChannels); + System.exit(1); + } + + CvMat SrcMat = Src.asCvMat(); + int SrcCols = SrcMat.cols(); + int SrcRows = SrcMat.rows(); + + double WidthSkew = AbsSin * SrcRows; + double HeightSkew = AbsSin * SrcCols; + + int DstCols = (int) ( SrcCols + WidthSkew ); + int DstRows = (int) ( SrcRows + HeightSkew ); + + CvMat DstMat = cvCreateMat(DstRows, DstCols, CV_8UC1); // Type matches IPL_DEPTH_8U + cvSetZero(DstMat); + cvNot(DstMat, DstMat); + + for(int irow = 0; irow < DstRows; irow++) + { + int dcol = (int) ( WidthSkew * irow / SrcRows ); + for(int icol = 0; icol < DstCols; icol++) + { + int drow = (int) ( HeightSkew - HeightSkew * icol / SrcCols ); + int jrow = irow - drow; + int jcol = icol - dcol; + if(jrow < 0 || jcol < 0 || jrow >= SrcRows || jcol >= SrcCols) DstMat.put(irow, icol, 255); + else DstMat.put(irow, icol, (int) SrcMat.get(jrow,jcol)); + } + } + + IplImage Dst = cvCreateImage(cvSize(DstCols, DstRows), IPL_DEPTH_8U, 1); + Dst = DstMat.asIplImage(); + return Dst; + } + + public static IplImage TransposeImage(IplImage SrcImage) + { + CvMat mat = SrcImage.asCvMat(); + int cols = mat.cols(); + int rows = mat.rows(); + IplImage DstImage = cvCreateImage(cvSize(rows, cols), IPL_DEPTH_8U, 1); + cvTranspose(SrcImage, DstImage); + cvFlip(DstImage,DstImage,1); + return DstImage; + } +} + diff --git a/samples/CaffeGooglenet.java b/samples/CaffeGooglenet.java index 06627c6c..d2aeb825 100644 --- a/samples/CaffeGooglenet.java +++ b/samples/CaffeGooglenet.java @@ -1,105 +1,105 @@ -/* - * JavaCV version of OpenCV caffe_googlenet.cpp - * https://github.com/ludv1x/opencv_contrib/blob/master/modules/dnn/samples/caffe_googlenet.cpp - * - * Paolo Bolettieri - */ - -import java.io.BufferedReader; -import java.io.File; -import java.io.FileReader; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_dnn.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_dnn.*; -import static org.bytedeco.opencv.global.opencv_imgcodecs.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; - -public class CaffeGooglenet { - - /* Find best class for the blob (i. e. class with maximal probability) */ - public static void getMaxClass(Mat probBlob, Point classId, double[] classProb) { - Mat probMat = probBlob.reshape(1, 1); //reshape the blob to 1x1000 matrix - minMaxLoc(probMat, null, classProb, null, classId, null); - } - - public static List readClassNames() { - String filename = "synset_words.txt"; - List classNames = null; - - try (BufferedReader br = new BufferedReader(new FileReader(new File(filename)))) { - classNames = new ArrayList(); - String name = null; - while ((name = br.readLine()) != null) { - classNames.add(name.substring(name.indexOf(' ')+1)); - } - } catch (IOException ex) { - System.err.println("File with classes labels not found " + filename); - System.exit(-1); - } - return classNames; - } - - public static void main(String[] args) throws Exception { - String modelTxt = "bvlc_googlenet.prototxt"; - String modelBin = "bvlc_googlenet.caffemodel"; - String imageFile = (args.length > 0) ? args[0] : "space_shuttle.jpg"; - - //! 
[Initialize network] - Net net = null; - try { //Try to import Caffe GoogleNet model - net = readNetFromCaffe(modelTxt, modelBin); - } catch (Exception e) { //Importer can throw errors, we will catch them - e.printStackTrace(); - } - - if (net == null || net.empty()) { - System.err.println("Can't load network by using the following files: "); - System.err.println("prototxt: " + modelTxt); - System.err.println("caffemodel: " + modelBin); - System.err.println("bvlc_googlenet.caffemodel can be downloaded here:"); - System.err.println("http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel"); - System.exit(-1); - } - //! [Initialize network] - - //! [Prepare blob] - Mat img = imread(imageFile); - - if (img.empty()) { - System.err.println("Can't read image from the file: " + imageFile); - System.exit(-1); - } - - resize(img, img, new Size(224, 224)); //GoogLeNet accepts only 224x224 RGB-images - Mat inputBlob = blobFromImage(img); //Convert Mat to 4-dimensional dnn::Blob from image - //! [Prepare blob] - - //! [Set input blob] - net.setInput(inputBlob, "data", 1.0, null); //set the network input - //! [Set input blob] - - //! [Make forward pass] - Mat prob = net.forward("prob"); //compute output - //! [Make forward pass] - - //! [Gather output] - Point classId = new Point(); - double[] classProb = new double[1]; - getMaxClass(prob, classId, classProb);//find the best class - //! [Gather output] - - //! [Print results] - List classNames = readClassNames(); - - System.out.println("Best class: #" + classId.x() + " '" + classNames.get(classId.x()) + "'"); - System.out.println("Best class: #" + classId.x()); - System.out.println("Probability: " + classProb[0] * 100 + "%"); - //! [Print results] - } //main -} +/* + * JavaCV version of OpenCV caffe_googlenet.cpp + * https://github.com/ludv1x/opencv_contrib/blob/master/modules/dnn/samples/caffe_googlenet.cpp + * + * Paolo Bolettieri + */ + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_dnn.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_dnn.*; +import static org.bytedeco.opencv.global.opencv_imgcodecs.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; + +public class CaffeGooglenet { + + /* Find best class for the blob (i. e. class with maximal probability) */ + public static void getMaxClass(Mat probBlob, Point classId, double[] classProb) { + Mat probMat = probBlob.reshape(1, 1); //reshape the blob to 1x1000 matrix + minMaxLoc(probMat, null, classProb, null, classId, null); + } + + public static List readClassNames() { + String filename = "synset_words.txt"; + List classNames = null; + + try (BufferedReader br = new BufferedReader(new FileReader(new File(filename)))) { + classNames = new ArrayList(); + String name = null; + while ((name = br.readLine()) != null) { + classNames.add(name.substring(name.indexOf(' ')+1)); + } + } catch (IOException ex) { + System.err.println("File with classes labels not found " + filename); + System.exit(-1); + } + return classNames; + } + + public static void main(String[] args) throws Exception { + String modelTxt = "bvlc_googlenet.prototxt"; + String modelBin = "bvlc_googlenet.caffemodel"; + String imageFile = (args.length > 0) ? args[0] : "space_shuttle.jpg"; + + //! 
[Initialize network] + Net net = null; + try { //Try to import Caffe GoogleNet model + net = readNetFromCaffe(modelTxt, modelBin); + } catch (Exception e) { //Importer can throw errors, we will catch them + e.printStackTrace(); + } + + if (net == null || net.empty()) { + System.err.println("Can't load network by using the following files: "); + System.err.println("prototxt: " + modelTxt); + System.err.println("caffemodel: " + modelBin); + System.err.println("bvlc_googlenet.caffemodel can be downloaded here:"); + System.err.println("http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel"); + System.exit(-1); + } + //! [Initialize network] + + //! [Prepare blob] + Mat img = imread(imageFile); + + if (img.empty()) { + System.err.println("Can't read image from the file: " + imageFile); + System.exit(-1); + } + + resize(img, img, new Size(224, 224)); //GoogLeNet accepts only 224x224 RGB-images + Mat inputBlob = blobFromImage(img); //Convert Mat to 4-dimensional dnn::Blob from image + //! [Prepare blob] + + //! [Set input blob] + net.setInput(inputBlob, "data", 1.0, null); //set the network input + //! [Set input blob] + + //! [Make forward pass] + Mat prob = net.forward("prob"); //compute output + //! [Make forward pass] + + //! [Gather output] + Point classId = new Point(); + double[] classProb = new double[1]; + getMaxClass(prob, classId, classProb);//find the best class + //! [Gather output] + + //! [Print results] + List classNames = readClassNames(); + + System.out.println("Best class: #" + classId.x() + " '" + classNames.get(classId.x()) + "'"); + System.out.println("Best class: #" + classId.x()); + System.out.println("Probability: " + classProb[0] * 100 + "%"); + //! [Print results] + } //main +} diff --git a/samples/ColoredObjectTrack.java b/samples/ColoredObjectTrack.java index 9ad1383c..3eb933d6 100644 --- a/samples/ColoredObjectTrack.java +++ b/samples/ColoredObjectTrack.java @@ -1,120 +1,120 @@ -/* - * Just an example using the opencv to make a colored object tracking, - * i adpted this code to bytedeco/javacv, i think this will help some people. - * - * Waldemar - */ - -import java.awt.Color; -import java.awt.Graphics; -import java.awt.image.BufferedImage; -import javax.swing.JPanel; -import org.bytedeco.javacv.CanvasFrame; -import org.bytedeco.javacv.FrameGrabber; -import org.bytedeco.javacv.Java2DFrameConverter; -import org.bytedeco.javacv.OpenCVFrameConverter; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_imgcodecs.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; - -public class ColoredObjectTrack implements Runnable { - - public static void main(String[] args) { - ColoredObjectTrack cot = new ColoredObjectTrack(); - Thread th = new Thread(cot); - th.start(); - } - - final int INTERVAL = 10;// 1sec - final int CAMERA_NUM = 0; // Default camera for this time - - /** - * Correct the color range- it depends upon the object, camera quality, - * environment. 
- */ - static CvScalar rgba_min = cvScalar(0, 0, 130, 0);// RED wide dabur birko - static CvScalar rgba_max = cvScalar(80, 80, 255, 0); - - IplImage image; - CanvasFrame canvas = new CanvasFrame("Web Cam Live"); - CanvasFrame path = new CanvasFrame("Detection"); - int ii = 0; - JPanel jp = new JPanel(); - - public ColoredObjectTrack() { - canvas.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); - path.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); - path.setContentPane(jp); - } - - @Override - public void run() { - try { - FrameGrabber grabber = FrameGrabber.createDefault(CAMERA_NUM); - OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage(); - grabber.start(); - IplImage img; - int posX = 0; - int posY = 0; - while (true) { - img = converter.convert(grabber.grab()); - if (img != null) { - // show image on window - cvFlip(img, img, 1);// l-r = 90_degrees_steps_anti_clockwise - canvas.showImage(converter.convert(img)); - IplImage detectThrs = getThresholdImage(img); - - CvMoments moments = new CvMoments(); - cvMoments(detectThrs, moments, 1); - double mom10 = cvGetSpatialMoment(moments, 1, 0); - double mom01 = cvGetSpatialMoment(moments, 0, 1); - double area = cvGetCentralMoment(moments, 0, 0); - posX = (int) (mom10 / area); - posY = (int) (mom01 / area); - // only if its a valid position - if (posX > 0 && posY > 0) { - paint(img, posX, posY); - } - } - // Thread.sleep(INTERVAL); - } - } catch (Exception e) { - } - } - - private void paint(IplImage img, int posX, int posY) { - Graphics g = jp.getGraphics(); - path.setSize(img.width(), img.height()); - // g.clearRect(0, 0, img.width(), img.height()); - g.setColor(Color.RED); - // g.fillOval(posX, posY, 20, 20); - g.drawOval(posX, posY, 20, 20); - System.out.println(posX + " , " + posY); - - } - - private IplImage getThresholdImage(IplImage orgImg) { - IplImage imgThreshold = cvCreateImage(cvGetSize(orgImg), 8, 1); - // - cvInRangeS(orgImg, rgba_min, rgba_max, imgThreshold);// red - - cvSmooth(imgThreshold, imgThreshold, CV_MEDIAN, 15,0,0,0); - cvSaveImage(++ii + "dsmthreshold.jpg", imgThreshold); - return imgThreshold; - } - - - public IplImage Equalize(BufferedImage bufferedimg) { - Java2DFrameConverter converter1 = new Java2DFrameConverter(); - OpenCVFrameConverter.ToIplImage converter2 = new OpenCVFrameConverter.ToIplImage(); - IplImage iploriginal = converter2.convert(converter1.convert(bufferedimg)); - IplImage srcimg = IplImage.create(iploriginal.width(), iploriginal.height(), IPL_DEPTH_8U, 1); - IplImage destimg = IplImage.create(iploriginal.width(), iploriginal.height(), IPL_DEPTH_8U, 1); - cvCvtColor(iploriginal, srcimg, CV_BGR2GRAY); - cvEqualizeHist(srcimg, destimg); - return destimg; - } -} +/* + * Just an example using the opencv to make a colored object tracking, + * i adpted this code to bytedeco/javacv, i think this will help some people. 
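+ * Note: the tracked color is defined by the rgba_min/rgba_max range below;
+ * adjust those two constants to follow a differently colored object.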
+ * + * Waldemar + */ + +import java.awt.Color; +import java.awt.Graphics; +import java.awt.image.BufferedImage; +import javax.swing.JPanel; +import org.bytedeco.javacv.CanvasFrame; +import org.bytedeco.javacv.FrameGrabber; +import org.bytedeco.javacv.Java2DFrameConverter; +import org.bytedeco.javacv.OpenCVFrameConverter; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_imgcodecs.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; + +public class ColoredObjectTrack implements Runnable { + + public static void main(String[] args) { + ColoredObjectTrack cot = new ColoredObjectTrack(); + Thread th = new Thread(cot); + th.start(); + } + + final int INTERVAL = 10;// 1sec + final int CAMERA_NUM = 0; // Default camera for this time + + /** + * Correct the color range- it depends upon the object, camera quality, + * environment. + */ + static CvScalar rgba_min = cvScalar(0, 0, 130, 0);// RED wide dabur birko + static CvScalar rgba_max = cvScalar(80, 80, 255, 0); + + IplImage image; + CanvasFrame canvas = new CanvasFrame("Web Cam Live"); + CanvasFrame path = new CanvasFrame("Detection"); + int ii = 0; + JPanel jp = new JPanel(); + + public ColoredObjectTrack() { + canvas.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); + path.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); + path.setContentPane(jp); + } + + @Override + public void run() { + try { + FrameGrabber grabber = FrameGrabber.createDefault(CAMERA_NUM); + OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage(); + grabber.start(); + IplImage img; + int posX = 0; + int posY = 0; + while (true) { + img = converter.convert(grabber.grab()); + if (img != null) { + // show image on window + cvFlip(img, img, 1);// l-r = 90_degrees_steps_anti_clockwise + canvas.showImage(converter.convert(img)); + IplImage detectThrs = getThresholdImage(img); + + CvMoments moments = new CvMoments(); + cvMoments(detectThrs, moments, 1); + double mom10 = cvGetSpatialMoment(moments, 1, 0); + double mom01 = cvGetSpatialMoment(moments, 0, 1); + double area = cvGetCentralMoment(moments, 0, 0); + posX = (int) (mom10 / area); + posY = (int) (mom01 / area); + // only if its a valid position + if (posX > 0 && posY > 0) { + paint(img, posX, posY); + } + } + // Thread.sleep(INTERVAL); + } + } catch (Exception e) { + } + } + + private void paint(IplImage img, int posX, int posY) { + Graphics g = jp.getGraphics(); + path.setSize(img.width(), img.height()); + // g.clearRect(0, 0, img.width(), img.height()); + g.setColor(Color.RED); + // g.fillOval(posX, posY, 20, 20); + g.drawOval(posX, posY, 20, 20); + System.out.println(posX + " , " + posY); + + } + + private IplImage getThresholdImage(IplImage orgImg) { + IplImage imgThreshold = cvCreateImage(cvGetSize(orgImg), 8, 1); + // + cvInRangeS(orgImg, rgba_min, rgba_max, imgThreshold);// red + + cvSmooth(imgThreshold, imgThreshold, CV_MEDIAN, 15,0,0,0); + cvSaveImage(++ii + "dsmthreshold.jpg", imgThreshold); + return imgThreshold; + } + + + public IplImage Equalize(BufferedImage bufferedimg) { + Java2DFrameConverter converter1 = new Java2DFrameConverter(); + OpenCVFrameConverter.ToIplImage converter2 = new OpenCVFrameConverter.ToIplImage(); + IplImage iploriginal = converter2.convert(converter1.convert(bufferedimg)); + IplImage srcimg = IplImage.create(iploriginal.width(), iploriginal.height(), IPL_DEPTH_8U, 1); + IplImage destimg = 
IplImage.create(iploriginal.width(), iploriginal.height(), IPL_DEPTH_8U, 1); + cvCvtColor(iploriginal, srcimg, CV_BGR2GRAY); + cvEqualizeHist(srcimg, destimg); + return destimg; + } +} diff --git a/samples/DeepLearningFaceDetection.java b/samples/DeepLearningFaceDetection.java index 4b820fdc..96251330 100644 --- a/samples/DeepLearningFaceDetection.java +++ b/samples/DeepLearningFaceDetection.java @@ -1,107 +1,107 @@ -import org.bytedeco.javacpp.indexer.FloatIndexer; -import org.bytedeco.javacv.CanvasFrame; -import org.bytedeco.javacv.OpenCVFrameConverter; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_dnn.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import org.bytedeco.opencv.opencv_videoio.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_dnn.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_videoio.*; - -/** - * Created on Jul 28, 2018 - * - * @author Taha Emara - * Email : taha@emaraic.com - * - * This example does face detection using deep learning model which provides a - * great accuracy compared to OpenCV face detection using Haar cascades. - * - * This example is based on this code - * https://github.com/opencv/opencv/blob/master/modules/dnn/misc/face_detector_accuracy.py - * - * To run this example you need two files: deploy.prototxt can be downloaded - * from - * https://github.com/opencv/opencv/blob/master/samples/dnn/face_detector/deploy.prototxt - * - * and res10_300x300_ssd_iter_140000.caffemodel - * https://github.com/opencv/opencv_3rdparty/blob/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel - * - */ -public class DeepLearningFaceDetection { - - private static final String PROTO_FILE = "deploy.prototxt"; - private static final String CAFFE_MODEL_FILE = "res10_300x300_ssd_iter_140000.caffemodel"; - private static final OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage(); - private static Net net = null; - - static { - net = readNetFromCaffe(PROTO_FILE, CAFFE_MODEL_FILE); - } - - private static void detectAndDraw(Mat image) {//detect faces and draw a blue rectangle arroung each face - - resize(image, image, new Size(300, 300));//resize the image to match the input size of the model - - //create a 4-dimensional blob from image with NCHW (Number of images in the batch -for training only-, Channel, Height, Width) dimensions order, - //for more detailes read the official docs at https://docs.opencv.org/trunk/d6/d0f/group__dnn.html#gabd0e76da3c6ad15c08b01ef21ad55dd8 - Mat blob = blobFromImage(image, 1.0, new Size(300, 300), new Scalar(104.0, 177.0, 123.0, 0), false, false, CV_32F); - - net.setInput(blob);//set the input to network model - Mat output = net.forward();//feed forward the input to the netwrok to get the output matrix - - Mat ne = new Mat(new Size(output.size(3), output.size(2)), CV_32F, output.ptr(0, 0));//extract a 2d matrix for 4d output matrix with form of (number of detections x 7) - - FloatIndexer srcIndexer = ne.createIndexer(); // create indexer to access elements of the matric - - for (int i = 0; i < output.size(3); i++) {//iterate to extract elements - float confidence = srcIndexer.get(i, 2); - float f1 = srcIndexer.get(i, 3); - float f2 = srcIndexer.get(i, 4); - float f3 = srcIndexer.get(i, 5); - float f4 = srcIndexer.get(i, 6); - if (confidence > .6) { - float tx = f1 * 300;//top left point's x - float ty = f2 * 300;//top left point's y - float 
bx = f3 * 300;//bottom right point's x
-                float by = f4 * 300;//bottom right point's y
-                rectangle(image, new Rect(new Point((int) tx, (int) ty), new Point((int) bx, (int) by)), new Scalar(255, 0, 0, 0));//print blue rectangle
-            }
-        }
-    }
-
-    public static void main(String[] args) {
-        VideoCapture capture = new VideoCapture();
-        capture.set(CAP_PROP_FRAME_WIDTH, 1280);
-        capture.set(CAP_PROP_FRAME_HEIGHT, 720);
-
-        if (!capture.open(0)) {
-            System.out.println("Can not open the cam !!!");
-        }
-
-        Mat colorimg = new Mat();
-
-        CanvasFrame mainframe = new CanvasFrame("Face Detection", CanvasFrame.getDefaultGamma() / 2.2);
-        mainframe.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE);
-        mainframe.setCanvasSize(600, 600);
-        mainframe.setLocationRelativeTo(null);
-        mainframe.setVisible(true);
-
-        while (true) {
-            while (capture.read(colorimg) && mainframe.isVisible()) {
-                detectAndDraw(colorimg);
-                mainframe.showImage(converter.convert(colorimg));
-                try {
-                    Thread.sleep(50);
-                } catch (InterruptedException ex) {
-                    System.out.println(ex.getMessage());
-                }
-
-            }
-        }
-    }
-
-}
+import org.bytedeco.javacpp.indexer.FloatIndexer;
+import org.bytedeco.javacv.CanvasFrame;
+import org.bytedeco.javacv.OpenCVFrameConverter;
+
+import org.bytedeco.opencv.opencv_core.*;
+import org.bytedeco.opencv.opencv_dnn.*;
+import org.bytedeco.opencv.opencv_imgproc.*;
+import org.bytedeco.opencv.opencv_videoio.*;
+import static org.bytedeco.opencv.global.opencv_core.*;
+import static org.bytedeco.opencv.global.opencv_dnn.*;
+import static org.bytedeco.opencv.global.opencv_imgproc.*;
+import static org.bytedeco.opencv.global.opencv_videoio.*;
+
+/**
+ * Created on Jul 28, 2018
+ *
+ * @author Taha Emara
+ * Email : taha@emaraic.com
+ *
+ * This example does face detection using a deep learning model, which provides
+ * great accuracy compared to OpenCV face detection with Haar cascades.
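+ *
+ * The model outputs a 4d blob of shape [1, 1, N, 7]: each of the N rows holds
+ * [imageId, classId, confidence, left, top, right, bottom], with the four
+ * coordinates normalized to [0, 1]. That is why the code below reads columns
+ * 2 to 6 of each row and scales the coordinates by the 300x300 input size.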
+ *
+ * This example is based on this code
+ * https://github.com/opencv/opencv/blob/master/modules/dnn/misc/face_detector_accuracy.py
+ *
+ * To run this example you need two files: deploy.prototxt can be downloaded
+ * from
+ * https://github.com/opencv/opencv/blob/master/samples/dnn/face_detector/deploy.prototxt
+ *
+ * and res10_300x300_ssd_iter_140000.caffemodel
+ * https://github.com/opencv/opencv_3rdparty/blob/dnn_samples_face_detector_20170830/res10_300x300_ssd_iter_140000.caffemodel
+ *
+ */
+public class DeepLearningFaceDetection {
+
+    private static final String PROTO_FILE = "deploy.prototxt";
+    private static final String CAFFE_MODEL_FILE = "res10_300x300_ssd_iter_140000.caffemodel";
+    private static final OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
+    private static Net net = null;
+
+    static {
+        net = readNetFromCaffe(PROTO_FILE, CAFFE_MODEL_FILE);
+    }
+
+    private static void detectAndDraw(Mat image) { // detect faces and draw a blue rectangle around each face
+
+        resize(image, image, new Size(300, 300)); // resize the image to match the input size of the model
+
+        // create a 4-dimensional blob from the image with NCHW (Number of images in the batch -for training only-, Channel, Height, Width) dimensions order,
+        // for more details read the official docs at https://docs.opencv.org/trunk/d6/d0f/group__dnn.html#gabd0e76da3c6ad15c08b01ef21ad55dd8
+        Mat blob = blobFromImage(image, 1.0, new Size(300, 300), new Scalar(104.0, 177.0, 123.0, 0), false, false, CV_32F);
+
+        net.setInput(blob); // set the input of the network model
+        Mat output = net.forward(); // feed the input forward through the network to get the output matrix
+
+        Mat ne = new Mat(new Size(output.size(3), output.size(2)), CV_32F, output.ptr(0, 0)); // extract a 2d matrix from the 4d output matrix, with shape (number of detections x 7)
+
+        FloatIndexer srcIndexer = ne.createIndexer(); // create an indexer to access the elements of the matrix
+
+        for (int i = 0; i < output.size(2); i++) { // iterate over the detections, one row per detection
+            float confidence = srcIndexer.get(i, 2);
+            float f1 = srcIndexer.get(i, 3);
+            float f2 = srcIndexer.get(i, 4);
+            float f3 = srcIndexer.get(i, 5);
+            float f4 = srcIndexer.get(i, 6);
+            if (confidence > .6) {
+                float tx = f1 * 300; // top left point's x
+                float ty = f2 * 300; // top left point's y
+                float bx = f3 * 300; // bottom right point's x
+                float by = f4 * 300; // bottom right point's y
+                rectangle(image, new Rect(new Point((int) tx, (int) ty), new Point((int) bx, (int) by)), new Scalar(255, 0, 0, 0)); // draw a blue rectangle
+            }
+        }
+    }
+
+    public static void main(String[] args) {
+        VideoCapture capture = new VideoCapture();
+        capture.set(CAP_PROP_FRAME_WIDTH, 1280);
+        capture.set(CAP_PROP_FRAME_HEIGHT, 720);
+
+        if (!capture.open(0)) {
+            System.out.println("Cannot open the camera!");
+        }
+
+        Mat colorimg = new Mat();
+
+        CanvasFrame mainframe = new CanvasFrame("Face Detection", CanvasFrame.getDefaultGamma() / 2.2);
+        mainframe.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE);
+        mainframe.setCanvasSize(600, 600);
+        mainframe.setLocationRelativeTo(null);
+        mainframe.setVisible(true);
+
+        while (true) {
+            while (capture.read(colorimg) && mainframe.isVisible()) {
+                detectAndDraw(colorimg);
+                mainframe.showImage(converter.convert(colorimg));
+                try {
+                    Thread.sleep(50);
+                } catch (InterruptedException ex) {
+                    System.out.println(ex.getMessage());
+                }
+
+            }
+        }
+    }
+
+}
diff --git a/samples/DeinterlacedVideoPlayer.java b/samples/DeinterlacedVideoPlayer.java
index 40317dc4..124af76b 100644
---
a/samples/DeinterlacedVideoPlayer.java +++ b/samples/DeinterlacedVideoPlayer.java @@ -1,83 +1,83 @@ -import org.bytedeco.javacpp.Loader; -import org.bytedeco.javacv.FFmpegFrameFilter; -import org.bytedeco.javacv.Frame; -import org.bytedeco.javacv.FrameFilter; -import org.bytedeco.javacv.FrameGrabber; -import org.bytedeco.javacv.FrameGrabber.Exception; -import org.bytedeco.javacv.OpenCVFrameGrabber; - -import org.bytedeco.ffmpeg.global.avutil; - -public class DeinterlacedVideoPlayer { - - private static final int DEVICE_ID = 0; - private static final int WIDTH = 640; - private static final int HEIGHT = 480; - private static final int FRAMERATE = 25; - private static final int PIXEL_FORMAT = avutil.AV_PIX_FMT_BGR24;; - - private String ffmpegString = "yadif=mode=0:parity=-1:deint=0,format=bgr24"; - private FrameGrabber grabber; - - public DeinterlacedVideoPlayer() {} - - public void start() { - FrameFilter filter = null; - try { - startFrameGrabber(); - - Frame frame = null; - while ((frame = grabber.grab()) != null) { - if (filter == null) { - filter = new FFmpegFrameFilter(ffmpegString, frame.imageWidth, frame.imageHeight); - filter.setPixelFormat(PIXEL_FORMAT); - filter.start(); - } - - filter.push(frame); - frame = filter.pull(); - - // do something with the filtered frame - - } - } catch (Exception | org.bytedeco.javacv.FrameFilter.Exception e) { - throw new RuntimeException(e.getMessage(), e); - } finally { - releaseGrabberAndFilter(this.grabber, filter); - } - } - - private void startFrameGrabber() throws Exception { - grabber = new OpenCVFrameGrabber(DEVICE_ID); - grabber.setImageWidth(WIDTH); - grabber.setImageHeight(HEIGHT); - grabber.setFrameRate(FRAMERATE); - grabber.setPixelFormat(PIXEL_FORMAT); - grabber.start(); - } - - private void releaseGrabberAndFilter(FrameGrabber grabber, FrameFilter filter) { - try { - if (grabber != null) { - grabber.release(); - } - } catch (Exception e) { - throw new RuntimeException("Cannot release frame grabber!", e); - } finally { - releaseFilter(filter); - } - } - - private void releaseFilter(FrameFilter filter) { - if (filter == null) { - return; - } - - try { - filter.close(); - } catch (org.bytedeco.javacv.FrameFilter.Exception e) { - throw new RuntimeException("Cannot close frame filter!", e); - } - } - -} +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacv.FFmpegFrameFilter; +import org.bytedeco.javacv.Frame; +import org.bytedeco.javacv.FrameFilter; +import org.bytedeco.javacv.FrameGrabber; +import org.bytedeco.javacv.FrameGrabber.Exception; +import org.bytedeco.javacv.OpenCVFrameGrabber; + +import org.bytedeco.ffmpeg.global.avutil; + +public class DeinterlacedVideoPlayer { + + private static final int DEVICE_ID = 0; + private static final int WIDTH = 640; + private static final int HEIGHT = 480; + private static final int FRAMERATE = 25; + private static final int PIXEL_FORMAT = avutil.AV_PIX_FMT_BGR24;; + + private String ffmpegString = "yadif=mode=0:parity=-1:deint=0,format=bgr24"; + private FrameGrabber grabber; + + public DeinterlacedVideoPlayer() {} + + public void start() { + FrameFilter filter = null; + try { + startFrameGrabber(); + + Frame frame = null; + while ((frame = grabber.grab()) != null) { + if (filter == null) { + filter = new FFmpegFrameFilter(ffmpegString, frame.imageWidth, frame.imageHeight); + filter.setPixelFormat(PIXEL_FORMAT); + filter.start(); + } + + filter.push(frame); + frame = filter.pull(); + + // do something with the filtered frame + + } + } catch (Exception | 
org.bytedeco.javacv.FrameFilter.Exception e) { + throw new RuntimeException(e.getMessage(), e); + } finally { + releaseGrabberAndFilter(this.grabber, filter); + } + } + + private void startFrameGrabber() throws Exception { + grabber = new OpenCVFrameGrabber(DEVICE_ID); + grabber.setImageWidth(WIDTH); + grabber.setImageHeight(HEIGHT); + grabber.setFrameRate(FRAMERATE); + grabber.setPixelFormat(PIXEL_FORMAT); + grabber.start(); + } + + private void releaseGrabberAndFilter(FrameGrabber grabber, FrameFilter filter) { + try { + if (grabber != null) { + grabber.release(); + } + } catch (Exception e) { + throw new RuntimeException("Cannot release frame grabber!", e); + } finally { + releaseFilter(filter); + } + } + + private void releaseFilter(FrameFilter filter) { + if (filter == null) { + return; + } + + try { + filter.close(); + } catch (org.bytedeco.javacv.FrameFilter.Exception e) { + throw new RuntimeException("Cannot close frame filter!", e); + } + } + +} diff --git a/samples/Demo.java b/samples/Demo.java index eba55c7f..5b5683c7 100644 --- a/samples/Demo.java +++ b/samples/Demo.java @@ -1,154 +1,154 @@ -/* - * Copyright (C) 2009-2018 Samuel Audet - * - * Licensed either under the Apache License, Version 2.0, or (at your option) - * under the terms of the GNU General Public License as published by - * the Free Software Foundation (subject to the "Classpath" exception), - * either version 2, or any later version (collectively, the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * http://www.gnu.org/licenses/ - * http://www.gnu.org/software/classpath/license.html - * - * or as provided in the LICENSE.txt file that accompanied this code. - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.File; -import java.net.URL; -import org.bytedeco.javacv.*; -import org.bytedeco.javacpp.*; -import org.bytedeco.javacpp.indexer.*; -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import org.bytedeco.opencv.opencv_calib3d.*; -import org.bytedeco.opencv.opencv_objdetect.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_calib3d.*; -import static org.bytedeco.opencv.global.opencv_objdetect.*; - -public class Demo { - public static void main(String[] args) throws Exception { - String classifierName = null; - if (args.length > 0) { - classifierName = args[0]; - } else { - URL url = new URL("https://raw.github.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_alt.xml"); - File file = Loader.cacheResource(url); - classifierName = file.getAbsolutePath(); - } - - // We can "cast" Pointer objects by instantiating a new object of the desired class. 
- CascadeClassifier classifier = new CascadeClassifier(classifierName); - if (classifier == null) { - System.err.println("Error loading classifier file \"" + classifierName + "\"."); - System.exit(1); - } - - // The available FrameGrabber classes include OpenCVFrameGrabber (opencv_videoio), - // DC1394FrameGrabber, FlyCapture2FrameGrabber, OpenKinectFrameGrabber, OpenKinect2FrameGrabber, - // RealSenseFrameGrabber, RealSense2FrameGrabber, PS3EyeFrameGrabber, VideoInputFrameGrabber, and FFmpegFrameGrabber. - FrameGrabber grabber = FrameGrabber.createDefault(0); - grabber.start(); - - // CanvasFrame, FrameGrabber, and FrameRecorder use Frame objects to communicate image data. - // We need a FrameConverter to interface with other APIs (Android, Java 2D, JavaFX, Tesseract, OpenCV, etc). - OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat(); - - // FAQ about IplImage and Mat objects from OpenCV: - // - For custom raw processing of data, createBuffer() returns an NIO direct - // buffer wrapped around the memory pointed by imageData, and under Android we can - // also use that Buffer with Bitmap.copyPixelsFromBuffer() and copyPixelsToBuffer(). - // - To get a BufferedImage from an IplImage, or vice versa, we can chain calls to - // Java2DFrameConverter and OpenCVFrameConverter, one after the other. - // - Java2DFrameConverter also has static copy() methods that we can use to transfer - // data more directly between BufferedImage and IplImage or Mat via Frame objects. - Mat grabbedImage = converter.convert(grabber.grab()); - int height = grabbedImage.rows(); - int width = grabbedImage.cols(); - - // Objects allocated with `new`, clone(), or a create*() factory method are automatically released - // by the garbage collector, but may still be explicitly released by calling deallocate(). - // You shall NOT call cvReleaseImage(), cvReleaseMemStorage(), etc. on objects allocated this way. - Mat grayImage = new Mat(height, width, CV_8UC1); - Mat rotatedImage = grabbedImage.clone(); - - // The OpenCVFrameRecorder class simply uses the VideoWriter of opencv_videoio, - // but FFmpegFrameRecorder also exists as a more versatile alternative. - FrameRecorder recorder = FrameRecorder.createDefault("output.avi", width, height); - recorder.start(); - - // CanvasFrame is a JFrame containing a Canvas component, which is hardware accelerated. - // It can also switch into full-screen mode when called with a screenNumber. - // We should also specify the relative monitor/camera response for proper gamma correction. - CanvasFrame frame = new CanvasFrame("Some Title", CanvasFrame.getDefaultGamma()/grabber.getGamma()); - - // Let's create some random 3D rotation... - Mat randomR = new Mat(3, 3, CV_64FC1), - randomAxis = new Mat(3, 1, CV_64FC1); - // We can easily and efficiently access the elements of matrices and images - // through an Indexer object with the set of get() and put() methods. - DoubleIndexer Ridx = randomR.createIndexer(), - axisIdx = randomAxis.createIndexer(); - axisIdx.put(0, (Math.random() - 0.5) / 4, - (Math.random() - 0.5) / 4, - (Math.random() - 0.5) / 4); - Rodrigues(randomAxis, randomR); - double f = (width + height) / 2.0; Ridx.put(0, 2, Ridx.get(0, 2) * f); - Ridx.put(1, 2, Ridx.get(1, 2) * f); - Ridx.put(2, 0, Ridx.get(2, 0) / f); Ridx.put(2, 1, Ridx.get(2, 1) / f); - System.out.println(Ridx); - - // We can allocate native arrays using constructors taking an integer as argument. 
- Point hatPoints = new Point(3); - - while (frame.isVisible() && (grabbedImage = converter.convert(grabber.grab())) != null) { - // Let's try to detect some faces! but we need a grayscale image... - cvtColor(grabbedImage, grayImage, CV_BGR2GRAY); - RectVector faces = new RectVector(); - classifier.detectMultiScale(grayImage, faces); - long total = faces.size(); - for (long i = 0; i < total; i++) { - Rect r = faces.get(i); - int x = r.x(), y = r.y(), w = r.width(), h = r.height(); - rectangle(grabbedImage, new Point(x, y), new Point(x + w, y + h), Scalar.RED, 1, CV_AA, 0); - - // To access or pass as argument the elements of a native array, call position() before. - hatPoints.position(0).x(x - w / 10 ).y(y - h / 10); - hatPoints.position(1).x(x + w * 11 / 10).y(y - h / 10); - hatPoints.position(2).x(x + w / 2 ).y(y - h / 2 ); - fillConvexPoly(grabbedImage, hatPoints.position(0), 3, Scalar.GREEN, CV_AA, 0); - } - - // Let's find some contours! but first some thresholding... - threshold(grayImage, grayImage, 64, 255, CV_THRESH_BINARY); - - // To check if an output argument is null we may call either isNull() or equals(null). - MatVector contours = new MatVector(); - findContours(grayImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); - long n = contours.size(); - for (long i = 0; i < n; i++) { - Mat contour = contours.get(i); - Mat points = new Mat(); - approxPolyDP(contour, points, arcLength(contour, true) * 0.02, true); - drawContours(grabbedImage, new MatVector(points), -1, Scalar.BLUE); - } - - warpPerspective(grabbedImage, rotatedImage, randomR, rotatedImage.size()); - - Frame rotatedFrame = converter.convert(rotatedImage); - frame.showImage(rotatedFrame); - recorder.record(rotatedFrame); - } - frame.dispose(); - recorder.stop(); - grabber.stop(); - } -} +/* + * Copyright (C) 2009-2018 Samuel Audet + * + * Licensed either under the Apache License, Version 2.0, or (at your option) + * under the terms of the GNU General Public License as published by + * the Free Software Foundation (subject to the "Classpath" exception), + * either version 2, or any later version (collectively, the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.gnu.org/licenses/ + * http://www.gnu.org/software/classpath/license.html + * + * or as provided in the LICENSE.txt file that accompanied this code. + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import java.io.File; +import java.net.URL; +import org.bytedeco.javacv.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.indexer.*; +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import org.bytedeco.opencv.opencv_calib3d.*; +import org.bytedeco.opencv.opencv_objdetect.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_calib3d.*; +import static org.bytedeco.opencv.global.opencv_objdetect.*; + +public class Demo { + public static void main(String[] args) throws Exception { + String classifierName = null; + if (args.length > 0) { + classifierName = args[0]; + } else { + URL url = new URL("https://raw.github.com/opencv/opencv/master/data/haarcascades/haarcascade_frontalface_alt.xml"); + File file = Loader.cacheResource(url); + classifierName = file.getAbsolutePath(); + } + + // We can "cast" Pointer objects by instantiating a new object of the desired class. + CascadeClassifier classifier = new CascadeClassifier(classifierName); + if (classifier == null) { + System.err.println("Error loading classifier file \"" + classifierName + "\"."); + System.exit(1); + } + + // The available FrameGrabber classes include OpenCVFrameGrabber (opencv_videoio), + // DC1394FrameGrabber, FlyCapture2FrameGrabber, OpenKinectFrameGrabber, OpenKinect2FrameGrabber, + // RealSenseFrameGrabber, RealSense2FrameGrabber, PS3EyeFrameGrabber, VideoInputFrameGrabber, and FFmpegFrameGrabber. + FrameGrabber grabber = FrameGrabber.createDefault(0); + grabber.start(); + + // CanvasFrame, FrameGrabber, and FrameRecorder use Frame objects to communicate image data. + // We need a FrameConverter to interface with other APIs (Android, Java 2D, JavaFX, Tesseract, OpenCV, etc). + OpenCVFrameConverter.ToMat converter = new OpenCVFrameConverter.ToMat(); + + // FAQ about IplImage and Mat objects from OpenCV: + // - For custom raw processing of data, createBuffer() returns an NIO direct + // buffer wrapped around the memory pointed by imageData, and under Android we can + // also use that Buffer with Bitmap.copyPixelsFromBuffer() and copyPixelsToBuffer(). + // - To get a BufferedImage from an IplImage, or vice versa, we can chain calls to + // Java2DFrameConverter and OpenCVFrameConverter, one after the other. + // - Java2DFrameConverter also has static copy() methods that we can use to transfer + // data more directly between BufferedImage and IplImage or Mat via Frame objects. + Mat grabbedImage = converter.convert(grabber.grab()); + int height = grabbedImage.rows(); + int width = grabbedImage.cols(); + + // Objects allocated with `new`, clone(), or a create*() factory method are automatically released + // by the garbage collector, but may still be explicitly released by calling deallocate(). + // You shall NOT call cvReleaseImage(), cvReleaseMemStorage(), etc. on objects allocated this way. + Mat grayImage = new Mat(height, width, CV_8UC1); + Mat rotatedImage = grabbedImage.clone(); + + // The OpenCVFrameRecorder class simply uses the VideoWriter of opencv_videoio, + // but FFmpegFrameRecorder also exists as a more versatile alternative. + FrameRecorder recorder = FrameRecorder.createDefault("output.avi", width, height); + recorder.start(); + + // CanvasFrame is a JFrame containing a Canvas component, which is hardware accelerated. + // It can also switch into full-screen mode when called with a screenNumber. 
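+        // For example (an assumption for illustration: a second monitor exists
+        // at index 1), a full-screen frame could be created with:
+        //     CanvasFrame fullScreen = new CanvasFrame("Some Title", 1);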
+ // We should also specify the relative monitor/camera response for proper gamma correction. + CanvasFrame frame = new CanvasFrame("Some Title", CanvasFrame.getDefaultGamma()/grabber.getGamma()); + + // Let's create some random 3D rotation... + Mat randomR = new Mat(3, 3, CV_64FC1), + randomAxis = new Mat(3, 1, CV_64FC1); + // We can easily and efficiently access the elements of matrices and images + // through an Indexer object with the set of get() and put() methods. + DoubleIndexer Ridx = randomR.createIndexer(), + axisIdx = randomAxis.createIndexer(); + axisIdx.put(0, (Math.random() - 0.5) / 4, + (Math.random() - 0.5) / 4, + (Math.random() - 0.5) / 4); + Rodrigues(randomAxis, randomR); + double f = (width + height) / 2.0; Ridx.put(0, 2, Ridx.get(0, 2) * f); + Ridx.put(1, 2, Ridx.get(1, 2) * f); + Ridx.put(2, 0, Ridx.get(2, 0) / f); Ridx.put(2, 1, Ridx.get(2, 1) / f); + System.out.println(Ridx); + + // We can allocate native arrays using constructors taking an integer as argument. + Point hatPoints = new Point(3); + + while (frame.isVisible() && (grabbedImage = converter.convert(grabber.grab())) != null) { + // Let's try to detect some faces! but we need a grayscale image... + cvtColor(grabbedImage, grayImage, CV_BGR2GRAY); + RectVector faces = new RectVector(); + classifier.detectMultiScale(grayImage, faces); + long total = faces.size(); + for (long i = 0; i < total; i++) { + Rect r = faces.get(i); + int x = r.x(), y = r.y(), w = r.width(), h = r.height(); + rectangle(grabbedImage, new Point(x, y), new Point(x + w, y + h), Scalar.RED, 1, CV_AA, 0); + + // To access or pass as argument the elements of a native array, call position() before. + hatPoints.position(0).x(x - w / 10 ).y(y - h / 10); + hatPoints.position(1).x(x + w * 11 / 10).y(y - h / 10); + hatPoints.position(2).x(x + w / 2 ).y(y - h / 2 ); + fillConvexPoly(grabbedImage, hatPoints.position(0), 3, Scalar.GREEN, CV_AA, 0); + } + + // Let's find some contours! but first some thresholding... + threshold(grayImage, grayImage, 64, 255, CV_THRESH_BINARY); + + // To check if an output argument is null we may call either isNull() or equals(null). + MatVector contours = new MatVector(); + findContours(grayImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); + long n = contours.size(); + for (long i = 0; i < n; i++) { + Mat contour = contours.get(i); + Mat points = new Mat(); + approxPolyDP(contour, points, arcLength(contour, true) * 0.02, true); + drawContours(grabbedImage, new MatVector(points), -1, Scalar.BLUE); + } + + warpPerspective(grabbedImage, rotatedImage, randomR, rotatedImage.size()); + + Frame rotatedFrame = converter.convert(rotatedImage); + frame.showImage(rotatedFrame); + recorder.record(rotatedFrame); + } + frame.dispose(); + recorder.stop(); + grabber.stop(); + } +} diff --git a/samples/FFmpegStreamingTimeout.java b/samples/FFmpegStreamingTimeout.java index b7cec71b..a8dad9ad 100644 --- a/samples/FFmpegStreamingTimeout.java +++ b/samples/FFmpegStreamingTimeout.java @@ -1,148 +1,148 @@ -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import org.bytedeco.javacpp.Pointer; -import org.bytedeco.javacv.FFmpegFrameGrabber; -import org.bytedeco.javacv.Frame; -import org.bytedeco.javacv.FrameGrabber; - -import org.bytedeco.ffmpeg.avformat.*; -import static org.bytedeco.ffmpeg.global.avformat.*; - -/** - * - * @author Dmitriy Gerashenko - */ -public class FFmpegStreamingTimeout { - - /** - * There is no universal option for streaming timeout. 
Each of protocols has - * its own list of options. - */ - private static enum TimeoutOption { - /** - * Depends on protocol (FTP, HTTP, RTMP, RTSP, SMB, SSH, TCP, UDP, or UNIX). - * http://ffmpeg.org/ffmpeg-all.html - * - * Specific for RTSP: - * Set socket TCP I/O timeout in microseconds. - * http://ffmpeg.org/ffmpeg-all.html#rtsp - */ - TIMEOUT, - /** - * Protocols - * - * Maximum time to wait for (network) read/write operations to complete, - * in microseconds. - * - * http://ffmpeg.org/ffmpeg-all.html#Protocols - */ - RW_TIMEOUT; - - public String getKey() { - return toString().toLowerCase(); - } - - } - - private static final String SOURCE_RTSP = "rtsp://184.72.239.149/vod/mp4:BigBuckBunny_115k.mov"; - private static final int TIMEOUT = 10; // In seconds. - - public static void main(String[] args) { - rtspStreamingTest(); -// testWithCallback(); // This is not working properly. It's just for test. - } - - private static void rtspStreamingTest() { - try { - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(SOURCE_RTSP); - /** - * "rw_timeout" - IS IGNORED when a network cable have been - * unplugged before a connection but the option takes effect after a - * connection was established. - * - * "timeout" - works fine. - */ - grabber.setOption( - TimeoutOption.TIMEOUT.getKey(), - String.valueOf(TIMEOUT * 1000000) - ); // In microseconds. - grabber.start(); - - Frame frame = null; - /** - * When network is disabled (before grabber was started) grabber - * throws exception: "org.bytedeco.javacv.FrameGrabber$Exception: - * avformat_open_input() error -138: Could not open input...". - * - * When connections is lost (after a few grabbed frames) - * grabber.grab() returns null without exception. - */ - while ((frame = grabber.grab()) != null) { - System.out.println("frame grabbed at " + grabber.getTimestamp()); - } - System.out.println("loop end with frame: " + frame); - } catch (FrameGrabber.Exception ex) { - System.out.println("exception: " + ex); - } - System.out.println("end"); - } - - private static void testWithCallback() { - try { - FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(SOURCE_RTSP); - /** - * grabber.getFormatContext() is null before grabber.start(). - * - * But if network is disabled grabber.start() will never return. - * - * That's why interrupt_callback not suitable for "network disabled - * case". - */ - grabber.start(); - - final AtomicBoolean interruptFlag = new AtomicBoolean(false); - AVIOInterruptCB.Callback_Pointer cp = new AVIOInterruptCB.Callback_Pointer() { - @Override - public int call(Pointer pointer) { - // 0 - continue, 1 - exit - int interruptFlagInt = interruptFlag.get() ? 1 : 0; - System.out.println("callback, interrupt flag == " + interruptFlagInt); - return interruptFlagInt; - } - - }; - AVFormatContext oc = grabber.getFormatContext(); - avformat_alloc_context(); - AVIOInterruptCB cb = new AVIOInterruptCB(); - cb.callback(cp); - oc.interrupt_callback(cb); - new Thread(new Runnable() { public void run() { - try { - TimeUnit.SECONDS.sleep(TIMEOUT); - interruptFlag.set(true); - System.out.println("interrupt flag was changed"); - } catch (InterruptedException ex) { - System.out.println("exception in interruption thread: " + ex); - } - }}).start(); - - Frame frame = null; - /** - * On one of my RTSP cams grabber stops calling callback on - * connection lost. I think it's has something to do with message: - * "[swscaler @ 0000000029af49e0] deprecated pixel format used, make - * sure you did set range correctly". 
- *
- * So there is at least one case when grabber stops calling
- * callback.
- */
-            while ((frame = grabber.grab()) != null) {
-                System.out.println("frame grabbed at " + grabber.getTimestamp());
-            }
-            System.out.println("loop end with frame: " + frame);
-        } catch (FrameGrabber.Exception ex) {
-            System.out.println("exception: " + ex);
-        }
-        System.out.println("end");
-    }
-}
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.bytedeco.javacpp.Pointer;
+import org.bytedeco.javacv.FFmpegFrameGrabber;
+import org.bytedeco.javacv.Frame;
+import org.bytedeco.javacv.FrameGrabber;
+
+import org.bytedeco.ffmpeg.avformat.*;
+import static org.bytedeco.ffmpeg.global.avformat.*;
+
+/**
+ *
+ * @author Dmitriy Gerashenko
+ */
+public class FFmpegStreamingTimeout {
+
+    /**
+     * There is no universal option for streaming timeout. Each protocol has
+     * its own list of options.
+     */
+    private static enum TimeoutOption {
+        /**
+         * Depends on protocol (FTP, HTTP, RTMP, RTSP, SMB, SSH, TCP, UDP, or UNIX).
+         * http://ffmpeg.org/ffmpeg-all.html
+         *
+         * Specific to RTSP:
+         * Set socket TCP I/O timeout in microseconds.
+         * http://ffmpeg.org/ffmpeg-all.html#rtsp
+         */
+        TIMEOUT,
+        /**
+         * Protocols
+         *
+         * Maximum time to wait for (network) read/write operations to complete,
+         * in microseconds.
+         *
+         * http://ffmpeg.org/ffmpeg-all.html#Protocols
+         */
+        RW_TIMEOUT;
+
+        public String getKey() {
+            return toString().toLowerCase();
+        }
+
+    }
+
+    private static final String SOURCE_RTSP = "rtsp://184.72.239.149/vod/mp4:BigBuckBunny_115k.mov";
+    private static final int TIMEOUT = 10; // In seconds.
+
+    public static void main(String[] args) {
+        rtspStreamingTest();
+//        testWithCallback(); // This is not working properly. It's just for a test.
+    }
+
+    private static void rtspStreamingTest() {
+        try {
+            FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(SOURCE_RTSP);
+            /**
+             * "rw_timeout" - IS IGNORED when a network cable has been
+             * unplugged before a connection, but the option takes effect after
+             * a connection was established.
+             *
+             * "timeout" - works fine.
+             */
+            grabber.setOption(
+                    TimeoutOption.TIMEOUT.getKey(),
+                    String.valueOf(TIMEOUT * 1000000)
+            ); // In microseconds.
+            grabber.start();
+
+            Frame frame = null;
+            /**
+             * When the network is disabled (before the grabber is started) the
+             * grabber throws an exception: "org.bytedeco.javacv.FrameGrabber$Exception:
+             * avformat_open_input() error -138: Could not open input...".
+             *
+             * When the connection is lost (after a few grabbed frames)
+             * grabber.grab() returns null without an exception.
+             */
+            while ((frame = grabber.grab()) != null) {
+                System.out.println("frame grabbed at " + grabber.getTimestamp());
+            }
+            System.out.println("loop end with frame: " + frame);
+        } catch (FrameGrabber.Exception ex) {
+            System.out.println("exception: " + ex);
+        }
+        System.out.println("end");
+    }
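+
+    // A minimal sketch of the approach that works above (illustrative only):
+    // the "timeout" option is given in microseconds, so a 10 second limit is
+    // passed as the string "10000000".
+    //
+    //     FFmpegFrameGrabber g = new FFmpegFrameGrabber(SOURCE_RTSP);
+    //     g.setOption("timeout", String.valueOf(10 * 1000000));
+    //     g.start();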
+
+    private static void testWithCallback() {
+        try {
+            FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(SOURCE_RTSP);
+            /**
+             * grabber.getFormatContext() is null before grabber.start().
+             *
+             * But if the network is disabled grabber.start() will never return.
+             *
+             * That's why interrupt_callback is not suitable for the "network
+             * disabled" case.
+             */
+            grabber.start();
+
+            final AtomicBoolean interruptFlag = new AtomicBoolean(false);
+            AVIOInterruptCB.Callback_Pointer cp = new AVIOInterruptCB.Callback_Pointer() {
+                @Override
+                public int call(Pointer pointer) {
+                    // 0 - continue, 1 - exit
+                    int interruptFlagInt = interruptFlag.get() ? 1 : 0;
+                    System.out.println("callback, interrupt flag == " + interruptFlagInt);
+                    return interruptFlagInt;
+                }
+
+            };
+            AVFormatContext oc = grabber.getFormatContext();
+            avformat_alloc_context();
+            AVIOInterruptCB cb = new AVIOInterruptCB();
+            cb.callback(cp);
+            oc.interrupt_callback(cb);
+            new Thread(new Runnable() { public void run() {
+                try {
+                    TimeUnit.SECONDS.sleep(TIMEOUT);
+                    interruptFlag.set(true);
+                    System.out.println("interrupt flag was changed");
+                } catch (InterruptedException ex) {
+                    System.out.println("exception in interruption thread: " + ex);
+                }
+            }}).start();
+
+            Frame frame = null;
+            /**
+             * On one of my RTSP cams the grabber stops calling the callback
+             * when the connection is lost. I think it has something to do with
+             * the message: "[swscaler @ 0000000029af49e0] deprecated pixel
+             * format used, make sure you did set range correctly".
+             *
+             * So there is at least one case where the grabber stops calling
+             * the callback.
+             */
+            while ((frame = grabber.grab()) != null) {
+                System.out.println("frame grabbed at " + grabber.getTimestamp());
+            }
+            System.out.println("loop end with frame: " + frame);
+        } catch (FrameGrabber.Exception ex) {
+            System.out.println("exception: " + ex);
+        }
+        System.out.println("end");
+    }
+}
diff --git a/samples/FaceApplet.html b/samples/FaceApplet.html
index dab5395b..282881e4 100644
--- a/samples/FaceApplet.html
+++ b/samples/FaceApplet.html
@@ -1,17 +1,17 @@
- - - - FaceApplet - - - - -

FaceApplet

- - - - + + + + FaceApplet + + + + +

FaceApplet

+ + + + diff --git a/samples/FaceApplet.java b/samples/FaceApplet.java index 58669de1..d59c38e1 100644 --- a/samples/FaceApplet.java +++ b/samples/FaceApplet.java @@ -1,152 +1,152 @@ -import java.applet.Applet; -import java.awt.BasicStroke; -import java.awt.Color; -import java.awt.Graphics; -import java.awt.Graphics2D; -import java.awt.image.BufferedImage; -import java.io.File; -import java.io.IOException; -import org.bytedeco.javacpp.Loader; -import org.bytedeco.javacv.Frame; -import org.bytedeco.javacv.FrameGrabber; -import org.bytedeco.javacv.Java2DFrameConverter; -import org.bytedeco.javacv.OpenCVFrameConverter; -import org.bytedeco.javacv.OpenCVFrameGrabber; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import org.bytedeco.opencv.opencv_objdetect.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_objdetect.*; - -/** - * - * @author Samuel Audet - */ -public class FaceApplet extends Applet implements Runnable { - - private CvHaarClassifierCascade classifier = null; - private CvMemStorage storage = null; - private FrameGrabber grabber = null; - private IplImage grabbedImage = null, grayImage = null, smallImage = null; - private CvSeq faces = null; - private boolean stop = false; - private Exception exception = null; - OpenCVFrameConverter.ToIplImage grabberConverter = new OpenCVFrameConverter.ToIplImage(); - Java2DFrameConverter paintConverter = new Java2DFrameConverter(); - - @Override public void init() { - try { - // Load the classifier file from Java resources. - String classiferName = "haarcascade_frontalface_alt.xml"; - File classifierFile = Loader.extractResource(classiferName, null, "classifier", ".xml"); - if (classifierFile == null || classifierFile.length() <= 0) { - throw new IOException("Could not extract \"" + classiferName + "\" from Java resources."); - } - - // Preload the opencv_objdetect module to work around a known bug. 
- Loader.load(opencv_objdetect.class); - classifier = new CvHaarClassifierCascade(cvLoad(classifierFile.getAbsolutePath())); - classifierFile.delete(); - if (classifier.isNull()) { - throw new IOException("Could not load the classifier file."); - } - - storage = CvMemStorage.create(); - } catch (Exception e) { - if (exception == null) { - exception = e; - repaint(); - } - } - } - - @Override public void start() { - try { - new Thread(this).start(); - } catch (Exception e) { - if (exception == null) { - exception = e; - repaint(); - } - } - } - - public void run() { - try { - try { - grabber = FrameGrabber.createDefault(0); - grabber.setImageWidth(getWidth()); - grabber.setImageHeight(getHeight()); - grabber.start(); - grabbedImage = grabberConverter.convert(grabber.grab()); - } catch (Exception e) { - if (grabber != null) grabber.release(); - grabber = new OpenCVFrameGrabber(0); - grabber.setImageWidth(getWidth()); - grabber.setImageHeight(getHeight()); - grabber.start(); - grabbedImage = grabberConverter.convert(grabber.grab()); - } - grayImage = IplImage.create(grabbedImage.width(), grabbedImage.height(), IPL_DEPTH_8U, 1); - smallImage = IplImage.create(grabbedImage.width()/4, grabbedImage.height()/4, IPL_DEPTH_8U, 1); - stop = false; - while (!stop && (grabbedImage = grabberConverter.convert(grabber.grab())) != null) { - if (faces == null) { - cvClearMemStorage(storage); - cvCvtColor(grabbedImage, grayImage, CV_BGR2GRAY); - cvResize(grayImage, smallImage, CV_INTER_AREA); - faces = cvHaarDetectObjects(smallImage, classifier, storage, 1.1, 3, - CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH); - repaint(); - } - } - grabbedImage = grayImage = smallImage = null; - grabber.stop(); - grabber.release(); - grabber = null; - } catch (Exception e) { - if (exception == null) { - exception = e; - repaint(); - } - } - } - - @Override public void update(Graphics g) { - paint(g); - } - - @Override public void paint(Graphics g) { - if (grabbedImage != null) { - Frame frame = grabberConverter.convert(grabbedImage); - BufferedImage image = paintConverter.getBufferedImage(frame, 2.2/grabber.getGamma()); - Graphics2D g2 = image.createGraphics(); - if (faces != null) { - g2.setColor(Color.RED); - g2.setStroke(new BasicStroke(2)); - int total = faces.total(); - for (int i = 0; i < total; i++) { - CvRect r = new CvRect(cvGetSeqElem(faces, i)); - g2.drawRect(r.x()*4, r.y()*4, r.width()*4, r.height()*4); - } - faces = null; - } - g.drawImage(image, 0, 0, null); - } - if (exception != null) { - int y = 0, h = g.getFontMetrics().getHeight(); - g.drawString(exception.toString(), 5, y += h); - for (StackTraceElement e : exception.getStackTrace()) { - g.drawString(" at " + e.toString(), 5, y += h); - } - } - } - - @Override public void stop() { - stop = true; - } - - @Override public void destroy() { } -} +import java.applet.Applet; +import java.awt.BasicStroke; +import java.awt.Color; +import java.awt.Graphics; +import java.awt.Graphics2D; +import java.awt.image.BufferedImage; +import java.io.File; +import java.io.IOException; +import org.bytedeco.javacpp.Loader; +import org.bytedeco.javacv.Frame; +import org.bytedeco.javacv.FrameGrabber; +import org.bytedeco.javacv.Java2DFrameConverter; +import org.bytedeco.javacv.OpenCVFrameConverter; +import org.bytedeco.javacv.OpenCVFrameGrabber; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import org.bytedeco.opencv.opencv_objdetect.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static 
org.bytedeco.opencv.global.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_objdetect.*; + +/** + * + * @author Samuel Audet + */ +public class FaceApplet extends Applet implements Runnable { + + private CvHaarClassifierCascade classifier = null; + private CvMemStorage storage = null; + private FrameGrabber grabber = null; + private IplImage grabbedImage = null, grayImage = null, smallImage = null; + private CvSeq faces = null; + private boolean stop = false; + private Exception exception = null; + OpenCVFrameConverter.ToIplImage grabberConverter = new OpenCVFrameConverter.ToIplImage(); + Java2DFrameConverter paintConverter = new Java2DFrameConverter(); + + @Override public void init() { + try { + // Load the classifier file from Java resources. + String classiferName = "haarcascade_frontalface_alt.xml"; + File classifierFile = Loader.extractResource(classiferName, null, "classifier", ".xml"); + if (classifierFile == null || classifierFile.length() <= 0) { + throw new IOException("Could not extract \"" + classiferName + "\" from Java resources."); + } + + // Preload the opencv_objdetect module to work around a known bug. + Loader.load(opencv_objdetect.class); + classifier = new CvHaarClassifierCascade(cvLoad(classifierFile.getAbsolutePath())); + classifierFile.delete(); + if (classifier.isNull()) { + throw new IOException("Could not load the classifier file."); + } + + storage = CvMemStorage.create(); + } catch (Exception e) { + if (exception == null) { + exception = e; + repaint(); + } + } + } + + @Override public void start() { + try { + new Thread(this).start(); + } catch (Exception e) { + if (exception == null) { + exception = e; + repaint(); + } + } + } + + public void run() { + try { + try { + grabber = FrameGrabber.createDefault(0); + grabber.setImageWidth(getWidth()); + grabber.setImageHeight(getHeight()); + grabber.start(); + grabbedImage = grabberConverter.convert(grabber.grab()); + } catch (Exception e) { + if (grabber != null) grabber.release(); + grabber = new OpenCVFrameGrabber(0); + grabber.setImageWidth(getWidth()); + grabber.setImageHeight(getHeight()); + grabber.start(); + grabbedImage = grabberConverter.convert(grabber.grab()); + } + grayImage = IplImage.create(grabbedImage.width(), grabbedImage.height(), IPL_DEPTH_8U, 1); + smallImage = IplImage.create(grabbedImage.width()/4, grabbedImage.height()/4, IPL_DEPTH_8U, 1); + stop = false; + while (!stop && (grabbedImage = grabberConverter.convert(grabber.grab())) != null) { + if (faces == null) { + cvClearMemStorage(storage); + cvCvtColor(grabbedImage, grayImage, CV_BGR2GRAY); + cvResize(grayImage, smallImage, CV_INTER_AREA); + faces = cvHaarDetectObjects(smallImage, classifier, storage, 1.1, 3, + CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_DO_ROUGH_SEARCH); + repaint(); + } + } + grabbedImage = grayImage = smallImage = null; + grabber.stop(); + grabber.release(); + grabber = null; + } catch (Exception e) { + if (exception == null) { + exception = e; + repaint(); + } + } + } + + @Override public void update(Graphics g) { + paint(g); + } + + @Override public void paint(Graphics g) { + if (grabbedImage != null) { + Frame frame = grabberConverter.convert(grabbedImage); + BufferedImage image = paintConverter.getBufferedImage(frame, 2.2/grabber.getGamma()); + Graphics2D g2 = image.createGraphics(); + if (faces != null) { + g2.setColor(Color.RED); + g2.setStroke(new BasicStroke(2)); + int total = faces.total(); + for (int i = 0; i < total; i++) { + CvRect r = new CvRect(cvGetSeqElem(faces, i)); + g2.drawRect(r.x()*4, 
r.y()*4, r.width()*4, r.height()*4); + } + faces = null; + } + g.drawImage(image, 0, 0, null); + } + if (exception != null) { + int y = 0, h = g.getFontMetrics().getHeight(); + g.drawString(exception.toString(), 5, y += h); + for (StackTraceElement e : exception.getStackTrace()) { + g.drawString(" at " + e.toString(), 5, y += h); + } + } + } + + @Override public void stop() { + stop = true; + } + + @Override public void destroy() { } +} diff --git a/samples/FaceApplet.jnlp b/samples/FaceApplet.jnlp index f97f22a3..1f3ce809 100644 --- a/samples/FaceApplet.jnlp +++ b/samples/FaceApplet.jnlp @@ -1,16 +1,16 @@ - - - - FaceApplet - Samuel Audet - - - - - - - - - - - + + + + FaceApplet + Samuel Audet + + + + + + + + + + + diff --git a/samples/FacePreview.java b/samples/FacePreview.java index 7ad6805f..c44c9eee 100644 --- a/samples/FacePreview.java +++ b/samples/FacePreview.java @@ -1,290 +1,290 @@ -/* - * Copyright (C) 2010-2019 Samuel Audet - * - * FacePreview - A fusion of OpenCV's facedetect and Android's CameraPreview samples, - * with JavaCV + JavaCPP as the glue in between. - * - * This file was based on CameraPreview.java that came with the Samples for - * Android SDK API 8, revision 1 and contained the following copyright notice: - * - * Copyright (C) 2007 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * - * IMPORTANT - Make sure the AndroidManifest.xml file looks like this: - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - */ - -package org.bytedeco.javacv.facepreview; - -import android.app.Activity; -import android.app.AlertDialog; -import android.content.Context; -import android.graphics.Canvas; -import android.graphics.Color; -import android.graphics.ImageFormat; -import android.graphics.Paint; -import android.hardware.Camera; -import android.hardware.Camera.Size; -import android.os.Bundle; -import android.view.SurfaceHolder; -import android.view.SurfaceView; -import android.view.View; -import android.view.Window; -import android.view.WindowManager; -import android.widget.FrameLayout; -import java.io.File; -import java.io.IOException; -import java.net.URL; -import java.nio.ByteBuffer; -import java.util.List; -import org.bytedeco.javacpp.Loader; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_objdetect.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_objdetect.*; - -// ---------------------------------------------------------------------- - -public class FacePreview extends Activity { - private FrameLayout layout; - private FaceView faceView; - private Preview mPreview; - - @Override - protected void onCreate(Bundle savedInstanceState) { - // Hide the window title. - requestWindowFeature(Window.FEATURE_NO_TITLE); - - super.onCreate(savedInstanceState); - - getWindow().addFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN); - - // Create our Preview view and set it as the content of our activity. 
- try { - layout = new FrameLayout(this); - faceView = new FaceView(this); - mPreview = new Preview(this, faceView); - layout.addView(mPreview); - layout.addView(faceView); - setContentView(layout); - } catch (IOException e) { - e.printStackTrace(); - new AlertDialog.Builder(this).setMessage(e.getMessage()).create().show(); - } - } -} - -// ---------------------------------------------------------------------- - -class FaceView extends View implements Camera.PreviewCallback { - public static final int SUBSAMPLING_FACTOR = 4; - - private Mat grayImage; - private CascadeClassifier classifier; - private RectVector faces; - - public FaceView(FacePreview context) throws IOException { - super(context); - - // Load the classifier file from Java resources. - File classifierFile = Loader.extractResource(getClass(), - "/org/bytedeco/javacv/facepreview/haarcascade_frontalface_alt.xml", - context.getCacheDir(), "classifier", ".xml"); - if (classifierFile == null || classifierFile.length() <= 0) { - throw new IOException("Could not extract the classifier file from Java resource."); - } - - classifier = new CascadeClassifier(classifierFile.getAbsolutePath()); - classifierFile.delete(); - if (classifier.isNull()) { - throw new IOException("Could not load the classifier file."); - } - } - - public void onPreviewFrame(final byte[] data, final Camera camera) { - try { - Camera.Size size = camera.getParameters().getPreviewSize(); - processImage(data, size.width, size.height); - camera.addCallbackBuffer(data); - } catch (RuntimeException e) { - // The camera has probably just been released, ignore. - } - } - - protected void processImage(byte[] data, int width, int height) { - // First, downsample our image and convert it into a grayscale Mat - int f = SUBSAMPLING_FACTOR; - if (grayImage == null || grayImage.cols() != width/f || grayImage.rows() != height/f) { - grayImage = new Mat(height/f, width/f, CV_8UC1); - } - int imageWidth = grayImage.cols(); - int imageHeight = grayImage.rows(); - int dataStride = f*width; - int imageStride = (int)grayImage.step(0); - ByteBuffer imageBuffer = grayImage.createBuffer(); - for (int y = 0; y < imageHeight; y++) { - int dataLine = y*dataStride; - int imageLine = y*imageStride; - for (int x = 0; x < imageWidth; x++) { - imageBuffer.put(imageLine + x, data[dataLine + f*x]); - } - } - - faces = new RectVector(); - classifier.detectMultiScale(grayImage, faces); - postInvalidate(); - } - - @Override - protected void onDraw(Canvas canvas) { - Paint paint = new Paint(); - paint.setColor(Color.RED); - paint.setTextSize(20); - - String s = "FacePreview - This side up."; - float textWidth = paint.measureText(s); - canvas.drawText(s, (getWidth()-textWidth)/2, 20, paint); - - if (faces != null) { - paint.setStrokeWidth(2); - paint.setStyle(Paint.Style.STROKE); - float scaleX = (float)getWidth()/grayImage.cols(); - float scaleY = (float)getHeight()/grayImage.rows(); - long total = faces.size(); - for (long i = 0; i < total; i++) { - Rect r = faces.get(i); - int x = r.x(), y = r.y(), w = r.width(), h = r.height(); - canvas.drawRect(x*scaleX, y*scaleY, (x+w)*scaleX, (y+h)*scaleY, paint); - } - } - } -} - -// ---------------------------------------------------------------------- - -class Preview extends SurfaceView implements SurfaceHolder.Callback { - SurfaceHolder mHolder; - Camera mCamera; - Camera.PreviewCallback previewCallback; - - Preview(Context context, Camera.PreviewCallback previewCallback) { - super(context); - this.previewCallback = previewCallback; - - // Install a 
SurfaceHolder.Callback so we get notified when the - // underlying surface is created and destroyed. - mHolder = getHolder(); - mHolder.addCallback(this); - mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS); - } - - public void surfaceCreated(SurfaceHolder holder) { - // The Surface has been created, acquire the camera and tell it where - // to draw. - mCamera = Camera.open(); - try { - mCamera.setPreviewDisplay(holder); - } catch (IOException exception) { - mCamera.release(); - mCamera = null; - // TODO: add more exception handling logic here - } - } - - public void surfaceDestroyed(SurfaceHolder holder) { - // Surface will be destroyed when we return, so stop the preview. - // Because the CameraDevice object is not a shared resource, it's very - // important to release it when the activity is paused. - mCamera.stopPreview(); - mCamera.release(); - mCamera = null; - } - - - private Size getOptimalPreviewSize(List sizes, int w, int h) { - final double ASPECT_TOLERANCE = 0.05; - double targetRatio = (double) w / h; - if (sizes == null) return null; - - Size optimalSize = null; - double minDiff = Double.MAX_VALUE; - - int targetHeight = h; - - // Try to find an size match aspect ratio and size - for (Size size : sizes) { - double ratio = (double) size.width / size.height; - if (Math.abs(ratio - targetRatio) > ASPECT_TOLERANCE) continue; - if (Math.abs(size.height - targetHeight) < minDiff) { - optimalSize = size; - minDiff = Math.abs(size.height - targetHeight); - } - } - - // Cannot find the one match the aspect ratio, ignore the requirement - if (optimalSize == null) { - minDiff = Double.MAX_VALUE; - for (Size size : sizes) { - if (Math.abs(size.height - targetHeight) < minDiff) { - optimalSize = size; - minDiff = Math.abs(size.height - targetHeight); - } - } - } - return optimalSize; - } - - public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) { - // Now that the size is known, set up the camera parameters and begin - // the preview. - Camera.Parameters parameters = mCamera.getParameters(); - - List sizes = parameters.getSupportedPreviewSizes(); - Size optimalSize = getOptimalPreviewSize(sizes, w, h); - parameters.setPreviewSize(optimalSize.width, optimalSize.height); - - mCamera.setParameters(parameters); - if (previewCallback != null) { - mCamera.setPreviewCallbackWithBuffer(previewCallback); - Camera.Size size = parameters.getPreviewSize(); - byte[] data = new byte[size.width*size.height* - ImageFormat.getBitsPerPixel(parameters.getPreviewFormat())/8]; - mCamera.addCallbackBuffer(data); - } - mCamera.startPreview(); - } - -} +/* + * Copyright (C) 2010-2019 Samuel Audet + * + * FacePreview - A fusion of OpenCV's facedetect and Android's CameraPreview samples, + * with JavaCV + JavaCPP as the glue in between. + * + * This file was based on CameraPreview.java that came with the Samples for + * Android SDK API 8, revision 1 and contained the following copyright notice: + * + * Copyright (C) 2007 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * + * IMPORTANT - Make sure the AndroidManifest.xml file looks like this: + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package org.bytedeco.javacv.facepreview; + +import android.app.Activity; +import android.app.AlertDialog; +import android.content.Context; +import android.graphics.Canvas; +import android.graphics.Color; +import android.graphics.ImageFormat; +import android.graphics.Paint; +import android.hardware.Camera; +import android.hardware.Camera.Size; +import android.os.Bundle; +import android.view.SurfaceHolder; +import android.view.SurfaceView; +import android.view.View; +import android.view.Window; +import android.view.WindowManager; +import android.widget.FrameLayout; +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.util.List; +import org.bytedeco.javacpp.Loader; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_objdetect.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_objdetect.*; + +// ---------------------------------------------------------------------- + +public class FacePreview extends Activity { + private FrameLayout layout; + private FaceView faceView; + private Preview mPreview; + + @Override + protected void onCreate(Bundle savedInstanceState) { + // Hide the window title. + requestWindowFeature(Window.FEATURE_NO_TITLE); + + super.onCreate(savedInstanceState); + + getWindow().addFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN); + + // Create our Preview view and set it as the content of our activity. + try { + layout = new FrameLayout(this); + faceView = new FaceView(this); + mPreview = new Preview(this, faceView); + layout.addView(mPreview); + layout.addView(faceView); + setContentView(layout); + } catch (IOException e) { + e.printStackTrace(); + new AlertDialog.Builder(this).setMessage(e.getMessage()).create().show(); + } + } +} + +// ---------------------------------------------------------------------- + +class FaceView extends View implements Camera.PreviewCallback { + public static final int SUBSAMPLING_FACTOR = 4; + + private Mat grayImage; + private CascadeClassifier classifier; + private RectVector faces; + + public FaceView(FacePreview context) throws IOException { + super(context); + + // Load the classifier file from Java resources. + File classifierFile = Loader.extractResource(getClass(), + "/org/bytedeco/javacv/facepreview/haarcascade_frontalface_alt.xml", + context.getCacheDir(), "classifier", ".xml"); + if (classifierFile == null || classifierFile.length() <= 0) { + throw new IOException("Could not extract the classifier file from Java resource."); + } + + classifier = new CascadeClassifier(classifierFile.getAbsolutePath()); + classifierFile.delete(); + if (classifier.isNull()) { + throw new IOException("Could not load the classifier file."); + } + } + + public void onPreviewFrame(final byte[] data, final Camera camera) { + try { + Camera.Size size = camera.getParameters().getPreviewSize(); + processImage(data, size.width, size.height); + camera.addCallbackBuffer(data); + } catch (RuntimeException e) { + // The camera has probably just been released, ignore. 
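// Background for the subsampling loop in processImage() below: Android preview
// frames default to the NV21 format, whose first width*height bytes are the Y
// (luma) plane, one byte per pixel. Reading every f-th luma byte therefore
// yields a grayscale image with no explicit color conversion. A minimal sketch
// of that layout assumption (hypothetical helper, not part of this sample):
static void copyLumaSubsampled(byte[] nv21, int width, int height, int f,
                               ByteBuffer gray, int grayStride) {
    for (int y = 0; y < height / f; y++) {
        for (int x = 0; x < width / f; x++) {
            // nv21[y*width + x] is the luma of pixel (x, y); the interleaved
            // chroma plane only starts after width*height bytes.
            gray.put(y * grayStride + x, nv21[y * f * width + x * f]);
        }
    }
}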
+ } + } + + protected void processImage(byte[] data, int width, int height) { + // First, downsample our image and convert it into a grayscale Mat + int f = SUBSAMPLING_FACTOR; + if (grayImage == null || grayImage.cols() != width/f || grayImage.rows() != height/f) { + grayImage = new Mat(height/f, width/f, CV_8UC1); + } + int imageWidth = grayImage.cols(); + int imageHeight = grayImage.rows(); + int dataStride = f*width; + int imageStride = (int)grayImage.step(0); + ByteBuffer imageBuffer = grayImage.createBuffer(); + for (int y = 0; y < imageHeight; y++) { + int dataLine = y*dataStride; + int imageLine = y*imageStride; + for (int x = 0; x < imageWidth; x++) { + imageBuffer.put(imageLine + x, data[dataLine + f*x]); + } + } + + faces = new RectVector(); + classifier.detectMultiScale(grayImage, faces); + postInvalidate(); + } + + @Override + protected void onDraw(Canvas canvas) { + Paint paint = new Paint(); + paint.setColor(Color.RED); + paint.setTextSize(20); + + String s = "FacePreview - This side up."; + float textWidth = paint.measureText(s); + canvas.drawText(s, (getWidth()-textWidth)/2, 20, paint); + + if (faces != null) { + paint.setStrokeWidth(2); + paint.setStyle(Paint.Style.STROKE); + float scaleX = (float)getWidth()/grayImage.cols(); + float scaleY = (float)getHeight()/grayImage.rows(); + long total = faces.size(); + for (long i = 0; i < total; i++) { + Rect r = faces.get(i); + int x = r.x(), y = r.y(), w = r.width(), h = r.height(); + canvas.drawRect(x*scaleX, y*scaleY, (x+w)*scaleX, (y+h)*scaleY, paint); + } + } + } +} + +// ---------------------------------------------------------------------- + +class Preview extends SurfaceView implements SurfaceHolder.Callback { + SurfaceHolder mHolder; + Camera mCamera; + Camera.PreviewCallback previewCallback; + + Preview(Context context, Camera.PreviewCallback previewCallback) { + super(context); + this.previewCallback = previewCallback; + + // Install a SurfaceHolder.Callback so we get notified when the + // underlying surface is created and destroyed. + mHolder = getHolder(); + mHolder.addCallback(this); + mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS); + } + + public void surfaceCreated(SurfaceHolder holder) { + // The Surface has been created, acquire the camera and tell it where + // to draw. + mCamera = Camera.open(); + try { + mCamera.setPreviewDisplay(holder); + } catch (IOException exception) { + mCamera.release(); + mCamera = null; + // TODO: add more exception handling logic here + } + } + + public void surfaceDestroyed(SurfaceHolder holder) { + // Surface will be destroyed when we return, so stop the preview. + // Because the CameraDevice object is not a shared resource, it's very + // important to release it when the activity is paused. 
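// Note on the preview buffer allocated in surfaceChanged() further below: with
// setPreviewCallbackWithBuffer(), frames are only delivered into buffers the
// application has queued, so each buffer must hold one full frame, i.e.
// width * height * bitsPerPixel / 8 bytes. A sketch with hypothetical numbers
// (NV21 carries 12 bits per pixel, so a 640x480 frame needs 460800 bytes):
int width = 640, height = 480;
int frameBytes = width * height * ImageFormat.getBitsPerPixel(ImageFormat.NV21) / 8;
byte[] buffer = new byte[frameBytes];
// mCamera.addCallbackBuffer(buffer); // re-queued from onPreviewFrame() after each frame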
+ mCamera.stopPreview(); + mCamera.release(); + mCamera = null; + } + + + private Size getOptimalPreviewSize(List sizes, int w, int h) { + final double ASPECT_TOLERANCE = 0.05; + double targetRatio = (double) w / h; + if (sizes == null) return null; + + Size optimalSize = null; + double minDiff = Double.MAX_VALUE; + + int targetHeight = h; + + // Try to find an size match aspect ratio and size + for (Size size : sizes) { + double ratio = (double) size.width / size.height; + if (Math.abs(ratio - targetRatio) > ASPECT_TOLERANCE) continue; + if (Math.abs(size.height - targetHeight) < minDiff) { + optimalSize = size; + minDiff = Math.abs(size.height - targetHeight); + } + } + + // Cannot find the one match the aspect ratio, ignore the requirement + if (optimalSize == null) { + minDiff = Double.MAX_VALUE; + for (Size size : sizes) { + if (Math.abs(size.height - targetHeight) < minDiff) { + optimalSize = size; + minDiff = Math.abs(size.height - targetHeight); + } + } + } + return optimalSize; + } + + public void surfaceChanged(SurfaceHolder holder, int format, int w, int h) { + // Now that the size is known, set up the camera parameters and begin + // the preview. + Camera.Parameters parameters = mCamera.getParameters(); + + List sizes = parameters.getSupportedPreviewSizes(); + Size optimalSize = getOptimalPreviewSize(sizes, w, h); + parameters.setPreviewSize(optimalSize.width, optimalSize.height); + + mCamera.setParameters(parameters); + if (previewCallback != null) { + mCamera.setPreviewCallbackWithBuffer(previewCallback); + Camera.Size size = parameters.getPreviewSize(); + byte[] data = new byte[size.width*size.height* + ImageFormat.getBitsPerPixel(parameters.getPreviewFormat())/8]; + mCamera.addCallbackBuffer(data); + } + mCamera.startPreview(); + } + +} diff --git a/samples/FaceRecognizerInVideo.java b/samples/FaceRecognizerInVideo.java index 9576426f..332b170b 100644 --- a/samples/FaceRecognizerInVideo.java +++ b/samples/FaceRecognizerInVideo.java @@ -1,114 +1,114 @@ -import java.io.File; - -import org.bytedeco.javacpp.IntPointer; -import org.bytedeco.javacpp.DoublePointer; - -import org.bytedeco.javacv.Frame; -import org.bytedeco.javacv.FrameGrabber.Exception; -import org.bytedeco.javacv.OpenCVFrameConverter; -import org.bytedeco.javacv.OpenCVFrameGrabber; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_face.*; -import org.bytedeco.opencv.opencv_highgui.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import org.bytedeco.opencv.opencv_objdetect.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_face.*; -import static org.bytedeco.opencv.global.opencv_highgui.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_objdetect.*; - -/** - * This is an example how to detect face in a video file with javacv - * @author Vincent He (chinadragon0515@gmail.com) - * - */ -public class FaceRecognizerInVideo { - - public static void main(String[] args) throws Exception { - - OpenCVFrameConverter.ToMat converterToMat = new OpenCVFrameConverter.ToMat(); - - if (args.length < 2) { - System.out.println("Two parameters are required to run this program, first parameter is the analized video and second parameter is the trained result for fisher faces."); - } - - String videoFileName = args[0]; - String trainedResult = args[1]; - - CascadeClassifier face_cascade = new CascadeClassifier( - "data\\haarcascade_frontalface_default.xml"); - FaceRecognizer 
lbphFaceRecognizer = LBPHFaceRecognizer.create(); - lbphFaceRecognizer.read(trainedResult); - - File f = new File(videoFileName); - - OpenCVFrameGrabber grabber = null; - try { - grabber = OpenCVFrameGrabber.createDefault(f); - grabber.start(); - } catch (Exception e) { - System.err.println("Failed start the grabber."); - } - - Frame videoFrame = null; - Mat videoMat = new Mat(); - while (true) { - videoFrame = grabber.grab(); - videoMat = converterToMat.convert(videoFrame); - Mat videoMatGray = new Mat(); - // Convert the current frame to grayscale: - cvtColor(videoMat, videoMatGray, COLOR_BGRA2GRAY); - equalizeHist(videoMatGray, videoMatGray); - - Point p = new Point(); - RectVector faces = new RectVector(); - // Find the faces in the frame: - face_cascade.detectMultiScale(videoMatGray, faces); - - // At this point you have the position of the faces in - // faces. Now we'll get the faces, make a prediction and - // annotate it in the video. Cool or what? - for (int i = 0; i < faces.size(); i++) { - Rect face_i = faces.get(i); - - Mat face = new Mat(videoMatGray, face_i); - // If fisher face recognizer is used, the face need to be - // resized. - // resize(face, face_resized, new Size(im_width, im_height), - // 1.0, 1.0, INTER_CUBIC); - - // Now perform the prediction, see how easy that is: - IntPointer label = new IntPointer(1); - DoublePointer confidence = new DoublePointer(1); - lbphFaceRecognizer.predict(face, label, confidence); - int prediction = label.get(0); - - // And finally write all we've found out to the original image! - // First of all draw a green rectangle around the detected face: - rectangle(videoMat, face_i, new Scalar(0, 255, 0, 1)); - - // Create the text we will annotate the box with: - String box_text = "Prediction = " + prediction; - // Calculate the position for annotated text (make sure we don't - // put illegal values in there): - int pos_x = Math.max(face_i.tl().x() - 10, 0); - int pos_y = Math.max(face_i.tl().y() - 10, 0); - // And now put it into the image: - putText(videoMat, box_text, new Point(pos_x, pos_y), - FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0)); - } - // Show the result: - imshow("face_recognizer", videoMat); - - char key = (char) waitKey(20); - // Exit this loop on escape: - if (key == 27) { - destroyAllWindows(); - break; - } - } - } - -} +import java.io.File; + +import org.bytedeco.javacpp.IntPointer; +import org.bytedeco.javacpp.DoublePointer; + +import org.bytedeco.javacv.Frame; +import org.bytedeco.javacv.FrameGrabber.Exception; +import org.bytedeco.javacv.OpenCVFrameConverter; +import org.bytedeco.javacv.OpenCVFrameGrabber; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_face.*; +import org.bytedeco.opencv.opencv_highgui.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import org.bytedeco.opencv.opencv_objdetect.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_face.*; +import static org.bytedeco.opencv.global.opencv_highgui.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_objdetect.*; + +/** + * This is an example how to detect face in a video file with javacv + * @author Vincent He (chinadragon0515@gmail.com) + * + */ +public class FaceRecognizerInVideo { + + public static void main(String[] args) throws Exception { + + OpenCVFrameConverter.ToMat converterToMat = new OpenCVFrameConverter.ToMat(); + + if (args.length < 2) { + System.out.println("Two parameters are required to 
run this program: the first parameter is the video to analyze and the second is the trained recognizer model.");
+            return;
+        }
+
+        String videoFileName = args[0];
+        String trainedResult = args[1];
+
+        CascadeClassifier face_cascade = new CascadeClassifier(
+                "data\\haarcascade_frontalface_default.xml");
+        FaceRecognizer lbphFaceRecognizer = LBPHFaceRecognizer.create();
+        lbphFaceRecognizer.read(trainedResult);
+
+        File f = new File(videoFileName);
+
+        OpenCVFrameGrabber grabber = null;
+        try {
+            grabber = OpenCVFrameGrabber.createDefault(f);
+            grabber.start();
+        } catch (Exception e) {
+            System.err.println("Failed to start the grabber.");
+            return;
+        }
+
+        Frame videoFrame = null;
+        Mat videoMat = new Mat();
+        while (true) {
+            videoFrame = grabber.grab();
+            // Stop when the end of the video stream has been reached:
+            if (videoFrame == null) {
+                break;
+            }
+            videoMat = converterToMat.convert(videoFrame);
+            Mat videoMatGray = new Mat();
+            // Convert the current frame to grayscale:
+            cvtColor(videoMat, videoMatGray, COLOR_BGRA2GRAY);
+            equalizeHist(videoMatGray, videoMatGray);
+
+            Point p = new Point();
+            RectVector faces = new RectVector();
+            // Find the faces in the frame:
+            face_cascade.detectMultiScale(videoMatGray, faces);
+
+            // At this point you have the position of the faces in
+            // faces. Now we'll get the faces, make a prediction and
+            // annotate it in the video. Cool or what?
+            for (int i = 0; i < faces.size(); i++) {
+                Rect face_i = faces.get(i);
+
+                Mat face = new Mat(videoMatGray, face_i);
+                // If the Fisher face recognizer is used, the face needs to be
+                // resized:
+                // resize(face, face_resized, new Size(im_width, im_height),
+                // 1.0, 1.0, INTER_CUBIC);
+
+                // Now perform the prediction, see how easy that is:
+                IntPointer label = new IntPointer(1);
+                DoublePointer confidence = new DoublePointer(1);
+                lbphFaceRecognizer.predict(face, label, confidence);
+                int prediction = label.get(0);
+
+                // And finally write all we've found out to the original image!
+                // First of all draw a green rectangle around the detected face:
+                rectangle(videoMat, face_i, new Scalar(0, 255, 0, 1));
+
+                // Create the text we will annotate the box with:
+                String box_text = "Prediction = " + prediction;
+                // Calculate the position for annotated text (make sure we don't
+                // put illegal values in there):
+                int pos_x = Math.max(face_i.tl().x() - 10, 0);
+                int pos_y = Math.max(face_i.tl().y() - 10, 0);
+                // And now put it into the image:
+                putText(videoMat, box_text, new Point(pos_x, pos_y),
+                        FONT_HERSHEY_PLAIN, 1.0, new Scalar(0, 255, 0, 2.0));
+            }
+            // Show the result:
+            imshow("face_recognizer", videoMat);
+
+            char key = (char) waitKey(20);
+            // Exit this loop on escape:
+            if (key == 27) {
+                destroyAllWindows();
+                break;
+            }
+        }
+    }
+
+}
diff --git a/samples/HoughLines.java b/samples/HoughLines.java
index 5552d759..c989e76a 100644
--- a/samples/HoughLines.java
+++ b/samples/HoughLines.java
@@ -1,120 +1,120 @@
-import javax.swing.JFrame;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacv.*;
-
-import org.bytedeco.opencv.opencv_core.*;
-import org.bytedeco.opencv.opencv_imgproc.*;
-import static org.bytedeco.opencv.global.opencv_core.*;
-import static org.bytedeco.opencv.global.opencv_imgproc.*;
-import static org.bytedeco.opencv.global.opencv_imgcodecs.*;
-
-/**
- * C to Java translation of the houghlines.c sample provided in the c sample directory of OpenCV 2.1,
- * using the JavaCV Java wrapper of OpenCV 2.2 developped by Samuel Audet.
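// A note on the predict() call in FaceRecognizerInVideo above: the IntPointer
// receives the predicted label and the DoublePointer a distance-style
// confidence, where lower values mean a closer match for LBPH. A common
// extension is to reject unknown faces by thresholding that distance; the
// cutoff below is a hypothetical value that needs tuning per training set:
IntPointer label = new IntPointer(1);
DoublePointer confidence = new DoublePointer(1);
lbphFaceRecognizer.predict(face, label, confidence);
String box_text = confidence.get(0) < 80.0
        ? "Prediction = " + label.get(0)
        : "Unknown";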
- * - * @author Jeremy Nicola - * jeremy.nicola@gmail.com - */ -public class HoughLines { - - /** - * usage: java HoughLines imageDir\imageName TransformType - */ - public static void main(String[] args) { - - String fileName = args.length >= 1 ? args[0] : "pic1.png"; // if no params provided, compute the defaut image - IplImage src = cvLoadImage(fileName, 0); - IplImage dst; - IplImage colorDst; - CvMemStorage storage = cvCreateMemStorage(0); - CvSeq lines = new CvSeq(); - - CanvasFrame source = new CanvasFrame("Source"); - CanvasFrame hough = new CanvasFrame("Hough"); - OpenCVFrameConverter.ToIplImage sourceConverter = new OpenCVFrameConverter.ToIplImage(); - OpenCVFrameConverter.ToIplImage houghConverter = new OpenCVFrameConverter.ToIplImage(); - if (src == null) { - System.out.println("Couldn't load source image."); - return; - } - - dst = cvCreateImage(cvGetSize(src), src.depth(), 1); - colorDst = cvCreateImage(cvGetSize(src), src.depth(), 3); - - cvCanny(src, dst, 50, 200, 3); - cvCvtColor(dst, colorDst, CV_GRAY2BGR); - - /* - * apply the probabilistic hough transform - * which returns for each line deteced two points ((x1, y1); (x2,y2)) - * defining the detected segment - */ - if (args.length == 2 && args[1].contentEquals("probabilistic")) { - System.out.println("Using the Probabilistic Hough Transform"); - lines = cvHoughLines2(dst, storage, CV_HOUGH_PROBABILISTIC, 1, Math.PI / 180, 40, 50, 10, 0, CV_PI); - for (int i = 0; i < lines.total(); i++) { - // Based on JavaCPP, the equivalent of the C code: - // CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i); - // CvPoint first=line[0], second=line[1] - // is: - Pointer line = cvGetSeqElem(lines, i); - CvPoint pt1 = new CvPoint(line).position(0); - CvPoint pt2 = new CvPoint(line).position(1); - - System.out.println("Line spotted: "); - System.out.println("\t pt1: " + pt1); - System.out.println("\t pt2: " + pt2); - cvLine(colorDst, pt1, pt2, CV_RGB(255, 0, 0), 3, CV_AA, 0); // draw the segment on the image - } - } - /* - * Apply the multiscale hough transform which returns for each line two float parameters (rho, theta) - * rho: distance from the origin of the image to the line - * theta: angle between the x-axis and the normal line of the detected line - */ - else if(args.length==2 && args[1].contentEquals("multiscale")){ - System.out.println("Using the multiscale Hough Transform"); // - lines = cvHoughLines2(dst, storage, CV_HOUGH_MULTI_SCALE, 1, Math.PI / 180, 40, 1, 1, 0, CV_PI); - for (int i = 0; i < lines.total(); i++) { - CvPoint2D32f point = new CvPoint2D32f(cvGetSeqElem(lines, i)); - - float rho=point.x(); - float theta=point.y(); - - double a = Math.cos((double) theta), b = Math.sin((double) theta); - double x0 = a * rho, y0 = b * rho; - CvPoint pt1 = cvPoint((int) Math.round(x0 + 1000 * (-b)), (int) Math.round(y0 + 1000 * (a))), pt2 = cvPoint((int) Math.round(x0 - 1000 * (-b)), (int) Math.round(y0 - 1000 * (a))); - System.out.println("Line spoted: "); - System.out.println("\t rho= " + rho); - System.out.println("\t theta= " + theta); - cvLine(colorDst, pt1, pt2, CV_RGB(255, 0, 0), 3, CV_AA, 0); - } - } - /* - * Default: apply the standard hough transform. Outputs: same as the multiscale output. 
- */ - else { - System.out.println("Using the Standard Hough Transform"); - lines = cvHoughLines2(dst, storage, CV_HOUGH_STANDARD, 1, Math.PI / 180, 90, 0, 0, 0, CV_PI); - for (int i = 0; i < lines.total(); i++) { - CvPoint2D32f point = new CvPoint2D32f(cvGetSeqElem(lines, i)); - - float rho=point.x(); - float theta=point.y(); - - double a = Math.cos((double) theta), b = Math.sin((double) theta); - double x0 = a * rho, y0 = b * rho; - CvPoint pt1 = cvPoint((int) Math.round(x0 + 1000 * (-b)), (int) Math.round(y0 + 1000 * (a))), pt2 = cvPoint((int) Math.round(x0 - 1000 * (-b)), (int) Math.round(y0 - 1000 * (a))); - System.out.println("Line spotted: "); - System.out.println("\t rho= " + rho); - System.out.println("\t theta= " + theta); - cvLine(colorDst, pt1, pt2, CV_RGB(255, 0, 0), 3, CV_AA, 0); - } - } - source.showImage(sourceConverter.convert(src)); - hough.showImage(houghConverter.convert(colorDst)); - - source.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); - hough.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); - } -} +import javax.swing.JFrame; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacv.*; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_imgcodecs.*; + +/** + * C to Java translation of the houghlines.c sample provided in the c sample directory of OpenCV 2.1, + * using the JavaCV Java wrapper of OpenCV 2.2 developped by Samuel Audet. + * + * @author Jeremy Nicola + * jeremy.nicola@gmail.com + */ +public class HoughLines { + + /** + * usage: java HoughLines imageDir\imageName TransformType + */ + public static void main(String[] args) { + + String fileName = args.length >= 1 ? 
args[0] : "pic1.png"; // if no params provided, compute the defaut image + IplImage src = cvLoadImage(fileName, 0); + IplImage dst; + IplImage colorDst; + CvMemStorage storage = cvCreateMemStorage(0); + CvSeq lines = new CvSeq(); + + CanvasFrame source = new CanvasFrame("Source"); + CanvasFrame hough = new CanvasFrame("Hough"); + OpenCVFrameConverter.ToIplImage sourceConverter = new OpenCVFrameConverter.ToIplImage(); + OpenCVFrameConverter.ToIplImage houghConverter = new OpenCVFrameConverter.ToIplImage(); + if (src == null) { + System.out.println("Couldn't load source image."); + return; + } + + dst = cvCreateImage(cvGetSize(src), src.depth(), 1); + colorDst = cvCreateImage(cvGetSize(src), src.depth(), 3); + + cvCanny(src, dst, 50, 200, 3); + cvCvtColor(dst, colorDst, CV_GRAY2BGR); + + /* + * apply the probabilistic hough transform + * which returns for each line deteced two points ((x1, y1); (x2,y2)) + * defining the detected segment + */ + if (args.length == 2 && args[1].contentEquals("probabilistic")) { + System.out.println("Using the Probabilistic Hough Transform"); + lines = cvHoughLines2(dst, storage, CV_HOUGH_PROBABILISTIC, 1, Math.PI / 180, 40, 50, 10, 0, CV_PI); + for (int i = 0; i < lines.total(); i++) { + // Based on JavaCPP, the equivalent of the C code: + // CvPoint* line = (CvPoint*)cvGetSeqElem(lines,i); + // CvPoint first=line[0], second=line[1] + // is: + Pointer line = cvGetSeqElem(lines, i); + CvPoint pt1 = new CvPoint(line).position(0); + CvPoint pt2 = new CvPoint(line).position(1); + + System.out.println("Line spotted: "); + System.out.println("\t pt1: " + pt1); + System.out.println("\t pt2: " + pt2); + cvLine(colorDst, pt1, pt2, CV_RGB(255, 0, 0), 3, CV_AA, 0); // draw the segment on the image + } + } + /* + * Apply the multiscale hough transform which returns for each line two float parameters (rho, theta) + * rho: distance from the origin of the image to the line + * theta: angle between the x-axis and the normal line of the detected line + */ + else if(args.length==2 && args[1].contentEquals("multiscale")){ + System.out.println("Using the multiscale Hough Transform"); // + lines = cvHoughLines2(dst, storage, CV_HOUGH_MULTI_SCALE, 1, Math.PI / 180, 40, 1, 1, 0, CV_PI); + for (int i = 0; i < lines.total(); i++) { + CvPoint2D32f point = new CvPoint2D32f(cvGetSeqElem(lines, i)); + + float rho=point.x(); + float theta=point.y(); + + double a = Math.cos((double) theta), b = Math.sin((double) theta); + double x0 = a * rho, y0 = b * rho; + CvPoint pt1 = cvPoint((int) Math.round(x0 + 1000 * (-b)), (int) Math.round(y0 + 1000 * (a))), pt2 = cvPoint((int) Math.round(x0 - 1000 * (-b)), (int) Math.round(y0 - 1000 * (a))); + System.out.println("Line spoted: "); + System.out.println("\t rho= " + rho); + System.out.println("\t theta= " + theta); + cvLine(colorDst, pt1, pt2, CV_RGB(255, 0, 0), 3, CV_AA, 0); + } + } + /* + * Default: apply the standard hough transform. Outputs: same as the multiscale output. 
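// For reference: a (rho, theta) pair describes the line through
// (x0, y0) = (rho*cos(theta), rho*sin(theta)) with direction
// (-sin(theta), cos(theta)), which is why the branches here extend 1000 px in
// both directions from (x0, y0) to obtain drawable endpoints. With the current
// C++-based API the probabilistic variant returns endpoints directly; a rough
// sketch, where edges and colorImage are hypothetical Mat variables and
// IntIndexer comes from org.bytedeco.javacpp.indexer:
Mat segments = new Mat();
HoughLinesP(edges, segments, 1, Math.PI / 180, 40, 50, 10);
IntIndexer seg = segments.createIndexer(); // one CV_32SC4 row per segment: x1, y1, x2, y2
for (int i = 0; i < segments.rows(); i++) {
    line(colorImage, new Point(seg.get(i, 0, 0), seg.get(i, 0, 1)),
            new Point(seg.get(i, 0, 2), seg.get(i, 0, 3)),
            new Scalar(0, 0, 255, 0), 3, LINE_AA, 0);
}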
+ */ + else { + System.out.println("Using the Standard Hough Transform"); + lines = cvHoughLines2(dst, storage, CV_HOUGH_STANDARD, 1, Math.PI / 180, 90, 0, 0, 0, CV_PI); + for (int i = 0; i < lines.total(); i++) { + CvPoint2D32f point = new CvPoint2D32f(cvGetSeqElem(lines, i)); + + float rho=point.x(); + float theta=point.y(); + + double a = Math.cos((double) theta), b = Math.sin((double) theta); + double x0 = a * rho, y0 = b * rho; + CvPoint pt1 = cvPoint((int) Math.round(x0 + 1000 * (-b)), (int) Math.round(y0 + 1000 * (a))), pt2 = cvPoint((int) Math.round(x0 - 1000 * (-b)), (int) Math.round(y0 - 1000 * (a))); + System.out.println("Line spotted: "); + System.out.println("\t rho= " + rho); + System.out.println("\t theta= " + theta); + cvLine(colorDst, pt1, pt2, CV_RGB(255, 0, 0), 3, CV_AA, 0); + } + } + source.showImage(sourceConverter.convert(src)); + hough.showImage(houghConverter.convert(colorDst)); + + source.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + hough.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE); + } +} diff --git a/samples/ImageSegmentation.java b/samples/ImageSegmentation.java index fa49431e..e22371f3 100644 --- a/samples/ImageSegmentation.java +++ b/samples/ImageSegmentation.java @@ -1,158 +1,158 @@ -/* - * JavaCV version of OpenCV imageSegmentation.cpp - * https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp - * - * The OpenCV example image is available at the following address - * https://github.com/opencv/opencv/blob/master/samples/data/cards.png - * - * Paolo Bolettieri - */ - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; - -import org.bytedeco.javacpp.indexer.FloatIndexer; -import org.bytedeco.javacpp.indexer.IntIndexer; -import org.bytedeco.javacpp.indexer.UByteIndexer; -import org.bytedeco.javacv.CanvasFrame; -import org.bytedeco.javacv.OpenCVFrameConverter; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_imgcodecs.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; - -public class ImageSegmentation { - private static final int[] WHITE = {255, 255, 255}; - private static final int[] BLACK = {0, 0, 0}; - - public static void main(String[] args) { - // Load the image - Mat src = imread(args[0]); - // Check if everything was fine - if (src.data().isNull()) - return; - // Show source image - imshow("Source Image", src); - - // Change the background from white to black, since that will help later to extract - // better results during the use of Distance Transform - UByteIndexer srcIndexer = src.createIndexer(); - for (int x = 0; x < srcIndexer.rows(); x++) { - for (int y = 0; y < srcIndexer.cols(); y++) { - int[] values = new int[3]; - srcIndexer.get(x, y, values); - if (Arrays.equals(values, WHITE)) { - srcIndexer.put(x, y, BLACK); - } - } - } - // Show output image - imshow("Black Background Image", src); - - // Create a kernel that we will use for accuting/sharpening our image - Mat kernel = Mat.ones(3, 3, CV_32F).asMat(); - FloatIndexer kernelIndexer = kernel.createIndexer(); - kernelIndexer.put(1, 1, -8); // an approximation of second derivative, a quite strong kernel - - // do the laplacian filtering as it is - // well, we need to convert everything in something more deeper then CV_8U - // because the kernel has some negative values, - // and we can expect in general to have a Laplacian image with negative values - // BUT 
a 8bits unsigned int (the one we are working with) can contain values from 0 to 255 - // so the possible negative number will be truncated - Mat imgLaplacian = new Mat(); - Mat sharp = src; // copy source image to another temporary one - filter2D(sharp, imgLaplacian, CV_32F, kernel); - src.convertTo(sharp, CV_32F); - Mat imgResult = subtract(sharp, imgLaplacian).asMat(); - // convert back to 8bits gray scale - imgResult.convertTo(imgResult, CV_8UC3); - imgLaplacian.convertTo(imgLaplacian, CV_8UC3); - // imshow( "Laplace Filtered Image", imgLaplacian ); - imshow("New Sharped Image", imgResult); - - src = imgResult; // copy back - // Create binary image from source image - Mat bw = new Mat(); - cvtColor(src, bw, CV_BGR2GRAY); - threshold(bw, bw, 40, 255, CV_THRESH_BINARY | CV_THRESH_OTSU); - imshow("Binary Image", bw); - - // Perform the distance transform algorithm - Mat dist = new Mat(); - distanceTransform(bw, dist, CV_DIST_L2, 3); - // Normalize the distance image for range = {0.0, 1.0} - // so we can visualize and threshold it - normalize(dist, dist, 0, 1., NORM_MINMAX, -1, null); - imshow("Distance Transform Image", dist); - - // Threshold to obtain the peaks - // This will be the markers for the foreground objects - threshold(dist, dist, .4, 1., CV_THRESH_BINARY); - // Dilate a bit the dist image - Mat kernel1 = Mat.ones(3, 3, CV_8UC1).asMat(); - dilate(dist, dist, kernel1); - imshow("Peaks", dist); - // Create the CV_8U version of the distance image - // It is needed for findContours() - Mat dist_8u = new Mat(); - dist.convertTo(dist_8u, CV_8U); - // Find total markers - MatVector contours = new MatVector(); - findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); - // Create the marker image for the watershed algorithm - Mat markers = Mat.zeros(dist.size(), CV_32SC1).asMat(); - // Draw the foreground markers - for (int i = 0; i < contours.size(); i++) - drawContours(markers, contours, i, Scalar.all((i) + 1)); - // Draw the background marker - circle(markers, new Point(5, 5), 3, RGB(255, 255, 255)); - imshow("Markers", multiply(markers, 10000).asMat()); - - // Perform the watershed algorithm - watershed(src, markers); - Mat mark = Mat.zeros(markers.size(), CV_8UC1).asMat(); - markers.convertTo(mark, CV_8UC1); - bitwise_not(mark, mark); -// imshow("Markers_v2", mark); // uncomment this if you want to see how the mark - // image looks like at that point - // Generate random colors - List colors = new ArrayList(); - for (int i = 0; i < contours.size(); i++) { - int b = theRNG().uniform(0, 255); - int g = theRNG().uniform(0, 255); - int r = theRNG().uniform(0, 255); - int[] color = { b, g, r }; - colors.add(color); - } - // Create the result image - Mat dst = Mat.zeros(markers.size(), CV_8UC3).asMat(); - // Fill labeled objects with random colors - IntIndexer markersIndexer = markers.createIndexer(); - UByteIndexer dstIndexer = dst.createIndexer(); - for (int i = 0; i < markersIndexer.rows(); i++) { - for (int j = 0; j < markersIndexer.cols(); j++) { - int index = markersIndexer.get(i, j); - if (index > 0 && index <= contours.size()) - dstIndexer.put(i, j, colors.get(index - 1)); - else - dstIndexer.put(i, j, BLACK); - } - } - // Visualize the final image - imshow("Final Result", dst); - } - - //I wrote a custom imshow method for problems using the OpenCV original one - private static void imshow(String txt, Mat img) { - CanvasFrame canvasFrame = new CanvasFrame(txt); - canvasFrame.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); - 
canvasFrame.setCanvasSize(img.cols(), img.rows()); - canvasFrame.showImage(new OpenCVFrameConverter.ToMat().convert(img)); - } - -} - +/* + * JavaCV version of OpenCV imageSegmentation.cpp + * https://github.com/opencv/opencv/blob/master/samples/cpp/tutorial_code/ImgTrans/imageSegmentation.cpp + * + * The OpenCV example image is available at the following address + * https://github.com/opencv/opencv/blob/master/samples/data/cards.png + * + * Paolo Bolettieri + */ + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.bytedeco.javacpp.indexer.FloatIndexer; +import org.bytedeco.javacpp.indexer.IntIndexer; +import org.bytedeco.javacpp.indexer.UByteIndexer; +import org.bytedeco.javacv.CanvasFrame; +import org.bytedeco.javacv.OpenCVFrameConverter; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_imgcodecs.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; + +public class ImageSegmentation { + private static final int[] WHITE = {255, 255, 255}; + private static final int[] BLACK = {0, 0, 0}; + + public static void main(String[] args) { + // Load the image + Mat src = imread(args[0]); + // Check if everything was fine + if (src.data().isNull()) + return; + // Show source image + imshow("Source Image", src); + + // Change the background from white to black, since that will help later to extract + // better results during the use of Distance Transform + UByteIndexer srcIndexer = src.createIndexer(); + for (int x = 0; x < srcIndexer.rows(); x++) { + for (int y = 0; y < srcIndexer.cols(); y++) { + int[] values = new int[3]; + srcIndexer.get(x, y, values); + if (Arrays.equals(values, WHITE)) { + srcIndexer.put(x, y, BLACK); + } + } + } + // Show output image + imshow("Black Background Image", src); + + // Create a kernel that we will use for accuting/sharpening our image + Mat kernel = Mat.ones(3, 3, CV_32F).asMat(); + FloatIndexer kernelIndexer = kernel.createIndexer(); + kernelIndexer.put(1, 1, -8); // an approximation of second derivative, a quite strong kernel + + // do the laplacian filtering as it is + // well, we need to convert everything in something more deeper then CV_8U + // because the kernel has some negative values, + // and we can expect in general to have a Laplacian image with negative values + // BUT a 8bits unsigned int (the one we are working with) can contain values from 0 to 255 + // so the possible negative number will be truncated + Mat imgLaplacian = new Mat(); + Mat sharp = src; // copy source image to another temporary one + filter2D(sharp, imgLaplacian, CV_32F, kernel); + src.convertTo(sharp, CV_32F); + Mat imgResult = subtract(sharp, imgLaplacian).asMat(); + // convert back to 8bits gray scale + imgResult.convertTo(imgResult, CV_8UC3); + imgLaplacian.convertTo(imgLaplacian, CV_8UC3); + // imshow( "Laplace Filtered Image", imgLaplacian ); + imshow("New Sharped Image", imgResult); + + src = imgResult; // copy back + // Create binary image from source image + Mat bw = new Mat(); + cvtColor(src, bw, CV_BGR2GRAY); + threshold(bw, bw, 40, 255, CV_THRESH_BINARY | CV_THRESH_OTSU); + imshow("Binary Image", bw); + + // Perform the distance transform algorithm + Mat dist = new Mat(); + distanceTransform(bw, dist, CV_DIST_L2, 3); + // Normalize the distance image for range = {0.0, 1.0} + // so we can visualize and threshold it + normalize(dist, dist, 0, 1., NORM_MINMAX, -1, null); + 
imshow("Distance Transform Image", dist); + + // Threshold to obtain the peaks + // This will be the markers for the foreground objects + threshold(dist, dist, .4, 1., CV_THRESH_BINARY); + // Dilate a bit the dist image + Mat kernel1 = Mat.ones(3, 3, CV_8UC1).asMat(); + dilate(dist, dist, kernel1); + imshow("Peaks", dist); + // Create the CV_8U version of the distance image + // It is needed for findContours() + Mat dist_8u = new Mat(); + dist.convertTo(dist_8u, CV_8U); + // Find total markers + MatVector contours = new MatVector(); + findContours(dist_8u, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE); + // Create the marker image for the watershed algorithm + Mat markers = Mat.zeros(dist.size(), CV_32SC1).asMat(); + // Draw the foreground markers + for (int i = 0; i < contours.size(); i++) + drawContours(markers, contours, i, Scalar.all((i) + 1)); + // Draw the background marker + circle(markers, new Point(5, 5), 3, RGB(255, 255, 255)); + imshow("Markers", multiply(markers, 10000).asMat()); + + // Perform the watershed algorithm + watershed(src, markers); + Mat mark = Mat.zeros(markers.size(), CV_8UC1).asMat(); + markers.convertTo(mark, CV_8UC1); + bitwise_not(mark, mark); +// imshow("Markers_v2", mark); // uncomment this if you want to see how the mark + // image looks like at that point + // Generate random colors + List colors = new ArrayList(); + for (int i = 0; i < contours.size(); i++) { + int b = theRNG().uniform(0, 255); + int g = theRNG().uniform(0, 255); + int r = theRNG().uniform(0, 255); + int[] color = { b, g, r }; + colors.add(color); + } + // Create the result image + Mat dst = Mat.zeros(markers.size(), CV_8UC3).asMat(); + // Fill labeled objects with random colors + IntIndexer markersIndexer = markers.createIndexer(); + UByteIndexer dstIndexer = dst.createIndexer(); + for (int i = 0; i < markersIndexer.rows(); i++) { + for (int j = 0; j < markersIndexer.cols(); j++) { + int index = markersIndexer.get(i, j); + if (index > 0 && index <= contours.size()) + dstIndexer.put(i, j, colors.get(index - 1)); + else + dstIndexer.put(i, j, BLACK); + } + } + // Visualize the final image + imshow("Final Result", dst); + } + + //I wrote a custom imshow method for problems using the OpenCV original one + private static void imshow(String txt, Mat img) { + CanvasFrame canvasFrame = new CanvasFrame(txt); + canvasFrame.setDefaultCloseOperation(javax.swing.JFrame.EXIT_ON_CLOSE); + canvasFrame.setCanvasSize(img.cols(), img.rows()); + canvasFrame.showImage(new OpenCVFrameConverter.ToMat().convert(img)); + } + +} + diff --git a/samples/JavaFxPlayVideoAndAudio.java b/samples/JavaFxPlayVideoAndAudio.java index 5d32c06e..1c527eca 100644 --- a/samples/JavaFxPlayVideoAndAudio.java +++ b/samples/JavaFxPlayVideoAndAudio.java @@ -1,200 +1,199 @@ -import java.nio.ByteBuffer; -import java.nio.ShortBuffer; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.logging.Level; -import java.util.logging.Logger; -import javafx.application.Application; -import javafx.application.Platform; -import javafx.scene.Scene; -import javafx.scene.image.Image; -import javafx.scene.image.ImageView; -import javafx.scene.layout.StackPane; -import javafx.stage.Stage; -import javax.sound.sampled.AudioFormat; -import javax.sound.sampled.AudioSystem; -import javax.sound.sampled.DataLine; -import javax.sound.sampled.SourceDataLine; -import org.bytedeco.javacv.FFmpegFrameGrabber; -import org.bytedeco.javacv.Frame; -import 
org.bytedeco.javacv.JavaFXFrameConverter; - -/** - * @author Dmitriy Gerashenko - * @author Jarek Sacha - */ -public class JavaFxPlayVideoAndAudio extends Application { - - private static class PlaybackTimer { - private Long startTime = -1L; - private final DataLine soundLine; - - public PlaybackTimer(DataLine soundLine) { - this.soundLine = soundLine; - } - - public PlaybackTimer() { - this.soundLine = null; - } - - public void start() { - if (soundLine == null) { - startTime = System.nanoTime(); - } - } - - public long elapsedMicros() { - if (soundLine == null) { - if (startTime < 0) { - throw new IllegalStateException("PlaybackTimer not initialized."); - } - return (System.nanoTime() - startTime) / 1000; - } else { - return soundLine.getMicrosecondPosition(); - } - } - } - - private static final Logger LOG = Logger.getLogger(JavaFxPlayVideoAndAudio.class.getName()); - - private static volatile Thread playThread; - - public static void main(String[] args) { - launch(args); - } - - @Override - public void start(final Stage primaryStage) throws Exception { - final StackPane root = new StackPane(); - final ImageView imageView = new ImageView(); - - root.getChildren().add(imageView); - imageView.fitWidthProperty().bind(primaryStage.widthProperty()); - imageView.fitHeightProperty().bind(primaryStage.heightProperty()); - - final Scene scene = new Scene(root, 640, 480); - - primaryStage.setTitle("Video + audio"); - primaryStage.setScene(scene); - primaryStage.show(); - - playThread = new Thread(new Runnable() { public void run() { - try { - final String videoFilename = getParameters().getRaw().get(0); - final FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(videoFilename); - grabber.start(); - primaryStage.setWidth(grabber.getImageWidth()); - primaryStage.setHeight(grabber.getImageHeight()); - final PlaybackTimer playbackTimer; - final SourceDataLine soundLine; - if (grabber.getAudioChannels() > 0) { - final AudioFormat audioFormat = new AudioFormat(grabber.getSampleRate(), 16, grabber.getAudioChannels(), true, true); - - final DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat); - soundLine = (SourceDataLine) AudioSystem.getLine(info); - soundLine.open(audioFormat); - soundLine.start(); - playbackTimer = new PlaybackTimer(soundLine); - } else { - soundLine = null; - playbackTimer = new PlaybackTimer(); - } - - final JavaFXFrameConverter converter = new JavaFXFrameConverter(); - - final ExecutorService audioExecutor = Executors.newSingleThreadExecutor(); - final ExecutorService imageExecutor = Executors.newSingleThreadExecutor(); - - final long maxReadAheadBufferMicros = 1000 * 1000L; - - long lastTimeStamp = -1L; - while (!Thread.interrupted()) { - final Frame frame = grabber.grab(); - if (frame == null) { - break; - } - if (lastTimeStamp < 0) { - playbackTimer.start(); - } - lastTimeStamp = frame.timestamp; - if (frame.image != null) { - final Frame imageFrame = frame.clone(); - - imageExecutor.submit(new Runnable() { - public void run() { - final Image image = converter.convert(imageFrame); - final long timeStampDeltaMicros = imageFrame.timestamp - playbackTimer.elapsedMicros(); - imageFrame.close(); - if (timeStampDeltaMicros > 0) { - final long delayMillis = timeStampDeltaMicros / 1000L; - try { - Thread.sleep(delayMillis); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - } - Platform.runLater(new Runnable() { - public void run() { - imageView.setImage(image); - } - }); - } - }); - } else if (frame.samples != null) { - if (soundLine 
== null) {
-                            throw new IllegalStateException("Internal error: sound playback not initialized");
-                        }
-                        final ShortBuffer channelSamplesShortBuffer = (ShortBuffer) frame.samples[0];
-                        channelSamplesShortBuffer.rewind();
-
-                        final ByteBuffer outBuffer = ByteBuffer.allocate(channelSamplesShortBuffer.capacity() * 2);
-
-                        for (int i = 0; i < channelSamplesShortBuffer.capacity(); i++) {
-                            short val = channelSamplesShortBuffer.get(i);
-                            outBuffer.putShort(val);
-                        }
-
-                        audioExecutor.submit(new Runnable() {
-                            public void run() {
-                                soundLine.write(outBuffer.array(), 0, outBuffer.capacity());
-                                outBuffer.clear();
-                            }
-                        });
-                    }
-                    final long timeStampDeltaMicros = frame.timestamp - playbackTimer.elapsedMicros();
-                    if (timeStampDeltaMicros > maxReadAheadBufferMicros) {
-                        Thread.sleep((timeStampDeltaMicros - maxReadAheadBufferMicros) / 1000);
-                    }
-                }
-
-                if (!Thread.interrupted()) {
-                    long delay = (lastTimeStamp - playbackTimer.elapsedMicros()) / 1000 +
-                            Math.round(1 / grabber.getFrameRate() * 1000);
-                    Thread.sleep(Math.max(0, delay));
-                }
-                grabber.stop();
-                grabber.release();
-                if (soundLine != null) {
-                    soundLine.stop();
-                }
-                audioExecutor.shutdownNow();
-                audioExecutor.awaitTermination(10, TimeUnit.SECONDS);
-                imageExecutor.shutdownNow();
-                imageExecutor.awaitTermination(10, TimeUnit.SECONDS);
-
-                Platform.exit();
-            } catch (Exception exception) {
-                LOG.log(Level.SEVERE, null, exception);
-                System.exit(1);
-            }
-        }});
-        playThread.start();
-    }
-
-    @Override
-    public void stop() {
-        playThread.interrupt();
-    }
-
-}
+import java.nio.ByteBuffer;
+import java.nio.ShortBuffer;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import javafx.application.Application;
+import javafx.application.Platform;
+import javafx.scene.Scene;
+import javafx.scene.image.Image;
+import javafx.scene.image.ImageView;
+import javafx.scene.layout.StackPane;
+import javafx.stage.Stage;
+import javax.sound.sampled.AudioFormat;
+import javax.sound.sampled.AudioSystem;
+import javax.sound.sampled.DataLine;
+import javax.sound.sampled.SourceDataLine;
+import org.bytedeco.javacv.FFmpegFrameGrabber;
+import org.bytedeco.javacv.Frame;
+import org.bytedeco.javacv.JavaFXFrameConverter;
+
+/**
+ * @author Dmitriy Gerashenko
+ * @author Jarek Sacha
+ */
+public class JavaFxPlayVideoAndAudio extends Application {
+
+    private static class PlaybackTimer {
+        private Long startTime = -1L;
+        private final DataLine soundLine;
+
+        public PlaybackTimer(DataLine soundLine) {
+            this.soundLine = soundLine;
+        }
+
+        public PlaybackTimer() {
+            this.soundLine = null;
+        }
+
+        public void start() {
+            if (soundLine == null) {
+                startTime = System.nanoTime();
+            }
+        }
+
+        public long elapsedMicros() {
+            if (soundLine == null) {
+                if (startTime < 0) {
+                    throw new IllegalStateException("PlaybackTimer not initialized.");
+                }
+                return (System.nanoTime() - startTime) / 1000;
+            } else {
+                return soundLine.getMicrosecondPosition();
+            }
+        }
+    }
+
+    private static final Logger LOG = Logger.getLogger(JavaFxPlayVideoAndAudio.class.getName());
+
+    private static volatile Thread playThread;
+
+    public static void main(String[] args) {
+        launch(args);
+    }
+
+    @Override
+    public void start(final Stage primaryStage) throws Exception {
+        final StackPane root = new StackPane();
+        final ImageView imageView = new ImageView();
+
+        root.getChildren().add(imageView);
+        imageView.fitWidthProperty().bind(primaryStage.widthProperty());
+
imageView.fitHeightProperty().bind(primaryStage.heightProperty()); + + final Scene scene = new Scene(root, 640, 480); + + primaryStage.setTitle("Video + audio"); + primaryStage.setScene(scene); + primaryStage.show(); + + playThread = new Thread(new Runnable() { public void run() { + try { + final String videoFilename = getParameters().getRaw().get(0); + final FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(videoFilename); + grabber.start(); + primaryStage.setWidth(grabber.getImageWidth()); + primaryStage.setHeight(grabber.getImageHeight()); + final PlaybackTimer playbackTimer; + final SourceDataLine soundLine; + if (grabber.getAudioChannels() > 0) { + final AudioFormat audioFormat = new AudioFormat(grabber.getSampleRate(), 16, grabber.getAudioChannels(), true, true); + + final DataLine.Info info = new DataLine.Info(SourceDataLine.class, audioFormat); + soundLine = (SourceDataLine) AudioSystem.getLine(info); + soundLine.open(audioFormat); + soundLine.start(); + playbackTimer = new PlaybackTimer(soundLine); + } else { + soundLine = null; + playbackTimer = new PlaybackTimer(); + } + + final JavaFXFrameConverter converter = new JavaFXFrameConverter(); + + final ExecutorService audioExecutor = Executors.newSingleThreadExecutor(); + final ExecutorService imageExecutor = Executors.newSingleThreadExecutor(); + + final long maxReadAheadBufferMicros = 1000 * 1000L; + + long lastTimeStamp = -1L; + while (!Thread.interrupted()) { + final Frame frame = grabber.grab(); + if (frame == null) { + break; + } + if (lastTimeStamp < 0) { + playbackTimer.start(); + } + lastTimeStamp = frame.timestamp; + if (frame.image != null) { + final Frame imageFrame = frame.clone(); + + imageExecutor.submit(new Runnable() { + public void run() { + final Image image = converter.convert(imageFrame); + final long timeStampDeltaMicros = imageFrame.timestamp - playbackTimer.elapsedMicros(); + imageFrame.close(); + if (timeStampDeltaMicros > 0) { + final long delayMillis = timeStampDeltaMicros / 1000L; + try { + Thread.sleep(delayMillis); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + Platform.runLater(new Runnable() { + public void run() { + imageView.setImage(image); + } + }); + } + }); + } else if (frame.samples != null) { + if (soundLine == null) { + throw new IllegalStateException("Internal error: sound playback not initialized"); + } + final ShortBuffer channelSamplesShortBuffer = (ShortBuffer) frame.samples[0]; + channelSamplesShortBuffer.rewind(); + + final ByteBuffer outBuffer = ByteBuffer.allocate(channelSamplesShortBuffer.capacity() * 2); + + for (int i = 0; i < channelSamplesShortBuffer.capacity(); i++) { + short val = channelSamplesShortBuffer.get(i); + outBuffer.putShort(val); + } + + audioExecutor.submit(new Runnable() { + public void run() { + soundLine.write(outBuffer.array(), 0, outBuffer.capacity()); + outBuffer.clear(); + } + }); + } + final long timeStampDeltaMicros = frame.timestamp - playbackTimer.elapsedMicros(); + if (timeStampDeltaMicros > maxReadAheadBufferMicros) { + Thread.sleep((timeStampDeltaMicros - maxReadAheadBufferMicros) / 1000); + } + } + + if (!Thread.interrupted()) { + long delay = (lastTimeStamp - playbackTimer.elapsedMicros()) / 1000 + + Math.round(1 / grabber.getFrameRate() * 1000); + Thread.sleep(Math.max(0, delay)); + } + grabber.stop(); + grabber.release(); + if (soundLine != null) { + soundLine.stop(); + } + audioExecutor.shutdownNow(); + audioExecutor.awaitTermination(10, TimeUnit.SECONDS); + imageExecutor.shutdownNow(); + 
imageExecutor.awaitTermination(10, TimeUnit.SECONDS); + + Platform.exit(); + } catch (Exception exception) { + LOG.log(Level.SEVERE, null, exception); + System.exit(1); + } + }}); + playThread.start(); + } + + @Override + public void stop() { + playThread.interrupt(); + } + +} diff --git a/samples/KazemiFacemarkExample.java b/samples/KazemiFacemarkExample.java index 44c58157..a84b05aa 100644 --- a/samples/KazemiFacemarkExample.java +++ b/samples/KazemiFacemarkExample.java @@ -1,72 +1,72 @@ -/** - * Kazemi Facemark example for JavaCV - * - * @author Théophile Gonos - * - * Link to Kazemi model : - * https://raw.githubusercontent.com/opencv/opencv_3rdparty/contrib_face_alignment_20170818/face_landmark_model.dat - */ - -import java.io.IOException; -import java.net.URISyntaxException; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_face.*; -import org.bytedeco.opencv.opencv_highgui.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import org.bytedeco.opencv.opencv_objdetect.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_face.*; -import static org.bytedeco.opencv.global.opencv_highgui.*; -import static org.bytedeco.opencv.global.opencv_imgcodecs.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_objdetect.*; - - -public class KazemiFacemarkExample { - public static void main(String[] args) throws IOException, URISyntaxException, InterruptedException { - // Load Face Detector - CascadeClassifier faceDetector = new CascadeClassifier ("haarcascade_frontalface_alt2.xml"); - - // Create an instance of Facemark - FacemarkKazemi facemark = FacemarkKazemi.create(); - - // Load landmark detector - facemark.loadModel("face_landmark_model.dat"); - - // Load image - Mat img = imread("face.jpg"); - - // convert to grayscale and equalize histograe for better detection - Mat gray = new Mat (); - cvtColor(img, gray, COLOR_BGR2GRAY); - equalizeHist( gray, gray ); - - // Find faces on the image - RectVector faces = new RectVector (); - faceDetector.detectMultiScale(gray, faces); - - System.out.println ("Faces detected: "+faces.size()); - // Variable for landmarks. - // Landmarks for one face is a vector of points - // There can be more than one face in the image. 
- Point2fVectorVector landmarks = new Point2fVectorVector(); - - // Run landmark detector - boolean success = facemark.fit(img, faces, landmarks); - - if(success) { - // If successful, render the landmarks on each face - for (long i = 0; i < landmarks.size(); i++) { - Point2fVector v = landmarks.get(i); - drawFacemarks(img, v, Scalar.YELLOW); - } - } - - // Display results - imshow("Kazemi Facial Landmark", img); - cvWaitKey(0); - // Save results - imwrite ("kazemi_landmarks.jpg", img); - } -} +/** + * Kazemi Facemark example for JavaCV + * + * @author Théophile Gonos + * + * Link to Kazemi model : + * https://raw.githubusercontent.com/opencv/opencv_3rdparty/contrib_face_alignment_20170818/face_landmark_model.dat + */ + +import java.io.IOException; +import java.net.URISyntaxException; + +import org.bytedeco.opencv.opencv_core.*; +import org.bytedeco.opencv.opencv_face.*; +import org.bytedeco.opencv.opencv_highgui.*; +import org.bytedeco.opencv.opencv_imgproc.*; +import org.bytedeco.opencv.opencv_objdetect.*; +import static org.bytedeco.opencv.global.opencv_core.*; +import static org.bytedeco.opencv.global.opencv_face.*; +import static org.bytedeco.opencv.global.opencv_highgui.*; +import static org.bytedeco.opencv.global.opencv_imgcodecs.*; +import static org.bytedeco.opencv.global.opencv_imgproc.*; +import static org.bytedeco.opencv.global.opencv_objdetect.*; + + +public class KazemiFacemarkExample { + public static void main(String[] args) throws IOException, URISyntaxException, InterruptedException { + // Load Face Detector + CascadeClassifier faceDetector = new CascadeClassifier ("haarcascade_frontalface_alt2.xml"); + + // Create an instance of Facemark + FacemarkKazemi facemark = FacemarkKazemi.create(); + + // Load landmark detector + facemark.loadModel("face_landmark_model.dat"); + + // Load image + Mat img = imread("face.jpg"); + + // convert to grayscale and equalize histograe for better detection + Mat gray = new Mat (); + cvtColor(img, gray, COLOR_BGR2GRAY); + equalizeHist( gray, gray ); + + // Find faces on the image + RectVector faces = new RectVector (); + faceDetector.detectMultiScale(gray, faces); + + System.out.println ("Faces detected: "+faces.size()); + // Variable for landmarks. + // Landmarks for one face is a vector of points + // There can be more than one face in the image. 
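// The Point2fVectorVector filled in by fit() below is indexed face-first:
// landmarks.get(i) is the landmark set of face i, and each Point2f in it is
// one of the 68 points of this particular model. A sketch of reading the raw
// coordinates (same variables as the surrounding code):
for (long i = 0; i < landmarks.size(); i++) {
    Point2fVector points = landmarks.get(i);
    for (long j = 0; j < points.size(); j++) {
        Point2f p = points.get(j);
        System.out.println("face " + i + ", landmark " + j
                + ": (" + p.x() + ", " + p.y() + ")");
    }
}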
+ Point2fVectorVector landmarks = new Point2fVectorVector(); + + // Run landmark detector + boolean success = facemark.fit(img, faces, landmarks); + + if(success) { + // If successful, render the landmarks on each face + for (long i = 0; i < landmarks.size(); i++) { + Point2fVector v = landmarks.get(i); + drawFacemarks(img, v, Scalar.YELLOW); + } + } + + // Display results + imshow("Kazemi Facial Landmark", img); + cvWaitKey(0); + // Save results + imwrite ("kazemi_landmarks.jpg", img); + } +} diff --git a/samples/LBFFacemarkExampleWithVideo.java b/samples/LBFFacemarkExampleWithVideo.java index 32d4b96b..5645b2f1 100644 --- a/samples/LBFFacemarkExampleWithVideo.java +++ b/samples/LBFFacemarkExampleWithVideo.java @@ -1,89 +1,89 @@ -/** - * LBF Facemark example for JavaCV with Video camera and Transparent API - * - * @author Théophile Gonos - * - * you can find the lbfmodel here: - * https://raw.githubusercontent.com/kurnianggoro/GSOC2017/master/data/lbfmodel.yaml - */ - -import java.io.IOException; -import java.net.URISyntaxException; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_face.*; -import org.bytedeco.opencv.opencv_highgui.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import org.bytedeco.opencv.opencv_objdetect.*; -import org.bytedeco.opencv.opencv_videoio.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_face.*; -import static org.bytedeco.opencv.global.opencv_highgui.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_objdetect.*; -import static org.bytedeco.opencv.global.opencv_videoio.*; - -public class LBFFacemarkExampleWithVideo { - - /** - * @param args the command line arguments - * @throws java.io.IOException - * @throws java.net.URISyntaxException - * @throws java.lang.InterruptedException - */ - public static void main(String[] args) throws IOException, URISyntaxException, InterruptedException { - // Load Face Detector - CascadeClassifier faceDetector = new CascadeClassifier ("haarcascade_frontalface_alt2.xml"); - - // Create an instance of Facemark - Facemark facemark = FacemarkLBF.create(); - - // Load landmark detector - facemark.loadModel("lbfmodel.yaml"); - - // Set up webcam for video capture - VideoCapture cam = new VideoCapture (0); - // Variable to store a video frame and its grayscale - Mat frame = new Mat (); - - // Read a frame - while(cam.read(frame)) { - // convert to grayscale and equalize histograe for better detection - // + use of transparent API - UMat gray = new UMat (); - frame.copyTo(gray); - cvtColor(gray, gray, COLOR_BGR2GRAY); - equalizeHist( gray, gray ); - - // Find faces on the image - RectVector faces = new RectVector (); - faceDetector.detectMultiScale(gray, faces); - - System.out.println ("Faces detected: "+faces.size()); - // Verify is at least one face is detected - // With some Facemark algorithms it crashes if there is no faces - if (!faces.empty()) { - - // Variable for landmarks. - // Landmarks for one face is a vector of points - // There can be more than one face in the image. 
-                Point2fVectorVector landmarks = new Point2fVectorVector();
-
-                // Run landmark detector
-                boolean success = facemark.fit(frame, faces, landmarks);
-
-                if(success) {
-                    // If successful, render the landmarks on the face
-                    for (long i = 0; i < landmarks.size(); i++) {
-                        Point2fVector v = landmarks.get(i);
-                        drawFacemarks(frame, v, Scalar.YELLOW);
-                    }
-                }
-            }
-            // Display results
-            imshow("LBF Facial Landmark", frame);
-            // Exit loop if ESC is pressed
-            if (waitKey(1) == 27) break;
-        }
-    }
-}
+/**
+ * LBF Facemark example for JavaCV with Video camera and Transparent API
+ *
+ * @author Théophile Gonos
+ *
+ * You can find the lbfmodel here:
+ * https://raw.githubusercontent.com/kurnianggoro/GSOC2017/master/data/lbfmodel.yaml
+ */
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import org.bytedeco.opencv.opencv_core.*;
+import org.bytedeco.opencv.opencv_face.*;
+import org.bytedeco.opencv.opencv_highgui.*;
+import org.bytedeco.opencv.opencv_imgproc.*;
+import org.bytedeco.opencv.opencv_objdetect.*;
+import org.bytedeco.opencv.opencv_videoio.*;
+import static org.bytedeco.opencv.global.opencv_core.*;
+import static org.bytedeco.opencv.global.opencv_face.*;
+import static org.bytedeco.opencv.global.opencv_highgui.*;
+import static org.bytedeco.opencv.global.opencv_imgproc.*;
+import static org.bytedeco.opencv.global.opencv_objdetect.*;
+import static org.bytedeco.opencv.global.opencv_videoio.*;
+
+public class LBFFacemarkExampleWithVideo {
+
+    /**
+     * @param args the command line arguments
+     * @throws java.io.IOException
+     * @throws java.net.URISyntaxException
+     * @throws java.lang.InterruptedException
+     */
+    public static void main(String[] args) throws IOException, URISyntaxException, InterruptedException {
+        // Load Face Detector
+        CascadeClassifier faceDetector = new CascadeClassifier ("haarcascade_frontalface_alt2.xml");
+
+        // Create an instance of Facemark
+        Facemark facemark = FacemarkLBF.create();
+
+        // Load landmark detector
+        facemark.loadModel("lbfmodel.yaml");
+
+        // Set up webcam for video capture
+        VideoCapture cam = new VideoCapture (0);
+        // Variable to store a video frame
+        Mat frame = new Mat ();
+
+        // Read frames from the camera until it stops delivering them
+        while(cam.read(frame)) {
+            // convert to grayscale and equalize histogram for better detection
+            // + use of transparent API
+            UMat gray = new UMat ();
+            frame.copyTo(gray);
+            cvtColor(gray, gray, COLOR_BGR2GRAY);
+            equalizeHist( gray, gray );
+
+            // Find faces on the image
+            RectVector faces = new RectVector ();
+            faceDetector.detectMultiScale(gray, faces);
+
+            System.out.println ("Faces detected: "+faces.size());
+            // Verify that at least one face is detected:
+            // some Facemark algorithms crash if there are no faces
+            if (!faces.empty()) {
+
+                // Variable for landmarks.
+                // Landmarks for one face form a vector of points.
+                // There can be more than one face in the image.
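+                // Note: the cascade ran on the grayscale UMat (letting OpenCL
+                // accelerate it where available), while fit() below receives
+                // the original color frame; the rectangles in "faces" remain
+                // valid because only the color space changed, not the geometry.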
+ Point2fVectorVector landmarks = new Point2fVectorVector(); + + // Run landmark detector + boolean success = facemark.fit(frame, faces, landmarks); + + if(success) { + // If successful, render the landmarks on the face + for (long i = 0; i < landmarks.size(); i++) { + Point2fVector v = landmarks.get(i); + drawFacemarks(frame, v, Scalar.YELLOW); + } + } + } + // Display results + imshow("LBF Facial Landmark", frame); + // Exit loop if ESC is pressed + if (waitKey(1) == 27) break; + } + } +} diff --git a/samples/MotionDetector.java b/samples/MotionDetector.java index e44c7a62..3af20d7f 100644 --- a/samples/MotionDetector.java +++ b/samples/MotionDetector.java @@ -1,101 +1,101 @@ -/* - * I developed some code for recognize motion detections with JavaCV. - * Actually, it works with an array of Rect, performing, every cicle, an - * intersection test with area of difference with the rect of interests - * (this array is callad "sa", stands for SizedArea). I hope could this - * helps someone. - * - * Feel free to ask about any question regarding the code above, cheers! - * - * Angelo Marchesin - */ - -import org.bytedeco.javacpp.*; -import org.bytedeco.javacv.*; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_imgproc.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_imgproc.*; - -public class MotionDetector { - public static void main(String[] args) throws Exception { - OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(0); - OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage(); - grabber.start(); - - IplImage frame = converter.convert(grabber.grab()); - IplImage image = null; - IplImage prevImage = null; - IplImage diff = null; - - CanvasFrame canvasFrame = new CanvasFrame("Some Title"); - canvasFrame.setCanvasSize(frame.width(), frame.height()); - - CvMemStorage storage = CvMemStorage.create(); - - while (canvasFrame.isVisible() && (frame = converter.convert(grabber.grab())) != null) { - cvClearMemStorage(storage); - - cvSmooth(frame, frame, CV_GAUSSIAN, 9, 9, 2, 2); - if (image == null) { - image = IplImage.create(frame.width(), frame.height(), IPL_DEPTH_8U, 1); - cvCvtColor(frame, image, CV_RGB2GRAY); - } else { - prevImage = IplImage.create(frame.width(), frame.height(), IPL_DEPTH_8U, 1); - prevImage = image; - image = IplImage.create(frame.width(), frame.height(), IPL_DEPTH_8U, 1); - cvCvtColor(frame, image, CV_RGB2GRAY); - } - - if (diff == null) { - diff = IplImage.create(frame.width(), frame.height(), IPL_DEPTH_8U, 1); - } - - if (prevImage != null) { - // perform ABS difference - cvAbsDiff(image, prevImage, diff); - // do some threshold for wipe away useless details - cvThreshold(diff, diff, 64, 255, CV_THRESH_BINARY); - - canvasFrame.showImage(converter.convert(diff)); - - // recognize contours - CvSeq contour = new CvSeq(null); - cvFindContours(diff, storage, contour, Loader.sizeof(CvContour.class), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); - - while (contour != null && !contour.isNull()) { - if (contour.elem_size() > 0) { - CvBox2D box = cvMinAreaRect2(contour, storage); - // test intersection - if (box != null) { - CvPoint2D32f center = box.center(); - CvSize2D32f size = box.size(); -/* for (int i = 0; i < sa.length; i++) { - if ((Math.abs(center.x - (sa[i].offsetX + sa[i].width / 2))) < ((size.width / 2) + (sa[i].width / 2)) && - (Math.abs(center.y - (sa[i].offsetY + sa[i].height / 2))) < ((size.height / 2) + (sa[i].height / 2))) { - - if 
(!alarmedZones.containsKey(i)) {
-                                    alarmedZones.put(i, true);
-                                    activeAlarms.put(i, 1);
-                                } else {
-                                    activeAlarms.remove(i);
-                                    activeAlarms.put(i, 1);
-                                }
-                                System.out.println("Motion Detected in the area no: " + i +
-                                        " Located at points: (" + sa[i].x + ", " + sa[i].y+ ") -"
-                                        + " (" + (sa[i].x +sa[i].width) + ", "
-                                        + (sa[i].y+sa[i].height) + ")");
-                            }
-                        }
-*/
-                        }
-                    }
-                    contour = contour.h_next();
-                }
-            }
-        }
-        grabber.stop();
-        canvasFrame.dispose();
-    }
-}
+/*
+ * I developed some code to recognize motion with JavaCV.
+ * It works with an array of Rect, performing, every cycle, an
+ * intersection test between the area of difference and the rects of interest
+ * (this array is called "sa", which stands for SizedArea). I hope this
+ * helps someone.
+ *
+ * Feel free to ask any question regarding the code above, cheers!
+ *
+ * Angelo Marchesin
+ */
+
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacv.*;
+
+import org.bytedeco.opencv.opencv_core.*;
+import org.bytedeco.opencv.opencv_imgproc.*;
+import static org.bytedeco.opencv.global.opencv_core.*;
+import static org.bytedeco.opencv.global.opencv_imgproc.*;
+
+public class MotionDetector {
+    public static void main(String[] args) throws Exception {
+        OpenCVFrameGrabber grabber = new OpenCVFrameGrabber(0);
+        OpenCVFrameConverter.ToIplImage converter = new OpenCVFrameConverter.ToIplImage();
+        grabber.start();
+
+        IplImage frame = converter.convert(grabber.grab());
+        IplImage image = null;
+        IplImage prevImage = null;
+        IplImage diff = null;
+
+        CanvasFrame canvasFrame = new CanvasFrame("Some Title");
+        canvasFrame.setCanvasSize(frame.width(), frame.height());
+
+        CvMemStorage storage = CvMemStorage.create();
+
+        while (canvasFrame.isVisible() && (frame = converter.convert(grabber.grab())) != null) {
+            cvClearMemStorage(storage);
+
+            cvSmooth(frame, frame, CV_GAUSSIAN, 9, 9, 2, 2);
+            if (image == null) {
+                image = IplImage.create(frame.width(), frame.height(), IPL_DEPTH_8U, 1);
+                cvCvtColor(frame, image, CV_RGB2GRAY);
+            } else {
+                // keep the last grayscale image as the previous frame
+                // (the original allocated a new IplImage here that was
+                // immediately overwritten; line kept to preserve hunk counts)
+                prevImage = image;
+                image = IplImage.create(frame.width(), frame.height(), IPL_DEPTH_8U, 1);
+                cvCvtColor(frame, image, CV_RGB2GRAY);
+            }
+
+            if (diff == null) {
+                diff = IplImage.create(frame.width(), frame.height(), IPL_DEPTH_8U, 1);
+            }
+
+            if (prevImage != null) {
+                // perform ABS difference
+                cvAbsDiff(image, prevImage, diff);
+                // threshold to wipe away useless details
+                cvThreshold(diff, diff, 64, 255, CV_THRESH_BINARY);
+
+                canvasFrame.showImage(converter.convert(diff));
+
+                // recognize contours
+                CvSeq contour = new CvSeq(null);
+                cvFindContours(diff, storage, contour, Loader.sizeof(CvContour.class), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
+
+                while (contour != null && !contour.isNull()) {
+                    if (contour.elem_size() > 0) {
+                        CvBox2D box = cvMinAreaRect2(contour, storage);
+                        // test intersection
+                        if (box != null) {
+                            CvPoint2D32f center = box.center();
+                            CvSize2D32f size = box.size();
+/*                            for (int i = 0; i < sa.length; i++) {
+                                if ((Math.abs(center.x - (sa[i].offsetX + sa[i].width / 2))) < ((size.width / 2) + (sa[i].width / 2)) &&
+                                    (Math.abs(center.y - (sa[i].offsetY + sa[i].height / 2))) < ((size.height / 2) + (sa[i].height / 2))) {
+
+                                    if (!alarmedZones.containsKey(i)) {
+                                        alarmedZones.put(i, true);
+                                        activeAlarms.put(i, 1);
+                                    } else {
+                                        activeAlarms.remove(i);
+                                        activeAlarms.put(i, 1);
+                                    }
+                                    System.out.println("Motion Detected in the area no: " + i +
+                                        " Located at points: (" 
sa[i].x + ", " + sa[i].y+ ") -" + + " (" + (sa[i].x +sa[i].width) + ", " + + (sa[i].y+sa[i].height) + ")"); + } + } +*/ + } + } + contour = contour.h_next(); + } + } + } + grabber.stop(); + canvasFrame.dispose(); + } +} diff --git a/samples/OpenCVFaceRecognizer.java b/samples/OpenCVFaceRecognizer.java index e3eb81d8..c1e4b56c 100644 --- a/samples/OpenCVFaceRecognizer.java +++ b/samples/OpenCVFaceRecognizer.java @@ -1,88 +1,88 @@ -import java.io.File; -import java.io.FilenameFilter; -import java.nio.IntBuffer; - -import org.bytedeco.javacpp.BytePointer; -import org.bytedeco.javacpp.IntPointer; -import org.bytedeco.javacpp.DoublePointer; - -import org.bytedeco.opencv.opencv_core.*; -import org.bytedeco.opencv.opencv_face.*; -import static org.bytedeco.opencv.global.opencv_core.*; -import static org.bytedeco.opencv.global.opencv_face.*; -import static org.bytedeco.opencv.global.opencv_imgcodecs.*; - -/** - * I couldn't find any tutorial on how to perform face recognition using OpenCV and Java, - * so I decided to share a viable solution here. The solution is very inefficient in its - * current form as the training model is built at each run, however it shows what's needed - * to make it work. - * - * The class below takes two arguments: The path to the directory containing the training - * faces and the path to the image you want to classify. Not that all images has to be of - * the same size and that the faces already has to be cropped out of their original images - * (Take a look here http://fivedots.coe.psu.ac.th/~ad/jg/nui07/index.html if you haven't - * done the face detection yet). - * - * For the simplicity of this post, the class also requires that the training images have - * filename format: