diff --git a/cv_bridge/CMakeLists.txt b/cv_bridge/CMakeLists.txt index 942cb0fac..e29e5d495 100644 --- a/cv_bridge/CMakeLists.txt +++ b/cv_bridge/CMakeLists.txt @@ -76,6 +76,10 @@ set(cv_bridge_lib_dir "$") if(BUILD_TESTING) add_subdirectory(test) + + find_package(ament_lint_auto REQUIRED) + set(ament_cmake_copyright_FOUND TRUE) # Setting this flag disables the copyright check + ament_lint_auto_find_test_dependencies() endif() ament_export_dependencies( diff --git a/cv_bridge/include/cv_bridge/cv_bridge.hpp b/cv_bridge/include/cv_bridge/cv_bridge.hpp index e3c58d773..cee794a96 100644 --- a/cv_bridge/include/cv_bridge/cv_bridge.hpp +++ b/cv_bridge/include/cv_bridge/cv_bridge.hpp @@ -37,19 +37,20 @@ #ifndef CV_BRIDGE__CV_BRIDGE_HPP_ #define CV_BRIDGE__CV_BRIDGE_HPP_ -#include -#include -#include -#include -#include -#include #include +#include #include #include #include #include +#include +#include +#include +#include +#include + namespace cv_bridge { diff --git a/cv_bridge/include/cv_bridge/cv_mat_sensor_msgs_image_type_adapter.hpp b/cv_bridge/include/cv_bridge/cv_mat_sensor_msgs_image_type_adapter.hpp index c0b15a327..d171995cf 100644 --- a/cv_bridge/include/cv_bridge/cv_mat_sensor_msgs_image_type_adapter.hpp +++ b/cv_bridge/include/cv_bridge/cv_mat_sensor_msgs_image_type_adapter.hpp @@ -17,6 +17,8 @@ #include #include +#include +#include #include #include "opencv2/core/mat.hpp" @@ -26,7 +28,6 @@ #include "cv_bridge/visibility_control.h" -#include namespace cv_bridge { @@ -213,7 +214,7 @@ class ROSCvMatContainer CV_BRIDGE_PUBLIC bool is_bigendian() const; - + /// Return the encoding override if provided. 
CV_BRIDGE_PUBLIC std::optional @@ -244,29 +245,26 @@ struct rclcpp::TypeAdapter(source.cv_mat().step); size_t size = source.cv_mat().step * source.cv_mat().rows; diff --git a/cv_bridge/include/cv_bridge/rgb_colors.hpp b/cv_bridge/include/cv_bridge/rgb_colors.hpp index b403de97c..19dd013b1 100644 --- a/cv_bridge/include/cv_bridge/rgb_colors.hpp +++ b/cv_bridge/include/cv_bridge/rgb_colors.hpp @@ -36,9 +36,8 @@ #ifndef CV_BRIDGE__RGB_COLORS_HPP_ #define CV_BRIDGE__RGB_COLORS_HPP_ -#include #include - +#include namespace cv_bridge { diff --git a/cv_bridge/python/cv_bridge/__init__.py b/cv_bridge/python/cv_bridge/__init__.py index e887f6be9..c9545ce39 100644 --- a/cv_bridge/python/cv_bridge/__init__.py +++ b/cv_bridge/python/cv_bridge/__init__.py @@ -1,8 +1,8 @@ -from .core import CvBridge, CvBridgeError +from .core import CvBridge, CvBridgeError # noqa: F401 # python bindings # This try is just to satisfy doc jobs that are built differently. try: - from cv_bridge.boost.cv_bridge_boost import cvtColorForDisplay, getCvType + from cv_bridge.boost.cv_bridge_boost import cvtColorForDisplay, getCvType # noqa: F401 except ImportError: pass diff --git a/cv_bridge/python/cv_bridge/core.py b/cv_bridge/python/cv_bridge/core.py index 43797b4a0..5b92d5e4c 100644 --- a/cv_bridge/python/cv_bridge/core.py +++ b/cv_bridge/python/cv_bridge/core.py @@ -83,7 +83,7 @@ def __init__(self): self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U', 'int16': '16S', 'int32': '32S', 'float32': '32F', 'float64': '64F'} - self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items())) + self.numpy_type_to_cvtype.update({v: k for k, v in self.numpy_type_to_cvtype.items()}) def dtype_with_channels_to_cvtype2(self, dtype, n_channels): return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels) @@ -170,14 +170,16 @@ def imgmsg_to_cv2(self, img_msg, desired_encoding='passthrough'): dtype = np.dtype(dtype) dtype = dtype.newbyteorder('>' if 
img_msg.is_bigendian else '<') - img_buf = np.asarray(img_msg.data, dtype=dtype) if isinstance(img_msg.data, list) else img_msg.data + img_buf = np.asarray(img_msg.data, dtype=dtype) if isinstance( + img_msg.data, list) else img_msg.data if n_channels == 1: im = np.ndarray(shape=(img_msg.height, int(img_msg.step/dtype.itemsize)), dtype=dtype, buffer=img_buf) im = np.ascontiguousarray(im[:img_msg.height, :img_msg.width]) else: - im = np.ndarray(shape=(img_msg.height, int(img_msg.step/dtype.itemsize/n_channels), n_channels), + im = np.ndarray(shape=(img_msg.height, int(img_msg.step/dtype.itemsize/n_channels), + n_channels), dtype=dtype, buffer=img_buf) im = np.ascontiguousarray(im[:img_msg.height, :img_msg.width, :]) @@ -236,7 +238,7 @@ def cv2_to_compressed_imgmsg(self, cvim, dst_format='jpg'): return cmprs_img_msg - def cv2_to_imgmsg(self, cvim, encoding='passthrough', header = None): + def cv2_to_imgmsg(self, cvim, encoding='passthrough', header=None): """ Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::Image message. 
diff --git a/cv_bridge/src/CMakeLists.txt b/cv_bridge/src/CMakeLists.txt index 76ccfd9f9..d643417f7 100644 --- a/cv_bridge/src/CMakeLists.txt +++ b/cv_bridge/src/CMakeLists.txt @@ -32,7 +32,7 @@ install(FILES DESTINATION include/${PROJECT_NAME}/${PROJECT_NAME}) if(NOT CV_BRIDGE_DISABLE_PYTHON) - Python3_add_library(${PROJECT_NAME}_boost MODULE module.cpp module_opencv4.cpp) + python3_add_library(${PROJECT_NAME}_boost MODULE module.cpp module_opencv4.cpp) target_link_libraries(${PROJECT_NAME}_boost PRIVATE ${PROJECT_NAME} ${boost_python_target} diff --git a/cv_bridge/src/cv_bridge.cpp b/cv_bridge/src/cv_bridge.cpp index dd11c78d2..94be625f4 100644 --- a/cv_bridge/src/cv_bridge.cpp +++ b/cv_bridge/src/cv_bridge.cpp @@ -35,12 +35,6 @@ *********************************************************************/ #include -#include -#include -#include -#include -#include -#include "rcpputils/endian.hpp" #include #include @@ -49,6 +43,14 @@ #include #include +#include +#include +#include +#include +#include +#include + + namespace enc = sensor_msgs::image_encodings; namespace cv_bridge @@ -120,8 +122,21 @@ int getCvType(const std::string & encoding) /// @cond DOXYGEN_IGNORE -enum Encoding { INVALID = -1, GRAY = 0, RGB, BGR, RGBA, BGRA, YUV422, YUV422_YUY2, BAYER_RGGB, BAYER_BGGR, - BAYER_GBRG, BAYER_GRBG}; +enum Encoding +{ + INVALID = -1, + GRAY = 0, + RGB, + BGR, + RGBA, + BGRA, + YUV422, + YUV422_YUY2, + BAYER_RGGB, + BAYER_BGGR, + BAYER_GBRG, + BAYER_GRBG +}; Encoding getEncoding(const std::string & encoding) { @@ -217,9 +232,11 @@ const std::vector getConversionCode(std::string src_encoding, std::string d Encoding src_encod = getEncoding(src_encoding); Encoding dst_encod = getEncoding(dst_encoding); bool is_src_color_format = enc::isColor(src_encoding) || enc::isMono(src_encoding) || - enc::isBayer(src_encoding) || (src_encoding == enc::YUV422) || (src_encoding == enc::YUV422_YUY2); + enc::isBayer(src_encoding) || (src_encoding == enc::YUV422) || + (src_encoding == 
enc::YUV422_YUY2); bool is_dst_color_format = enc::isColor(dst_encoding) || enc::isMono(dst_encoding) || - enc::isBayer(dst_encoding) || (dst_encoding == enc::YUV422) || (dst_encoding == enc::YUV422_YUY2); + enc::isBayer(dst_encoding) || (dst_encoding == enc::YUV422) || + (dst_encoding == enc::YUV422_YUY2); bool is_num_channels_the_same = (enc::numChannels(src_encoding) == enc::numChannels(dst_encoding)); @@ -299,7 +316,7 @@ cv::Mat matFromImage(const sensor_msgs::msg::Image & source) cv::Mat mat(source.height, source.width, source_type, const_cast(&source.data[0]), source.step); - if ((rcpputils::endian::native == rcpputils::endian::big && source.is_bigendian) || + if ((rcpputils::endian::native == rcpputils::endian::big && source.is_bigendian) || (rcpputils::endian::native == rcpputils::endian::little && !source.is_bigendian) || byte_depth == 1) { @@ -515,7 +532,8 @@ void CvImage::toCompressedImageMsg( { ros_image.header = header; cv::Mat image; - if (encoding == enc::BGR8 || encoding == enc::BGRA8 || encoding == enc::MONO8 || encoding == enc::MONO16) + if (encoding == enc::BGR8 || encoding == enc::BGRA8 || encoding == enc::MONO8 || + encoding == enc::MONO16) { image = this->image; } else { @@ -550,8 +568,7 @@ CvImagePtr toCvCopy(const sensor_msgs::msg::CompressedImage & source, const std: const cv::Mat rgb_a = cv::imdecode(in, cv::IMREAD_UNCHANGED); if (encoding != enc::MONO16) { - switch (rgb_a.channels()) - { + switch (rgb_a.channels()) { case 4: return toCvCopyImpl(rgb_a, source.header, enc::BGRA8, encoding); break; @@ -564,8 +581,7 @@ CvImagePtr toCvCopy(const sensor_msgs::msg::CompressedImage & source, const std: default: return CvImagePtr(); } - } - else { + } else { return toCvCopyImpl(rgb_a, source.header, enc::MONO16, encoding); } } @@ -599,7 +615,7 @@ CvImageConstPtr cvtColorForDisplay( } else { // We choose BGR by default here as we assume people will use OpenCV if ((enc::bitDepth(source->encoding) == 8) || - (enc::bitDepth(source->encoding) == 16) || 
+ (enc::bitDepth(source->encoding) == 16) || (enc::bitDepth(source->encoding) == 32)) { encoding = enc::BGR8; @@ -619,8 +635,8 @@ CvImageConstPtr cvtColorForDisplay( (enc::bitDepth(encoding) != 8)) { throw Exception( - "cv_bridge.cvtColorForDisplay() does not have an output encoding \ - that is color or mono, and has is bit in depth"); + "cv_bridge.cvtColorForDisplay() does not have an output encoding " + "that is color or mono, and has is bit in depth"); } } @@ -651,7 +667,7 @@ CvImageConstPtr cvtColorForDisplay( // Perform scaling if asked for if (options.do_dynamic_scaling) { float inf = std::numeric_limits::infinity(); - cv::Mat mask = ((source->image!=inf) & (source->image!=-inf)); + cv::Mat mask = ((source->image != inf) & (source->image != -inf)); cv::minMaxLoc(source->image, &min_image_value, &max_image_value, NULL, NULL, mask); if (min_image_value == max_image_value) { CvImagePtr result(new CvImage()); @@ -671,8 +687,8 @@ CvImageConstPtr cvtColorForDisplay( if (min_image_value != max_image_value) { if (enc::numChannels(source->encoding) != 1) { throw Exception( - "cv_bridge.cvtColorForDisplay() scaling for images \ - with more than one channel is unsupported"); + "cv_bridge.cvtColorForDisplay() scaling for images " + "with more than one channel is unsupported"); } CvImagePtr img_scaled(new CvImage()); img_scaled->header = source->header; diff --git a/cv_bridge/src/cv_mat_sensor_msgs_image_type_adapter.cpp b/cv_bridge/src/cv_mat_sensor_msgs_image_type_adapter.cpp index 0633e3d2a..5abb6a63f 100644 --- a/cv_bridge/src/cv_mat_sensor_msgs_image_type_adapter.cpp +++ b/cv_bridge/src/cv_mat_sensor_msgs_image_type_adapter.cpp @@ -179,12 +179,9 @@ ROSCvMatContainer::get_sensor_msgs_msg_image_copy( { sensor_msgs_image.height = frame_.rows; sensor_msgs_image.width = frame_.cols; - if (encoding_override_.has_value() && !encoding_override_.value().empty()) - { + if (encoding_override_.has_value() && !encoding_override_.value().empty()) { sensor_msgs_image.encoding = 
encoding_override_.value(); - } - else - { + } else { switch (frame_.type()) { case CV_8UC1: sensor_msgs_image.encoding = "mono8"; diff --git a/cv_bridge/src/module.hpp b/cv_bridge/src/module.hpp index b9ea35bd0..5561c1fe2 100644 --- a/cv_bridge/src/module.hpp +++ b/cv_bridge/src/module.hpp @@ -13,8 +13,11 @@ // limitations under the License. // -#ifndef CV_BRIDGE_MODULE_HPP_ -#define CV_BRIDGE_MODULE_HPP_ +#ifndef MODULE_HPP_ +#define MODULE_HPP_ + +#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION +#include #include @@ -33,9 +36,6 @@ #include #include -#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION -#include - #include namespace bp = boost::python; @@ -50,4 +50,4 @@ static void * do_numpy_import() return nullptr; } -#endif // CV_BRIDGE_MODULE_HPP_ +#endif // MODULE_HPP_ diff --git a/cv_bridge/src/module_opencv4.cpp b/cv_bridge/src/module_opencv4.cpp index ac93f0897..1356fd74d 100644 --- a/cv_bridge/src/module_opencv4.cpp +++ b/cv_bridge/src/module_opencv4.cpp @@ -1,4 +1,5 @@ -/* Taken from opencv/modules/python/src2/cv2.cpp */ +// Copyright (c) 2014, OpenCV +// Copyright (c) 2024, ROS Perception #include "module.hpp" @@ -77,9 +78,6 @@ class PyEnsureGIL return 0; \ } -using namespace cv; - - [[gnu::unused]] static PyObject * failmsgp(const char * fmt, ...) { char str[1000]; @@ -93,25 +91,25 @@ using namespace cv; return 0; } -class NumpyAllocator : public MatAllocator +class NumpyAllocator : public cv::MatAllocator { public: - NumpyAllocator() {stdAllocator = Mat::getStdAllocator();} + NumpyAllocator() {stdAllocator = cv::Mat::getStdAllocator();} ~NumpyAllocator() {} // To compile openCV3 with OpenCV4 APIs. 
#ifndef OPENCV_VERSION_4 -#define AccessFlag int +namespace cv { using AccessFlag = int; }  // macro names cannot contain '::'; alias keeps cv::AccessFlag valid pre-OpenCV4 #endif - UMatData * allocate(PyObject * o, int dims, const int * sizes, int type, size_t * step) const + cv::UMatData * allocate(PyObject * o, int dims, const int * sizes, int type, size_t * step) const { - UMatData * u = new UMatData(this); + cv::UMatData * u = new cv::UMatData(this); u->data = u->origdata = reinterpret_cast(PyArray_DATA(reinterpret_cast(o))); npy_intp * _strides = PyArray_STRIDES(reinterpret_cast(o)); for (int i = 0; i < dims - 1; i++) { - step[i] = (size_t)_strides[i]; + step[i] = static_cast(_strides[i]); } step[dims - 1] = CV_ELEM_SIZE(type); u->size = sizes[0] * step[0]; @@ -119,12 +117,12 @@ class NumpyAllocator : public MatAllocator return u; } - UMatData * allocate( - int dims0, const int * sizes, int type, void * data, size_t * step, AccessFlag flags, - UMatUsageFlags usageFlags) const + cv::UMatData * allocate( + int dims0, const int * sizes, int type, void * data, size_t * step, cv::AccessFlag flags, + cv::UMatUsageFlags usageFlags) const { if (data != 0) { - CV_Error(Error::StsAssert, "The data should normally be NULL!"); + CV_Error(cv::Error::StsAssert, "The data should normally be NULL!"); // probably this is safe to do in such extreme case return stdAllocator->allocate(dims0, sizes, type, data, step, flags, usageFlags); } @@ -147,18 +145,18 @@ class NumpyAllocator { } PyObject * o = PyArray_SimpleNew(dims, _sizes, typenum); if (!o) { - CV_Error_(Error::StsError, + CV_Error_(cv::Error::StsError, ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims)); } return allocate(o, dims0, sizes, type, step); } - bool allocate(UMatData * u, AccessFlag accessFlags, UMatUsageFlags usageFlags) const + bool allocate(cv::UMatData * u, cv::AccessFlag accessFlags, cv::UMatUsageFlags usageFlags) const { return stdAllocator->allocate(u, accessFlags, usageFlags); } - void deallocate(UMatData * u) const + void deallocate(cv::UMatData * 
u) const { if (u) { PyEnsureGIL gil; @@ -168,7 +166,7 @@ class NumpyAllocator : public MatAllocator } } - const MatAllocator * stdAllocator; + const cv::MatAllocator * stdAllocator; }; NumpyAllocator g_numpyAllocator; @@ -185,7 +183,7 @@ PyObject * pyopencv_from(const T & src); enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 }; // special case, when the convertor needs full ArgInfo structure -static bool pyopencv_to(PyObject * o, Mat & m, const ArgInfo info) +static bool pyopencv_to(PyObject * o, cv::Mat & m, const ArgInfo info) { // to avoid PyArray_Check() to crash even with valid array do_numpy_import(); @@ -201,17 +199,17 @@ static bool pyopencv_to(PyObject * o, Mat & m, const ArgInfo info) if (PyInt_Check(o) ) { double v[] = {static_cast(PyInt_AsLong(reinterpret_cast(o))), 0., 0., 0.}; - m = Mat(4, 1, CV_64F, v).clone(); + m = cv::Mat(4, 1, CV_64F, v).clone(); return true; } if (PyFloat_Check(o) ) { double v[] = {PyFloat_AsDouble(reinterpret_cast(o)), 0., 0., 0.}; - m = Mat(4, 1, CV_64F, v).clone(); + m = cv::Mat(4, 1, CV_64F, v).clone(); return true; } if (PyTuple_Check(o) ) { int i, sz = static_cast(PyTuple_Size(reinterpret_cast(o))); - m = Mat(sz, 1, CV_64F); + m = cv::Mat(sz, 1, CV_64F); for (i = 0; i < sz; i++) { PyObject * oi = PyTuple_GET_ITEM(o, i); if (PyInt_Check(oi) ) { @@ -278,7 +276,7 @@ static bool pyopencv_to(PyObject * o, Mat & m, const ArgInfo info) // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases // b) transposed arrays, where _strides[] elements go in non-descending order // c) flipped arrays, where some of _strides[] elements are negative - if ( (i == ndims - 1 && (size_t)_strides[i] != elemsize) || + if ( (i == ndims - 1 && static_cast(_strides[i]) != elemsize) || (i < ndims - 1 && _strides[i] < _strides[i + 1]) ) { needcopy = true; @@ -292,8 +290,8 @@ static bool pyopencv_to(PyObject * o, Mat & m, const ArgInfo info) if (needcopy) { if (info.outputarg) { failmsg( - "Layout of the output array %s 
is incompatible with \ - cv::Mat (step[ndims-1] != elemsize or step[1] != elemsize*nchannels)", + "Layout of the output array %s is incompatible with " + "cv::Mat (step[ndims-1] != elemsize or step[1] != elemsize*nchannels)", info.name); return false; } @@ -311,7 +309,7 @@ static bool pyopencv_to(PyObject * o, Mat & m, const ArgInfo info) for (int i = 0; i < ndims; i++) { size[i] = static_cast(_sizes[i]); - step[i] = (size_t)_strides[i]; + step[i] = static_cast(_strides[i]); } // handle degenerate case @@ -331,7 +329,7 @@ static bool pyopencv_to(PyObject * o, Mat & m, const ArgInfo info) return false; } - m = Mat(ndims, size, type, PyArray_DATA(oarr), step); + m = cv::Mat(ndims, size, type, PyArray_DATA(oarr), step); m.u = g_numpyAllocator.allocate(o, ndims, size, type, step); m.addref(); @@ -344,17 +342,17 @@ static bool pyopencv_to(PyObject * o, Mat & m, const ArgInfo info) } template<> -bool pyopencv_to(PyObject * o, Mat & m, const char * name) +bool pyopencv_to(PyObject * o, cv::Mat & m, const char * name) { return pyopencv_to(o, m, ArgInfo(name, 0)); } -PyObject * pyopencv_from(const Mat & m) +PyObject * pyopencv_from(const cv::Mat & m) { if (!m.data) { Py_RETURN_NONE; } - Mat temp, * p = const_cast(&m); + cv::Mat temp, * p = const_cast(&m); if (!p->u || p->allocator != &g_numpyAllocator) { temp.allocator = &g_numpyAllocator; ERRWRAP2(m.copyTo(temp)); diff --git a/cv_bridge/src/pycompat.hpp b/cv_bridge/src/pycompat.hpp index 576d93008..932c101e6 100644 --- a/cv_bridge/src/pycompat.hpp +++ b/cv_bridge/src/pycompat.hpp @@ -1,3 +1,6 @@ +// Copyright (C) 2000-2008, Intel Corporation, all rights reserved. +// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. +// Copyright (c) 2018 Intel Corporation. /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. 
diff --git a/cv_bridge/test/conversions.py b/cv_bridge/test/conversions.py index a9bd576c6..53ec719d0 100644 --- a/cv_bridge/test/conversions.py +++ b/cv_bridge/test/conversions.py @@ -1,6 +1,8 @@ +import unittest + from cv_bridge import CvBridge, CvBridgeError import numpy as np -import unittest + class TestConversions(unittest.TestCase): @@ -40,8 +42,9 @@ def test_encode_decode_cv2(self): # http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat # imread(const string& filename, int flags) def test_encode_decode_cv2_compressed(self): - # FIXME: remove jp2(a.k.a JPEG2000) as its JASPER codec is disabled within Ubuntu opencv library - # due to security issues, but it works once you rebuild your opencv library with JASPER enabled + # FIXME: remove jp2(a.k.a JPEG2000) as its JASPER codec is disabled within Ubuntu opencv + # library due to security issues, but it works once you rebuild your opencv library with + # JASPER enabled formats = ['jpg', 'jpeg', 'jpe', 'png', 'bmp', 'dib', 'sr', 'ras', 'tif', 'tiff'] # this formats rviz is not support diff --git a/cv_bridge/test/test_compression.cpp b/cv_bridge/test/test_compression.cpp index f299468f4..a4a4a8676 100644 --- a/cv_bridge/test/test_compression.cpp +++ b/cv_bridge/test/test_compression.cpp @@ -1,9 +1,12 @@ -#include +// Copyright (c) 2017, ROS Perception #include #include #include +#include + + TEST(CvBridgeTest, compression) { cv::RNG rng(0); diff --git a/cv_bridge/test/test_dynamic_scaling.cpp b/cv_bridge/test/test_dynamic_scaling.cpp index 0dad07e4d..1611b6736 100644 --- a/cv_bridge/test/test_dynamic_scaling.cpp +++ b/cv_bridge/test/test_dynamic_scaling.cpp @@ -1,8 +1,9 @@ +// Copyright (c) 2022, ROS Perception +#include +#include #include #include #include -#include -#include TEST(TestDynamicScaling, ignoreInfAndNanValues) { @@ -30,9 +31,9 @@ TEST(TestDynamicScaling, ignoreInfAndNanValues) auto converted = cv_bridge::cvtColorForDisplay(img, "", options); // Check that the 
scaling works for non-inf and non-nan values. - std::vector expected = {0, 0, 0, 128, 128, 128, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0}; - for (unsigned i = 0; i < expected.size(); ++i) - { + std::vector expected = {0, 0, 0, 128, 128, 128, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, + 0}; + for (unsigned i = 0; i < expected.size(); ++i) { EXPECT_EQ(converted->image.at(i), expected.at(i)); } } diff --git a/cv_bridge/test/test_endian.cpp b/cv_bridge/test/test_endian.cpp index 088091822..8da4472a9 100644 --- a/cv_bridge/test/test_endian.cpp +++ b/cv_bridge/test/test_endian.cpp @@ -1,12 +1,11 @@ -#include -#include +// Copyright (c) 2015, ROS Perception #include #include +#include +#include TEST(CvBridgeTest, endianness) { - using namespace boost::endian; - // Create an image of the type opposite to the platform sensor_msgs::msg::Image msg; msg.height = 1; @@ -18,14 +17,14 @@ TEST(CvBridgeTest, endianness) int32_t * data = reinterpret_cast(&msg.data[0]); // Write 1 and 2 in order, but with an endianness opposite to the platform - if (order::native == order::little) { + if (boost::endian::order::native == boost::endian::order::little) { msg.is_bigendian = true; - *(data++) = native_to_big(static_cast(1)); - *data = native_to_big(static_cast(2)); + *(data++) = boost::endian::native_to_big(static_cast(1)); + *data = boost::endian::native_to_big(static_cast(2)); } else { msg.is_bigendian = false; - *(data++) = native_to_little(static_cast(1)); - *data = native_to_little(static_cast(2)); + *(data++) = boost::endian::native_to_little(static_cast(1)); + *data = boost::endian::native_to_little(static_cast(2)); } // Make sure the values are still the same diff --git a/cv_bridge/test/test_rgb_colors.cpp b/cv_bridge/test/test_rgb_colors.cpp index e8de3dfae..aa1120a9d 100644 --- a/cv_bridge/test/test_rgb_colors.cpp +++ b/cv_bridge/test/test_rgb_colors.cpp @@ -1,3 +1,4 @@ +// Copyright (c) 2015, ROS Perception #include #include diff --git a/cv_bridge/test/utest.cpp 
b/cv_bridge/test/utest.cpp index 4ef3a091d..3a7b65fc3 100644 --- a/cv_bridge/test/utest.cpp +++ b/cv_bridge/test/utest.cpp @@ -1,3 +1,5 @@ +// Copyright (c) 2011, Willow Garage +// Copyright (c) 2013-2024, ROS Perception #include #include diff --git a/cv_bridge/test/utest2.cpp b/cv_bridge/test/utest2.cpp index 7121b04b4..3a5792498 100644 --- a/cv_bridge/test/utest2.cpp +++ b/cv_bridge/test/utest2.cpp @@ -34,55 +34,122 @@ *********************************************************************/ #include -#include -#include #include #include -#include "cv_bridge/cv_bridge.hpp" -#include "opencv2/core/core.hpp" +#include +#include +#include +#include -using namespace sensor_msgs::image_encodings; +namespace enc = sensor_msgs::image_encodings; bool isUnsigned(const std::string & encoding) { - return encoding == RGB8 || encoding == RGBA8 || encoding == RGB16 || encoding == RGBA16 || - encoding == BGR8 || encoding == BGRA8 || encoding == BGR16 || encoding == BGRA16 || - encoding == MONO8 || encoding == MONO16 || - encoding == MONO8 || encoding == MONO16 || encoding == TYPE_8UC1 || - encoding == TYPE_8UC2 || encoding == TYPE_8UC3 || encoding == TYPE_8UC4 || - encoding == TYPE_16UC1 || encoding == TYPE_16UC2 || encoding == TYPE_16UC3 || - encoding == TYPE_16UC4; - // BAYER_RGGB8, BAYER_BGGR8, BAYER_GBRG8, BAYER_GRBG8, BAYER_RGGB16, - // BAYER_BGGR16, BAYER_GBRG16, BAYER_GRBG16, YUV422 + const std::vector unsigned_encodings = { + enc::RGB8, + enc::RGBA8, + enc::RGB16, + enc::RGBA16, + enc::BGR8, + enc::BGRA8, + enc::BGR16, + enc::BGRA16, + enc::MONO8, + enc::MONO16, + enc::TYPE_8UC1, + enc::TYPE_8UC2, + enc::TYPE_8UC3, + enc::TYPE_8UC4, + enc::TYPE_16UC1, + enc::TYPE_16UC2, + enc::TYPE_16UC3, + enc::TYPE_16UC4, + // enc::BAYER_RGGB8, + // enc::BAYER_BGGR8, + // enc::BAYER_GBRG8, + // enc::BAYER_GRBG8, + // enc::BAYER_RGGB16, + // enc::BAYER_BGGR16, + // enc::BAYER_GBRG16, + // enc::BAYER_GRBG16, + // enc::YUV422 + }; + return std::find(unsigned_encodings.begin(), 
unsigned_encodings.end(), encoding) != + unsigned_encodings.end(); } + std::vector getEncodings() { -// TODO(N/A) for Groovy, the following types should be uncommented - std::string encodings[] = {RGB8, RGBA8, RGB16, RGBA16, BGR8, BGRA8, BGR16, BGRA16, MONO8, MONO16, - TYPE_8UC1, /*TYPE_8UC2,*/ TYPE_8UC3, TYPE_8UC4, - TYPE_8SC1, /*TYPE_8SC2,*/ TYPE_8SC3, TYPE_8SC4, - TYPE_16UC1, /*TYPE_16UC2,*/ TYPE_16UC3, TYPE_16UC4, - TYPE_16SC1, /*TYPE_16SC2,*/ TYPE_16SC3, TYPE_16SC4, - TYPE_32SC1, /*TYPE_32SC2,*/ TYPE_32SC3, TYPE_32SC4, - TYPE_32FC1, /*TYPE_32FC2,*/ TYPE_32FC3, TYPE_32FC4, - TYPE_64FC1, /*TYPE_64FC2,*/ TYPE_64FC3, TYPE_64FC4, - // BAYER_RGGB8, BAYER_BGGR8, BAYER_GBRG8, BAYER_GRBG8, - // BAYER_RGGB16, BAYER_BGGR16, BAYER_GBRG16, BAYER_GRBG16, - YUV422, YUV422_YUY2}; + // TODO(N/A) for Groovy, the following types should be uncommented + std::string encodings[] = { + enc::RGB8, + enc::RGBA8, + enc::RGB16, + enc::RGBA16, + enc::BGR8, + enc::BGRA8, + enc::BGR16, + enc::BGRA16, + enc::MONO8, + enc::MONO16, + enc::TYPE_8UC1, + // enc::TYPE_8UC2, + enc::TYPE_8UC3, + enc::TYPE_8UC4, + enc::TYPE_8SC1, + // enc::TYPE_8SC2, + enc::TYPE_8SC3, + enc::TYPE_8SC4, + enc::TYPE_16UC1, + // enc::TYPE_16UC2, + enc::TYPE_16UC3, + enc::TYPE_16UC4, + enc::TYPE_16SC1, + // enc::TYPE_16SC2, + enc::TYPE_16SC3, + enc::TYPE_16SC4, + enc::TYPE_32SC1, + // enc::TYPE_32SC2, + enc::TYPE_32SC3, + enc::TYPE_32SC4, + enc::TYPE_32FC1, + // enc::TYPE_32FC2, + enc::TYPE_32FC3, + enc::TYPE_32FC4, + enc::TYPE_64FC1, + // enc::TYPE_64FC2, + enc::TYPE_64FC3, + enc::TYPE_64FC4, + // enc::BAYER_RGGB8, + // enc::BAYER_BGGR8, + // enc::BAYER_GBRG8, + // enc::BAYER_GRBG8, + // enc::BAYER_RGGB16, + // enc::BAYER_BGGR16, + // enc::BAYER_GBRG16, + // enc::BAYER_GRBG16, + enc::YUV422, + enc::YUV422_YUY2 + }; return std::vector(encodings, encodings + 48 - 8 - 7); } TEST(OpencvTests, testCase_encode_decode) { + using enc::isColor; + using enc::isMono; + using enc::numChannels; + using enc::bitDepth; + std::vector 
encodings = getEncodings(); for (size_t i = 0; i < encodings.size(); ++i) { std::string src_encoding = encodings[i]; bool is_src_color_format = isColor(src_encoding) || isMono(src_encoding) || - (src_encoding == sensor_msgs::image_encodings::YUV422) || - (src_encoding == sensor_msgs::image_encodings::YUV422_YUY2); + (src_encoding == enc::YUV422) || + (src_encoding == enc::YUV422_YUY2); cv::Mat image_original(cv::Size(400, 400), cv_bridge::getCvType(src_encoding)); cv::RNG r(77); r.fill(image_original, cv::RNG::UNIFORM, 0, 127); @@ -96,8 +163,8 @@ TEST(OpencvTests, testCase_encode_decode) for (size_t j = 0; j < encodings.size(); ++j) { std::string dst_encoding = encodings[j]; bool is_dst_color_format = isColor(dst_encoding) || isMono(dst_encoding) || - (dst_encoding == sensor_msgs::image_encodings::YUV422) || - (dst_encoding == sensor_msgs::image_encodings::YUV422_YUY2); + (dst_encoding == enc::YUV422) || + (dst_encoding == enc::YUV422_YUY2); bool is_num_channels_the_same = (numChannels(src_encoding) == numChannels(dst_encoding)); cv_bridge::CvImageConstPtr cv_image; @@ -128,8 +195,9 @@ TEST(OpencvTests, testCase_encode_decode) continue; } // We do not support conversion to YUV422 for now, except from YUV422 - if (((dst_encoding == YUV422) && (src_encoding != YUV422)) || - ((dst_encoding == YUV422_YUY2) && (src_encoding != YUV422_YUY2))) { + if (((dst_encoding == enc::YUV422) && (src_encoding != enc::YUV422)) || + ((dst_encoding == enc::YUV422_YUY2) && (src_encoding != enc::YUV422_YUY2))) + { EXPECT_THROW(cv_bridge::toCvShare(image_msg, dst_encoding), cv_bridge::Exception); continue; } @@ -137,8 +205,9 @@ TEST(OpencvTests, testCase_encode_decode) cv_image = cv_bridge::toCvShare(image_msg, dst_encoding); // We do not support conversion to YUV422 for now, except from YUV422 - if (((src_encoding == YUV422) && (dst_encoding != YUV422)) || - ((src_encoding == YUV422_YUY2) && (dst_encoding != YUV422_YUY2))){ + if (((src_encoding == enc::YUV422) && (dst_encoding != 
enc::YUV422)) || + ((src_encoding == enc::YUV422_YUY2) && (dst_encoding != enc::YUV422_YUY2))) + { EXPECT_THROW((void)cvtColor(cv_image, src_encoding)->image, cv_bridge::Exception); continue; } diff --git a/image_geometry/CMakeLists.txt b/image_geometry/CMakeLists.txt index 5a8d2171b..1d691e3a7 100644 --- a/image_geometry/CMakeLists.txt +++ b/image_geometry/CMakeLists.txt @@ -46,6 +46,9 @@ install(TARGETS ${PROJECT_NAME} EXPORT export_${PROJECT_NAME} if(BUILD_TESTING) add_subdirectory(test) + + find_package(ament_lint_auto REQUIRED) + ament_lint_auto_find_test_dependencies() endif() ament_export_targets(export_${PROJECT_NAME})