// xfeatures2d.hpp
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.

                          License Agreement
               For Open Source Computer Vision Library
                       (3-clause BSD License)

Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

  * Neither the names of the copyright holders nor the names of the contributors
    may be used to endorse or promote products derived from this software
    without specific prior written permission.

This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_XFEATURES2D_HPP__
#define __OPENCV_XFEATURES2D_HPP__

#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"

/** @defgroup xfeatures2d Extra 2D Features Framework
@{
    @defgroup xfeatures2d_experiment Experimental 2D Features Algorithms

This section describes experimental algorithms for 2d feature detection.

    @defgroup xfeatures2d_nonfree Non-free 2D Features Algorithms

This section describes two popular algorithms for 2d feature detection, SIFT and SURF, that are
known to be patented. Use them at your own risk.

@}
*/
namespace cv
{
namespace xfeatures2d
{

//! @addtogroup xfeatures2d_experiment
//! @{
/** @brief Class implementing the FREAK (*Fast Retina Keypoint*) keypoint descriptor, described in @cite AOV12 .

The algorithm proposes a novel keypoint descriptor inspired by the human visual system and more
precisely the retina, coined Fast Retina Keypoint (FREAK). A cascade of binary strings is
computed by efficiently comparing image intensities over a retinal sampling pattern. FREAKs are in
general faster to compute with lower memory load and also more robust than SIFT, SURF or BRISK.
They are competitive alternatives to existing keypoints in particular for embedded applications.

@note
   -   An example on how to use the FREAK descriptor can be found at
        opencv_source_code/samples/cpp/freak_demo.cpp
 */
class CV_EXPORTS_W FREAK : public Feature2D
{
public:
    enum
    {
        NB_SCALES = 64,     //!< presumably the number of scales of the sampling pattern — confirm against the implementation
        NB_PAIRS = 512,     //!< presumably the number of point pairs composing the binary descriptor — confirm
        NB_ORIENPAIRS = 45  //!< presumably the number of pairs used for orientation estimation — confirm
    };

    /** @brief Creates a FREAK descriptor extractor.
    @param orientationNormalized Enable orientation normalization.
    @param scaleNormalized Enable scale normalization.
    @param patternScale Scaling of the description pattern.
    @param nOctaves Number of octaves covered by the detected keypoints.
    @param selectedPairs (Optional) user defined selected pairs indexes.
     */
    CV_WRAP static Ptr<FREAK> create(bool orientationNormalized = true,
                                     bool scaleNormalized = true,
                                     float patternScale = 22.0f,
                                     int nOctaves = 4,
                                     const std::vector<int>& selectedPairs = std::vector<int>());
};
/** @brief The class implements the keypoint detector introduced by @cite Agrawal08, synonym of StarDetector.
 */
class CV_EXPORTS_W StarDetector : public Feature2D
{
public:
    //! the full constructor
    // NOTE(review): parameter semantics below inferred from names only — confirm against the
    // CenSurE/Star implementation:
    //   maxSize                - presumably the maximum feature size considered
    //   responseThreshold      - presumably the threshold on the filter response
    //   lineThresholdProjected / lineThresholdBinarized - presumably edge (line) suppression thresholds
    //   suppressNonmaxSize     - presumably the window size for non-maximum suppression
    CV_WRAP static Ptr<StarDetector> create(int maxSize=45, int responseThreshold=30,
                                            int lineThresholdProjected=10,
                                            int lineThresholdBinarized=8,
                                            int suppressNonmaxSize=5);
};
/*
 * BRIEF Descriptor
 */

/** @brief Class for computing BRIEF descriptors described in @cite calon2010 .

@param bytes length of the descriptor in bytes, valid values are: 16, 32 (default) or 64 .
@param use_orientation sample patterns using keypoints orientation, disabled by default.
 */
class CV_EXPORTS_W BriefDescriptorExtractor : public Feature2D
{
public:
    CV_WRAP static Ptr<BriefDescriptorExtractor> create( int bytes = 32, bool use_orientation = false );
};
/** @brief Class implementing the locally uniform comparison image descriptor, described in @cite LUCID

An image descriptor that can be computed very fast, while being
about as robust as, for example, SURF or BRIEF.
 */
class CV_EXPORTS_W LUCID : public Feature2D
{
public:
    /** @brief Creates a LUCID descriptor extractor. Note that, unlike the other factories in this
     * header, both kernel sizes must be supplied explicitly (no defaults).
     * @param lucid_kernel kernel for descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth
     * @param blur_kernel kernel for blurring image prior to descriptor construction, where 1=3x3, 2=5x5, 3=7x7 and so forth
     */
    CV_WRAP static Ptr<LUCID> create(const int lucid_kernel, const int blur_kernel);
};
/*
 * LATCH Descriptor
 */

/** latch Class for computing the LATCH descriptor.
If you find this code useful, please add a reference to the following paper in your work:
Gil Levi and Tal Hassner, "LATCH: Learned Arrangements of Three Patch Codes", arXiv preprint arXiv:1501.03719, 15 Jan. 2015

LATCH is a binary descriptor based on learned comparisons of triplets of image patches.

* bytes is the size of the descriptor - can be 64, 32, 16, 8, 4, 2 or 1
* rotationInvariance - whether or not the descriptor should compensate for orientation changes.
* half_ssd_size - the size of half of the mini-patches size. For example, if we would like to compare triplets of patches of size 7x7
  then the half_ssd_size should be (7-1)/2 = 3.

Note: the descriptor can be coupled with any keypoint extractor. The only demand is that if you use set rotationInvariance = True then
you will have to use an extractor which estimates the patch orientation (in degrees). Examples for such extractors are ORB and SIFT.

Note: a complete example can be found under /samples/cpp/tutorial_code/xfeatures2D/latch_match.cpp
*/
class CV_EXPORTS_W LATCH : public Feature2D
{
public:
    CV_WRAP static Ptr<LATCH> create(int bytes = 32, bool rotationInvariance = true, int half_ssd_size=3);
};
/** @brief Class implementing DAISY descriptor, described in @cite Tola10

@param radius radius of the descriptor at the initial scale
@param q_radius amount of radial range division quantity
@param q_theta amount of angular range division quantity
@param q_hist amount of gradient orientations range division quantity
@param norm choose descriptors normalization type, where
DAISY::NRM_NONE will not do any normalization (default),
DAISY::NRM_PARTIAL means that histograms are normalized independently for L2 norm equal to 1.0,
DAISY::NRM_FULL means that descriptors are normalized for L2 norm equal to 1.0,
DAISY::NRM_SIFT means that descriptors are normalized for L2 norm equal to 1.0 but no individual one is bigger than 0.154 as in SIFT
@param H optional 3x3 homography matrix used to warp the grid of daisy but sampling keypoints remains unwarped on image
@param interpolation switch to disable interpolation for speed improvement at minor quality loss
@param use_orientation sample patterns using keypoints orientation, disabled by default.
 */
class CV_EXPORTS_W DAISY : public Feature2D
{
public:
    //! normalization modes (see the class comment for their meaning)
    enum
    {
        NRM_NONE = 100, NRM_PARTIAL = 101, NRM_FULL = 102, NRM_SIFT = 103,
    };

    CV_WRAP static Ptr<DAISY> create( float radius = 15, int q_radius = 3, int q_theta = 8,
                int q_hist = 8, int norm = DAISY::NRM_NONE, InputArray H = noArray(),
                bool interpolation = true, bool use_orientation = false );

    /** @overload
     * @param image image to extract descriptors
     * @param keypoints of interest within image
     * @param descriptors resulted descriptors array
     */
    virtual void compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) = 0;

    //! batch overload: one keypoint set and one descriptor array per input image
    virtual void compute( InputArrayOfArrays images,
                          std::vector<std::vector<KeyPoint> >& keypoints,
                          OutputArrayOfArrays descriptors );

    /** @overload
     * @param image image to extract descriptors
     * @param roi region of interest within image
     * @param descriptors resulted descriptors array for roi image pixels
     */
    virtual void compute( InputArray image, Rect roi, OutputArray descriptors ) = 0;

    /** @overload
     * @param image image to extract descriptors
     * @param descriptors resulted descriptors array for all image pixels
     */
    virtual void compute( InputArray image, OutputArray descriptors ) = 0;

    /** @brief Computes the (normalized) descriptor at a given image location.
     * @param y position y on image
     * @param x position x on image
     * @param orientation orientation on image (0->360)
     * @param descriptor supplied array for descriptor storage
     */
    virtual void GetDescriptor( double y, double x, int orientation, float* descriptor ) const = 0;

    /** @brief Same as above, with the sampling grid warped by a homography.
     * @param y position y on image
     * @param x position x on image
     * @param orientation orientation on image (0->360)
     * @param descriptor supplied array for descriptor storage
     * @param H homography matrix for warped grid
     */
    virtual bool GetDescriptor( double y, double x, int orientation, float* descriptor, double* H ) const = 0;

    /** @brief Computes the descriptor at a given image location, skipping normalization.
     * @param y position y on image
     * @param x position x on image
     * @param orientation orientation on image (0->360)
     * @param descriptor supplied array for descriptor storage
     */
    virtual void GetUnnormalizedDescriptor( double y, double x, int orientation, float* descriptor ) const = 0;

    /** @brief Same as above, with the sampling grid warped by a homography.
     * @param y position y on image
     * @param x position x on image
     * @param orientation orientation on image (0->360)
     * @param descriptor supplied array for descriptor storage
     * @param H homography matrix for warped grid
     */
    virtual bool GetUnnormalizedDescriptor( double y, double x, int orientation, float* descriptor , double *H ) const = 0;
};
  211. /** @brief Class implementing the MSD (*Maximal Self-Dissimilarity*) keypoint detector, described in @cite Tombari14.
  212. The algorithm implements a novel interest point detector stemming from the intuition that image patches
  213. which are highly dissimilar over a relatively large extent of their surroundings hold the property of
  214. being repeatable and distinctive. This concept of "contextual self-dissimilarity" reverses the key
  215. paradigm of recent successful techniques such as the Local Self-Similarity descriptor and the Non-Local
  216. Means filter, which build upon the presence of similar - rather than dissimilar - patches. Moreover,
  217. it extends to contextual information the local self-dissimilarity notion embedded in established
  218. detectors of corner-like interest points, thereby achieving enhanced repeatability, distinctiveness and
  219. localization accuracy.
  220. */
  221. class CV_EXPORTS_W MSDDetector : public Feature2D {
  222. public:
  223. static Ptr<MSDDetector> create(int m_patch_radius = 3, int m_search_area_radius = 5,
  224. int m_nms_radius = 5, int m_nms_scale_radius = 0, float m_th_saliency = 250.0f, int m_kNN = 4,
  225. float m_scale_factor = 1.25f, int m_n_scales = -1, bool m_compute_orientation = false);
  226. };
//! @}

} // namespace xfeatures2d
} // namespace cv

#endif