I'm trying to update my code to use cv2.SURF() as opposed to cv2.FeatureDetector_create("SURF") and cv2.DescriptorExtractor_create("SURF"). However, I'm having trouble getting the descriptors after detecting the keypoints. What's the correct way to call SURF.detect?
I tried following the OpenCV documentation, but I'm a little confused. This is what it says:
Python: cv2.SURF.detect(img, mask) → keypoints
Python: cv2.SURF.detect(img, mask[, descriptors[, useProvidedKeypoints]]) → keypoints, descriptors
How do I pass the keypoints in when making the second call to SURF.detect?
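For reference, this is roughly what I'm attempting; the file name is a placeholder, and the second call is just my (possibly wrong) reading of the signature above:

import cv2

img = cv2.imread('scene.jpg', 0)   # placeholder image, loaded as grayscale
surf = cv2.SURF()

# First documented form: keypoints only
keypoints = surf.detect(img, None)

# Second documented form: returns keypoints and descriptors, but where do the
# keypoints from the first call go when useProvidedKeypoints is True?
keypoints, descriptors = surf.detect(img, None, useProvidedKeypoints=True)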
2 Answers
#1
34
I am not sure whether I understand your question correctly, but if you are looking for a sample that matches SURF keypoints between two images, a very simple and basic one is below. It works a bit like template matching:
import cv2
import numpy as np

# Load the image
img = cv2.imread('messi4.jpg')

# Convert it to grayscale
imgg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# SURF extraction (old-style call returning keypoints and descriptors)
surf = cv2.SURF()
kp, descriptors = surf.detect(imgg, None, useProvidedKeypoints=False)

# Setting up samples and responses for kNN
samples = np.array(descriptors)
responses = np.arange(len(kp), dtype=np.float32)

# kNN training
knn = cv2.KNearest()
knn.train(samples, responses)

# Now loading a template image and searching for similar keypoints
template = cv2.imread('template.jpg')
templateg = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
keys, desc = surf.detect(templateg, None, useProvidedKeypoints=False)

for h, des in enumerate(desc):
    # 128 is for extended SURF descriptors; plain SURF descriptors have 64 values
    des = np.array(des, np.float32).reshape((1, 128))
    retval, results, neigh_resp, dists = knn.find_nearest(des, 1)
    res, dist = int(results[0][0]), dists[0][0]

    if dist < 0.1:  # draw matched keypoints in red color
        color = (0, 0, 255)
    else:           # draw unmatched in blue color
        print dist
        color = (255, 0, 0)

    # Draw matched key points on original image
    x, y = kp[res].pt
    center = (int(x), int(y))
    cv2.circle(img, center, 2, color, -1)

    # Draw matched key points on template image
    x, y = keys[h].pt
    center = (int(x), int(y))
    cv2.circle(template, center, 2, color, -1)

cv2.imshow('img', img)
cv2.imshow('tm', template)
cv2.waitKey(0)
cv2.destroyAllWindows()
Below are the results I got (I copy-pasted the template image onto the original image using Paint):
As you can see, there are some small mistakes, but as a starting point I hope it is OK.
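Note that the three-argument detect() call above comes from the older 2.3-era bindings. If your build is OpenCV 2.4 or later, detect() returns only keypoints, and descriptors come from a separate call; that also answers how to reuse already-detected keypoints. A minimal sketch, assuming a 2.4-style cv2.SURF object:

import cv2

img = cv2.imread('messi4.jpg', 0)  # grayscale
surf = cv2.SURF(400)               # the Hessian threshold 400 is just an example value

# detect() alone returns keypoints only
kp = surf.detect(img, None)

# compute() takes the keypoints you already have and returns them with descriptors...
kp, descriptors = surf.compute(img, kp)

# ...or do both steps at once
kp, descriptors = surf.detectAndCompute(img, None)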
#2
4
An improvement of the above algorithm is:
import cv2
import numpy

opencv_haystack = cv2.imread('haystack.jpg')
opencv_needle = cv2.imread('needle.jpg')

ngrey = cv2.cvtColor(opencv_needle, cv2.COLOR_BGR2GRAY)
hgrey = cv2.cvtColor(opencv_haystack, cv2.COLOR_BGR2GRAY)

# build feature detector and descriptor extractor
hessian_threshold = 85
detector = cv2.SURF(hessian_threshold)
(hkeypoints, hdescriptors) = detector.detect(hgrey, None, useProvidedKeypoints=False)
(nkeypoints, ndescriptors) = detector.detect(ngrey, None, useProvidedKeypoints=False)

# extract vectors of size 64 from raw descriptors numpy arrays
rowsize = len(hdescriptors) / len(hkeypoints)
if rowsize > 1:
    hrows = numpy.array(hdescriptors, dtype=numpy.float32).reshape((-1, rowsize))
    nrows = numpy.array(ndescriptors, dtype=numpy.float32).reshape((-1, rowsize))
    #print hrows.shape, nrows.shape
else:
    hrows = numpy.array(hdescriptors, dtype=numpy.float32)
    nrows = numpy.array(ndescriptors, dtype=numpy.float32)
    rowsize = len(hrows[0])

# kNN training - learn mapping from hrow to hkeypoints index
samples = hrows
responses = numpy.arange(len(hkeypoints), dtype=numpy.float32)
#print len(samples), len(responses)
knn = cv2.KNearest()
knn.train(samples, responses)

# retrieve index and value through enumeration
for i, descriptor in enumerate(nrows):
    descriptor = numpy.array(descriptor, dtype=numpy.float32).reshape((1, rowsize))
    #print i, descriptor.shape, samples[0].shape
    retval, results, neigh_resp, dists = knn.find_nearest(descriptor, 1)
    res, dist = int(results[0][0]), dists[0][0]
    #print res, dist

    if dist < 0.1:
        # draw matched keypoints in red color
        color = (0, 0, 255)
    else:
        # draw unmatched in blue color
        color = (255, 0, 0)

    # draw matched key points on haystack image
    x, y = hkeypoints[res].pt
    center = (int(x), int(y))
    cv2.circle(opencv_haystack, center, 2, color, -1)

    # draw matched key points on needle image
    x, y = nkeypoints[i].pt
    center = (int(x), int(y))
    cv2.circle(opencv_needle, center, 2, color, -1)

cv2.imshow('haystack', opencv_haystack)
cv2.imshow('needle', opencv_needle)
cv2.waitKey(0)
cv2.destroyAllWindows()
You can uncomment the print statements to get a better idea about the data structures used.
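If you later move to an OpenCV 2.4-or-newer build, the manual cv2.KNearest step can usually be replaced by OpenCV's built-in brute-force matcher. This is only a sketch of that alternative, not part of the answers above; it assumes detectAndCompute is available and uses the common ratio test instead of the fixed 0.1 distance threshold:

import cv2

hgrey = cv2.imread('haystack.jpg', 0)
ngrey = cv2.imread('needle.jpg', 0)

surf = cv2.SURF(85)
hkeypoints, hdescriptors = surf.detectAndCompute(hgrey, None)
nkeypoints, ndescriptors = surf.detectAndCompute(ngrey, None)

# brute-force matcher: 2 nearest haystack descriptors per needle descriptor
bf = cv2.BFMatcher()
matches = bf.knnMatch(ndescriptors, hdescriptors, k=2)

# Lowe's ratio test keeps only distinctive matches
good = [m for m, n in matches if m.distance < 0.75 * n.distance]

for m in good:
    x, y = hkeypoints[m.trainIdx].pt
    print int(x), int(y)   # haystack location of a matched needle keypoint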