diff --git a/s2p/sift.py b/s2p/sift.py
index b1b166a4..e024c64e 100644
--- a/s2p/sift.py
+++ b/s2p/sift.py
@@ -23,9 +23,10 @@

 # TODO: This is kind of ugly. Cleaner way to do this is to update
 # LD_LIBRARY_PATH, which we should do once we have a proper config file
-sift4ctypes_library = os.path.join(os.path.dirname(
-    os.path.abspath(__file__)), '../lib/libsift4ctypes.so')
-ctypes.CDLL(sift4ctypes_library)
+here = os.path.dirname(os.path.abspath(__file__))
+sift4ctypes = os.path.join(os.path.dirname(here), 'lib', 'libsift4ctypes.so')
+lib = ctypes.CDLL(sift4ctypes)
+
 # Filter warnings from rasterio reading files wihtout georeferencing
 warnings.filterwarnings("ignore", category=rio.errors.NotGeoreferencedWarning)

@@ -51,9 +52,6 @@ def keypoints_from_nparray(arr, thresh_dog=0.0133, nb_octaves=8, nb_scales=3, of
     # retrieve numpy buffer dimensions
     h, w = arr.shape

-    # load shared library
-    lib = ctypes.CDLL(sift4ctypes_library)
-
     # Set expected args and return types
     lib.sift.argtypes = (ndpointer(dtype=ctypes.c_float, shape=(h, w)), ctypes.c_uint, ctypes.c_uint, ctypes.c_float, ctypes.c_uint, ctypes.c_uint,
                          ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
@@ -69,7 +67,7 @@ def keypoints_from_nparray(arr, thresh_dog=0.0133, nb_octaves=8, nb_scales=3, of

     # Transform result into a numpy array
     keypoints = np.asarray([keypoints_ptr[i]
-                            for i in range(0, nb_points.value*desc_size.value)])
+                            for i in range(nb_points.value*desc_size.value)])

     # Delete results to release memory
     lib.delete_buffer.argtypes = (ctypes.POINTER(ctypes.c_float)),
@@ -94,12 +92,12 @@ def image_keypoints(im, x, y, w, h, max_nb=None, thresh_dog=0.0133, nb_octaves=8
     http://www.ipol.im/pub/pre/82/

     Args:
-        im: path to the input image
+        im (str): path to the input image
         max_nb (optional): maximal number of keypoints. If more keypoints are
             detected, those at smallest scales are discarded

     Returns:
-        path to the file containing the list of descriptors
+        numpy array of shape (n, 132) containing, on each row: (y, x, s, o, 128-descriptor)
     """
     # Read file with rasterio
     with rio.open(im) as ds:
@@ -124,10 +122,7 @@ def image_keypoints(im, x, y, w, h, max_nb=None, thresh_dog=0.0133, nb_octaves=8
     if max_nb is not None:
         keypoints = keypoints[:max_nb]

-    keyfile = common.tmpfile('.txt')
-    np.savetxt(keyfile, keypoints, delimiter=' ', fmt='%.3f')
-
-    return keyfile
+    return keypoints


 def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None,
@@ -136,7 +131,10 @@ def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None,
     Find matches among two lists of sift keypoints.

     Args:
-        k1, k2: paths to text files containing the lists of sift descriptors
+        k1 (array): numpy array of shape (n, 132), where each row represents a
+            sift keypoint with (y, x, scale, orientation, 128-descriptor)
+        k2 (array): numpy array of shape (m, 132), where each row represents a
+            sift keypoint
         method (optional, default is 'relative'): flag ('relative' or
             'absolute') indicating wether to use absolute distance or relative
             distance
@@ -157,21 +155,7 @@ def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None,
         if any, a numpy 2D array containing the list of inliers matches.
""" # compute matches - # Open k1 and k2 files containing the two lists of SIFTs - k1_keys_descriptors = list() - with open(k1) as f: - for line in f: - ll = [float(val) for val in line.split(' ')[:-1]] - k1_keys_descriptors.append(ll) - k2_keys_descriptors = list() - with open(k2) as f: - for line in f: - ll = [float(val) for val in line.split(' ')[:-1]] - k2_keys_descriptors.append(ll) - k1_keys_descriptors = np.asarray(k1_keys_descriptors, dtype=np.float32) - k2_keys_descriptors = np.asarray(k2_keys_descriptors, dtype=np.float32) - - matches = keypoints_match_from_nparray(k1_keys_descriptors, k2_keys_descriptors, method, sift_thresh, + matches = keypoints_match_from_nparray(k1, k2, method, sift_thresh, epipolar_threshold, F) # Write this to file @@ -179,7 +163,6 @@ def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None, with open(mfile, 'w') as f: np.savetxt(f, matches, delimiter=' ') - matches = np.loadtxt(mfile) if matches.ndim == 2: # filter outliers with ransac if model == 'fundamental' and len(matches) >= 7: common.run("ransac fmn 1000 .3 7 %s < %s" % (mfile, mfile)) @@ -196,9 +179,8 @@ def keypoints_match(k1, k2, method='relative', sift_thresh=0.6, F=None, def keypoints_match_from_nparray(k1, k2, method, sift_threshold, epi_threshold, F): - # load shared library - lib = ctypes.CDLL(sift4ctypes_library) - + """ + """ # Set expected args and return types lib.matching.argtypes = (ndpointer(dtype=ctypes.c_float, shape=k1.shape), ndpointer(dtype=ctypes.c_float, shape=k2.shape), @@ -215,9 +197,7 @@ def keypoints_match_from_nparray(k1, k2, method, sift_threshold, epi_threshold, length_descr = descr - sift_offset # Transform information of method into boolean - use_relative_method = False - if method == 'relative': - use_relative_method = True + use_relative_method = (method == 'relative') # Format fundamental matrix use_fundamental_matrix = False @@ -230,24 +210,21 @@ def keypoints_match_from_nparray(k1, k2, method, sift_threshold, epi_threshold, nb_matches = ctypes.c_uint() # Call sift fonction from sift4ctypes.so - matches_ptr = lib.matching(k1, k2, length_descr, sift_offset, nb_sift_k1, len(k2), - sift_threshold, epi_threshold, - coeff_mat, use_fundamental_matrix, - use_relative_method, - ctypes.byref(nb_matches) - ) + matches_ptr = lib.matching(k1.astype('float32'), k2.astype('float32'), + length_descr, sift_offset, len(k1), len(k2), + sift_threshold, epi_threshold, coeff_mat, + use_fundamental_matrix, use_relative_method, + ctypes.byref(nb_matches)) # Transform result into a numpy array - matches = np.asarray([matches_ptr[i] for i in range(0, nb_matches.value * 4)]) + matches = np.asarray([matches_ptr[i] for i in range(nb_matches.value * 4)]) # Delete results to release memory - lib.delete_buffer.argtypes = (ctypes.POINTER(ctypes.c_float)), + lib.delete_buffer.argtypes = ctypes.POINTER(ctypes.c_float), lib.delete_buffer(matches_ptr) # Reshape keypoints array - matches = matches.reshape((nb_matches.value, 4)) - - return matches + return matches.reshape((nb_matches.value, 4)) def matches_on_rpc_roi(im1, im2, rpc1, rpc2, x, y, w, h): diff --git a/tests/test_s2p.py b/tests/test_s2p.py index 5075ea09..713c6138 100644 --- a/tests/test_s2p.py +++ b/tests/test_s2p.py @@ -40,9 +40,7 @@ def unit_gdal_version(): def unit_image_keypoints(): - kpts = s2p.sift.image_keypoints('tests/data/input_triplet/img_02.tif', 100, 100, 200, 200) - - test_kpts = np.loadtxt(kpts) + test_kpts = s2p.sift.image_keypoints('tests/data/input_triplet/img_02.tif', 100, 100, 200, 200) ref_kpts = 

     test_set = set(map(tuple,test_kpts[:,0:2]))
@@ -90,7 +88,9 @@ def unit_image_keypoints():


 def unit_matching():
-    test_matches = s2p.sift.keypoints_match('tests/data/units/sift1.txt','tests/data/units/sift2.txt')
+    k1 = np.loadtxt('tests/data/units/sift1.txt')
+    k2 = np.loadtxt('tests/data/units/sift2.txt')
+    test_matches = s2p.sift.keypoints_match(k1, k2)
     expected_matches = np.loadtxt('tests/data/expected_output/units/unit_keypoints_match.txt')

     # Check that numbers of matches are the same
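For reference, a minimal usage sketch of the API after this change, mirroring the updated tests above (the data paths are the ones used in tests/test_s2p.py; treat this as illustrative, not part of the patch):

```python
import numpy as np
import s2p

# image_keypoints() now returns an (n, 132) numpy array with rows
# (y, x, scale, orientation, 128-descriptor) instead of a temporary file path
kpts = s2p.sift.image_keypoints('tests/data/input_triplet/img_02.tif',
                                100, 100, 200, 200)

# keypoints_match() now takes the two keypoint arrays directly instead of
# paths to text files containing the descriptors
k1 = np.loadtxt('tests/data/units/sift1.txt')
k2 = np.loadtxt('tests/data/units/sift2.txt')
matches = s2p.sift.keypoints_match(k1, k2)  # 2D array, one match per row
```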