New experimental camera tracking pipeline #1379

Merged · 5 commits · May 3, 2021
5 changes: 4 additions & 1 deletion bin/meshroom_batch
100644 → 100755
@@ -21,7 +21,7 @@ parser.add_argument('-I', '--inputRecursive', metavar='FOLDERS/IMAGES', type=str,
help='Input folders containing all images recursively.')

parser.add_argument('-p', '--pipeline', metavar='photogrammetry/panoramaHdr/panoramaFisheyeHdr/MG_FILE', type=str, default='photogrammetry',
help='"photogrammetry" pipeline, "panoramaHdr" pipeline, "panoramaFisheyeHdr" pipeline or a Meshroom file containing a custom pipeline to run on input images. '
help='"photogrammetry", "panoramaHdr", "panoramaFisheyeHdr", "cameraTracking" pipeline or a Meshroom file containing a custom pipeline to run on input images. '
'Requirements: the graph must contain one CameraInit node, '
'and one Publish node if --output is set.')

@@ -119,6 +119,9 @@ with multiview.GraphModification(graph):
elif args.pipeline.lower() == "panoramafisheyehdr":
# default panorama Fisheye Hdr pipeline
multiview.panoramaFisheyeHdr(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
elif args.pipeline.lower() == "cameratracking":
# default camera tracking pipeline
multiview.cameraTracking(inputViewpoints=views, inputIntrinsics=intrinsics, output=args.output, graph=graph)
else:
# custom pipeline
graph.load(args.pipeline)
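With this change, the new graph can be selected by passing cameraTracking (case-insensitive) to --pipeline, e.g. meshroom_batch --inputRecursive /path/to/frames --pipeline cameraTracking --output /path/to/export. The paths are placeholders; the input and output flags come from the script's existing interface.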
63 changes: 61 additions & 2 deletions meshroom/multiview.py
@@ -182,9 +182,11 @@ def panoramaFisheyeHdr(inputImages=None, inputViewpoints=None, inputIntrinsics=N
panoramaHdr(inputImages, inputViewpoints, inputIntrinsics, output, graph)
for panoramaInit in graph.nodesOfType("PanoramaInit"):
panoramaInit.attribute("useFisheye").value = True
# when using fisheye images, the overlap between images can be small
# and thus requires many features to get enough correspondences for camera estimation
for featureExtraction in graph.nodesOfType("FeatureExtraction"):
# when using fisheye images, 'sift' performs better than 'dspsift'
featureExtraction.attribute("describerTypes").value = ['sift']
# when using fisheye images, the overlap between images can be small
# and thus requires many features to get enough correspondences for camera estimation
featureExtraction.attribute("describerPreset").value = 'high'
return graph

@@ -468,3 +470,60 @@ def sfmAugmentation(graph, sourceSfm, withMVS=False):
mvsNodes = mvsPipeline(graph, structureFromMotion)

return sfmNodes, mvsNodes


def cameraTrackingPipeline(graph):
"""
Instantiate a camera tracking pipeline inside 'graph'.

Args:
graph (Graph/UIGraph): the graph in which nodes should be instantiated

Returns:
list of Node: the created nodes
"""

with GraphModification(graph):

cameraInit, featureExtraction, imageMatching, featureMatching, structureFromMotion = sfmPipeline(graph)

imageMatching.attribute("nbMatches").value = 5 # voctree nb matches
imageMatching.attribute("nbNeighbors").value = 10

structureFromMotion.attribute("minNumberOfMatches").value = 0
structureFromMotion.attribute("minInputTrackLength").value = 5
structureFromMotion.attribute("minNumberOfObservationsForTriangulation").value = 3
structureFromMotion.attribute("minAngleForTriangulation").value = 1.0
structureFromMotion.attribute("minAngleForLandmark").value = 0.5

exportAnimatedCamera = graph.addNewNode('ExportAnimatedCamera', input=structureFromMotion.output)

# store current pipeline version in graph header
graph.header.update({'pipelineVersion': __version__})

return [
cameraInit,
featureExtraction,
imageMatching,
featureMatching,
structureFromMotion,
exportAnimatedCamera,
]


def cameraTracking(inputImages=list(), inputViewpoints=list(), inputIntrinsics=list(), output='', graph=None):
if not graph:
graph = Graph('Camera Tracking')
with GraphModification(graph):
trackingNodes = cameraTrackingPipeline(graph)
cameraInit = trackingNodes[0]
cameraInit.viewpoints.extend([{'path': image} for image in inputImages])
cameraInit.viewpoints.extend(inputViewpoints)
cameraInit.intrinsics.extend(inputIntrinsics)

if output:
exportNode = trackingNodes[-1]
graph.addNewNode('Publish', output=output, inputFiles=[exportNode.output])

return graph
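
As a usage sketch (not part of the diff; the image paths are placeholders, Graph.save() is assumed from Meshroom's graph API, and node plugins must already be loaded as meshroom_batch does at startup), the new helper can be driven programmatically like the other multiview pipelines:

from meshroom import multiview

# build a camera tracking graph for a short image sequence
graph = multiview.cameraTracking(
    inputImages=['/data/seq/frame_0001.jpg', '/data/seq/frame_0002.jpg'],
    output='/data/seq/export')

# persist the graph so it can be opened in the UI or computed later
graph.save('/data/seq/cameraTracking.mg')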

2 changes: 1 addition & 1 deletion meshroom/nodes/aliceVision/CameraLocalization.py
@@ -42,7 +42,7 @@ class CameraLocalization(desc.CommandLineNode):
name='matchDescTypes',
label='Match Desc Types',
description='''Describer types to use for the matching.''',
value=['sift'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
exclusive=False,
uid=[0],
2 changes: 1 addition & 1 deletion meshroom/nodes/aliceVision/CameraRigCalibration.py
@@ -49,7 +49,7 @@ class CameraRigCalibration(desc.CommandLineNode):
name='matchDescTypes',
label='Match Describer Types',
description='''The describer types to use for the matching''',
value=['sift'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
exclusive=False,
uid=[0],
4 changes: 2 additions & 2 deletions meshroom/nodes/aliceVision/CameraRigLocalization.py
@@ -49,8 +49,8 @@ class CameraRigLocalization(desc.CommandLineNode):
name='matchDescTypes',
label='Match Describer Types',
description='''The describer types to use for the matching''',
value=['sift'],
values=['sift', 'sift_float', 'sift_upright', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
exclusive=False,
uid=[0],
joinChar=',',
2 changes: 1 addition & 1 deletion meshroom/nodes/aliceVision/ConvertSfMFormat.py
@@ -35,7 +35,7 @@ class ConvertSfMFormat(desc.CommandLineNode):
name='describerTypes',
label='Describer Types',
description='Describer types to keep.',
value=['sift'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv', 'unknown'],
exclusive=False,
uid=[0],
2 changes: 1 addition & 1 deletion meshroom/nodes/aliceVision/ExportMatches.py
@@ -21,7 +21,7 @@ class ExportMatches(desc.CommandLineNode):
name='describerTypes',
label='Describer Types',
description='Describer types used to describe an image.',
value=['sift'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
exclusive=False,
uid=[0],
2 changes: 1 addition & 1 deletion meshroom/nodes/aliceVision/FeatureExtraction.py
@@ -42,7 +42,7 @@ class FeatureExtraction(desc.CommandLineNode):
name='describerTypes',
label='Describer Types',
description='Describer types used to describe an image.',
value=['sift'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
exclusive=False,
uid=[0],
2 changes: 1 addition & 1 deletion meshroom/nodes/aliceVision/FeatureMatching.py
@@ -63,7 +63,7 @@ class FeatureMatching(desc.CommandLineNode):
name='describerTypes',
label='Describer Types',
description='Describer types used to describe an image.',
value=['sift'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
exclusive=False,
uid=[0],
2 changes: 1 addition & 1 deletion meshroom/nodes/aliceVision/GlobalSfM.py
@@ -52,7 +52,7 @@ class GlobalSfM(desc.CommandLineNode):
name='describerTypes',
label='Describer Types',
description='Describer types used to describe an image.',
value=['sift'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4',
'sift_ocv', 'akaze_ocv'],
exclusive=False,
6 changes: 3 additions & 3 deletions meshroom/nodes/aliceVision/ImageMatching.py
@@ -65,7 +65,7 @@ class ImageMatching(desc.CommandLineNode):
' * Exhaustive: Export all image pairs.\n'
' * Frustum: If images have known poses, computes the intersection between cameras frustums to create the list of image pairs.\n'
' * FrustumOrVocabularyTree: If images have known poses, use frustum intersection else use VocabularyTree.\n',
value='VocabularyTree',
value='SequentialAndVocabularyTree',
values=['VocabularyTree', 'Sequential', 'SequentialAndVocabularyTree', 'Exhaustive', 'Frustum', 'FrustumOrVocabularyTree'],
exclusive=True,
uid=[0],
@@ -111,7 +111,7 @@ class ImageMatching(desc.CommandLineNode):
name='nbMatches',
label='Voc Tree: Nb Matches',
description='The number of matches to retrieve for each image (If 0 it will retrieve all the matches).',
value=50,
value=40,
range=(0, 1000, 1),
uid=[0],
advanced=True,
@@ -121,7 +121,7 @@
name='nbNeighbors',
label='Sequential: Nb Neighbors',
description='The number of neighbors to retrieve for each image (If 0 it will retrieve all the neighbors).',
value=50,
value=5,
range=(0, 1000, 1),
uid=[0],
advanced=True,
8 changes: 4 additions & 4 deletions meshroom/nodes/aliceVision/KeyframeSelection.py
@@ -109,14 +109,14 @@ class KeyframeSelection(desc.CommandLineNode):
name='useSparseDistanceSelection',
label='Use Sparse Distance Selection',
description='Use sparseDistance selection in order to avoid similar keyframes.',
value=True,
value=False,
uid=[0],
),
desc.BoolParam(
name='useSharpnessSelection',
label='Use Sharpness Selection',
description='Use frame sharpness score for keyframe selection.',
value=True,
value=False,
uid=[0],
),
desc.FloatParam(
@@ -148,15 +148,15 @@
name='minFrameStep',
label='Min Frame Step',
description='''minimum number of frames between two keyframes''',
value=12,
value=1,
range=(1, 100, 1),
uid=[0],
),
desc.IntParam(
name='maxFrameStep',
label='Max Frame Step',
description='''maximum number of frames after which a keyframe can be taken''',
value=36,
value=2,
range=(2, 1000, 1),
uid=[0],
),
4 changes: 2 additions & 2 deletions meshroom/nodes/aliceVision/StructureFromMotion.py
@@ -97,7 +97,7 @@ class StructureFromMotion(desc.CommandLineNode):
name='describerTypes',
label='Describer Types',
description='Describer types used to describe an image.',
value=['sift'],
value=['dspsift'],
values=['sift', 'sift_float', 'sift_upright', 'dspsift', 'akaze', 'akaze_liop', 'akaze_mldb', 'cctag3', 'cctag4', 'sift_ocv', 'akaze_ocv'],
exclusive=False,
uid=[0],
@@ -119,7 +119,7 @@
description='Observation constraint mode used in the optimization:\n'
' * Basic: Use standard reprojection error in pixel coordinates\n'
' * Scale: Use reprojection error in pixel coordinates but relative to the feature scale',
value='Basic',
value='Scale',
values=['Basic', 'Scale'],
exclusive=True,
uid=[0],
4 changes: 4 additions & 0 deletions meshroom/ui/qml/main.qml
@@ -420,6 +420,10 @@ ApplicationWindow {
text: "Panorama Fisheye HDR"
onTriggered: ensureSaved(function() { _reconstruction.new("panoramafisheyehdr") })
}
Action {
text: "Camera Tracking (experimental)"
onTriggered: ensureSaved(function() { _reconstruction.new("cameratracking") })
}
}
Action {
id: openActionItem
3 changes: 3 additions & 0 deletions meshroom/ui/reconstruction.py
@@ -490,6 +490,9 @@ def new(self, pipeline=None):
elif p.lower() == "panoramafisheyehdr":
# default panorama fisheye hdr pipeline
self.setGraph(multiview.panoramaFisheyeHdr())
elif p.lower() == "cameratracking":
# default camera tracking pipeline
self.setGraph(multiview.cameraTracking())
else:
# use the user-provided default photogrammetry project file
self.load(p, setupProjectFile=False)