diff --git a/contrib/opentimelineio_contrib/adapters/advanced_authoring_format.py b/contrib/opentimelineio_contrib/adapters/advanced_authoring_format.py
index 37ad5e4be..343ca398b 100644
--- a/contrib/opentimelineio_contrib/adapters/advanced_authoring_format.py
+++ b/contrib/opentimelineio_contrib/adapters/advanced_authoring_format.py
@@ -754,23 +754,51 @@ def _transcribe(item, parents, edit_rate, indent=0):
     elif isinstance(item, aaf2.components.Selector):
         msg = "Transcribe selector for {}".format(_encoded_name(item))
         _transcribe_log(msg, indent)
 
-        # If you mute a clip in media composer, it becomes one of these in the
-        # AAF.
-        result = _transcribe(item.getvalue("Selected"),
-                             parents + [item], edit_rate, indent + 2)
-
-        alternates = [
-            _transcribe(alt, parents + [item], edit_rate, indent + 2)
-            for alt in item.getvalue("Alternates")
-        ]
-
-        # muted case -- if there is only one item its muted, otherwise its
-        # a multi cam thing
-        if alternates and len(alternates) == 1:
-            metadata['muted_clip'] = True
-            result.name = str(alternates[0].name) + "_MUTED"
+        selected = item.getvalue('Selected')
+        alternates = item.getvalue('Alternates', None)
+
+        # First we check to see if the Selected component is either a Filler
+        # or ScopeReference object, meaning we have to use the alternate instead
+        if isinstance(selected, aaf2.components.Filler) or \
+                isinstance(selected, aaf2.components.ScopeReference):
+
+            # Safety check of the alternates list, then transcribe first object -
+            # there should only ever be one alternate in this situation
+            if alternates is None or len(alternates) != 1:
+                err = "AAF Selector parsing error: object has unexpected number of " \
+                      "alternates - {}".format(len(alternates or []))
+                raise AAFAdapterError(err)
+            result = _transcribe(alternates[0], parents + [item], edit_rate, indent + 2)
+
+            # Filler/ScopeReference means the clip is muted/not enabled
+            result.enabled = False
+
+            # Muted tracks are handled in a slightly odd way so we need to do a
+            # check here and pass the param back up to the track object
+            # if isinstance(parents[-1], aaf2.mobslots.TimelineMobSlot):
+            #     pass  # TODO: Figure out mechanism for passing this up to parent
+
+        else:
+
+            # This is most likely a multi-cam clip
+            result = _transcribe(selected, parents + [item], edit_rate, indent + 2)
+
+            # Perform a check here to make sure no potential Gap objects
+            # are slipping through the cracks
+            if isinstance(result, otio.schema.Gap):
+                err = "AAF Selector parsing error: {}".format(type(item))
+                raise AAFAdapterError(err)
 
-        metadata['alternates'] = alternates
+        # A Selector can have a set of alternates to handle multiple options for an
+        # editorial decision - we do a full parse on those objects too
+        if alternates is not None:
+            alternates = [
+                _transcribe(alt, parents + [item], edit_rate, indent + 2)
+                for alt in alternates
+            ]
+
+            metadata['alternates'] = alternates
 
     # @TODO: There are a bunch of other AAF object types that we will
     # likely need to add support for. I'm leaving this code here to help
@@ -1378,6 +1406,9 @@ def _simplify(thing):
                     # Note: we don't merge effects, because we already made
                     # sure the child had no effects in the if statement above.
 
+                    # Preserve the enabled/disabled state as we merge these two.
+                    thing.enabled = thing.enabled and child.enabled
+
                     c = c + num
                 c = c - 1
 
@@ -1385,8 +1416,16 @@ def _simplify(thing):
     if _is_redundant_container(thing):
         # TODO: We may be discarding metadata here, should we merge it?
         result = thing[0].deepcopy()
+
+        # As we are reducing the complexity of the object structure through
+        # this process, we need to make sure that any/all enabled statuses
+        # are being respected and applied in an appropriate way
+        if not thing.enabled:
+            result.enabled = False
+
         # TODO: Do we need to offset the markers in time?
         result.markers.extend(thing.markers)
+
         # TODO: The order of the effects is probably important...
         # should they be added to the end or the front?
         # Intuitively it seems like the child's effects should come before
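Note on the Selector change above: the adapter no longer renames muted clips or writes a muted_clip metadata flag; it sets Item.enabled = False instead. Below is a minimal consumer-side sketch of that behaviour. The file name "muted_clip.aaf" is hypothetical; read_from_file, Timeline.each_clip(), and Item.enabled are existing OTIO API.

    import opentimelineio as otio

    # "muted_clip.aaf" is a hypothetical AAF containing a muted clip, i.e. a
    # Selector whose Selected component is a Filler/ScopeReference.
    timeline = otio.adapters.read_from_file("muted_clip.aaf")

    for clip in timeline.each_clip():
        # With this patch the clip keeps its original name and is simply
        # flagged as disabled, instead of being renamed "<name>_MUTED" and
        # tagged with metadata['AAF']['muted_clip'].
        if not clip.enabled:
            print("muted:", clip.name)
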
diff --git a/contrib/opentimelineio_contrib/adapters/tests/test_aaf_adapter.py b/contrib/opentimelineio_contrib/adapters/tests/test_aaf_adapter.py
index f3ef70c58..8c202f91b 100644
--- a/contrib/opentimelineio_contrib/adapters/tests/test_aaf_adapter.py
+++ b/contrib/opentimelineio_contrib/adapters/tests/test_aaf_adapter.py
@@ -873,13 +873,15 @@ def test_read_misc_speed_effects(self):
         # do then this effect is a "Speed Bump" from 166% to 44% to 166%
 
     def test_muted_clip(self):
-        sc = otio.adapters.read_from_file(MUTED_CLIP_PATH, simplify=False)
-        gp = sc[0].tracks[8][0][0]
-
-        self.assertIsNotNone(gp)
-        self.assertTrue(gp.metadata['AAF']['muted_clip'])
-        self.assertIsInstance(gp, otio.schema.Gap)
-        self.assertEqual(gp.name, 'Frame Debugger 0h.mov_MUTED')
+        timeline = otio.adapters.read_from_file(MUTED_CLIP_PATH)
+        self.assertIsInstance(timeline, otio.schema.Timeline)
+        self.assertEqual(len(timeline.tracks), 1)
+        track = timeline.tracks[0]
+        self.assertEqual(len(track), 1)
+        clip = track[0]
+        self.assertIsInstance(clip, otio.schema.Clip)
+        self.assertEqual(clip.name, 'Frame Debugger 0h.mov')
+        self.assertEqual(clip.enabled, False)
 
     def test_essence_group(self):
         timeline = otio.adapters.read_from_file(ESSENCE_GROUP_PATH)
diff --git a/tests/test_item.py b/tests/test_item.py
index 86b3da1bb..57d1afeff 100755
--- a/tests/test_item.py
+++ b/tests/test_item.py
@@ -298,9 +298,9 @@ def test_enabled(self):
             duration=otio.opentime.RationalTime(10, 1)
         )
         it = otio.core.Item(source_range=tr)
-        self.assertEquals(it.enabled, True)
+        self.assertEqual(it.enabled, True)
         it.enabled = False
-        self.assertEquals(it.enabled, False)
+        self.assertEqual(it.enabled, False)
         encoded = otio.adapters.otio_json.write_to_string(it)
         decoded = otio.adapters.otio_json.read_from_string(encoded)
         self.assertIsOTIOEquivalentTo(it, decoded)
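For completeness, a standalone sketch of the enabled round-trip that the updated test_enabled above asserts, using only API already shown in this diff: the property defaults to True, and a disabled state survives JSON serialization.

    import opentimelineio as otio

    tr = otio.opentime.TimeRange(
        duration=otio.opentime.RationalTime(10, 1)
    )
    it = otio.core.Item(source_range=tr)
    assert it.enabled  # items are enabled by default

    # Disabling an item survives serialization and deserialization.
    it.enabled = False
    encoded = otio.adapters.otio_json.write_to_string(it)
    decoded = otio.adapters.otio_json.read_from_string(encoded)
    assert not decoded.enabled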