AAF Clip Enabled/Muting Update #1277

Merged
@@ -754,23 +754,51 @@ def _transcribe(item, parents, edit_rate, indent=0):
elif isinstance(item, aaf2.components.Selector):
msg = "Transcribe selector for {}".format(_encoded_name(item))
_transcribe_log(msg, indent)
# If you mute a clip in Media Composer, it becomes one of these in the
# AAF.
result = _transcribe(item.getvalue("Selected"),
parents + [item], edit_rate, indent + 2)

alternates = [
_transcribe(alt, parents + [item], edit_rate, indent + 2)
for alt in item.getvalue("Alternates")
]

# muted case -- if there is only one item it's muted, otherwise it's
# a multi-cam thing
if alternates and len(alternates) == 1:
metadata['muted_clip'] = True
result.name = str(alternates[0].name) + "_MUTED"
selected = item.getvalue('Selected')
alternates = item.getvalue('Alternates', None)

# First we check to see if the Selected component is either a Filler
# or ScopeReference object, meaning we have to use the alternate instead
if isinstance(selected, aaf2.components.Filler) or \
isinstance(selected, aaf2.components.ScopeReference):

# Safety check of the alternates list, then transcribe first object -
# there should only ever be one alternate in this situation
if alternates is None or len(alternates) != 1:
err = "AAF Selector parsing error: object has unexpected number of " \
"alternates - {}".format(len(alternates))
raise AAFAdapterError(err)
result = _transcribe(alternates[0], parents + [item], edit_rate, indent + 2)

# Filler/ScopeReference means the clip is muted/not enabled
result.enabled = False

# Muted tracks are handled in a slightly odd way so we need to do a
# check here and pass the param back up to the track object
# if isinstance(parents[-1], aaf2.mobslots.TimelineMobSlot):
# pass # TODO: Figure out mechanism for passing this up to parent

else:

# This is most likely a multi-cam clip
result = _transcribe(selected, parents + [item], edit_rate, indent + 2)

# Perform a check here to make sure no potential Gap objects
# are slipping through the cracks
if isinstance(result, otio.schema.Gap):
err = "AAF Selector parsing error: {}".format(type(item))
raise AAFAdapterError(err)

metadata['alternates'] = alternates
# A Selector can have a set of alternates to handle multiple options for an
# editorial decision - we do a full parse on those objects too
if alternates is not None:
alternates = [
_transcribe(alt, parents + [item], edit_rate, indent + 2)
for alt in alternates
]

metadata['alternates'] = alternates

# @TODO: There are a bunch of other AAF object types that we will
# likely need to add support for. I'm leaving this code here to help
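For context, here is a minimal sketch (not part of the diff) of how the new behavior surfaces to a consumer; "example.aaf" is a hypothetical input file. A clip muted in Media Composer now reads back as a regular Clip with enabled set to False, instead of a Gap renamed with a _MUTED suffix:

import opentimelineio as otio

# "example.aaf" is a hypothetical AAF containing a muted clip.
timeline = otio.adapters.read_from_file("example.aaf")

# Muted clips now arrive as ordinary clips with enabled=False.
for clip in timeline.each_clip():
    print(clip.name, "muted" if not clip.enabled else "active")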
@@ -1378,15 +1406,26 @@ def _simplify(thing):
# Note: we don't merge effects, because we already made
# sure the child had no effects in the if statement above.

# Preserve the enabled/disabled state as we merge these two.
thing.enabled = thing.enabled and child.enabled

c = c + num
c = c - 1

# skip redundant containers
if _is_redundant_container(thing):
# TODO: We may be discarding metadata here, should we merge it?
result = thing[0].deepcopy()

# As we are reducing the complexity of the object structure here, we
# need to make sure any enabled/disabled state on the discarded
# container is still respected and applied to the result
if not thing.enabled:
result.enabled = False

# TODO: Do we need to offset the markers in time?
result.markers.extend(thing.markers)

# TODO: The order of the effects is probably important...
# should they be added to the end or the front?
# Intuitively it seems like the child's effects should come before
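A minimal sketch of the merge semantics above, using only the public otio API: collapsing a redundant single-child container ANDs the two enabled states, so a disabled wrapper still disables the flattened result.

import opentimelineio as otio

# A redundant container: a Stack whose only child is a single Clip.
stack = otio.schema.Stack()
stack.append(otio.schema.Clip(name="only_child"))
stack.enabled = False  # the wrapper is disabled, the child is not

# Mirror of the merge above: AND the states while collapsing, so a
# disabled state on either level survives the simplification.
result = stack[0].deepcopy()
result.enabled = result.enabled and stack.enabled
assert result.enabled is False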
@@ -873,13 +873,15 @@ def test_read_misc_speed_effects(self):
# do then this effect is a "Speed Bump" from 166% to 44% to 166%

def test_muted_clip(self):
sc = otio.adapters.read_from_file(MUTED_CLIP_PATH, simplify=False)
gp = sc[0].tracks[8][0][0]

self.assertIsNotNone(gp)
self.assertTrue(gp.metadata['AAF']['muted_clip'])
self.assertIsInstance(gp, otio.schema.Gap)
self.assertEqual(gp.name, 'Frame Debugger 0h.mov_MUTED')
timeline = otio.adapters.read_from_file(MUTED_CLIP_PATH)
self.assertIsInstance(timeline, otio.schema.Timeline)
self.assertEqual(len(timeline.tracks), 1)
track = timeline.tracks[0]
self.assertEqual(len(track), 1)
clip = track[0]
self.assertIsInstance(clip, otio.schema.Clip)
self.assertEqual(clip.name, 'Frame Debugger 0h.mov')
self.assertEqual(clip.enabled, False)

def test_essence_group(self):
timeline = otio.adapters.read_from_file(ESSENCE_GROUP_PATH)
4 changes: 2 additions & 2 deletions tests/test_item.py
@@ -298,9 +298,9 @@ def test_enabled(self):
duration=otio.opentime.RationalTime(10, 1)
)
it = otio.core.Item(source_range=tr)
self.assertEquals(it.enabled, True)
self.assertEqual(it.enabled, True)
it.enabled = False
self.assertEquals(it.enabled, False)
self.assertEqual(it.enabled, False)
encoded = otio.adapters.otio_json.write_to_string(it)
decoded = otio.adapters.otio_json.read_from_string(encoded)
self.assertIsOTIOEquivalentTo(it, decoded)