cmx3600 reverse and dissolve fixes #687

Merged
123 changes: 107 additions & 16 deletions src/py-opentimelineio/opentimelineio/adapters/cmx_3600.py
@@ -52,7 +52,7 @@ class EDLParseError(exceptions.OTIOError):

# regex for parsing the playback speed of an M2 event
SPEED_EFFECT_RE = re.compile(
r"(?P<name>.*?)\s*(?P<speed>[0-9\.]*)\s*(?P<tc>[0-9:]{11})$"
r"(?P<name>.*?)\s*(?P<speed>-?[0-9\.]*)\s*(?P<tc>[0-9:]{11})$"
)
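
The added "-?" in the speed group is what lets reverse-motion M2 events parse: without it the minus sign is absorbed into the name group and the speed loses its sign. A quick sanity check (the input string below is a made-up illustration, not the exact slice of an M2 line the adapter feeds this pattern):

import re

SPEED_EFFECT_RE = re.compile(
    r"(?P<name>.*?)\s*(?P<speed>-?[0-9\.]*)\s*(?P<tc>[0-9:]{11})$"
)

match = SPEED_EFFECT_RE.match("CLIP_A -024.0 01:00:00:00")
print(match.groupdict())
# {'name': 'CLIP_A', 'speed': '-024.0', 'tc': '01:00:00:00'}
# With the old pattern the match still succeeded, but the '-' ended up in
# 'name' and 'speed' came back as '024.0'.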


@@ -450,6 +450,13 @@ def make_clip(self, comment_data):
}
}

# In transitions, some of the source clip metadata might fall in the
# transition clip event
if 'dest_clip_name' in comment_data:
previous_meta = clip.metadata.setdefault('previous_metadata', {})
previous_meta['source_clip_name'] = clip.name
clip.name = comment_data['dest_clip_name']

if 'locators' in comment_data:
# An example EDL locator line looks like this:
# * LOC: 01:00:01:14 RED ANIM FIX NEEDED
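
The dest_clip_name handling above renames the placeholder clip created for the transition event to the TO CLIP NAME, and stashes the name it previously carried (the FROM CLIP NAME) so that _expand_transitions can hand it back to the preceding clip. A rough standalone sketch of that bookkeeping, using a hypothetical comment_data dict rather than the adapter's real parse output:

import opentimelineio as otio

clip = otio.schema.Clip(name="Clip B.mov")
comment_data = {"clip_name": "Clip B.mov", "dest_clip_name": "Clip C.mov"}

if "dest_clip_name" in comment_data:
    # Stash the outgoing name and adopt the TO CLIP NAME, mirroring the diff.
    previous_meta = clip.metadata.setdefault("previous_metadata", {})
    previous_meta["source_clip_name"] = clip.name
    clip.name = comment_data["dest_clip_name"]

print(clip.name)  # Clip C.mov
print(clip.metadata["previous_metadata"]["source_clip_name"])  # Clip B.mov
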
@@ -578,6 +585,7 @@ class CommentHandler(object):
# needs to be ordered so that FROM CLIP NAME gets matched before FROM CLIP
comment_id_map = collections.OrderedDict([
('FROM CLIP NAME', 'clip_name'),
('TO CLIP NAME', 'dest_clip_name'),
('FROM CLIP', 'media_reference'),
('FROM FILE', 'media_reference'),
('LOC', 'locators'),
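
The new TO CLIP NAME entry rides on the same ordered lookup. The ordering comment above matters because comment IDs are tried in sequence, so the longer "FROM CLIP NAME" has to be tested before the shorter "FROM CLIP" ever gets a chance to match. A simplified illustration of that idea (this is not the adapter's actual matching code):

import collections

comment_id_map = collections.OrderedDict([
    ("FROM CLIP NAME", "clip_name"),
    ("TO CLIP NAME", "dest_clip_name"),
    ("FROM CLIP", "media_reference"),
])

def classify(comment_text):
    # The first prefix that matches wins, so longer keys must come first.
    for prefix, key in comment_id_map.items():
        if comment_text.startswith(prefix):
            return key
    return None

print(classify("FROM CLIP NAME:  Clip B.mov"))      # clip_name
print(classify("TO CLIP NAME:  Clip C.mov"))        # dest_clip_name
print(classify("FROM CLIP: /path/to/clip.mov"))     # media_reference
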
@@ -622,7 +630,7 @@ def _expand_transitions(timeline):

tracks = timeline.tracks
remove_list = []
replace_list = []
replace_or_insert_list = []
append_list = []
for track in tracks:
track_iter = iter(track)
@@ -665,14 +673,29 @@
transition_duration.rate
)

# Because transitions can have two event entries followed by
# comments, some of the previous clip's metadata might land in the
# transition clip
if prev:
if 'previous_metadata' in clip.metadata:
prev_metadata = clip.metadata['previous_metadata']
if 'source_clip_name' in prev_metadata:
# Give the transition the event name and the
# previous clip the appropriate name
prev.name = prev_metadata['source_clip_name']

# expand the previous
expansion_clip = None
if prev and not prev_prev:
expansion_clip = prev
elif prev_prev:
expansion_clip = prev_prev
if prev:
remove_list.append((track, prev))
# If the previous clip is continuous with this one, combine them
if _transition_clips_continuous(prev_prev, prev):
expansion_clip = prev_prev
if prev:
remove_list.append((track, prev))
else:
expansion_clip = prev

_extend_source_range_duration(expansion_clip, mid_tran_cut_pre_duration)

@@ -686,16 +709,25 @@
new_trx.in_offset = mid_tran_cut_pre_duration
new_trx.out_offset = mid_tran_cut_post_duration

# in from to
replace_list.append((track, clip, new_trx))

# expand the next_clip
# expand the next_clip or contract this clip
keep_transition_clip = False
if next_clip:
sr = next_clip.source_range
next_clip.source_range = opentime.TimeRange(
sr.start_time - mid_tran_cut_post_duration,
sr.duration + mid_tran_cut_post_duration
)
if _transition_clips_continuous(clip, next_clip):
sr = next_clip.source_range
next_clip.source_range = opentime.TimeRange(
sr.start_time - mid_tran_cut_post_duration,
sr.duration + mid_tran_cut_post_duration,
)
else:
# The clip was only expressed in the transition event; keep it, but
# trim the previous clip's transition time from it just below.
keep_transition_clip = True

sr = clip.source_range
clip.source_range = opentime.TimeRange(
sr.start_time + mid_tran_cut_pre_duration,
sr.duration - mid_tran_cut_pre_duration,
)
else:
fill = schema.Gap(
source_range=opentime.TimeRange(
@@ -708,12 +740,27 @@
)
append_list.append((track, fill))

# in from to
replace_or_insert_list.append((keep_transition_clip, track, clip, new_trx))

# Scrub some temporary metadata stashed on clips about their
# neighbors
if 'previous_metadata' in clip.metadata:
del(clip.metadata['previous_metadata'])

if 'previous_metadata' in new_trx.metadata:
del(new_trx.metadata['previous_metadata'])

prev = clip
clip = next_clip
next_clip = next(track_iter, None)

for (track, from_clip, to_transition) in replace_list:
track[track.index(from_clip)] = to_transition
for (insert, track, from_clip, to_transition) in replace_or_insert_list:
clip_index = track.index(from_clip)
if insert:
track.insert(clip_index, to_transition)
else:
track[clip_index] = to_transition

for (track, clip_to_remove) in list(set(remove_list)):
# if clip_to_remove in track:
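
The replace-or-insert split above is the behavioural core of the change: when keep_transition_clip was set (the outgoing media only existed as the transition event), the new Transition object is inserted in front of that clip so the clip survives; otherwise the placeholder clip is simply replaced by the Transition, as before. In plain list terms (ordinary Python lists standing in for an otio Track):

track = ["Clip B", "event clip", "Clip D"]

# insert == False: the event clip only carried the transition, so replace it.
replaced = list(track)
replaced[1] = "Transition"        # ['Clip B', 'Transition', 'Clip D']

# insert == True: the clip was only expressed in the transition event,
# so the Transition goes in front of it and the clip is kept.
inserted = list(track)
inserted.insert(1, "Transition")  # ['Clip B', 'Transition', 'event clip', 'Clip D']
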
@@ -725,6 +772,50 @@
return timeline


def _transition_clips_continuous(clip_a, clip_b):
"""Tests if two clips are continuous. They are continuous if the following
conditions are met:
1. clip_a's source range ends on the last frame before clip_b's
2a. If clip_a's name matches clip_b's
- or -
2b. clip_a name matches metadata source_clip_name in clip_b
- or -
2c. Reel name matches
- or -
2d. Both clips are gaps


This is specific to how this adapter parses EDLs and is meant to be run only
within _expand_transitions.
"""
clip_a_end = clip_a.source_range.end_time_exclusive()
if not clip_a_end == clip_b.source_range.start_time:
return False

if all(isinstance(clip, schema.Gap) for clip in (clip_a, clip_b)):
return True

# The time ranges are continuous, match the names
if (clip_a.name == clip_b.name):
return True

def reelname(clip):
return clip.metadata['cmx_3600']['reel']

try:
if reelname(clip_a) == reelname(clip_b):
return True
except KeyError:
pass

try:
return clip_a.name == clip_b.metadata['previous_metadata']['source_clip_name']
except KeyError:
pass

return False
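
A quick way to see the continuity check in action is to hand it two clips that are adjacent in source time and share a reel name, which satisfies conditions 1 and 2c. This calls the module-private helper directly, so treat it as an illustrative sketch rather than supported API:

import opentimelineio as otio
from opentimelineio import opentime
from opentimelineio.adapters import cmx_3600

def t(frames, rate=24):
    return opentime.RationalTime(frames, rate)

clip_a = otio.schema.Clip(
    name="Clip B.mov",
    source_range=opentime.TimeRange(start_time=t(100), duration=t(15)),
    metadata={"cmx_3600": {"reel": "AX"}},
)
# clip_b picks up on the frame right after clip_a ends and shares the reel.
clip_b = otio.schema.Clip(
    name="Clip C.mov",
    source_range=opentime.TimeRange(start_time=t(115), duration=t(15)),
    metadata={"cmx_3600": {"reel": "AX"}},
)

print(cmx_3600._transition_clips_continuous(clip_a, clip_b))  # True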


def read_from_string(input_str, rate=24, ignore_timecode_mismatch=False):
"""Reads a CMX Edit Decision List (EDL) from a string.
Since EDLs don't contain metadata specifying the rate they are meant
15 changes: 15 additions & 0 deletions tests/sample_data/dissolve_test_3.edl
@@ -0,0 +1,15 @@
TITLE: dissolve test 3
FCM: NON-DROP FRAME

001 AX V C 01:00:03:23 01:00:06:12 01:00:00:00 01:00:02:13
* FROM CLIP NAME: Clip A.mov

002 AX V C 01:00:06:00 01:00:06:00 01:00:02:13 01:00:02:13
FCM: NON-DROP FRAME
002 AX V D 030 01:00:33:22 01:00:35:04 01:00:02:13 01:00:03:19
EFFECTS NAME IS CROSS DISSOLVE
* FROM CLIP NAME: Clip B.mov
* TO CLIP NAME: Clip C.mov

003 AX V C 01:00:00:00 01:00:01:22 01:00:03:19 01:00:05:17
* FROM CLIP NAME: Clip D.mov
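
This sample exercises the new code path: event 002 appears twice, first as a zero-length cut and then as the D 030 dissolve, and the FROM/TO CLIP NAME comments name both sides of the transition. The new test below checks the expanded track in detail; for a quick manual look, the adapter output can also be dumped directly (path assumed relative to the repository root):

import opentimelineio as otio

tl = otio.adapters.read_from_file("tests/sample_data/dissolve_test_3.edl")
for item in tl.tracks[0]:
    # Expect: Clip A.mov, Clip B.mov, a Transition, Clip C.mov, Clip D.mov
    print(type(item).__name__, item.name)
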
31 changes: 31 additions & 0 deletions tests/test_cmx_3600_adapter.py
@@ -41,6 +41,7 @@
NO_SPACES_PATH = os.path.join(SAMPLE_DATA_DIR, "no_spaces_test.edl")
DISSOLVE_TEST = os.path.join(SAMPLE_DATA_DIR, "dissolve_test.edl")
DISSOLVE_TEST_2 = os.path.join(SAMPLE_DATA_DIR, "dissolve_test_2.edl")
DISSOLVE_TEST_3 = os.path.join(SAMPLE_DATA_DIR, "dissolve_test_3.edl")
GAP_TEST = os.path.join(SAMPLE_DATA_DIR, "gap_test.edl")
TIMECODE_MISMATCH_TEST = os.path.join(SAMPLE_DATA_DIR, "timecode_mismatch.edl")
SPEED_EFFECTS_TEST = os.path.join(SAMPLE_DATA_DIR, "speed_effects.edl")
@@ -400,6 +401,36 @@ def test_dissolve_parse_middle(self):
self.assertEqual(trck[2].source_range.start_time.value, 86400 + 201)
self.assertEqual(trck[2].duration().value, 10)

def test_dissolve_parse_full_clip_dissolve(self):
tl = otio.adapters.read_from_file(DISSOLVE_TEST_3)
self.assertEqual(len(tl.tracks[0]), 5)

self.assertTrue(isinstance(tl.tracks[0][2], otio.schema.Transition))

trck = tl.tracks[0]
clip_a = trck[0]
self.assertEqual(clip_a.name, "Clip A.mov")
self.assertEqual(clip_a.duration().value, 61)

clip_b = trck[1]
self.assertEqual(clip_b.name, "Clip B.mov")
self.assertEqual(clip_b.source_range.start_time.value, 86400 + 144)
self.assertEqual(clip_b.duration().value, 15)

transition = trck[2]
self.assertEqual(transition.in_offset.value, 15)
self.assertEqual(transition.out_offset.value, 15)

clip_c = trck[3]
self.assertEqual(clip_c.name, "Clip C.mov")
self.assertEqual(clip_c.source_range.start_time.value, 86400 + 829)
self.assertEqual(clip_c.duration().value, 15)

clip_d = trck[4]
self.assertEqual(clip_d.name, "Clip D.mov")
self.assertEqual(clip_d.source_range.start_time.value, 86400)
self.assertEqual(clip_d.duration().value, 46)

def test_dissolve_with_odd_frame_count_maintains_length(self):
# EXERCISE
tl = otio.adapters.read_from_string(