diff --git a/qencoder/aomkf.py b/qencoder/aomkf.py index 4be751c..e234f9a 100644 --- a/qencoder/aomkf.py +++ b/qencoder/aomkf.py @@ -137,7 +137,7 @@ def find_aom_keyframes(stat_file, key_freq_min): return keyframes_list -def aom_keyframes(video_path, video_path_str, stat_file, stat_file_str, min_scene_len, ffmpeg_pipe, encoder, netThreads, video_params, qinterface): +def aom_keyframes(video_path: Path, stat_file: Path, min_scene_len, ffmpeg_pipe, encoder, netThreads, video_params, rtenc, qinterface): """[Get frame numbers for splits from aomenc 1 pass stat file] """ qinterface.q.put([0, "Analysis", 0]) @@ -147,21 +147,26 @@ def aom_keyframes(video_path, video_path_str, stat_file, stat_file_str, min_scen video.release() if total < 1: - total = frame_probe(video_path_str) + total = frame_probe(video_path) ffmpeg_pipe = ffmpeg_pipe[:-2] # remove the ' |' at the end - f = f'ffmpeg -y -hide_banner -loglevel error -i \'{video_path_str}\' {ffmpeg_pipe}' + f1 = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", "-i", video_path.as_posix()] + f = f1 + ffmpeg_pipe.split() # removed -w -h from aomenc since ffmpeg filters can change it and it can be added into video_params # TODO(n9Mtq4): if an encoder other than aom is being used, video_params becomes the default so -w -h may be needed again - e = "" - if (encoder == "aom"): - e = f'aomenc --passes=2 --pass=1 {video_params} --fpf=\'{stat_file_str}\' -o {os.devnull} -' + e = [] + if (encoder == "aom" and not rtenc): + e621 = ["aomenc", "--passes=2", "--pass=1"] + e2 = ["--fpf=" + stat_file.as_posix(), "-o", os.devnull, "-"] + e = e621 + video_params.split() + e2 else: - e = f'aomenc --passes=2 --pass=1 --threads={str(netThreads)} --cpu-used=5 --end-usage=q --cq-level=40 --fpf=\'{stat_file_str}\' -o {os.devnull} -' - - ffmpeg_pipe = subprocess.Popen(f, shell=True, stdout=PIPE, stderr=STDOUT) - pipe = subprocess.Popen(e, shell=True, stdin=ffmpeg_pipe.stdout, stdout=PIPE, + e = ["aomenc", "--passes=2", "--pass=1", 
"--threads=" + str(netThreads), + "--cpu-used=5", "--end-usage=q", "--cq-level=40", "--fpf=" + stat_file.as_posix(), + "-o", os.devnull, "-"] + print(e) + ffmpeg_pipe = subprocess.Popen(f, stdout=PIPE, stderr=STDOUT) + pipe = subprocess.Popen(e, stdin=ffmpeg_pipe.stdout, stdout=PIPE, stderr=STDOUT, universal_newlines=True) encoder_history = deque(maxlen=20) diff --git a/qencoder/ffmpeg.py b/qencoder/ffmpeg.py index 3fbecf9..57b929e 100644 --- a/qencoder/ffmpeg.py +++ b/qencoder/ffmpeg.py @@ -9,10 +9,10 @@ import sys -def frame_probe(source): +def frame_probe(source: Path): """Get frame count.""" - cmd = "ffmpeg -hide_banner -i '" + str(source) + "' -map 0:v:0 -f null -" - r = subprocess.run(cmd, shell = True, stdout=PIPE, stderr=PIPE) + cmd = ["ffmpeg", "-hide_banner", "-i", source.as_posix(), "-map", "0:v:0", "-f", "null", "-"] + r = subprocess.run(cmd, stdout=PIPE, stderr=PIPE) matches = re.findall(r"frame=\s*([0-9]+)\s", r.stderr.decode("utf-8") + r.stdout.decode("utf-8")) return int(matches[-1]) @@ -56,16 +56,16 @@ def write_progress_file(file, chunk, frames): json.dump(d, f) -def frame_check(source: Path, source_str, encoded: Path, encoded_str, temp: Path, nocheck): +def frame_check(source: Path, encoded: Path, temp: Path, nocheck): """Checking if source and encoded video frame count match.""" try: status_file = Path(temp / 'done.json') if nocheck: - s1 = frame_probe(source_str) + s1 = frame_probe(source) write_progress_file(status_file, source, s1) else: - s1, s2 = [frame_probe(i) for i in (source_str, encoded_str)] + s1, s2 = [frame_probe(i) for i in (source, encoded)] if s1 == s2: write_progress_file(status_file, source, s1) else: @@ -82,26 +82,26 @@ def frame_check(source: Path, source_str, encoded: Path, encoded_str, temp: Path doneFileLock.release() -def concatenate_video(temp, temp_folder, output): +def concatenate_video(temp: Path, output: Path): """With FFMPEG concatenate encoded segments into final file.""" - with open(f'{temp_folder / "concat" }', 
'w') as f: + with open(f'{temp / "concat" }', 'w') as f: - encode_files = sorted((temp_folder / 'encode').iterdir()) + encode_files = sorted((temp / 'encode').iterdir()) # Replace all the ' with '/'' so ffmpeg can read the path correctly f.writelines("file '" + str(file.absolute()).replace('\'','\'\\\'\'') + "'\n" for file in encode_files) # Add the audio file if one was extracted from the input - audio_file = temp + "/audio.mkv" - audio_actualfile = temp_folder / "audio.mkv" - if audio_actualfile.exists(): - audio = f'-i \'{audio_file}\' -c:a copy -map 1' + audio_file = temp / "audio.mkv" + if audio_file.exists(): + audio = ["-i", audio_file.as_posix(), "-c:a", "copy", "-map", "1"] else: - audio = '' + audio = [] - cmd = f' ffmpeg -y -hide_banner -loglevel error -f concat -safe 0 -i \'{temp + "/concat"}\' ' \ - f'{audio} -c copy -map 0 -y \'{output}\'' - concat = subprocess.run(cmd, shell=True, stdout=PIPE, stderr=STDOUT).stdout + cmd1 = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", "-f", "concat", "-safe", "0", "-i", temp / "concat"] + cmd2 = ["-c", "copy", "-map", "0", "-y", output.as_posix()] + cmd = cmd1 + audio + cmd2 + concat = subprocess.run(cmd, stdout=PIPE, stderr=STDOUT).stdout if len(concat) > 0: @@ -109,16 +109,17 @@ def concatenate_video(temp, temp_folder, output): raise Exception -def extract_audio(input_vid, temp, audio_params): +def extract_audio(input_vid: Path, temp: Path, audio_params): """Extracting audio from source, transcoding if needed.""" - audio_file = temp + "/audio.mkv" + audio_file = temp / "audio.mkv" # Checking is source have audio track - check = fr' ffmpeg -y -hide_banner -loglevel error -ss 0 -i \'{input_vid}\' -t 0 -vn -c:a copy -f null -' - is_audio_here = len(subprocess.run(check, shell=True, stdout=PIPE, stderr=STDOUT).stdout) == 0 + check = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", + "-ss", "0", "-i", input_vid.as_posix(), "-t", "0", "-vn", "-c:a", "copy", "-f", "null", "-"] + is_audio_here = 
len(subprocess.run(check, stdout=PIPE, stderr=STDOUT).stdout) == 0 # If source have audio track - process it if is_audio_here: - cmd = f'ffmpeg -y -hide_banner -loglevel error -i \'{input_vid}\' -vn ' \ - f'{audio_params} {audio_file}' - subprocess.run(cmd, shell=True) + cmd1 = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", "-i", input_vid.as_posix(), "-vn"] + cmd = cmd1 + audio_params.split() + [audio_file.as_posix()] + subprocess.run(cmd) diff --git a/qencoder/mainwindow.py b/qencoder/mainwindow.py index 5493ee2..1cc20ce 100644 --- a/qencoder/mainwindow.py +++ b/qencoder/mainwindow.py @@ -737,7 +737,7 @@ def retranslateUi(self, qencoder): self.pushButton_vmafmodel.setText(_translate("qencoder", "Choose Model")) self.checkBox_shutdown.setStatusTip(_translate("qencoder", "May not work depending on OS/config, please test first with a short queue/video.")) self.checkBox_shutdown.setText(_translate("qencoder", "Shutdown after completion")) - self.checkBox_lessshitsplit.setStatusTip(_translate("qencoder", "Do not use pyscenedetect at all. Still in testing but if you can test it, check this. It\'s totally worth it.")) + self.checkBox_lessshitsplit.setStatusTip(_translate("qencoder", "Do not use pyscenedetect. Far slower, but less fail prone and better. Recommended for most encodes!")) self.checkBox_lessshitsplit.setText(_translate("qencoder", "Use First Pass For Splitting")) self.checkBox_unsafeSplit.setStatusTip(_translate("qencoder", "Allow splitting on frames that weren\'t keyframes in the original. might cause chunking errors.")) self.checkBox_unsafeSplit.setText(_translate("qencoder", "Unsafe Splitting")) diff --git a/qencoder/mainwindow.ui b/qencoder/mainwindow.ui index 45a183b..3f5090e 100644 --- a/qencoder/mainwindow.ui +++ b/qencoder/mainwindow.ui @@ -1401,7 +1401,7 @@ - Do not use pyscenedetect at all. Still in testing but if you can test it, check this. It's totally worth it. + Do not use pyscenedetect. Far slower, but less fail prone and better. 
Recommended for most encodes! Use First Pass For Splitting diff --git a/qencoder/pav1n.py b/qencoder/pav1n.py index 7f1f8f4..1177ac5 100644 --- a/qencoder/pav1n.py +++ b/qencoder/pav1n.py @@ -66,10 +66,10 @@ def log(self, info): """Default logging function, write to file.""" print(info) with open(self.d.get('logging'), 'a') as log: - log.write(time.strftime('%X') + ' ' + info) + log.write(time.strftime('%X') + ' ' + str(info)) - def lineByLineCmd(self, cmd): - popen = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, universal_newlines=True) + def lineByLineCmd(self, cmd: list): + popen = subprocess.Popen(cmd, stderr=subprocess.PIPE, universal_newlines=True) for stderr_line in iter(popen.stderr.readline, ""): yield stderr_line popen.stderr.close() @@ -77,13 +77,40 @@ def lineByLineCmd(self, cmd): if return_code: raise subprocess.CalledProcessError(return_code, cmd) - def call_cmd(self, cmd, capture_output=False): + def call_cmd(self, cmd: list, capture_output=False): """Calling system shell, if capture_output=True output string will be returned.""" if capture_output: - return subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout + return subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout with open(self.d.get('logging'), 'a') as log: - a = subprocess.run(cmd, shell=True, stdout=log, stderr=log) + a = subprocess.run(cmd, stdout=log, stderr=log) + + def two_step_linebyline(self, cmd: list): + cm1 = [] + cm2 = [] + for i in range(len(cmd)): + if (cmd[i] == "|"): + cm1 = cmd[:i] + cm2 = cmd[(i + 1):] + cm1_pipe = subprocess.Popen(cm1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + popen = subprocess.Popen(cm2, stdin=cm1_pipe.stdout, stderr=subprocess.PIPE, universal_newlines=True) + for stderr_line in iter(popen.stderr.readline, ""): + yield stderr_line + popen.stderr.close() + return_code = popen.wait() + if return_code: + raise subprocess.CalledProcessError(return_code, cmd) + + def 
two_step_cmd(self, cmd: list): + cm1 = [] + cm2 = [] + for i in range(len(cmd)): + if (cmd[i] == "|"): + cm1 = cmd[:i] + cm2 = cmd[(i + 1):] + cm1_pipe = subprocess.Popen(cm1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pipe = subprocess.call(cm2, stdin=cm1_pipe.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + def determine_resources(self): """Returns number of workers that machine can handle with selected encoder.""" @@ -129,7 +156,7 @@ def reduce_scenes(self, scenes): return scenes - def scene_detect(self, video: Path, qinterface, videostr): + def scene_detect(self, video: Path, qinterface): """ Running PySceneDetect detection on source video for segmenting. Optimal threshold settings 15-50 @@ -140,7 +167,7 @@ def scene_detect(self, video: Path, qinterface, videostr): return '' try: - totalFrames = qencoder.ffmpeg.frame_probe(videostr) + totalFrames = qencoder.ffmpeg.frame_probe(video) # If stats file exists, load it. scenes = self.d.get('scenes') if scenes: @@ -158,8 +185,7 @@ def scene_detect(self, video: Path, qinterface, videostr): if (self.d['better_split']): scenes = [0] stat_file = self.d['temp'] / 'keyframes.log' - stat_file_str = self.d['temp_str'] + "/" + 'keyframes.log' - scene_list = qencoder.aomkf.aom_keyframes(self.d['input_file'], self.d['input_file_str'], stat_file, stat_file_str, self.d['min_split_dist'], self.d['ffmpeg_pipe'], self.d['encoder'], self.d['threads'] * self.d['workers'], self.d['video_params'], qinterface) + scene_list = qencoder.aomkf.aom_keyframes(self.d['input_file'], stat_file, self.d['min_split_dist'], self.d['ffmpeg_pipe'], self.d['encoder'], self.d['threads'] * self.d['workers'], self.d['video_params'], self.d['rtenc'], qinterface) self.log(f'Found scenes: {len(scene_list)}. Original video has {len(kfScenes)} keyframes\n') if (not self.d.get("unsafe_split")): for scene in scene_list: @@ -237,18 +263,18 @@ def scene_detect(self, video: Path, qinterface, videostr): self.log("Not able to split video. 
Possibly corrupted.") raise Exception - def split(self, video, frames): + def split(self, video: Path, frames): """Spliting video by frame numbers, or just copying video.""" if len(frames) == 1: self.log('Copying video for encode\n') - cmd = f'{self.FFMPEG} -i \'{video}\' -map_metadata -1 -an -c copy ' \ - f'-avoid_negative_ts 1 \'{self.d.get("temp_str") + "/split/0.mkv"}\'' + cmd = self.FFMPEG.split() + ["-i", video, "-map_metadata", "-1", "-an", "-c", "copy", + "-avoid_negative_ts", "1", (self.d['temp'] / "split" / "0.mkv").as_posix()] else: self.log('Splitting video\n') - cmd = f'{self.FFMPEG} -i \'{video}\' -map_metadata -1 -an -f segment -segment_frames {frames} ' \ - f'-c copy -avoid_negative_ts 1 \'{self.d.get("temp_str") + "/split" + "/%04d.mkv"}\'' + cmd = self.FFMPEG.split() + ["-i", video, "-map_metadata", "-1", "-an", "-f", "segment", + "-segment_frames", frames, "-c", "copy", "-avoid_negative_ts", "1", (self.d['temp'] / "split" / "%04d.mkv").as_posix()] self.log(cmd) - a = self.call_cmd(cmd) + self.call_cmd(cmd) def get_video_queue(self, temp: Path, resume): """ @@ -290,33 +316,25 @@ def aom_vpx_encode(self, input_files): if encoder == 'aom': enc = 'aomenc' - single_p = f'{enc} --passes=1 ' two_p_1 = f'{enc} --passes=2 --pass=1' two_p_2 = f'{enc} --passes=2 --pass=2' if self.d.get('passes') == 1: pass_1_commands = [] for index in range(len(input_files)): - p1IF0 = str(input_files[index][0]).replace("'", "'\"'\"'") - p1IF1 = str(input_files[index][1]).replace("'", "'\"'\"'") - suffix = os.path.splitext(input_files[index][1])[1] - pass_1_commands.append((f'-i \'{p1IF0}\' {self.d.get("ffmpeg_pipe")} ' + - f' {single_p} {self.d.get("video_params")} -o \'{p1IF1 + ".ivf"}\' - ', index, - (input_files[index][0], input_files[index][1].with_suffix(str(suffix) + ".ivf")))) + pass_1_commands.append((["-i", input_files[index][0].as_posix()] + self.d['ffmpeg_pipe'].split() + [enc, "--passes=1"] + self.d['video_params'].split() + ["-o", 
input_files[index][1].with_suffix(".ivf").as_posix(), "-"], index, (input_files[index][0], input_files[index][1].with_suffix(".ivf")))) return pass_1_commands if self.d.get('passes') == 2: pass_2_commands = [] for index in range(len(input_files)): - p1IF0 = str(input_files[index][0]).replace("'", "'\"'\"'") - p1IF1 = str(input_files[index][1]).replace("'", "'\"'\"'") - suffix = os.path.splitext(input_files[index][1])[1] - pass_2_commands.append((f'-i \'{p1IF0}\' {self.d.get("ffmpeg_pipe")}' + - f' {two_p_1} {self.d.get("video_params")} --fpf=\'{p1IF0 + ".log"}\' -o {os.devnull} - ', - f'-i \'{p1IF0}\' {self.d.get("ffmpeg_pipe")}' + - f' {two_p_2} {self.d.get("video_params")} ' + - f'--fpf=\'{p1IF0 + ".log"}\' -o \'{p1IF1 + ".ivf"}\' - ', index, - (input_files[index][0], input_files[index][1].with_suffix(str(suffix) + ".ivf")))) + pass_2_commands.append( (["-i", input_files[index][0].as_posix()] + + self.d['ffmpeg_pipe'].split() + [enc, "--passes=2", "--pass=1"] + self.d['video_params'].split() + + ["--fpf=" + input_files[index][0].with_suffix(".log").as_posix(), "-o", input_files[index][1].with_suffix(".ivf").as_posix(), "-"], + ["-i", input_files[index][0].as_posix()] + + self.d['ffmpeg_pipe'].split() + [enc, "--passes=2", "--pass=2"] + self.d['video_params'].split() + + ["--fpf=" + input_files[index][0].with_suffix(".log").as_posix(), "-o", input_files[index][1].with_suffix(".ivf").as_posix(), "-"], + index, (input_files[index][0], input_files[index][1].with_suffix(".ivf")))) return pass_2_commands def compose_encoding_queue(self, files): @@ -366,15 +384,27 @@ def get_brightness(video): return brig_geom @staticmethod - def man_cq(command: str, cq: int): + def man_cq(command: list, cq: int): """Return command with new cq value""" mt = '--cq-level=' - cmd = command[:command.find(mt) + 11] + str(cq) + command[command.find(mt) + 13:] - return cmd + for i in range(len(command)): + if (mt in command[i]): + command[i] = mt + str(cq) + return command + + @staticmethod + 
def get_cq(command: list): + mt = '--cq-level=' + for i in range(len(command)): + if (mt in command[i]): + return int(command[i].replace(mt, '')) + print("ERROR: Unable to determine q") + return 32 + + - def boost(self, command: str, br_geom, new_cq=0): + def boost(self, command: list, br_geom, new_cq=0): """Based on average brightness of video decrease(boost) Quantize value for encoding.""" - cq = self.man_cq(command, -1) + cq = self.get_cq(command) if not new_cq: if br_geom < 128: new_cq = cq - round((128 - br_geom) / 128 * self.d.get('br')) @@ -394,9 +424,7 @@ def encode(self, commands): # Replace ffmpeg with aom because ffmpeg aom doesn't work with parameters properly. st_time = time.time() source, target = Path(commands[-1][0]), Path(commands[-1][1]) - source_str = str(source).replace("'", "'\"'\"'") - target_str = str(target).replace("'", "'\"'\"'") - frame_probe_source = qencoder.ffmpeg.frame_probe(source_str) + frame_probe_source = qencoder.ffmpeg.frame_probe(source) if (self.d['use_vmaf']): tg_cq = qencoder.targetvmaf.target_vmaf(source, self.d) @@ -431,28 +459,28 @@ def encode(self, commands): startingFramecnt = self.frameCounterArray[frameCounterIndex] for i in range(len(commands[:-2])): - self.log(rf'{self.FFMPEG} {commands[i]}') - cmd = rf'{self.FFMPEG} {commands[i]}' + self.log(rf'{self.FFMPEG.split() + commands[i]}') + cmd = self.FFMPEG.split() + commands[i] if (i < (len(commands[:-2]) - 1)): - self.call_cmd(cmd) + self.two_step_cmd(cmd) else: regexp = re.compile("frame\\s+\\d+/(\\d+)") - for line in self.lineByLineCmd(cmd): + for line in self.two_step_linebyline(cmd): try: framecnt = int(re.findall(regexp, str(line))[-1]) Av1an.frameCounterArray[frameCounterIndex] = framecnt + startingFramecnt except: pass - qencoder.ffmpeg.frame_check(source, source_str, target, target_str, self.d['temp'], False) + qencoder.ffmpeg.frame_check(source, target, self.d['temp'], False) - frame_probe = 
qencoder.ffmpeg.frame_probe(target) enc_time = round(time.time() - st_time, 2) self.log(f'Done: {source.name} Fr: {frame_probe}\n' f'Fps: {round(frame_probe / enc_time, 4)} Time: {enc_time} sec.\n') - return qencoder.ffmpeg.frame_probe(source_str) + return qencoder.ffmpeg.frame_probe(source) runningFrameCounter = False startingTime = datetime.datetime.now() @@ -492,7 +520,7 @@ def encoding_loop(self, commands, qinterface): else: done = 0 initial = 0 - total = qencoder.ffmpeg.frame_probe(self.d.get('input_file_str')) + total = qencoder.ffmpeg.frame_probe(self.d.get('input_file')) d = {'total': total, 'done': {}} with open(done_path, 'w') as f: json.dump(d, f) @@ -534,13 +562,13 @@ def setup_routine(self, qinterface): self.set_logging() # Splitting video and sorting big-first - framenums = self.scene_detect(self.d.get('input_file'), qinterface, self.d.get('input_file_str')) + framenums = self.scene_detect(self.d.get('input_file'), qinterface) if (len(framenums) == 0): return 1 - self.split(self.d.get('input_file_str'), framenums) + self.split(self.d.get('input_file'), framenums) # Extracting audio - qencoder.ffmpeg.extract_audio(self.d.get('input_file_str'), self.d.get('temp_str'), self.d.get('audio_params')) + qencoder.ffmpeg.extract_audio(self.d.get('input_file'), self.d.get('temp'), self.d.get('audio_params')) return 0 def video_encoding(self, qinterface): @@ -561,7 +589,7 @@ def video_encoding(self, qinterface): commands = self.compose_encoding_queue(files) self.encoding_loop(commands, qinterface) self.runningFrameCounter = False - qencoder.ffmpeg.concatenate_video(self.d['temp_str'], self.d['temp'], self.d['output_file_str']) + qencoder.ffmpeg.concatenate_video(self.d['temp'], self.d['output_file']) sleep(0.2) if (not self.d.get('keep')): shutil.rmtree(self.d.get('temp')) @@ -575,5 +603,7 @@ def main_thread(self, qinterface): try: self.video_encoding(qinterface) except Exception as e: - self.q.put([2, 0]) + traceback.print_exc() + qinterface.q.put([2, 0]) 
sys.exit(1) + qinterface.updateStatusProgress.emit("Encode completed!", 100) diff --git a/qencoder/targetvmaf.py b/qencoder/targetvmaf.py index 6006f74..19680c5 100644 --- a/qencoder/targetvmaf.py +++ b/qencoder/targetvmaf.py @@ -44,14 +44,10 @@ def call_vmaf(source: Path, encoded: Path, n_threads, model, res = "1920x1080"): # for proper vmaf calculation # Also it's required to use -r before both files of vmaf calculation to avoid errors fl = source.with_name(encoded.stem).with_suffix('.json').as_posix() - cmd = f'ffmpeg -loglevel error -hide_banner -r 60 -i \'{encoded.as_posix()}\' -r 60 -i {source.as_posix()} ' \ - f'-filter_complex "[0:v]scale={res}:flags=spline:force_original_aspect_ratio=decrease[distorted];' \ - f'[1:v]scale={res}:flags=spline:force_original_aspect_ratio=decrease[ref];' \ - f'[distorted][ref]libvmaf=log_fmt="json":log_path={fl}{mod}{n_threads}" -f null - ' - print(cmd) + cmd = ["ffmpeg", "-loglevel", "error", "-hide_banner", "-i", encoded.as_posix(), "-i", source.as_posix(), "-filter_complex", f'[0:v]scale={res}:flags=spline:force_original_aspect_ratio=decrease[distorted];[1:v]scale={res}:flags=spline:force_original_aspect_ratio=decrease[ref];[distorted][ref]libvmaf=log_fmt=json:log_path={fl}{mod}{n_threads}', "-f", "null", "-"] try: - c = subprocess.run(cmd, shell=True, stdout=PIPE, stderr=STDOUT) + c = subprocess.run(cmd, stdout=PIPE, stderr=STDOUT) call = c.stdout # print(c.stdout.decode()) if 'error' in call.decode().lower(): @@ -65,17 +61,11 @@ def call_vmaf(source: Path, encoded: Path, n_threads, model, res = "1920x1080"): def x264_probes(video: Path, ffmpeg: str, probe_framerate): - - if probe_framerate == 0: - fr = '' - else: - fr = f'-r {probe_framerate}' - - cmd = f' ffmpeg -y -hide_banner -loglevel error -i {video.as_posix()} ' \ - f' {fr} -an {ffmpeg} -c:v libx264 -crf 0 {video.with_suffix(".mp4")}' - - subprocess.run(cmd, shell=True) - + cmd = ['ffmpeg', '-y', '-hide_banner', '-loglevel', 'error', "-i", video.as_posix()] + if 
probe_framerate != 0: + cmd = cmd + ["-r", str(probe_framerate)] + cmd = cmd + ["-an"] + ffmpeg.split() + ["-c:v", "libx264", "-crf", "0", video.with_suffix(".mp4").as_posix()] + subprocess.run(cmd) def gen_probes_names(probe, q): """Make name of vmaf probe @@ -87,13 +77,12 @@ def probe_cmd(probe, q, ffmpeg_pipe, encoder, threads): """Generate and return commands for probes at set Q values """ pipe = f'ffmpeg -y -hide_banner -loglevel error -i {probe} {ffmpeg_pipe}' + cmd1 = ["ffmpeg", "-y", "-hide_banner", "-loglevel", "error", "-i", probe.as_posix()] + ffmpeg_pipe.split() if encoder == 'aom': - params = " aomenc -q --passes=1 --threads=" + str(threads) + " --end-usage=q --cpu-used=6 --cq-level=" - cmd = f'{pipe} {params}{q} -o {probe.with_name(f"v_{q}{probe.stem}")}.ivf - ' + cmd = cmd1 + ["aomenc", "-q", "--passes=1", "--threads=" + str(threads), "--end-usage=q", "--cpu-used=6", "--cq-level=" + str(q), "-o", probe.with_name(f"v_{q}{probe.stem}").with_suffix(".ivf").as_posix(), "-"] else: - params = "vpxenc --passes=1 --pass=1 --codec=vp9 --threads=" + str(threads) + " --cpu-used=9 --end-usage=q --cq-level=" - cmd = f'{pipe} {params}{q} -o {probe.with_name(f"v_{q}{probe.stem}")}.ivf - ' + cmd = cmd1 + ["vpxenc", "--codec=vp9", "--passes=1", "--pass=1", "--threads=" + str(threads), "--end-usage=q", "--cpu-used=9", "--cq-level=" + str(q), "-o", probe.with_name(f"v_{q}{probe.stem}").with_suffix(".ivf").as_posix(), "-"] return cmd @@ -121,10 +110,21 @@ def interpolate_data(vmaf_cq: list, vmaf_target): vmaf_target_cq = min(tl, key=lambda x: abs(x[1] - vmaf_target)) return vmaf_target_cq, tl, f, xnew + +def two_step_cmd(cmd: list): + cm1 = [] + cm2 = [] + for i in range(len(cmd)): + if (cmd[i] == "|"): + cm1 = cmd[:i] + cm2 = cmd[(i + 1):] + cm1_pipe = subprocess.Popen(cm1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + pipe = subprocess.call(cm2, stdin=cm1_pipe.stdout, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + def vmaf_probe(probe, q, args): cmd = 
probe_cmd(probe, q, args['ffmpeg_pipe'], args['encoder'], args['threads']) - c = subprocess.run(cmd, shell=True, stdout=PIPE, stderr=STDOUT) + two_step_cmd(cmd) #make_pipes(cmd).wait() # TODO: Add graphics here diff --git a/qencoder/window.py b/qencoder/window.py index 4873ae4..1ee67f7 100644 --- a/qencoder/window.py +++ b/qencoder/window.py @@ -760,7 +760,8 @@ def getArgs(self): 'ffmpeg_cmd' : self.getFFMPEGParams(), 'min_split_dist' : self.spinBox_minsplit.value(), 'use_vmaf' : self.checkBox_vmaf.isChecked(), 'threads' : self.spinBox_threads.value(), 'better_split' : self.checkBox_lessshitsplit.isChecked(), 'cpuused' : self.spinBox_speed.value(), - 'unsafe_split' : self.checkBox_unsafeSplit.isChecked() + 'unsafe_split' : self.checkBox_unsafeSplit.isChecked(), + 'rtenc' : self.checkBox_rtenc.isChecked() } args['temp'] = Path(str(os.path.dirname(self.outputPath.text())) + "/temp_" + str(os.path.basename(self.outputPath.text()))) @@ -783,12 +784,7 @@ def getArgs(self): args['br'] = self.spinBox_boost.value() if (self.comboBox_encoder.currentIndex() >= 1): args['encoder'] = 'vpx' - - args['temp_str'] = str(args['temp']).replace("'", "'\"'\"'") - args['input_file_str'] = str(args['input_file']).replace("'", "'\"'\"'") - args['output_file_str'] = str(args['output_file']).replace("'", "'\"'\"'") - print(args['temp_str']) - + args['temp'] = Path(str((args['temp'])).replace("'", "_")) return args def encodeVideoQueue(self): @@ -1125,4 +1121,3 @@ def run(self): self.encodeFinished.emit(qdat[1]) except Exception as e: pass - self.updateStatusProgress.emit("Encode completed!", 100) diff --git a/setup.py b/setup.py index ea5e189..4365df4 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ setuptools.setup( name="qencoder", - version="1.5.3", + version="1.5.4", author="Eli Stone", author_email="eli.stonium@gmail.com", description="Qt graphical interface for encoding",