diff --git a/.flake8 b/.flake8 index 43a1b7693..4c9027e76 100644 --- a/.flake8 +++ b/.flake8 @@ -1,3 +1,3 @@ [flake8] select = E3, E4, F -per-file-ignores = roop/core.py:E402 \ No newline at end of file +per-file-ignores = roop/core.py:E402,F401 \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug.md b/.github/ISSUE_TEMPLATE/bug.md new file mode 100644 index 000000000..ee035739a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.md @@ -0,0 +1,44 @@ +--- +name: Bug +about: Report a bug +title: '[Bug]' +labels: 'bug' + +--- + +## Description + +A concise description of the bug and how to reproduce it. + +## Error + +Paste the error or exception from your console: + +``` + +``` + +## Details + +What operating system are you using? + +- [ ] Windows +- [ ] MacOS (Apple Silicon) +- [ ] MacOS (Apple Legacy) +- [ ] Linux +- [ ] Linux in WSL + +What execution provider are you using? + +- [ ] CPU +- [ ] CUDA +- [ ] CoreML +- [ ] DirectML +- [ ] OpenVINO +- [ ] Other + +What version of Roop are you using? + +- [ ] 1.0.0 +- [ ] 1.1.0 +- [ ] next diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md deleted file mode 100644 index 2addecc2a..000000000 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -name: Bug report -about: Create a report to help us improve -title: '' -labels: '' -assignees: '' - ---- - -**Describe the bug** -A clear and concise description of what the bug is. - -**To Reproduce** -Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error - -**Details** -What OS are you using? -- [ ] Linux -- [ ] Linux in WSL -- [ ] Windows -- [ ] Mac - -Are you try to use a GPU? -- [ ] No. I am not using the `---gpu` flag -- [ ] NVIDIA -- [ ] AMD -- [ ] Intel -- [ ] Mac - -**Screenshots** -If applicable, add screenshots to help explain your problem. - -**Sanity Check** -- [ ] I have the latest code from the github repository -- [ ] I have followed the installation guide diff --git a/.github/ISSUE_TEMPLATE/installation.md b/.github/ISSUE_TEMPLATE/installation.md new file mode 100644 index 000000000..966417b00 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/installation.md @@ -0,0 +1,12 @@ +--- +name: Installation +about: Platform and installation issues +title: '[Installation]' +labels: 'installation' + +--- + +Please **DO NOT OPEN** platform and installation issues! + +- Check the [troubleshooting](https://github.com/s0md3v/roop/wiki/4.-Troubleshooting) that covers many issues. +- Join our helpful community on [Discord](https://discord.gg/Y9p4ZQ2sB9) for instant help. diff --git a/.github/ISSUE_TEMPLATE/suggestion.md b/.github/ISSUE_TEMPLATE/suggestion.md deleted file mode 100644 index 180c3849c..000000000 --- a/.github/ISSUE_TEMPLATE/suggestion.md +++ /dev/null @@ -1,11 +0,0 @@ ---- -name: Suggestion -about: Suggest an idea for this project -title: '' -labels: '' -assignees: '' - ---- - -**Describe your suggestion** -A clear and concise description of what you want to happen. diff --git a/README.md b/README.md index ca3e92a9f..e2d7920b1 100644 --- a/README.md +++ b/README.md @@ -7,22 +7,24 @@ You can watch some demos [here](https://drive.google.com/drive/folders/1KHv8n_rd ## Disclaimer This software is meant to be a productive contribution to the rapidly growing AI-generated media industry. It will help artists with tasks such as animating a custom character or using the character as a model for clothing etc. 
-The developers of this software are aware of its possible unethical applicaitons and are committed to take preventative measures against them. It has a built-in check which prevents the program from working on inappropriate media including but not limited to nudity, graphic content, sensitive material such as war footage etc. We will continue to develop this project in the positive direction while adhering to law and ethics. This project may be shut down or include watermarks on the output if requested by law. +The developers of this software are aware of its possible unethical applications and are committed to taking preventative measures against them. It has a built-in check which prevents the program from working on inappropriate media including but not limited to nudity, graphic content, sensitive material such as war footage etc. We will continue to develop this project in the positive direction while adhering to law and ethics. This project may be shut down or include watermarks on the output if requested by law. Users of this software are expected to use this software responsibly while abiding the local law. If face of a real person is being used, users are suggested to get consent from the concerned person and clearly mention that it is a deepfake when posting content online. Developers of this software will not be responsible for actions of end-users. ## How do I install it? -**Issues regarding installation will be closed from now on, we cannot handle the amount of requests.** ### Basic -- **Basic:** It is more likely to work on your computer but it will also be very slow. You can follow instructions for the basic install [here](https://github.com/s0md3v/roop/wiki/1.-Installation). +It is more likely to work on your computer, but it will also be very slow. You can follow instructions for the basic install [here](https://github.com/s0md3v/roop/wiki/1.-Installation). -- **Acceleration:** If you have a good GPU and are ready for solving any software issues you may face, you can enable GPU which is wayyy faster. To do this, first follow the basic install instructions given above and then follow GPU-specific instructions [here](https://github.com/s0md3v/roop/wiki/2.-Acceleration). ### Acceleration + +If you have a good GPU and are ready to solve any software issues you may face, you can enable GPU acceleration, which is wayyy faster. To do this, first follow the basic install instructions given above and then follow GPU-specific instructions [here](https://github.com/s0md3v/roop/wiki/2.-Acceleration). ## How do I use it? -> Note: When you run this program for the first time, it will download some models ~300MB in size. Executing `python run.py` command will launch this window: + ![gui-demo](gui-demo.png) Choose a face (image with desired face) and the target image/video (image/video in which you want to replace the face) and click on `Start`. Open file explorer and navigate to the directory you select your output to be in. You will find a directory named `<video_title>` where you can see the frames being swapped in realtime. Once the processing is done, it will create the output file. That's it. @@ -36,10 +38,13 @@ options: -t TARGET_PATH, --target TARGET_PATH select an target image or video -o OUTPUT_PATH, --output OUTPUT_PATH select output file or directory --frame-processor FRAME_PROCESSOR [FRAME_PROCESSOR ...] frame processors (choices: face_swapper, face_enhancer, ...)
- --keep-fps keep original fps - --keep-audio keep original audio + --keep-fps keep target fps --keep-frames keep temporary frames + --skip-audio skip target audio --many-faces process every face + --reference-face-position REFERENCE_FACE_POSITION position of the reference face + --reference-frame-number REFERENCE_FRAME_NUMBER number of the reference frame + --similar-face-distance SIMILAR_FACE_DISTANCE face distance used for recognition --video-encoder {libx264,libx265,libvpx-vp9} adjust output video encoder --video-quality [0-51] adjust output video quality --max-memory MAX_MEMORY maximum amount of RAM in GB diff --git a/gui-demo.png b/gui-demo.png index b76a54da3..345c420f4 100644 Binary files a/gui-demo.png and b/gui-demo.png differ diff --git a/requirements-ci.txt b/requirements-ci.txt index cf8aff4df..c924b6a26 100644 --- a/requirements-ci.txt +++ b/requirements-ci.txt @@ -1,14 +1,15 @@ numpy==1.23.5 -opencv-python==4.7.0.72 +opencv-python==4.8.0.74 onnx==1.14.0 insightface==0.7.3 psutil==5.9.5 tk==0.1.0 -customtkinter==5.1.3 +customtkinter==5.2.0 +tkinterdnd2==0.3.0 torch==2.0.1 torchvision==0.15.2 onnxruntime==1.15.0 -tensorflow==2.12.0 +tensorflow==2.13.0 opennsfw2==0.10.2 -protobuf==4.23.2 +protobuf==4.23.4 tqdm==4.65.0 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 859654edc..e95a759e1 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,23 +1,23 @@ --extra-index-url https://download.pytorch.org/whl/cu118 numpy==1.23.5 -opencv-python==4.7.0.72 +opencv-python==4.8.0.74 onnx==1.14.0 insightface==0.7.3 psutil==5.9.5 tk==0.1.0 -customtkinter==5.1.3 -pillow==9.5.0 +customtkinter==5.2.0 +tkinterdnd2==0.3.0 +pillow==10.0.0 torch==2.0.1+cu118; sys_platform != 'darwin' torch==2.0.1; sys_platform == 'darwin' torchvision==0.15.2+cu118; sys_platform != 'darwin' torchvision==0.15.2; sys_platform == 'darwin' -onnxruntime==1.15.0; sys_platform == 'darwin' and platform_machine != 'arm64' +onnxruntime-coreml==1.13.1; sys_platform == 'darwin' and platform_machine != 'arm64' onnxruntime-silicon==1.13.1; sys_platform == 'darwin' and platform_machine == 'arm64' onnxruntime-gpu==1.15.0; sys_platform != 'darwin' -tensorflow==2.13.0rc1; sys_platform == 'darwin' -tensorflow==2.12.0; sys_platform != 'darwin' +tensorflow==2.13.0 opennsfw2==0.10.2 -protobuf==4.23.2 +protobuf==4.23.4 tqdm==4.65.0 gfpgan==1.3.8 \ No newline at end of file diff --git a/roop/capturer.py b/roop/capturer.py index fd49d468d..515fc8e54 100644 --- a/roop/capturer.py +++ b/roop/capturer.py @@ -1,8 +1,10 @@ -from typing import Any +from typing import Optional import cv2 +from roop.typing import Frame -def get_video_frame(video_path: str, frame_number: int = 0) -> Any: + +def get_video_frame(video_path: str, frame_number: int = 0) -> Optional[Frame]: capture = cv2.VideoCapture(video_path) frame_total = capture.get(cv2.CAP_PROP_FRAME_COUNT) capture.set(cv2.CAP_PROP_POS_FRAMES, min(frame_total, frame_number - 1)) diff --git a/roop/core.py b/roop/core.py index b70d85481..32a6d3952 100755 --- a/roop/core.py +++ b/roop/core.py @@ -15,18 +15,17 @@ import argparse import torch import onnxruntime +if not 'CUDAExecutionProvider' in onnxruntime.get_available_providers(): + del torch import tensorflow import roop.globals import roop.metadata import roop.ui as ui -from roop.predicter import predict_image, predict_video +from roop.predictor import predict_image, predict_video from roop.processors.frame.core import get_frame_processors_modules from roop.utilities import has_image_extension, is_image, 
is_video, detect_fps, create_video, extract_frames, get_temp_frame_paths, restore_audio, create_temp, move_temp, clean_temp, normalize_output_path -if 'ROCMExecutionProvider' in roop.globals.execution_providers: - del torch - warnings.filterwarnings('ignore', category=FutureWarning, module='insightface') warnings.filterwarnings('ignore', category=UserWarning, module='torchvision') @@ -38,13 +37,16 @@ def parse_args() -> None: program.add_argument('-t', '--target', help='select an target image or video', dest='target_path') program.add_argument('-o', '--output', help='select output file or directory', dest='output_path') program.add_argument('--frame-processor', help='frame processors (choices: face_swapper, face_enhancer, ...)', dest='frame_processor', default=['face_swapper'], nargs='+') - program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False) - program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True) - program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False) - program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False) + program.add_argument('--keep-fps', help='keep target fps', dest='keep_fps', action='store_true') + program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true') + program.add_argument('--skip-audio', help='skip target audio', dest='skip_audio', action='store_true') + program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true') + program.add_argument('--reference-face-position', help='position of the reference face', dest='reference_face_position', type=int, default=0) + program.add_argument('--reference-frame-number', help='number of the reference frame', dest='reference_frame_number', type=int, default=0) + program.add_argument('--similar-face-distance', help='face distance used for recognition', dest='similar_face_distance', type=float, default=0.85) program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9']) program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=18, choices=range(52), metavar='[0-51]') - program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory()) + program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int) program.add_argument('--execution-provider', help='available execution provider (choices: cpu, ...)', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+') program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads()) program.add_argument('-v', '--version', action='version', version=f'{roop.metadata.name} {roop.metadata.version}') @@ -53,13 +55,16 @@ def parse_args() -> None: roop.globals.source_path = args.source_path roop.globals.target_path = args.target_path - roop.globals.output_path = normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path) + roop.globals.output_path = normalize_output_path(roop.globals.source_path, roop.globals.target_path, args.output_path) # type: ignore + 
roop.globals.headless = roop.globals.source_path and roop.globals.target_path and roop.globals.output_path roop.globals.frame_processors = args.frame_processor - roop.globals.headless = args.source_path or args.target_path or args.output_path roop.globals.keep_fps = args.keep_fps - roop.globals.keep_audio = args.keep_audio roop.globals.keep_frames = args.keep_frames + roop.globals.skip_audio = args.skip_audio roop.globals.many_faces = args.many_faces + roop.globals.reference_face_position = args.reference_face_position + roop.globals.reference_frame_number = args.reference_frame_number + roop.globals.similar_face_distance = args.similar_face_distance roop.globals.video_encoder = args.video_encoder roop.globals.video_quality = args.video_quality roop.globals.max_memory = args.max_memory @@ -76,22 +81,14 @@ def decode_execution_providers(execution_providers: List[str]) -> List[str]: if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)] -def suggest_max_memory() -> int: - if platform.system().lower() == 'darwin': - return 4 - return 16 - - def suggest_execution_providers() -> List[str]: return encode_execution_providers(onnxruntime.get_available_providers()) def suggest_execution_threads() -> int: - if 'DmlExecutionProvider' in roop.globals.execution_providers: - return 1 - if 'ROCMExecutionProvider' in roop.globals.execution_providers: - return 1 - return 8 + if 'CUDAExecutionProvider' in onnxruntime.get_available_providers(): + return 8 + return 1 def limit_resources() -> None: @@ -115,11 +112,6 @@ def limit_resources() -> None: resource.setrlimit(resource.RLIMIT_DATA, (memory, memory)) -def release_resources() -> None: - if 'CUDAExecutionProvider' in roop.globals.execution_providers: - torch.cuda.empty_cache() - - def pre_check() -> bool: if sys.version_info < (3, 9): update_status('Python version is not supported - please upgrade to 3.9 or higher.') @@ -145,11 +137,12 @@ def start() -> None: if predict_image(roop.globals.target_path): destroy() shutil.copy2(roop.globals.target_path, roop.globals.output_path) + # process frame for frame_processor in get_frame_processors_modules(roop.globals.frame_processors): update_status('Progressing...', frame_processor.NAME) frame_processor.process_image(roop.globals.source_path, roop.globals.output_path, roop.globals.output_path) frame_processor.post_process() - release_resources() + # validate image if is_image(roop.globals.target_path): update_status('Processing to image succeed!') else: @@ -160,34 +153,41 @@ def start() -> None: destroy() update_status('Creating temp resources...') create_temp(roop.globals.target_path) - update_status('Extracting frames...') - extract_frames(roop.globals.target_path) + # extract frames + if roop.globals.keep_fps: + fps = detect_fps(roop.globals.target_path) + update_status(f'Extracting frames with {fps} FPS...') + extract_frames(roop.globals.target_path, fps) + else: + update_status('Extracting frames with 30 FPS...') + extract_frames(roop.globals.target_path) + # process frame temp_frame_paths = get_temp_frame_paths(roop.globals.target_path) for frame_processor in get_frame_processors_modules(roop.globals.frame_processors): update_status('Progressing...', frame_processor.NAME) frame_processor.process_video(roop.globals.source_path, temp_frame_paths) frame_processor.post_process() - release_resources() - # handles fps + # create video if roop.globals.keep_fps: - update_status('Detecting fps...') fps = detect_fps(roop.globals.target_path) - update_status(f'Creating 
video with {fps} fps...') + update_status(f'Creating video with {fps} FPS...') create_video(roop.globals.target_path, fps) else: - update_status('Creating video with 30.0 fps...') + update_status('Creating video with 30 FPS...') create_video(roop.globals.target_path) # handle audio - if roop.globals.keep_audio: + if roop.globals.skip_audio: + move_temp(roop.globals.target_path, roop.globals.output_path) + update_status('Skipping audio...') + else: if roop.globals.keep_fps: update_status('Restoring audio...') else: update_status('Restoring audio might cause issues as fps are not kept...') restore_audio(roop.globals.target_path, roop.globals.output_path) - else: - move_temp(roop.globals.target_path, roop.globals.output_path) - # clean and validate + # clean temp clean_temp(roop.globals.target_path) + # validate video if is_video(roop.globals.target_path): update_status('Processing to video succeed!') else: @@ -197,7 +197,7 @@ def start() -> None: def destroy() -> None: if roop.globals.target_path: clean_temp(roop.globals.target_path) - quit() + sys.exit() def run() -> None: diff --git a/roop/face_analyser.py b/roop/face_analyser.py index 9c0afe458..4c1a350dc 100644 --- a/roop/face_analyser.py +++ b/roop/face_analyser.py @@ -1,9 +1,10 @@ import threading -from typing import Any +from typing import Any, Optional, List import insightface +import numpy import roop.globals -from roop.typing import Frame +from roop.typing import Frame, Face FACE_ANALYSER = None THREAD_LOCK = threading.Lock() @@ -15,20 +16,38 @@ def get_face_analyser() -> Any: with THREAD_LOCK: if FACE_ANALYSER is None: FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=roop.globals.execution_providers) - FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640)) + FACE_ANALYSER.prepare(ctx_id=0) return FACE_ANALYSER -def get_one_face(frame: Frame) -> Any: - face = get_face_analyser().get(frame) - try: - return min(face, key=lambda x: x.bbox[0]) - except ValueError: - return None +def clear_face_analyser() -> Any: + global FACE_ANALYSER + + FACE_ANALYSER = None -def get_many_faces(frame: Frame) -> Any: +def get_one_face(frame: Frame, position: int = 0) -> Optional[Face]: + faces = get_many_faces(frame) + if faces: + try: + return faces[position] + except IndexError: + return faces[-1] + return None + + +def get_many_faces(frame: Frame) -> Optional[List[Face]]: try: return get_face_analyser().get(frame) - except IndexError: + except ValueError: return None + + +def find_similar_face(frame: Frame, reference_face: Face) -> Optional[Face]: + faces = get_many_faces(frame) + for face in faces: + if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'): + distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding)) + if distance < roop.globals.similar_face_distance: + return face + return None diff --git a/roop/face_reference.py b/roop/face_reference.py new file mode 100644 index 000000000..3c3e1f1c6 --- /dev/null +++ b/roop/face_reference.py @@ -0,0 +1,21 @@ +from typing import Optional + +from roop.typing import Face + +FACE_REFERENCE = None + + +def get_face_reference() -> Optional[Face]: + return FACE_REFERENCE + + +def set_face_reference(face: Face) -> None: + global FACE_REFERENCE + + FACE_REFERENCE = face + + +def clear_face_reference() -> None: + global FACE_REFERENCE + + FACE_REFERENCE = None diff --git a/roop/globals.py b/roop/globals.py index 77fd391db..3b8bdeb37 100644 --- a/roop/globals.py +++ b/roop/globals.py @@ -3,15 +3,18 @@ source_path = None 
target_path = None output_path = None +headless = None frame_processors: List[str] = [] keep_fps = None -keep_audio = None keep_frames = None +skip_audio = None many_faces = None +reference_face_position = None +reference_frame_number = None +similar_face_distance = None video_encoder = None video_quality = None max_memory = None execution_providers: List[str] = [] execution_threads = None -headless = None log_level = 'error' diff --git a/roop/metadata.py b/roop/metadata.py index 35b0f0245..0f4e05168 100644 --- a/roop/metadata.py +++ b/roop/metadata.py @@ -1,2 +1,2 @@ name = 'roop' -version = '1.1.0' +version = '1.2.0' diff --git a/roop/predicter.py b/roop/predictor.py similarity index 63% rename from roop/predicter.py rename to roop/predictor.py index 7ebc2b62e..b59fee93e 100644 --- a/roop/predicter.py +++ b/roop/predictor.py @@ -1,18 +1,36 @@ +import threading import numpy import opennsfw2 from PIL import Image +from keras import Model from roop.typing import Frame +PREDICTOR = None +THREAD_LOCK = threading.Lock() MAX_PROBABILITY = 0.85 +def get_predictor() -> Model: + global PREDICTOR + + with THREAD_LOCK: + if PREDICTOR is None: + PREDICTOR = opennsfw2.make_open_nsfw_model() + return PREDICTOR + + +def clear_predictor() -> None: + global PREDICTOR + + PREDICTOR = None + + def predict_frame(target_frame: Frame) -> bool: image = Image.fromarray(target_frame) image = opennsfw2.preprocess_image(image, opennsfw2.Preprocessing.YAHOO) - model = opennsfw2.make_open_nsfw_model() views = numpy.expand_dims(image, axis=0) - _, probability = model.predict(views)[0] + _, probability = get_predictor().predict(views)[0] return probability > MAX_PROBABILITY diff --git a/roop/processors/frame/core.py b/roop/processors/frame/core.py index c225f9de4..498169d34 100644 --- a/roop/processors/frame/core.py +++ b/roop/processors/frame/core.py @@ -1,4 +1,5 @@ import os +import sys import importlib import psutil from concurrent.futures import ThreadPoolExecutor, as_completed @@ -27,8 +28,10 @@ def load_frame_processor_module(frame_processor: str) -> Any: for method_name in FRAME_PROCESSORS_INTERFACE: if not hasattr(frame_processor_module, method_name): raise NotImplementedError - except (ImportError, NotImplementedError): - quit(f'Frame processor {frame_processor} crashed.') + except ModuleNotFoundError: + sys.exit(f'Frame processor {frame_processor} not found.') + except NotImplementedError: + sys.exit(f'Frame processor {frame_processor} not implemented correctly.') return frame_processor_module @@ -46,7 +49,7 @@ def multi_process_frame(source_path: str, temp_frame_paths: List[str], process_f with ThreadPoolExecutor(max_workers=roop.globals.execution_threads) as executor: futures = [] queue = create_queue(temp_frame_paths) - queue_per_future = len(temp_frame_paths) // roop.globals.execution_threads + queue_per_future = max(len(temp_frame_paths) // roop.globals.execution_threads, 1) while not queue.empty(): future = executor.submit(process_frames, source_path, pick_queue(queue, queue_per_future), update) futures.append(future) diff --git a/roop/processors/frame/face_enhancer.py b/roop/processors/frame/face_enhancer.py index 3ff92ce9d..7f9b0bba4 100644 --- a/roop/processors/frame/face_enhancer.py +++ b/roop/processors/frame/face_enhancer.py @@ -1,12 +1,12 @@ from typing import Any, List, Callable import cv2 import threading -import gfpgan +from gfpgan.utils import GFPGANer import roop.globals import roop.processors.frame.core from roop.core import update_status -from roop.face_analyser import get_one_face +from 
roop.face_analyser import get_many_faces from roop.typing import Frame, Face from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video @@ -22,11 +22,25 @@ def get_face_enhancer() -> Any: with THREAD_LOCK: if FACE_ENHANCER is None: model_path = resolve_relative_path('../models/GFPGANv1.4.pth') - # todo: set models path https://github.com/TencentARC/GFPGAN/issues/399 - FACE_ENHANCER = gfpgan.GFPGANer(model_path=model_path, upscale=1) # type: ignore[attr-defined] + # todo: set models path -> https://github.com/TencentARC/GFPGAN/issues/399 + FACE_ENHANCER = GFPGANer(model_path=model_path, upscale=1, device=get_device()) return FACE_ENHANCER +def get_device() -> str: + if 'CUDAExecutionProvider' in roop.globals.execution_providers: + return 'cuda' + if 'CoreMLExecutionProvider' in roop.globals.execution_providers: + return 'mps' + return 'cpu' + + +def clear_face_enhancer() -> None: + global FACE_ENHANCER + + FACE_ENHANCER = None + + def pre_check() -> bool: download_directory_path = resolve_relative_path('../models') conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/GFPGANv1.4.pth']) @@ -41,31 +55,32 @@ def pre_start() -> bool: def post_process() -> None: - global FACE_ENHANCER - - FACE_ENHANCER = None + clear_face_enhancer() -def enhance_face(temp_frame: Frame) -> Frame: +def enhance_face(target_face: Face, temp_frame: Frame) -> Frame: + start_x, start_y, end_x, end_y = map(int, target_face['bbox']) with THREAD_SEMAPHORE: - _, _, temp_frame = get_face_enhancer().enhance( - temp_frame, + _, _, temp_face = get_face_enhancer().enhance( + temp_frame[start_y:end_y, start_x:end_x], paste_back=True ) + temp_frame[start_y:end_y, start_x:end_x] = temp_face return temp_frame -def process_frame(source_face: Face, temp_frame: Frame) -> Frame: - target_face = get_one_face(temp_frame) - if target_face: - temp_frame = enhance_face(temp_frame) +def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame: + many_faces = get_many_faces(temp_frame) + if many_faces: + for target_face in many_faces: + temp_frame = enhance_face(target_face, temp_frame) return temp_frame def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: for temp_frame_path in temp_frame_paths: temp_frame = cv2.imread(temp_frame_path) - result = process_frame(None, temp_frame) + result = process_frame(None, None, temp_frame) cv2.imwrite(temp_frame_path, result) if update: update() @@ -73,7 +88,7 @@ def process_frames(source_path: str, temp_frame_paths: List[str], update: Callab def process_image(source_path: str, target_path: str, output_path: str) -> None: target_frame = cv2.imread(target_path) - result = process_frame(None, target_frame) + result = process_frame(None, None, target_frame) cv2.imwrite(output_path, result) diff --git a/roop/processors/frame/face_swapper.py b/roop/processors/frame/face_swapper.py index c53b5b86d..3aa5257db 100644 --- a/roop/processors/frame/face_swapper.py +++ b/roop/processors/frame/face_swapper.py @@ -6,7 +6,8 @@ import roop.globals import roop.processors.frame.core from roop.core import update_status -from roop.face_analyser import get_one_face, get_many_faces +from roop.face_analyser import get_one_face, get_many_faces, find_similar_face +from roop.face_reference import get_face_reference, set_face_reference, clear_face_reference from roop.typing import Face, Frame from roop.utilities import conditional_download, resolve_relative_path, is_image, is_video 
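The `find_similar_face` helper imported here (added to `roop/face_analyser.py` earlier in this diff) is what replaces the old left-most-face heuristic: each candidate face is compared to a stored reference by squared Euclidean distance between insightface's normalized embeddings. A minimal standalone sketch of that matching step, assuming face objects that expose `normed_embedding`; the `faces` guard and the explicit `distance_threshold` parameter (defaulting to the new `--similar-face-distance` value of 0.85) are illustrative additions, since the real module reads its threshold from `roop.globals`:

```python
from typing import Any, List, Optional

import numpy


def find_similar_face_sketch(faces: Optional[List[Any]], reference_face: Any, distance_threshold: float = 0.85) -> Optional[Any]:
    # Guard added for clarity: get_many_faces() may return None.
    if not faces:
        return None
    for face in faces:
        # Both faces need insightface's normalized embedding vector.
        if hasattr(face, 'normed_embedding') and hasattr(reference_face, 'normed_embedding'):
            # Squared Euclidean distance between the two embeddings.
            distance = numpy.sum(numpy.square(face.normed_embedding - reference_face.normed_embedding))
            if distance < distance_threshold:
                return face
    return None
```
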
@@ -25,6 +26,12 @@ def get_face_swapper() -> Any: return FACE_SWAPPER +def clear_face_swapper() -> None: + global FACE_SWAPPER + + FACE_SWAPPER = None + + def pre_check() -> bool: download_directory_path = resolve_relative_path('../models') conditional_download(download_directory_path, ['https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx']) @@ -45,23 +52,22 @@ def pre_start() -> bool: def post_process() -> None: - global FACE_SWAPPER - - FACE_SWAPPER = None + clear_face_swapper() + clear_face_reference() def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame: return get_face_swapper().get(temp_frame, target_face, source_face, paste_back=True) -def process_frame(source_face: Face, temp_frame: Frame) -> Frame: +def process_frame(source_face: Face, reference_face: Face, temp_frame: Frame) -> Frame: if roop.globals.many_faces: many_faces = get_many_faces(temp_frame) if many_faces: for target_face in many_faces: temp_frame = swap_face(source_face, target_face, temp_frame) else: - target_face = get_one_face(temp_frame) + target_face = find_similar_face(temp_frame, reference_face) if target_face: temp_frame = swap_face(source_face, target_face, temp_frame) return temp_frame @@ -69,9 +75,10 @@ def process_frame(source_face: Face, temp_frame: Frame) -> Frame: def process_frames(source_path: str, temp_frame_paths: List[str], update: Callable[[], None]) -> None: source_face = get_one_face(cv2.imread(source_path)) + reference_face = get_face_reference() for temp_frame_path in temp_frame_paths: temp_frame = cv2.imread(temp_frame_path) - result = process_frame(source_face, temp_frame) + result = process_frame(source_face, reference_face, temp_frame) cv2.imwrite(temp_frame_path, result) if update: update() @@ -80,9 +87,14 @@ def process_frames(source_path: str, temp_frame_paths: List[str], update: Callab def process_image(source_path: str, target_path: str, output_path: str) -> None: source_face = get_one_face(cv2.imread(source_path)) target_frame = cv2.imread(target_path) - result = process_frame(source_face, target_frame) + reference_face = get_one_face(target_frame, roop.globals.reference_face_position) + result = process_frame(source_face, reference_face, target_frame) cv2.imwrite(output_path, result) def process_video(source_path: str, temp_frame_paths: List[str]) -> None: + if not get_face_reference(): + reference_frame = cv2.imread(temp_frame_paths[roop.globals.reference_frame_number]) + reference_face = get_one_face(reference_frame, roop.globals.reference_face_position) + set_face_reference(reference_face) roop.processors.frame.core.process_video(source_path, temp_frame_paths, process_frames) diff --git a/roop/ui.json b/roop/ui.json index 493099197..cf63a226f 100644 --- a/roop/ui.json +++ b/roop/ui.json @@ -152,6 +152,9 @@ "weight": "normal" } }, + "RoopDropArea": { + "fg_color": ["gray90", "gray13"] + }, "RoopDonate": { "text_color": ["#3a7ebf", "gray60"] } diff --git a/roop/ui.py b/roop/ui.py index ba693dac1..67ec32a52 100644 --- a/roop/ui.py +++ b/roop/ui.py @@ -1,7 +1,9 @@ import os +import sys import webbrowser import customtkinter as ctk -from typing import Callable, Tuple +from tkinterdnd2 import TkinterDnD, DND_ALL +from typing import Any, Callable, Tuple, Optional import cv2 from PIL import Image, ImageOps @@ -9,7 +11,8 @@ import roop.metadata from roop.face_analyser import get_one_face from roop.capturer import get_video_frame, get_video_frame_total -from roop.predicter import predict_frame +from roop.face_reference import get_face_reference, 
set_face_reference, clear_face_reference +from roop.predictor import predict_frame, clear_predictor from roop.processors.frame.core import get_frame_processors_modules from roop.utilities import is_image, is_video, resolve_relative_path @@ -32,6 +35,13 @@ status_label = None +# todo: remove by native support -> https://github.com/TomSchimansky/CustomTkinter/issues/934 +class CTk(ctk.CTk, TkinterDnD.DnDWrapper): + def __init__(self, *args: Any, **kwargs: Any) -> None: + super().__init__(*args, **kwargs) + self.TkdndVersion = TkinterDnD._require(self) + + def init(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.CTk: global ROOT, PREVIEW @@ -48,17 +58,25 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C ctk.set_appearance_mode('system') ctk.set_default_color_theme(resolve_relative_path('ui.json')) - root = ctk.CTk() + root = CTk() root.minsize(ROOT_WIDTH, ROOT_HEIGHT) root.title(f'{roop.metadata.name} {roop.metadata.version}') root.configure() root.protocol('WM_DELETE_WINDOW', lambda: destroy()) - source_label = ctk.CTkLabel(root, text=None) + source_label = ctk.CTkLabel(root, text=None, fg_color=ctk.ThemeManager.theme.get('RoopDropArea').get('fg_color')) source_label.place(relx=0.1, rely=0.1, relwidth=0.3, relheight=0.25) + source_label.drop_target_register(DND_ALL) + source_label.dnd_bind('<<Drop>>', lambda event: select_source_path(event.data)) + if roop.globals.source_path: + select_source_path(roop.globals.source_path) - target_label = ctk.CTkLabel(root, text=None) + target_label = ctk.CTkLabel(root, text=None, fg_color=ctk.ThemeManager.theme.get('RoopDropArea').get('fg_color')) target_label.place(relx=0.6, rely=0.1, relwidth=0.3, relheight=0.25) + target_label.drop_target_register(DND_ALL) + target_label.dnd_bind('<<Drop>>', lambda event: select_target_path(event.data)) + if roop.globals.target_path: + select_target_path(roop.globals.target_path) source_button = ctk.CTkButton(root, text='Select a face', cursor='hand2', command=lambda: select_source_path()) source_button.place(relx=0.1, rely=0.4, relwidth=0.3, relheight=0.1) @@ -67,16 +85,16 @@ def create_root(start: Callable[[], None], destroy: Callable[[], None]) -> ctk.C target_button.place(relx=0.6, rely=0.4, relwidth=0.3, relheight=0.1) keep_fps_value = ctk.BooleanVar(value=roop.globals.keep_fps) - keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps)) + keep_fps_checkbox = ctk.CTkSwitch(root, text='Keep target fps', variable=keep_fps_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_fps', not roop.globals.keep_fps)) keep_fps_checkbox.place(relx=0.1, rely=0.6) keep_frames_value = ctk.BooleanVar(value=roop.globals.keep_frames) - keep_frames_switch = ctk.CTkSwitch(root, text='Keep frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get())) + keep_frames_switch = ctk.CTkSwitch(root, text='Keep temporary frames', variable=keep_frames_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_frames', keep_frames_value.get())) keep_frames_switch.place(relx=0.1, rely=0.65) - keep_audio_value = ctk.BooleanVar(value=roop.globals.keep_audio) - keep_audio_switch = ctk.CTkSwitch(root, text='Keep audio', variable=keep_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'keep_audio', keep_audio_value.get())) - keep_audio_switch.place(relx=0.6, rely=0.6) + skip_audio_value =
ctk.BooleanVar(value=roop.globals.skip_audio) + skip_audio_switch = ctk.CTkSwitch(root, text='Skip target audio', variable=skip_audio_value, cursor='hand2', command=lambda: setattr(roop.globals, 'skip_audio', skip_audio_value.get())) + skip_audio_switch.place(relx=0.6, rely=0.6) many_faces_value = ctk.BooleanVar(value=roop.globals.many_faces) many_faces_switch = ctk.CTkSwitch(root, text='Many faces', variable=many_faces_value, cursor='hand2', command=lambda: setattr(roop.globals, 'many_faces', many_faces_value.get())) @@ -107,7 +125,6 @@ def create_preview(parent: ctk.CTkToplevel) -> ctk.CTkToplevel: preview = ctk.CTkToplevel(parent) preview.withdraw() - preview.title('Preview') preview.configure() preview.protocol('WM_DELETE_WINDOW', lambda: toggle_preview()) preview.resizable(width=False, height=False) @@ -117,6 +134,8 @@ preview_slider = ctk.CTkSlider(preview, from_=0, to=0, command=lambda frame_value: update_preview(frame_value)) + preview.bind('<Up>', lambda event: update_face_reference(1)) + preview.bind('<Down>', lambda event: update_face_reference(-1)) return preview @@ -125,13 +144,15 @@ def update_status(text: str) -> None: ROOT.update() -def select_source_path() -> None: +def select_source_path(source_path: Optional[str] = None) -> None: global RECENT_DIRECTORY_SOURCE - PREVIEW.withdraw() - source_path = ctk.filedialog.askopenfilename(title='select an source image', initialdir=RECENT_DIRECTORY_SOURCE) + if PREVIEW: + PREVIEW.withdraw() + if source_path is None: + source_path = ctk.filedialog.askopenfilename(title='select an source image', initialdir=RECENT_DIRECTORY_SOURCE) if is_image(source_path): - roop.globals.source_path = source_path + roop.globals.source_path = source_path # type: ignore RECENT_DIRECTORY_SOURCE = os.path.dirname(roop.globals.source_path) image = render_image_preview(roop.globals.source_path, (200, 200)) source_label.configure(image=image) @@ -140,18 +161,21 @@ def select_source_path() -> None: source_label.configure(image=None) -def select_target_path() -> None: +def select_target_path(target_path: Optional[str] = None) -> None: global RECENT_DIRECTORY_TARGET - PREVIEW.withdraw() - target_path = ctk.filedialog.askopenfilename(title='select an target image or video', initialdir=RECENT_DIRECTORY_TARGET) + if PREVIEW: + PREVIEW.withdraw() + clear_face_reference() + if target_path is None: + target_path = ctk.filedialog.askopenfilename(title='select an target image or video', initialdir=RECENT_DIRECTORY_TARGET) if is_image(target_path): - roop.globals.target_path = target_path + roop.globals.target_path = target_path # type: ignore RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path) image = render_image_preview(roop.globals.target_path, (200, 200)) target_label.configure(image=image) elif is_video(target_path): - roop.globals.target_path = target_path + roop.globals.target_path = target_path # type: ignore RECENT_DIRECTORY_TARGET = os.path.dirname(roop.globals.target_path) video_frame = render_video_preview(target_path, (200, 200)) target_label.configure(image=video_frame) @@ -198,34 +222,64 @@ def render_video_preview(video_path: str, size: Tuple[int, int], frame_number: i def toggle_preview() -> None: if PREVIEW.state() == 'normal': + PREVIEW.unbind('<Right>') + PREVIEW.unbind('<Left>') PREVIEW.withdraw() + clear_predictor() elif roop.globals.source_path and roop.globals.target_path: init_preview() - update_preview() + update_preview(roop.globals.reference_frame_number) PREVIEW.deiconify() def
init_preview() -> None: + PREVIEW.title('Preview [ ↕ Reference face ]') if is_image(roop.globals.target_path): preview_slider.pack_forget() if is_video(roop.globals.target_path): video_frame_total = get_video_frame_total(roop.globals.target_path) + if video_frame_total > 0: + PREVIEW.title('Preview [ ↕ Reference face ] [ ↔ Frame number ]') + PREVIEW.bind('<Right>', lambda event: update_frame(int(video_frame_total / 20))) + PREVIEW.bind('<Left>', lambda event: update_frame(int(video_frame_total / -20))) preview_slider.configure(to=video_frame_total) preview_slider.pack(fill='x') - preview_slider.set(0) + preview_slider.set(roop.globals.reference_frame_number) def update_preview(frame_number: int = 0) -> None: if roop.globals.source_path and roop.globals.target_path: temp_frame = get_video_frame(roop.globals.target_path, frame_number) if predict_frame(temp_frame): - quit() + sys.exit() + source_face = get_one_face(cv2.imread(roop.globals.source_path)) + if not get_face_reference(): + reference_frame = get_video_frame(roop.globals.target_path, roop.globals.reference_frame_number) + reference_face = get_one_face(reference_frame, roop.globals.reference_face_position) + set_face_reference(reference_face) + else: + reference_face = get_face_reference() for frame_processor in get_frame_processors_modules(roop.globals.frame_processors): temp_frame = frame_processor.process_frame( - get_one_face(cv2.imread(roop.globals.source_path)), + source_face, + reference_face, temp_frame ) image = Image.fromarray(cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB)) image = ImageOps.contain(image, (PREVIEW_MAX_WIDTH, PREVIEW_MAX_HEIGHT), Image.LANCZOS) image = ctk.CTkImage(image, size=image.size) preview_label.configure(image=image) + + +def update_face_reference(steps: int) -> None: + clear_face_reference() + reference_frame_number = preview_slider.get() + roop.globals.reference_face_position += steps # type: ignore + roop.globals.reference_frame_number = reference_frame_number + update_preview(reference_frame_number) + + +def update_frame(steps: int) -> None: + frame_number = preview_slider.get() + steps + preview_slider.set(frame_number) + update_preview(preview_slider.get()) diff --git a/roop/utilities.py b/roop/utilities.py index 90c8d981f..c84eeb600 100644 --- a/roop/utilities.py +++ b/roop/utilities.py @@ -7,7 +7,7 @@ import subprocess import urllib from pathlib import Path -from typing import List, Any +from typing import List, Optional from tqdm import tqdm import roop.globals @@ -39,15 +39,15 @@ def detect_fps(target_path: str) -> float: return numerator / denominator except Exception: pass - return 30.0 + return 30 -def extract_frames(target_path: str) -> None: +def extract_frames(target_path: str, fps: float = 30) -> None: temp_directory_path = get_temp_directory_path(target_path) - run_ffmpeg(['-i', target_path, '-pix_fmt', 'rgb24', os.path.join(temp_directory_path, '%04d.png')]) + run_ffmpeg(['-i', target_path, '-pix_fmt', 'rgb24', '-vf', 'fps=' + str(fps), os.path.join(temp_directory_path, '%04d.png')]) -def create_video(target_path: str, fps: float = 30.0) -> None: +def create_video(target_path: str, fps: float = 30) -> None: temp_output_path = get_temp_output_path(target_path) temp_directory_path = get_temp_directory_path(target_path) run_ffmpeg(['-r', str(fps), '-i', os.path.join(temp_directory_path, '%04d.png'), '-c:v', roop.globals.video_encoder, '-crf', str(roop.globals.video_quality), '-pix_fmt', 'yuv420p', '-vf', 'colorspace=bt709:iall=bt601-6-625:fast=1', '-y', temp_output_path]) @@ -76,8 +76,8 @@ def
get_temp_output_path(target_path: str) -> str: return os.path.join(temp_directory_path, TEMP_FILE) -def normalize_output_path(source_path: str, target_path: str, output_path: str) -> Any: - if source_path and target_path: +def normalize_output_path(source_path: str, target_path: str, output_path: str) -> Optional[str]: + if source_path and target_path and output_path: source_name, _ = os.path.splitext(os.path.basename(source_path)) target_name, target_extension = os.path.splitext(os.path.basename(target_path)) if os.path.isdir(output_path): @@ -131,10 +131,10 @@ def conditional_download(download_directory_path: str, urls: List[str]) -> None: for url in urls: download_file_path = os.path.join(download_directory_path, os.path.basename(url)) if not os.path.exists(download_file_path): - request = urllib.request.urlopen(url) # type: ignore[attr-defined] + request = urllib.request.urlopen(url) # type: ignore[attr-defined] total = int(request.headers.get('Content-Length', 0)) with tqdm(total=total, desc='Downloading', unit='B', unit_scale=True, unit_divisor=1024) as progress: - urllib.request.urlretrieve(url, download_file_path, reporthook=lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined] + urllib.request.urlretrieve(url, download_file_path, reporthook=lambda count, block_size, total_size: progress.update(block_size)) # type: ignore[attr-defined] def resolve_relative_path(path: str) -> str:
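
One behavioural consequence of the `roop/utilities.py` hunks above is worth spelling out: `extract_frames` now resamples the target to a fixed frame rate via ffmpeg's `fps` filter (30 unless `--keep-fps` passes the detected rate through), so the temp frames always match the rate later fed to `create_video`. A rough standalone equivalent of the new extraction call, assuming `ffmpeg` is on the PATH and the temp directory already exists; roop actually routes this through its `run_ffmpeg` wrapper, which also injects logging flags:

```python
import os
import subprocess


def extract_frames_sketch(target_path: str, temp_directory_path: str, fps: float = 30) -> None:
    # Mirrors the new run_ffmpeg arguments: decode to rgb24, resample with
    # the fps filter, then write numbered PNG frames for the processors.
    subprocess.run([
        'ffmpeg', '-i', target_path,
        '-pix_fmt', 'rgb24',
        '-vf', f'fps={fps}',
        os.path.join(temp_directory_path, '%04d.png')
    ], check=True)
```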