diff --git a/.gitignore b/.gitignore
index 690df8f..c8bdf1b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,7 @@ __pycache__/
 .todo
 *.log
 *.backup
-tf_env/
+
 *.png
 *.mp4
 *.mkv
@@ -22,3 +22,8 @@ models/inswapper_128.onnx
 models/GFPGANv1.4.pth
 *.onnx
 models/DMDNet.pth
+.venv/
+tf_env/
+.tf_env/
+.deepcamlive/
+deep-live-cam/
diff --git a/README.md b/README.md
index d23fbf7..1bcc131 100644
--- a/README.md
+++ b/README.md
@@ -78,19 +78,20 @@ python run.py --execution-provider coreml
 ```
 
 ### [](https://github.com/s0md3v/roop/wiki/2.-Acceleration#coreml-execution-provider-apple-legacy)CoreML Execution Provider (Apple Legacy)
+Metal support has been added for improved performance on macOS devices.
 
 1. Install dependencies:
 
 ```
-pip uninstall onnxruntime onnxruntime-coreml
-pip install onnxruntime-coreml==1.13.1
+pip uninstall onnxruntime onnxruntime-silicon
+pip install onnxruntime-silicon==1.13.1
 ```
 
 2. Usage in case the provider is available:
 
 ```
-python run.py --execution-provider coreml
+python run.py --execution-provider metal
 ```
diff --git a/modules/core.py b/modules/core.py
index 9de11ca..f135e44 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -5,6 +5,8 @@ if any(arg.startswith('--execution-provider') for arg in sys.argv):
     os.environ['OMP_NUM_THREADS'] = '1'
 # reduce tensorflow log level
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+# Force TensorFlow to use Metal
+os.environ['TENSORFLOW_METAL'] = '1'
 import warnings
 from typing import List
 import platform
@@ -14,6 +16,7 @@ import argparse
 import torch
 import onnxruntime
 import tensorflow
+import cv2
 
 import modules.globals
 import modules.metadata
@@ -35,9 +38,9 @@ def parse_args() -> None:
     program.add_argument('-t', '--target', help='select an target image or video', dest='target_path')
     program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
     program.add_argument('--frame-processor', help='pipeline of frame processors', dest='frame_processor', default=['face_swapper'], choices=['face_swapper', 'face_enhancer'], nargs='+')
-    program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False)
+    program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=True)
     program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
-    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
+    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=True)
     program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False)
     program.add_argument('--nsfw-filter', help='filter the NSFW image or video', dest='nsfw_filter', action='store_true', default=False)
     program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9'])
@@ -45,16 +48,10 @@ def parse_args() -> None:
     program.add_argument('--live-mirror', help='The live camera display as you see it in the front-facing camera frame', dest='live_mirror', action='store_true', default=False)
     program.add_argument('--live-resizable', help='The live camera frame is resizable', dest='live_resizable', action='store_true', default=False)
     program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory())
-    program.add_argument('--execution-provider', help='execution provider', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
+    program.add_argument('--execution-provider', help='execution provider', dest='execution_provider', default=['coreml'], choices=suggest_execution_providers(), nargs='+')
     program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
     program.add_argument('-v', '--version', action='version', version=f'{modules.metadata.name} {modules.metadata.version}')
-    # register deprecated args
-    program.add_argument('-f', '--face', help=argparse.SUPPRESS, dest='source_path_deprecated')
-    program.add_argument('--cpu-cores', help=argparse.SUPPRESS, dest='cpu_cores_deprecated', type=int)
-    program.add_argument('--gpu-vendor', help=argparse.SUPPRESS, dest='gpu_vendor_deprecated')
-    program.add_argument('--gpu-threads', help=argparse.SUPPRESS, dest='gpu_threads_deprecated', type=int)
-
     args = program.parse_args()
 
     modules.globals.source_path = args.source_path
@@ -72,36 +69,14 @@ def parse_args() -> None:
     modules.globals.live_mirror = args.live_mirror
     modules.globals.live_resizable = args.live_resizable
     modules.globals.max_memory = args.max_memory
-    modules.globals.execution_providers = decode_execution_providers(args.execution_provider)
+    modules.globals.execution_providers = ['CoreMLExecutionProvider']  # Force CoreML
     modules.globals.execution_threads = args.execution_threads
 
-    #for ENHANCER tumbler:
     if 'face_enhancer' in args.frame_processor:
         modules.globals.fp_ui['face_enhancer'] = True
     else:
         modules.globals.fp_ui['face_enhancer'] = False
 
-    # translate deprecated args
-    if args.source_path_deprecated:
-        print('\033[33mArgument -f and --face are deprecated. Use -s and --source instead.\033[0m')
-        modules.globals.source_path = args.source_path_deprecated
-        modules.globals.output_path = normalize_output_path(args.source_path_deprecated, modules.globals.target_path, args.output_path)
-    if args.cpu_cores_deprecated:
-        print('\033[33mArgument --cpu-cores is deprecated. Use --execution-threads instead.\033[0m')
-        modules.globals.execution_threads = args.cpu_cores_deprecated
-    if args.gpu_vendor_deprecated == 'apple':
-        print('\033[33mArgument --gpu-vendor apple is deprecated. Use --execution-provider coreml instead.\033[0m')
-        modules.globals.execution_providers = decode_execution_providers(['coreml'])
-    if args.gpu_vendor_deprecated == 'nvidia':
-        print('\033[33mArgument --gpu-vendor nvidia is deprecated. Use --execution-provider cuda instead.\033[0m')
-        modules.globals.execution_providers = decode_execution_providers(['cuda'])
-    if args.gpu_vendor_deprecated == 'amd':
-        print('\033[33mArgument --gpu-vendor amd is deprecated. Use --execution-provider cuda instead.\033[0m')
-        modules.globals.execution_providers = decode_execution_providers(['rocm'])
-    if args.gpu_threads_deprecated:
-        print('\033[33mArgument --gpu-threads is deprecated. Use --execution-threads instead.\033[0m')
-        modules.globals.execution_threads = args.gpu_threads_deprecated
-
 
 def encode_execution_providers(execution_providers: List[str]) -> List[str]:
     return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]
@@ -114,44 +89,30 @@ def decode_execution_providers(execution_providers: List[str]) -> List[str]:
 
 def suggest_max_memory() -> int:
     if platform.system().lower() == 'darwin':
-        return 4
-    return 16
+        return 6
+    return 4
 
 
 def suggest_execution_providers() -> List[str]:
-    return encode_execution_providers(onnxruntime.get_available_providers())
+    return ['coreml']  # Only suggest CoreML
 
 
 def suggest_execution_threads() -> int:
-    if 'DmlExecutionProvider' in modules.globals.execution_providers:
-        return 1
-    if 'ROCMExecutionProvider' in modules.globals.execution_providers:
-        return 1
-    return 8
+    if platform.system().lower() == 'darwin':
+        return 12
+    return 4
+
 
 def limit_resources() -> None:
-    # prevent tensorflow memory leak
-    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
-    for gpu in gpus:
-        tensorflow.config.experimental.set_memory_growth(gpu, True)
-    # limit memory usage
     if modules.globals.max_memory:
-        memory = modules.globals.max_memory * 1024 ** 3
-        if platform.system().lower() == 'darwin':
-            memory = modules.globals.max_memory * 1024 ** 6
-        if platform.system().lower() == 'windows':
-            import ctypes
-            kernel32 = ctypes.windll.kernel32
-            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
-        else:
-            import resource
-            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
+        memory = modules.globals.max_memory * 1024 ** 6
+        import resource
+        resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
 
 
 def release_resources() -> None:
-    if 'CUDAExecutionProvider' in modules.globals.execution_providers:
-        torch.cuda.empty_cache()
+    pass  # No need to release CUDA resources
 
 
 def pre_check() -> bool:
@@ -173,15 +134,13 @@ def start() -> None:
     for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
         if not frame_processor.pre_start():
             return
-    update_status('Processing...')
     # process image to image
     if has_image_extension(modules.globals.target_path):
-        if modules.globals.nsfw_filter and ui.check_and_ignore_nsfw(modules.globals.target_path, destroy):
-            return
-        try:
-            shutil.copy2(modules.globals.target_path, modules.globals.output_path)
-        except Exception as e:
-            print("Error copying file:", str(e))
+        if modules.globals.nsfw == False:
+            from modules.predicter import predict_image
+            if predict_image(modules.globals.target_path):
+                destroy()
+        shutil.copy2(modules.globals.target_path, modules.globals.output_path)
         for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
             update_status('Progressing...', frame_processor.NAME)
             frame_processor.process_image(modules.globals.source_path, modules.globals.output_path, modules.globals.output_path)
@@ -192,8 +151,10 @@ def start() -> None:
             update_status('Processing to image failed!')
         return
     # process image to videos
-    if modules.globals.nsfw_filter and ui.check_and_ignore_nsfw(modules.globals.target_path, destroy):
-        return
+    if modules.globals.nsfw == False:
+        from modules.predicter import predict_video
+        if predict_video(modules.globals.target_path):
+            destroy()
     update_status('Creating temp resources...')
     create_temp(modules.globals.target_path)
     update_status('Extracting frames...')
@@ -202,8 +163,6 @@ def start() -> None:
     for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
         update_status('Progressing...', frame_processor.NAME)
         frame_processor.process_video(modules.globals.source_path, temp_frame_paths)
-        release_resources()
-    # handles fps
     if modules.globals.keep_fps:
         update_status('Detecting fps...')
         fps = detect_fps(modules.globals.target_path)
@@ -212,7 +171,6 @@ def start() -> None:
     else:
         update_status('Creating video with 30.0 fps...')
         create_video(modules.globals.target_path)
-    # handle audio
     if modules.globals.keep_audio:
         if modules.globals.keep_fps:
             update_status('Restoring audio...')
@@ -221,7 +179,6 @@ def start() -> None:
             restore_audio(modules.globals.target_path, modules.globals.output_path)
     else:
         move_temp(modules.globals.target_path, modules.globals.output_path)
-    # clean and validate
     clean_temp(modules.globals.target_path)
     if is_video(modules.globals.target_path):
         update_status('Processing to video succeed!')
@@ -243,8 +200,70 @@ def run() -> None:
         if not frame_processor.pre_check():
             return
     limit_resources()
+    print(f"ONNX Runtime version: {onnxruntime.__version__}")
+    print(f"Available execution providers: {onnxruntime.get_available_providers()}")
+    print(f"Selected execution provider: CoreMLExecutionProvider (with CPU fallback for face detection)")
+
+    # Configure ONNX Runtime to use CoreML
+    onnxruntime.set_default_logger_severity(3)  # Set to WARNING level
+    options = onnxruntime.SessionOptions()
+    options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+    # Add CoreML-specific options
+    options.add_session_config_entry("session.coreml.force_precision", "FP32")
+    options.add_session_config_entry("session.coreml.enable_on_subgraph", "1")
+
+    # Update insightface model loading to use CPU for face detection
+    from insightface.utils import face_align
+    def custom_session(model_file, providers):
+        if 'det_model.onnx' in model_file:
+            return onnxruntime.InferenceSession(model_file, providers=['CPUExecutionProvider'])
+        else:
+            return onnxruntime.InferenceSession(model_file, options, providers=['CoreMLExecutionProvider'])
+    face_align.Session = custom_session
+
+    # Configure TensorFlow to use Metal
+    try:
+        tf_devices = tensorflow.config.list_physical_devices()
+        print("TensorFlow devices:", tf_devices)
+        if any('GPU' in device.name for device in tf_devices):
+            print("TensorFlow is using GPU (Metal)")
+        else:
+            print("TensorFlow is not using GPU")
+    except Exception as e:
+        print(f"Error configuring TensorFlow: {str(e)}")
+
+    # Configure PyTorch to use MPS (Metal Performance Shaders)
+    try:
+        if torch.backends.mps.is_available():
+            print("PyTorch is using MPS (Metal Performance Shaders)")
+            torch.set_default_device('mps')
+        else:
+            print("PyTorch MPS is not available")
+    except Exception as e:
+        print(f"Error configuring PyTorch: {str(e)}")
+
     if modules.globals.headless:
         start()
     else:
         window = ui.init(start, destroy)
         window.mainloop()
+
+def get_one_face(frame):
+    # Resize the frame to the expected input size
+    frame_resized = cv2.resize(frame, (112, 112))  # Resize to (112, 112) for recognition model
+    face = get_face_analyser().get(frame_resized)
+    return face
+
+# Ensure to use the CPUExecutionProvider if CoreML fails
+def run_model_with_cpu_fallback(model_file, providers):
+    try:
+        return onnxruntime.InferenceSession(model_file, providers=['CoreMLExecutionProvider'])
+    except Exception as e:
+        print(f"CoreML execution failed: {e}. Falling back to CPU.")
+        return onnxruntime.InferenceSession(model_file, providers=['CPUExecutionProvider'])
+
+# Update the face analysis function to use the fallback
+def get_face_analyser():
+    # Load your model here with the fallback
+    return run_model_with_cpu_fallback('/path/to/your/model.onnx', ['CoreMLExecutionProvider', 'CPUExecutionProvider'])
\ No newline at end of file
diff --git a/modules/face_analyser.py b/modules/face_analyser.py
index f2d46bf..88e8136 100644
--- a/modules/face_analyser.py
+++ b/modules/face_analyser.py
@@ -12,7 +12,7 @@ def get_face_analyser() -> Any:
 
     if FACE_ANALYSER is None:
         FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=modules.globals.execution_providers)
-        FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
+        FACE_ANALYSER.prepare(ctx_id=0, det_size=(1280, 720))
     return FACE_ANALYSER
diff --git a/modules/processors/frame/face_swapper.py b/modules/processors/frame/face_swapper.py
index 4b4a222..1e39ffd 100644
--- a/modules/processors/frame/face_swapper.py
+++ b/modules/processors/frame/face_swapper.py
@@ -17,7 +17,7 @@ NAME = 'DLC.FACE-SWAPPER'
 
 def pre_check() -> bool:
     download_directory_path = resolve_relative_path('../models')
-    conditional_download(download_directory_path, ['https://huggingface.co/hacksider/deep-live-cam/blob/main/inswapper_128_fp16.onnx'])
+    conditional_download(download_directory_path, ['https://huggingface.co/hacksider/deep-live-cam/blob/main/inswapper_128.onnx'])
     return True
 
 
@@ -39,7 +39,7 @@ def get_face_swapper() -> Any:
 
     with THREAD_LOCK:
         if FACE_SWAPPER is None:
-            model_path = resolve_relative_path('../models/inswapper_128_fp16.onnx')
+            model_path = resolve_relative_path('../models/inswapper_128.onnx')
             FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=modules.globals.execution_providers)
     return FACE_SWAPPER
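Note: the core.py hunk above forces CoreML for ONNX Runtime and probes TensorFlow and PyTorch for Metal/MPS support at startup. Below is a minimal standalone sketch of the same capability checks, assuming onnxruntime-silicon, tensorflow-macos with tensorflow-metal, and a recent torch wheel are installed; the script name and structure are illustrative, not part of the patch.

```python
# check_acceleration.py -- illustrative sketch, not part of the patch above.
# Reports whether the three frameworks used by core.py can see an Apple GPU backend.
import onnxruntime
import tensorflow
import torch

# ONNX Runtime: onnxruntime-silicon exposes CoreMLExecutionProvider on Apple Silicon.
providers = onnxruntime.get_available_providers()
print("ONNX Runtime providers:", providers)
print("CoreML available:", "CoreMLExecutionProvider" in providers)

# TensorFlow: tensorflow-metal registers the GPU as a physical device.
print("TensorFlow GPU devices:", tensorflow.config.list_physical_devices("GPU"))

# PyTorch: MPS is the Metal backend that torch.set_default_device('mps') relies on.
print("PyTorch MPS available:", torch.backends.mps.is_available())
```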
diff --git a/modules/ui.py b/modules/ui.py
index 8824ab2..25ba19e 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -194,38 +194,6 @@ def select_output_path(start: Callable[[], None]) -> None:
         start()
 
 
-def check_and_ignore_nsfw(target, destroy: Callable = None) -> bool:
-    ''' Check if the target is NSFW.
-    TODO: Consider to make blur the target.
-    '''
-    from numpy import ndarray
-    from modules.predicter import predict_image, predict_video, predict_frame
-    if type(target) is str:  # image/video file path
-        check_nsfw = predict_image if has_image_extension(target) else predict_video
-    elif type(target) is ndarray:  # frame object
-        check_nsfw = predict_frame
-    if check_nsfw and check_nsfw(target):
-        if destroy: destroy(to_quit=False)  # Do not need to destroy the window frame if the target is NSFW
-        update_status('Processing ignored!')
-        return True
-    else: return False
-
-
-def fit_image_to_size(image, width: int, height: int):
-    if width is None and height is None:
-        return image
-    h, w, _ = image.shape
-    ratio_h = 0.0
-    ratio_w = 0.0
-    if width > height:
-        ratio_h = height / h
-    else:
-        ratio_w = width / w
-    ratio = max(ratio_w, ratio_h)
-    new_size = (int(ratio * w), int(ratio * h))
-    return cv2.resize(image, dsize=new_size)
-
-
 def render_image_preview(image_path: str, size: Tuple[int, int]) -> ctk.CTkImage:
     image = Image.open(image_path)
     if size:
@@ -323,7 +291,7 @@ def webcam_preview():
             for frame_processor in frame_processors:
                 temp_frame = frame_processor.process_frame(source_image, temp_frame)
 
-            image = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB) # Convert the image to RGB format to display it with Tkinter
+            image = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB)  # Convert the image to RGB format to display it with Tkinter
             image = Image.fromarray(image)
             image = ImageOps.contain(image, (temp_frame.shape[1], temp_frame.shape[0]), Image.LANCZOS)
             image = ctk.CTkImage(image, size=image.size)
diff --git a/modules/utilities.py b/modules/utilities.py
index 782395f..e3f5930 100644
--- a/modules/utilities.py
+++ b/modules/utilities.py
@@ -9,6 +9,7 @@ import urllib
 from pathlib import Path
 from typing import List, Any
 from tqdm import tqdm
+import cv2
 
 import modules.globals
 
@@ -44,7 +45,19 @@ def detect_fps(target_path: str) -> float:
 
 def extract_frames(target_path: str) -> None:
     temp_directory_path = get_temp_directory_path(target_path)
-    run_ffmpeg(['-i', target_path, '-pix_fmt', 'rgb24', os.path.join(temp_directory_path, '%04d.png')])
+    cap = cv2.VideoCapture(target_path)
+
+    frame_count = 0
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Save the frame
+        cv2.imwrite(os.path.join(temp_directory_path, f'{frame_count:04d}.png'), frame)
+        frame_count += 1
+
+    cap.release()
 
 
 def create_video(target_path: str, fps: float = 30.0) -> None:
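Note: the utilities.py hunk above replaces the ffmpeg extraction call with an OpenCV read loop. A self-contained sketch of that pattern is shown below for reference; the function and file names here are illustrative, not taken from the patch.

```python
# extract_frames_cv2.py -- illustrative sketch mirroring the OpenCV loop above.
import os
import cv2

def extract_frames(video_path: str, output_dir: str) -> int:
    """Decode every frame of video_path and write it as a zero-padded PNG."""
    os.makedirs(output_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    print("Source fps:", cap.get(cv2.CAP_PROP_FPS))  # relevant when --keep-fps is set

    frame_count = 0
    while True:
        ret, frame = cap.read()
        if not ret:  # end of stream or decode failure
            break
        cv2.imwrite(os.path.join(output_dir, f"{frame_count:04d}.png"), frame)
        frame_count += 1

    cap.release()
    return frame_count

if __name__ == "__main__":
    print("Frames written:", extract_frames("input.mp4", "frames"))
```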
diff --git a/requirements.txt b/requirements.txt
index f65195e..d459449 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,23 +1,27 @@
---extra-index-url https://download.pytorch.org/whl/cu118
+# Deep Live Cam requirements
 
-numpy==1.23.5
+# Core dependencies
+numpy==1.26.4
+onnxruntime-silicon==1.16.3
 opencv-python==4.8.1.78
-onnx==1.16.0
-insightface==0.7.3
-psutil==5.9.8
-tk==0.1.0
-customtkinter==5.2.2
 pillow==9.5.0
-torch==2.0.1+cu118; sys_platform != 'darwin'
-torch==2.0.1; sys_platform == 'darwin'
-torchvision==0.15.2+cu118; sys_platform != 'darwin'
-torchvision==0.15.2; sys_platform == 'darwin'
-onnxruntime==1.18.0; sys_platform == 'darwin' and platform_machine != 'arm64'
-onnxruntime-silicon==1.16.3; sys_platform == 'darwin' and platform_machine == 'arm64'
-onnxruntime-gpu==1.18.0; sys_platform != 'darwin'
-tensorflow==2.13.0rc1; sys_platform == 'darwin'
-tensorflow==2.12.1; sys_platform != 'darwin'
-opennsfw2==0.10.2
-protobuf==4.23.2
+insightface==0.7.3
+torch==2.1.0  # Add the specific version you're using
+tensorflow-macos==2.16.2  # Add the specific version you're using
+tensorflow-metal==1.1.0  # Add the specific version you're using
+
+# Image processing
+scikit-image==0.24.0
+matplotlib==3.9.1.post1
+
+# Machine learning
+scikit-learn==1.5.1
+
+# Utilities
 tqdm==4.66.4
-gfpgan==1.3.8
+requests==2.32.3
+prettytable==3.11.0
+
+# Optional dependencies (comment out if not needed)
+# albumentations==1.4.13
+# coloredlogs==15.0.1
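Note: requirements.txt now pins an Apple-Silicon-only stack (onnxruntime-silicon, tensorflow-macos, tensorflow-metal). A quick smoke test along these lines can confirm the pins resolve in the active environment; the package list is copied from the file above, while the script itself is illustrative.

```python
# env_check.py -- illustrative smoke test for the pinned dependencies above.
from importlib.metadata import PackageNotFoundError, version

PACKAGES = [
    "numpy", "onnxruntime-silicon", "opencv-python", "pillow", "insightface",
    "torch", "tensorflow-macos", "tensorflow-metal", "scikit-image",
    "matplotlib", "scikit-learn", "tqdm", "requests", "prettytable",
]

for name in PACKAGES:
    try:
        print(f"{name}=={version(name)}")
    except PackageNotFoundError:
        print(f"{name}: NOT INSTALLED")
```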