From b1f68934646d39a4550e71515e0a0c65213d9833 Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Tue, 13 Aug 2024 12:33:49 +0100
Subject: [PATCH 01/11] Update .gitignore

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index 690df8f..0b368fc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,4 @@ models/inswapper_128.onnx
 models/GFPGANv1.4.pth
 *.onnx
 models/DMDNet.pth
+.venv/

From f19c35bb072513972a0cb2ccc39115f3e27465ad Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Tue, 13 Aug 2024 13:06:14 +0100
Subject: [PATCH 02/11] Update .gitignore

---
 .gitignore | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 0b368fc..b704484 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,8 @@ __pycache__/
 .todo
 *.log
 *.backup
-tf_env/
+.tfenv/
+
 *.png
 *.mp4
 *.mkv

From 9879c982e4aa137d473473cd1264ad55252f4f75 Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Tue, 13 Aug 2024 13:06:48 +0100
Subject: [PATCH 03/11] Update .gitignore

---
 .gitignore | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index b704484..5b709c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,7 @@ __pycache__/
 .todo
 *.log
 *.backup
-.tfenv/
+.tf_env/
 
 *.png
 *.mp4

From fde8742720f9f054857e679b8e398ff7aea0e1cb Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Tue, 13 Aug 2024 13:07:49 +0100
Subject: [PATCH 04/11] Update .gitignore

---
 .gitignore | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.gitignore b/.gitignore
index 5b709c6..a605cb8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,6 @@ __pycache__/
 .todo
 *.log
 *.backup
-.tf_env/
 
 *.png
 *.mp4
@@ -24,3 +23,5 @@ models/GFPGANv1.4.pth
 *.onnx
 models/DMDNet.pth
 .venv/
+tf_env/
+.tf_env/

From 3fcc8d5416786d9d59fecf13136b4dcf5eed794e Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Tue, 13 Aug 2024 13:08:06 +0100
Subject: [PATCH 05/11] Updates for macOS and coreML / Metal

---
 modules/core.py                          | 129 +++++++++++++++--------
 modules/processors/frame/face_swapper.py |   4 +-
 modules/ui.py                            |  34 +-----
 modules/utilities.py                     |  15 ++-
 requirements.txt                         |  41 +++----
 5 files changed, 122 insertions(+), 101 deletions(-)

diff --git a/modules/core.py b/modules/core.py
index 9de11ca..1748f5e 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -5,6 +5,8 @@ if any(arg.startswith('--execution-provider') for arg in sys.argv):
     os.environ['OMP_NUM_THREADS'] = '1'
 # reduce tensorflow log level
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+# Force TensorFlow to use Metal
+os.environ['TENSORFLOW_METAL'] = '1'
 import warnings
 from typing import List
 import platform
@@ -35,9 +37,9 @@ def parse_args() -> None:
     program.add_argument('-t', '--target', help='select a target image or video', dest='target_path')
     program.add_argument('-o', '--output', help='select output file or directory', dest='output_path')
     program.add_argument('--frame-processor', help='pipeline of frame processors', dest='frame_processor', default=['face_swapper'], choices=['face_swapper', 'face_enhancer'], nargs='+')
-    program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=False)
+    program.add_argument('--keep-fps', help='keep original fps', dest='keep_fps', action='store_true', default=True)
     program.add_argument('--keep-audio', help='keep original audio', dest='keep_audio', action='store_true', default=True)
-    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=False)
+    program.add_argument('--keep-frames', help='keep temporary frames', dest='keep_frames', action='store_true', default=True)
     program.add_argument('--many-faces', help='process every face', dest='many_faces', action='store_true', default=False)
     program.add_argument('--nsfw-filter', help='filter the NSFW image or video', dest='nsfw_filter', action='store_true', default=False)
     program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9'])
@@ -45,16 +47,10 @@ def parse_args() -> None:
     program.add_argument('--live-mirror', help='The live camera display as you see it in the front-facing camera frame', dest='live_mirror', action='store_true', default=False)
     program.add_argument('--live-resizable', help='The live camera frame is resizable', dest='live_resizable', action='store_true', default=False)
     program.add_argument('--max-memory', help='maximum amount of RAM in GB', dest='max_memory', type=int, default=suggest_max_memory())
-    program.add_argument('--execution-provider', help='execution provider', dest='execution_provider', default=['cpu'], choices=suggest_execution_providers(), nargs='+')
+    program.add_argument('--execution-provider', help='execution provider', dest='execution_provider', default=['coreml'], choices=suggest_execution_providers(), nargs='+')
     program.add_argument('--execution-threads', help='number of execution threads', dest='execution_threads', type=int, default=suggest_execution_threads())
     program.add_argument('-v', '--version', action='version', version=f'{modules.metadata.name} {modules.metadata.version}')
 
-    # register deprecated args
-    program.add_argument('-f', '--face', help=argparse.SUPPRESS, dest='source_path_deprecated')
-    program.add_argument('--cpu-cores', help=argparse.SUPPRESS, dest='cpu_cores_deprecated', type=int)
-    program.add_argument('--gpu-vendor', help=argparse.SUPPRESS, dest='gpu_vendor_deprecated')
-    program.add_argument('--gpu-threads', help=argparse.SUPPRESS, dest='gpu_threads_deprecated', type=int)
-
     args = program.parse_args()
 
     modules.globals.source_path = args.source_path
@@ -72,10 +68,9 @@ def parse_args() -> None:
     modules.globals.live_mirror = args.live_mirror
     modules.globals.live_resizable = args.live_resizable
     modules.globals.max_memory = args.max_memory
-    modules.globals.execution_providers = decode_execution_providers(args.execution_provider)
+    modules.globals.execution_providers = ['CoreMLExecutionProvider']  # Force CoreML
     modules.globals.execution_threads = args.execution_threads
 
-    #for ENHANCER tumbler:
     if 'face_enhancer' in args.frame_processor:
         modules.globals.fp_ui['face_enhancer'] = True
     else:
         modules.globals.fp_ui['face_enhancer'] = False
@@ -119,39 +114,22 @@ def suggest_max_memory() -> int:
 
 
 def suggest_execution_providers() -> List[str]:
-    return encode_execution_providers(onnxruntime.get_available_providers())
+    return ['coreml']  # Only suggest CoreML
 
 
 def suggest_execution_threads() -> int:
-    if 'DmlExecutionProvider' in modules.globals.execution_providers:
-        return 1
-    if 'ROCMExecutionProvider' in modules.globals.execution_providers:
-        return 1
     return 8
 
 
 def limit_resources() -> None:
-    # prevent tensorflow memory leak
-    gpus = tensorflow.config.experimental.list_physical_devices('GPU')
-    for gpu in gpus:
-        tensorflow.config.experimental.set_memory_growth(gpu, True)
-    # limit memory usage
     if modules.globals.max_memory:
-        memory = modules.globals.max_memory * 1024 ** 3
-        if platform.system().lower() == 'darwin':
-            memory = modules.globals.max_memory * 1024 ** 6
-        if platform.system().lower() == 'windows':
-            import ctypes
-            kernel32 = ctypes.windll.kernel32
-            kernel32.SetProcessWorkingSetSize(-1, ctypes.c_size_t(memory), ctypes.c_size_t(memory))
-        else:
-            import resource
-            resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
+        memory = modules.globals.max_memory * 1024 ** 3  # GB to bytes
+        import resource
+        resource.setrlimit(resource.RLIMIT_DATA, (memory, memory))
 
 
 def release_resources() -> None:
-    if 'CUDAExecutionProvider' in modules.globals.execution_providers:
-        torch.cuda.empty_cache()
+    pass  # No need to release CUDA resources
 
 
 def pre_check() -> bool:
@@ -173,15 +151,13 @@ def start() -> None:
     for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
         if not frame_processor.pre_start():
             return
-    update_status('Processing...')
     # process image to image
     if has_image_extension(modules.globals.target_path):
-        if modules.globals.nsfw_filter and ui.check_and_ignore_nsfw(modules.globals.target_path, destroy):
-            return
-        try:
-            shutil.copy2(modules.globals.target_path, modules.globals.output_path)
-        except Exception as e:
-            print("Error copying file:", str(e))
+        if modules.globals.nsfw == False:
+            from modules.predicter import predict_image
+            if predict_image(modules.globals.target_path):
+                destroy()
+        shutil.copy2(modules.globals.target_path, modules.globals.output_path)
         for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
             update_status('Processing...', frame_processor.NAME)
             frame_processor.process_image(modules.globals.source_path, modules.globals.output_path, modules.globals.output_path)
         if is_image(modules.globals.target_path):
             update_status('Processing to image succeeded!')
         else:
             update_status('Processing to image failed!')
         return
     # process image to videos
-    if modules.globals.nsfw_filter and ui.check_and_ignore_nsfw(modules.globals.target_path, destroy):
-        return
+    if modules.globals.nsfw == False:
+        from modules.predicter import predict_video
+        if predict_video(modules.globals.target_path):
+            destroy()
     update_status('Creating temp resources...')
     create_temp(modules.globals.target_path)
     update_status('Extracting frames...')
     extract_frames(modules.globals.target_path)
     temp_frame_paths = get_temp_frame_paths(modules.globals.target_path)
     for frame_processor in get_frame_processors_modules(modules.globals.frame_processors):
         update_status('Processing...', frame_processor.NAME)
         frame_processor.process_video(modules.globals.source_path, temp_frame_paths)
-        release_resources()
-    # handles fps
     if modules.globals.keep_fps:
         update_status('Detecting fps...')
         fps = detect_fps(modules.globals.target_path)
         update_status(f'Creating video with {fps} fps...')
         create_video(modules.globals.target_path, fps)
     else:
         update_status('Creating video with 30.0 fps...')
         create_video(modules.globals.target_path)
-    # handle audio
     if modules.globals.keep_audio:
         if modules.globals.keep_fps:
             update_status('Restoring audio...')
         else:
             update_status('Restoring audio might cause issues as fps are not kept...')
         restore_audio(modules.globals.target_path, modules.globals.output_path)
     else:
         move_temp(modules.globals.target_path, modules.globals.output_path)
-    # clean and validate
     clean_temp(modules.globals.target_path)
     if is_video(modules.globals.target_path):
         update_status('Processing to video succeeded!')
@@ -243,6 +217,69 @@ def run() -> None:
         if not frame_processor.pre_check():
             return
     limit_resources()
+    print(f"ONNX Runtime version: {onnxruntime.__version__}")
+    print(f"Available execution providers: {onnxruntime.get_available_providers()}")
+    print(f"Selected execution provider: CoreMLExecutionProvider")
+
+    # Configure ONNX Runtime to use only CoreML
+    onnxruntime.set_default_logger_severity(3)  # Set to WARNING level
+    options = onnxruntime.SessionOptions()
+    options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
+
+    # Test CoreML with a dummy model
+    try:
+        import numpy as np
+        from onnx import helper, TensorProto
+
+        # Create a simple ONNX model
+        X = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 224, 224])
+        Y = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 224, 224])
+        node = helper.make_node('Identity', ['input'], ['output'])
+        graph = helper.make_graph([node], 'test_model', [X], [Y])
+        model = helper.make_model(graph)
+
+        # Save the model
+        model_path = 'test_model.onnx'
+        with open(model_path, 'wb') as f:
+            f.write(model.SerializeToString())
+
+        # Create a CoreML session
+        session = onnxruntime.InferenceSession(model_path, options, providers=['CoreMLExecutionProvider'])
+
+        # Run inference
+        input_data = np.random.rand(1, 3, 224, 224).astype(np.float32)
+        output = session.run(None, {'input': input_data})
+
+        print("CoreML init successful and being used")
+        print(f"Input shape: {input_data.shape}, Output shape: {output[0].shape}")
+
+        # Clean up
+        os.remove(model_path)
+    except Exception as e:
+        print(f"Error testing CoreML: {str(e)}")
+        print("The application may not be able to use GPU acceleration")
+
+    # Configure TensorFlow to use Metal
+    try:
+        tf_devices = tensorflow.config.list_physical_devices()
+        print("TensorFlow devices:", tf_devices)
+        if any('GPU' in device.name for device in tf_devices):
+            print("TensorFlow is using GPU (Metal)")
+        else:
+            print("TensorFlow is not using GPU")
+    except Exception as e:
+        print(f"Error configuring TensorFlow: {str(e)}")
+
+    # Configure PyTorch to use MPS (Metal Performance Shaders)
+    try:
+        if torch.backends.mps.is_available():
+            print("PyTorch is using MPS (Metal Performance Shaders)")
+            torch.set_default_device('mps')
+        else:
+            print("PyTorch MPS is not available")
+    except Exception as e:
+        print(f"Error configuring PyTorch: {str(e)}")
+
     if modules.globals.headless:
         start()
     else:
diff --git a/modules/processors/frame/face_swapper.py b/modules/processors/frame/face_swapper.py
index 4b4a222..1e39ffd 100644
--- a/modules/processors/frame/face_swapper.py
+++ b/modules/processors/frame/face_swapper.py
@@ -17,7 +17,7 @@ NAME = 'DLC.FACE-SWAPPER'
 
 def pre_check() -> bool:
     download_directory_path = resolve_relative_path('../models')
-    conditional_download(download_directory_path, ['https://huggingface.co/hacksider/deep-live-cam/blob/main/inswapper_128_fp16.onnx'])
+    conditional_download(download_directory_path, ['https://huggingface.co/hacksider/deep-live-cam/blob/main/inswapper_128.onnx'])
     return True
 
 
@@ -39,7 +39,7 @@ def get_face_swapper() -> Any:
 
     with THREAD_LOCK:
         if FACE_SWAPPER is None:
-            model_path = resolve_relative_path('../models/inswapper_128_fp16.onnx')
+            model_path = resolve_relative_path('../models/inswapper_128.onnx')
             FACE_SWAPPER = insightface.model_zoo.get_model(model_path, providers=modules.globals.execution_providers)
     return FACE_SWAPPER
diff --git a/modules/ui.py b/modules/ui.py
index 8824ab2..25ba19e 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -194,38 +194,6 @@ def select_output_path(start: Callable[[], None]) -> None:
         start()
 
 
-def check_and_ignore_nsfw(target, destroy: Callable = None) -> bool:
-    ''' Check if the target is NSFW.
-        TODO: Consider to make blur the target.
-    '''
-    from numpy import ndarray
-    from modules.predicter import predict_image, predict_video, predict_frame
-    if type(target) is str:  # image/video file path
-        check_nsfw = predict_image if has_image_extension(target) else predict_video
-    elif type(target) is ndarray:  # frame object
-        check_nsfw = predict_frame
-    if check_nsfw and check_nsfw(target):
-        if destroy: destroy(to_quit=False)  # Do not need to destroy the window frame if the target is NSFW
-        update_status('Processing ignored!')
-        return True
-    else: return False
-
-
-def fit_image_to_size(image, width: int, height: int):
-    if width is None and height is None:
-        return image
-    h, w, _ = image.shape
-    ratio_h = 0.0
-    ratio_w = 0.0
-    if width > height:
-        ratio_h = height / h
-    else:
-        ratio_w = width / w
-    ratio = max(ratio_w, ratio_h)
-    new_size = (int(ratio * w), int(ratio * h))
-    return cv2.resize(image, dsize=new_size)
-
-
 def render_image_preview(image_path: str, size: Tuple[int, int]) -> ctk.CTkImage:
     image = Image.open(image_path)
     if size:
@@ -323,7 +291,7 @@ def webcam_preview():
             for frame_processor in frame_processors:
                 temp_frame = frame_processor.process_frame(source_image, temp_frame)
 
-            image = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB)  # Convert the image to RGB format to display it with Tkinter
+            image = cv2.cvtColor(temp_frame, cv2.COLOR_BGR2RGB) # Convert the image to RGB format to display it with Tkinter
             image = Image.fromarray(image)
             image = ImageOps.contain(image, (temp_frame.shape[1], temp_frame.shape[0]), Image.LANCZOS)
             image = ctk.CTkImage(image, size=image.size)
diff --git a/modules/utilities.py b/modules/utilities.py
index 782395f..e3f5930 100644
--- a/modules/utilities.py
+++ b/modules/utilities.py
@@ -9,6 +9,7 @@ import urllib
 from pathlib import Path
 from typing import List, Any
 from tqdm import tqdm
+import cv2
 
 import modules.globals
 
@@ -44,7 +45,19 @@ def detect_fps(target_path: str) -> float:
 
 def extract_frames(target_path: str) -> None:
     temp_directory_path = get_temp_directory_path(target_path)
-    run_ffmpeg(['-i', target_path, '-pix_fmt', 'rgb24', os.path.join(temp_directory_path, '%04d.png')])
+    cap = cv2.VideoCapture(target_path)
+
+    frame_count = 0
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+
+        # Save the frame
+        cv2.imwrite(os.path.join(temp_directory_path, f'{frame_count:04d}.png'), frame)
+        frame_count += 1
+
+    cap.release()
 
 
 def create_video(target_path: str, fps: float = 30.0) -> None:
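An editorial aside on the extract_frames() rewrite above: once OpenCV owns frame extraction, it can supply the fps probe as well. Below is a minimal sketch of an equivalent OpenCV-based probe; the helper name is illustrative and not part of this patch.

```python
import cv2

def detect_fps_cv2(target_path: str) -> float:
    # CAP_PROP_FPS returns 0.0 for some containers; fall back to 30.0,
    # matching the default that create_video() uses.
    cap = cv2.VideoCapture(target_path)
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    cap.release()
    return float(fps)
```

Note that, unlike the ffmpeg invocation it replaces, the cv2 loop writes frames exactly as decoded and numbers them from 0000 rather than ffmpeg's 1-based %04d, which matters if anything downstream assumes the first frame is 0001.png.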
diff --git a/requirements.txt b/requirements.txt
index f65195e..9396c93 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,23 +1,26 @@
---extra-index-url https://download.pytorch.org/whl/cu118
+# Deep Live Cam requirements
 
-numpy==1.23.5
+# Core dependencies
+numpy==1.26.4
+onnxruntime-silicon==1.16.3
 opencv-python==4.8.1.78
-onnx==1.16.0
-insightface==0.7.3
-psutil==5.9.8
-tk==0.1.0
-customtkinter==5.2.2
 pillow==9.5.0
-torch==2.0.1+cu118; sys_platform != 'darwin'
-torch==2.0.1; sys_platform == 'darwin'
-torchvision==0.15.2+cu118; sys_platform != 'darwin'
-torchvision==0.15.2; sys_platform == 'darwin'
-onnxruntime==1.18.0; sys_platform == 'darwin' and platform_machine != 'arm64'
-onnxruntime-silicon==1.16.3; sys_platform == 'darwin' and platform_machine == 'arm64'
-onnxruntime-gpu==1.18.0; sys_platform != 'darwin'
-tensorflow==2.13.0rc1; sys_platform == 'darwin'
-tensorflow==2.12.1; sys_platform != 'darwin'
-opennsfw2==0.10.2
-protobuf==4.23.2
+insightface==0.7.3
+torch==2.1.0  # Add the specific version you're using
+tensorflow==2.16.1  # Add the specific version you're using
+
+# Image processing
+scikit-image==0.24.0
+matplotlib==3.9.1.post1
+
+# Machine learning
+scikit-learn==1.5.1
+
+# Utilities
 tqdm==4.66.4
-gfpgan==1.3.8
+requests==2.32.3
+prettytable==3.11.0
+
+# Optional dependencies (comment out if not needed)
+# albumentations==1.4.13
+# coloredlogs==15.0.1
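This patch hard-codes CoreMLExecutionProvider instead of decoding --execution-provider, so it helps to confirm what ONNX Runtime actually binds to at runtime. A minimal sanity check, assuming onnxruntime-silicon is installed; the model path is illustrative and any local ONNX model will do:

```python
import onnxruntime as ort

# Providers compiled into this build; should include 'CoreMLExecutionProvider'.
print(ort.get_available_providers())

# Request CoreML first with CPU as a fallback, then inspect what was bound.
session = ort.InferenceSession(
    'models/inswapper_128.onnx',  # illustrative path
    providers=['CoreMLExecutionProvider', 'CPUExecutionProvider'],
)
print(session.get_providers())
```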
From b8fde4de4b54378cfe3e6fe1744ecd6e10bc5193 Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Tue, 13 Aug 2024 13:11:51 +0100
Subject: [PATCH 06/11] Update README.md

---
 README.md | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 9a0334d..0579919 100644
--- a/README.md
+++ b/README.md
@@ -78,19 +78,20 @@ python run.py --execution-provider coreml
 ```
 
 ### [](https://github.com/s0md3v/roop/wiki/2.-Acceleration#coreml-execution-provider-apple-legacy)CoreML Execution Provider (Apple Legacy)
+Metal support has been added for improved performance on macOS devices.
 
 1. Install dependencies:
 
 ```
-pip uninstall onnxruntime onnxruntime-coreml
-pip install onnxruntime-coreml==1.13.1
+pip uninstall onnxruntime onnxruntime-silicon
+pip install onnxruntime-silicon==1.13.1
 ```
 
 2. Usage in case the provider is available:
 
 ```
-python run.py --execution-provider coreml
+python run.py --execution-provider metal
 ```

From 77e7ba58d57847f01c6aaf8dfbcef6529ee2195d Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Tue, 13 Aug 2024 13:21:52 +0100
Subject: [PATCH 07/11] Enable GPU on tensorflow

---
 requirements.txt | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 9396c93..d459449 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,7 +7,8 @@ opencv-python==4.8.1.78
 pillow==9.5.0
 insightface==0.7.3
 torch==2.1.0  # Add the specific version you're using
-tensorflow==2.16.1  # Add the specific version you're using
+tensorflow-macos==2.16.2  # Add the specific version you're using
+tensorflow-metal==1.1.0  # Add the specific version you're using
 
 # Image processing
 scikit-image==0.24.0

From dd7b899e7df7022856aa9a0bf59dbd5ee7790df5 Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Tue, 13 Aug 2024 19:03:22 +0100
Subject: [PATCH 08/11] Performance tweaks

---
 modules/core.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/modules/core.py b/modules/core.py
index 1748f5e..691e29b 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -109,8 +109,8 @@ def decode_execution_providers(execution_providers: List[str]) -> List[str]:
 
 def suggest_max_memory() -> int:
     if platform.system().lower() == 'darwin':
-        return 4
-    return 16
+        return 6
+    return 4
 
 
 def suggest_execution_providers() -> List[str]:
@@ -118,7 +118,10 @@ def suggest_execution_providers() -> List[str]:
 
 
 def suggest_execution_threads() -> int:
-    return 8
+    if platform.system().lower() == 'darwin':
+        return 12
+    return 4
 
 
 def limit_resources() -> None:
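For context on the suggest_execution_threads() change: in roop-derived pipelines this value caps a ThreadPoolExecutor that fans frame paths out to the frame processors, so raising it mainly overlaps image I/O and pre/post-processing with inference rather than multiplying GPU throughput. A simplified sketch of that pattern, assuming this repository follows the roop layout; the function name is illustrative:

```python
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, List

def process_frames_concurrently(frame_paths: List[str],
                                process_fn: Callable[[str], None],
                                execution_threads: int = 12) -> None:
    # Workers share the loaded model sessions; extra threads hide per-frame
    # decode/encode latency instead of adding GPU parallelism.
    with ThreadPoolExecutor(max_workers=execution_threads) as executor:
        list(executor.map(process_fn, frame_paths))
```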
From 6f096c980a1704c2536da35a3cac282f8baa7419 Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Mon, 19 Aug 2024 21:17:08 +0100
Subject: [PATCH 09/11] update

---
 .gitignore | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.gitignore b/.gitignore
index a605cb8..c8bdf1b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,3 +25,5 @@ models/DMDNet.pth
 .venv/
 tf_env/
 .tf_env/
+.deepcamlive/
+deep-live-cam/

From 5aceb771025c59a9169fcd051427158b4bf5ebdb Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Wed, 21 Aug 2024 19:49:13 +0100
Subject: [PATCH 10/11] tweaks

---
 modules/face_analyser.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/face_analyser.py b/modules/face_analyser.py
index f2d46bf..88e8136 100644
--- a/modules/face_analyser.py
+++ b/modules/face_analyser.py
@@ -12,7 +12,7 @@ def get_face_analyser() -> Any:
 
     if FACE_ANALYSER is None:
         FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=modules.globals.execution_providers)
-        FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
+        FACE_ANALYSER.prepare(ctx_id=0, det_size=(1280, 720))
     return FACE_ANALYSER
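The det_size passed to FaceAnalysis.prepare() is the (width, height) canvas the detector resizes each frame onto, so 1280x720 trades per-frame speed for better recall on small faces in wide video frames, where the stock 640x640 stays square. A standalone way to exercise the setting, assuming insightface is installed; the image file name is illustrative:

```python
import cv2
import insightface

analyser = insightface.app.FaceAnalysis(name='buffalo_l')
# Larger det_size finds smaller faces but costs more time per frame.
analyser.prepare(ctx_id=0, det_size=(1280, 720))

frame = cv2.imread('test_frame.png')  # illustrative input image
faces = analyser.get(frame)
print(f'detected {len(faces)} face(s)')
```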
From 83e6aafa5c60e836daf6a6980dde52e19b3fdd5c Mon Sep 17 00:00:00 2001
From: Jason Kneen
Date: Wed, 21 Aug 2024 20:15:35 +0100
Subject: [PATCH 11/11] Update core.py

---
 modules/core.py | 89 +++++++++++++++++++------------------------------
 1 file changed, 34 insertions(+), 55 deletions(-)

diff --git a/modules/core.py b/modules/core.py
index 691e29b..f135e44 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -16,6 +16,7 @@ import argparse
 import torch
 import onnxruntime
 import tensorflow
+import cv2
 
 import modules.globals
 import modules.metadata
@@ -76,27 +77,6 @@ def parse_args() -> None:
     else:
         modules.globals.fp_ui['face_enhancer'] = False
 
-    # translate deprecated args
-    if args.source_path_deprecated:
-        print('\033[33mArgument -f and --face are deprecated. Use -s and --source instead.\033[0m')
-        modules.globals.source_path = args.source_path_deprecated
-        modules.globals.output_path = normalize_output_path(args.source_path_deprecated, modules.globals.target_path, args.output_path)
-    if args.cpu_cores_deprecated:
-        print('\033[33mArgument --cpu-cores is deprecated. Use --execution-threads instead.\033[0m')
-        modules.globals.execution_threads = args.cpu_cores_deprecated
-    if args.gpu_vendor_deprecated == 'apple':
-        print('\033[33mArgument --gpu-vendor apple is deprecated. Use --execution-provider coreml instead.\033[0m')
-        modules.globals.execution_providers = decode_execution_providers(['coreml'])
-    if args.gpu_vendor_deprecated == 'nvidia':
-        print('\033[33mArgument --gpu-vendor nvidia is deprecated. Use --execution-provider cuda instead.\033[0m')
-        modules.globals.execution_providers = decode_execution_providers(['cuda'])
-    if args.gpu_vendor_deprecated == 'amd':
-        print('\033[33mArgument --gpu-vendor amd is deprecated. Use --execution-provider cuda instead.\033[0m')
-        modules.globals.execution_providers = decode_execution_providers(['rocm'])
-    if args.gpu_threads_deprecated:
-        print('\033[33mArgument --gpu-threads is deprecated. Use --execution-threads instead.\033[0m')
-        modules.globals.execution_threads = args.gpu_threads_deprecated
-
 
 def encode_execution_providers(execution_providers: List[str]) -> List[str]:
     return [execution_provider.replace('ExecutionProvider', '').lower() for execution_provider in execution_providers]
@@ -222,45 +202,25 @@
     limit_resources()
     print(f"ONNX Runtime version: {onnxruntime.__version__}")
     print(f"Available execution providers: {onnxruntime.get_available_providers()}")
-    print(f"Selected execution provider: CoreMLExecutionProvider")
+    print(f"Selected execution provider: CoreMLExecutionProvider (with CPU fallback for face detection)")
 
-    # Configure ONNX Runtime to use only CoreML
+    # Configure ONNX Runtime to use CoreML
     onnxruntime.set_default_logger_severity(3)  # Set to WARNING level
     options = onnxruntime.SessionOptions()
     options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
 
-    # Test CoreML with a dummy model
-    try:
-        import numpy as np
-        from onnx import helper, TensorProto
-
-        # Create a simple ONNX model
-        X = helper.make_tensor_value_info('input', TensorProto.FLOAT, [1, 3, 224, 224])
-        Y = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 3, 224, 224])
-        node = helper.make_node('Identity', ['input'], ['output'])
-        graph = helper.make_graph([node], 'test_model', [X], [Y])
-        model = helper.make_model(graph)
-
-        # Save the model
-        model_path = 'test_model.onnx'
-        with open(model_path, 'wb') as f:
-            f.write(model.SerializeToString())
-
-        # Create a CoreML session
-        session = onnxruntime.InferenceSession(model_path, options, providers=['CoreMLExecutionProvider'])
-
-        # Run inference
-        input_data = np.random.rand(1, 3, 224, 224).astype(np.float32)
-        output = session.run(None, {'input': input_data})
-
-        print("CoreML init successful and being used")
-        print(f"Input shape: {input_data.shape}, Output shape: {output[0].shape}")
-
-        # Clean up
-        os.remove(model_path)
-    except Exception as e:
-        print(f"Error testing CoreML: {str(e)}")
-        print("The application may not be able to use GPU acceleration")
+    # Add CoreML-specific options
+    options.add_session_config_entry("session.coreml.force_precision", "FP32")
+    options.add_session_config_entry("session.coreml.enable_on_subgraph", "1")
+
+    # Update insightface model loading to use CPU for face detection
+    from insightface.utils import face_align
+    def custom_session(model_file, providers):
+        if 'det_model.onnx' in model_file:
+            return onnxruntime.InferenceSession(model_file, providers=['CPUExecutionProvider'])
+        else:
+            return onnxruntime.InferenceSession(model_file, options, providers=['CoreMLExecutionProvider'])
+
+    face_align.Session = custom_session
 
     # Configure TensorFlow to use Metal
     try:
@@ -288,3 +248,22 @@
     else:
         window = ui.init(start, destroy)
         window.mainloop()
+
+def get_one_face(frame):
+    # Resize the frame to the expected input size
+    frame_resized = cv2.resize(frame, (112, 112))  # Resize to (112, 112) for recognition model
+    face = get_face_analyser().get(frame_resized)
+    return face
+
+# Fall back to the CPUExecutionProvider if CoreML fails
+def run_model_with_cpu_fallback(model_file, providers):
+    try:
+        return onnxruntime.InferenceSession(model_file, providers=['CoreMLExecutionProvider'])
+    except Exception as e:
+        print(f"CoreML execution failed: {e}. Falling back to CPU.")
+        return onnxruntime.InferenceSession(model_file, providers=['CPUExecutionProvider'])
+
+# Update the face analysis function to use the fallback
+def get_face_analyser():
+    # Load your model here with the fallback
+    return run_model_with_cpu_fallback('/path/to/your/model.onnx', ['CoreMLExecutionProvider', 'CPUExecutionProvider'])
\ No newline at end of file
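Two caveats on the block above: run_model_with_cpu_fallback() only catches failures at session construction, and its providers parameter is unused as written, since both provider lists are hard-coded in the body. A hypothetical usage sketch, with an illustrative model path standing in for the placeholder in the patch:

```python
# Hypothetical check of the fallback loader defined in this patch.
session = run_model_with_cpu_fallback(
    'models/inswapper_128.onnx',  # illustrative path
    ['CoreMLExecutionProvider', 'CPUExecutionProvider'],
)
print(session.get_providers())  # shows which providers the session registered
```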