diff --git a/modules/core.py b/modules/core.py
index b6ef9b8..dbbf4f8 100644
--- a/modules/core.py
+++ b/modules/core.py
@@ -114,8 +114,46 @@ def encode_execution_providers(execution_providers: List[str]) -> List[str]:
 
 
 def decode_execution_providers(execution_providers: List[str]) -> List[str]:
-    return [provider for provider, encoded_execution_provider in zip(onnxruntime.get_available_providers(), encode_execution_providers(onnxruntime.get_available_providers()))
-            if any(execution_provider in encoded_execution_provider for execution_provider in execution_providers)]
+    try:
+        available_providers = onnxruntime.get_available_providers()
+        encoded_available_providers = encode_execution_providers(available_providers)
+        
+        selected_providers = []
+        unavailable_providers = []
+        
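+        # Match each requested short name (e.g. 'cuda') against the encoded
+        # names of the providers that ONNX Runtime actually reports as available.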
+        for execution_provider in execution_providers:
+            provider_found = False
+            for provider, encoded_provider in zip(available_providers, encoded_available_providers):
+                if execution_provider in encoded_provider:
+                    selected_providers.append(provider)
+                    provider_found = True
+                    break
+            
+            if not provider_found:
+                unavailable_providers.append(execution_provider)
+        
+        if 'cuda' in [p.lower() for p in unavailable_providers]:
+            # CUDA was requested but not available
+            cuda_path = os.environ.get('CUDA_PATH')
+            if cuda_path:
+                update_status(f"Warning: CUDA_PATH is set ({cuda_path}) but CUDA wasn't able to be loaded. Check your CUDA installation.", "DLC.CORE")
+                if os.path.exists(cuda_path):
+                    # CUDA path exists but couldn't be loaded - likely missing DLLs or incorrect configuration
+                    update_status("CUDA path exists but CUDA libraries couldn't be loaded. Check if the CUDA runtime is properly installed.", "DLC.CORE")
+                else:
+                    update_status("CUDA_PATH is set but the directory doesn't exist. Check your environment variables.", "DLC.CORE")
+            else:
+                update_status("CUDA was requested but no CUDA_PATH is set in environment variables.", "DLC.CORE")
+                
+        # If none of the requested providers are available, fall back to CPU
+        if not selected_providers:
+            update_status("Falling back to CPU execution provider.", "DLC.CORE")
+            selected_providers = ['CPUExecutionProvider']
+
+        return selected_providers
+    except Exception as e:
+        update_status(f"Error determining execution providers: {str(e)}. Falling back to CPU.", "DLC.CORE")
+        return ['CPUExecutionProvider']
 
 
 def suggest_max_memory() -> int:
@@ -160,6 +198,56 @@ def release_resources() -> None:
         torch.cuda.empty_cache()
 
 
+def check_cuda_configuration() -> None:
+    """
+    Check CUDA configuration and provide diagnostic information.
+    This helps users identify issues with their CUDA setup.
+    """
+    if 'cuda' in [p.lower() for p in encode_execution_providers(modules.globals.execution_providers)]:
+        update_status("CUDA execution provider requested, checking configuration...", "DLC.CUDA")
+        
+        # Check for CUDA environment variables
+        cuda_path = os.environ.get('CUDA_PATH')
+        if cuda_path:
+            update_status(f"CUDA_PATH is set to: {cuda_path}", "DLC.CUDA")
+            
+            # Check if the directory exists
+            if os.path.exists(cuda_path):
+                update_status("CUDA_PATH directory exists", "DLC.CUDA")
+                
+                # Check for critical CUDA DLLs on Windows
+                if platform.system().lower() == 'windows':
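+                    # The CUDA runtime DLL is versioned (e.g. cudart64_110.dll or
+                    # cudart64_12.dll), so match it with a wildcard.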
+                    cuda_dll_path = os.path.join(cuda_path, 'bin', 'cudart64_*.dll')
+                    import glob
+                    cuda_dlls = glob.glob(cuda_dll_path)
+                    
+                    if cuda_dlls:
+                        update_status(f"CUDA Runtime DLLs found: {', '.join(os.path.basename(dll) for dll in cuda_dlls)}", "DLC.CUDA")
+                    else:
+                        update_status("Warning: No CUDA Runtime DLLs found in CUDA_PATH/bin", "DLC.CUDA")
+                        update_status("This may cause CUDA initialization failures", "DLC.CUDA")
+            else:
+                update_status("Warning: CUDA_PATH is set but directory doesn't exist", "DLC.CUDA")
+        else:
+            update_status("Warning: CUDA_PATH environment variable is not set", "DLC.CUDA")
+        
+        # Check if CUDA is in PATH
+        path_env = os.environ.get('PATH', '')
+        if cuda_path and os.path.join(cuda_path, 'bin') in path_env:
+            update_status("CUDA bin directory is in PATH", "DLC.CUDA")
+        else:
+            update_status("Warning: CUDA bin directory not found in PATH", "DLC.CUDA")
+            update_status("This may prevent CUDA libraries from being found", "DLC.CUDA")
+        
+        # Check whether ONNX Runtime itself reports the CUDA provider as available
+        available_providers = onnxruntime.get_available_providers()
+        if 'CUDAExecutionProvider' in available_providers:
+            update_status("CUDA provider is available in ONNX Runtime", "DLC.CUDA")
+        else:
+            update_status("Warning: CUDA provider is not available in ONNX Runtime", "DLC.CUDA")
+            update_status("Available providers: " + ', '.join(available_providers), "DLC.CUDA")
+
+
 def pre_check() -> bool:
     if sys.version_info < (3, 9):
         update_status('Python version is not supported - please upgrade to 3.9 or higher.')
@@ -167,6 +255,10 @@ def pre_check() -> bool:
     if not shutil.which('ffmpeg'):
         update_status('ffmpeg is not installed.')
         return False
+
+    # Check CUDA configuration if requested
+    check_cuda_configuration()
+
     return True
 
 
diff --git a/modules/face_analyser.py b/modules/face_analyser.py
index ef124d5..63e77e5 100644
--- a/modules/face_analyser.py
+++ b/modules/face_analyser.py
@@ -19,8 +19,26 @@ def get_face_analyser() -> Any:
     global FACE_ANALYSER
 
     if FACE_ANALYSER is None:
-        FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=modules.globals.execution_providers)
-        FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
+        try:
+            FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=modules.globals.execution_providers)
+            FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
+        except Exception as e:
+            error_msg = str(e)
+            print(f"[DLC.FACE-ANALYSER] Error initializing face analyser with providers {modules.globals.execution_providers}: {error_msg}")
+            
+            # If error is CUDA-related, try with CPU provider as fallback
+            if "cuda" in error_msg.lower() or "gpu" in error_msg.lower():
+                print("[DLC.FACE-ANALYSER] CUDA error detected. Falling back to CPU provider.")
+                modules.globals.execution_providers = ['CPUExecutionProvider'] 
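+                # Note: mutating the global provider list means later model loads
+                # (e.g. the face swapper) will also use the CPU fallback.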
+                try:
+                    FACE_ANALYSER = insightface.app.FaceAnalysis(name='buffalo_l', providers=modules.globals.execution_providers)
+                    FACE_ANALYSER.prepare(ctx_id=0, det_size=(640, 640))
+                    print("[DLC.FACE-ANALYSER] Successfully initialized with CPU provider as fallback.")
+                except Exception as fallback_error:
+                    print(f"[DLC.FACE-ANALYSER] Failed to initialize even with fallback provider: {str(fallback_error)}")
+                    raise
+            else:
+                raise
     return FACE_ANALYSER
 
 
diff --git a/modules/processors/frame/face_swapper.py b/modules/processors/frame/face_swapper.py
index 36b83d6..0d749bd 100644
--- a/modules/processors/frame/face_swapper.py
+++ b/modules/processors/frame/face_swapper.py
@@ -61,9 +61,29 @@ def get_face_swapper() -> Any:
     with THREAD_LOCK:
         if FACE_SWAPPER is None:
             model_path = os.path.join(models_dir, "inswapper_128_fp16.onnx")
-            FACE_SWAPPER = insightface.model_zoo.get_model(
-                model_path, providers=modules.globals.execution_providers
-            )
+            try:
+                FACE_SWAPPER = insightface.model_zoo.get_model(
+                    model_path, providers=modules.globals.execution_providers
+                )
+                update_status(f"Successfully loaded model with providers: {modules.globals.execution_providers}", NAME)
+            except Exception as e:
+                error_msg = str(e)
+                update_status(f"Error loading model with selected providers: {error_msg}", NAME)
+                
+                # If the error is related to CUDA, provide more helpful information
+                if "cuda" in error_msg.lower() or "gpu" in error_msg.lower():
+                    update_status("CUDA error detected. Trying to load with CPU provider instead.", NAME)
+                    modules.globals.execution_providers = ['CPUExecutionProvider']
+                    try:
+                        FACE_SWAPPER = insightface.model_zoo.get_model(
+                            model_path, providers=modules.globals.execution_providers
+                        )
+                        update_status("Successfully loaded model with CPU provider as fallback.", NAME)
+                    except Exception as fallback_error:
+                        update_status(f"Failed to load model even with fallback provider: {str(fallback_error)}", NAME)
+                        raise
+                else:
+                    raise
     return FACE_SWAPPER
 
 
@@ -430,37 +450,24 @@ def draw_mouth_mask_visualization(
         feathered_mask = cv2.GaussianBlur(
             mask_region.astype(float), (kernel_size, kernel_size), 0
         )
-        feathered_mask = (feathered_mask / feathered_mask.max() * 255).astype(np.uint8)
-        # Remove the feathered mask color overlay
-        # color_feathered_mask = cv2.applyColorMap(feathered_mask, cv2.COLORMAP_VIRIDIS)
+        # Normalise to the 0-1 range; guard against an all-zero mask region
+        feathered_mask = feathered_mask / max(feathered_mask.max(), 1e-6)
 
-        # Ensure shapes match before blending feathered mask
-        # if vis_region.shape == color_feathered_mask.shape:
-        #     blended_feathered = cv2.addWeighted(vis_region, 0.7, color_feathered_mask, 0.3, 0)
-        #     vis_frame[min_y:max_y, min_x:max_x] = blended_feathered
+        face_mask_roi = face_mask[min_y:max_y, min_x:max_x]
+        combined_mask = feathered_mask * (face_mask_roi / 255.0)
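+        # combined_mask weights each pixel by both the feathered mouth mask and
+        # the face mask region (both scaled to the 0-1 range).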
 
-        # Add labels
-        cv2.putText(
-            vis_frame,
-            "Lower Mouth Mask",
-            (min_x, min_y - 10),
-            cv2.FONT_HERSHEY_SIMPLEX,
-            0.5,
-            (255, 255, 255),
-            1,
-        )
-        cv2.putText(
-            vis_frame,
-            "Feathered Mask",
-            (min_x, max_y + 20),
-            cv2.FONT_HERSHEY_SIMPLEX,
-            0.5,
-            (255, 255, 255),
-            1,
+        combined_mask = combined_mask[:, :, np.newaxis]
+        blended = (
+            color_corrected_mouth * combined_mask + vis_region * (1 - combined_mask)
+        ).astype(np.uint8)
+
+        # Apply face mask to blended result
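+        # (the face mask already contributes to combined_mask above, so this second
+        # application steepens its falloff in the visualization)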
+        face_mask_3channel = (
+            np.repeat(face_mask_roi[:, :, np.newaxis], 3, axis=2) / 255.0
         )
+        final_blend = blended * face_mask_3channel + vis_region * (1 - face_mask_3channel)
 
-        return vis_frame
-    return frame
+        vis_frame[min_y:max_y, min_x:max_x] = final_blend.astype(np.uint8)
+    return vis_frame
 
 
 def apply_mouth_area(