Implement ear preservation for Poisson blending.

- Added functionality to preserve target's ears when using Poisson blending.
- Introduced new global variables in `globals.py`:
  - `preserve_target_ears` (boolean flag)
  - `ear_width_ratio`
  - `ear_height_ratio`
  - `ear_vertical_offset_ratio`
  - `ear_horizontal_overlap_ratio`
- Added command-line argument `--preserve-ears` to `core.py`.
- Modified `face_swapper.py` to adjust the Poisson blending mask by
  subtracting calculated ear regions when `preserve_target_ears` is true,
  keeping the original target frame's ear content and reducing artifacts
  around the ears (a standalone sketch of the adjustment follows this list).
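
For orientation, here is a minimal standalone sketch of that mask adjustment, assuming a frame-sized 8-bit single-channel mask and an insightface-style `bbox` array; the function wrapper and its name are hypothetical, but the geometry mirrors the diff below.

```python
import cv2
import numpy as np

def carve_ear_regions(mask: np.ndarray, bbox: np.ndarray,
                      width_ratio: float = 0.18, height_ratio: float = 0.35,
                      v_offset_ratio: float = 0.20, overlap_ratio: float = 0.03) -> np.ndarray:
    """Zero out two rectangles flanking the face bbox so the blend keeps the target's ears."""
    frame_h, frame_w = mask.shape[:2]
    x1, y1, x2, y2 = bbox.astype(int)
    face_w, face_h = x2 - x1, y2 - y1
    ear_w = int(face_w * width_ratio)
    ear_h = int(face_h * height_ratio)
    overlap = int(face_w * overlap_ratio)
    top = max(0, y1 + int(face_h * v_offset_ratio))
    bottom = min(frame_h, top + ear_h)
    # Right ear (image-left of the bbox), reaching slightly into the bbox for a softer seam.
    cv2.rectangle(mask, (max(0, x1 - ear_w + overlap), top), (min(frame_w, x1 + overlap), bottom), 0, -1)
    # Left ear (image-right of the bbox).
    cv2.rectangle(mask, (max(0, x2 - overlap), top), (min(frame_w, x2 + ear_w - overlap), bottom), 0, -1)
    return mask
```

The hard rectangle edges are then softened by the Gaussian feathering the diff applies before `cv2.seamlessClone`.
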
Ref: pull/1380/head
Author: google-labs-jules[bot] 2025-06-24 20:25:09 +00:00
Parent: b260aa27ca
Commit: 8b61cc691f
3 changed files with 41 additions and 5 deletions

core.py

@@ -44,6 +44,7 @@ def parse_args() -> None:
     program.add_argument('--map-faces', help='map source target faces', dest='map_faces', action='store_true', default=False)
     program.add_argument('--mouth-mask', help='mask the mouth region', dest='mouth_mask', action='store_true', default=False)
     program.add_argument('--poisson-blending', help='use Poisson blending for smoother face integration', dest='poisson_blending', action='store_true', default=False)
+    program.add_argument('--preserve-ears', help='attempt to preserve target ears by modifying the blend mask', dest='preserve_ears', action='store_true', default=False)
     program.add_argument('--video-encoder', help='adjust output video encoder', dest='video_encoder', default='libx264', choices=['libx264', 'libx265', 'libvpx-vp9'])
     program.add_argument('--video-quality', help='adjust output video quality', dest='video_quality', type=int, default=18, choices=range(52), metavar='[0-51]')
     program.add_argument('-l', '--lang', help='Ui language', default="en")
@@ -74,6 +75,7 @@ def parse_args() -> None:
     modules.globals.color_correction = args.color_correction
     modules.globals.mouth_mask = args.mouth_mask
     modules.globals.use_poisson_blending = args.poisson_blending
+    modules.globals.preserve_target_ears = args.preserve_ears
     modules.globals.nsfw_filter = args.nsfw_filter
     modules.globals.map_faces = args.map_faces
     modules.globals.video_encoder = args.video_encoder
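
For context, a hypothetical way to drive the new option end to end; only `--poisson-blending` and `--preserve-ears` come from this diff, while the entry point `run.py` and the source/target/output options are assumptions about the surrounding CLI:

```python
# Hypothetical end-to-end invocation; source/target/output flag names are assumed.
import subprocess

subprocess.run([
    "python", "run.py",
    "--source", "source.jpg",
    "--target", "target.mp4",
    "--output", "output.mp4",
    "--poisson-blending",   # enable seamlessClone-based compositing
    "--preserve-ears",      # carve the target's ear regions out of the blend mask
], check=True)
```

As the diff wires it, the ear carving operates on the Poisson blend mask, so the flag is only meaningful together with `--poisson-blending`.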

globals.py

@@ -43,3 +43,8 @@ mask_down_size = 0.50
 mask_size = 1
 use_poisson_blending = False # Added for Poisson blending
 poisson_blending_feather_amount = 5 # Feathering for the mask before Poisson blending
+preserve_target_ears = False # Flag to enable preserving target's ears
+ear_width_ratio = 0.18 # Width of the ear exclusion box as a ratio of face bbox width
+ear_height_ratio = 0.35 # Height of the ear exclusion box as a ratio of face bbox height
+ear_vertical_offset_ratio = 0.20 # Vertical offset of the ear box from top of face bbox
+ear_horizontal_overlap_ratio = 0.03 # How much the ear exclusion zone can overlap into the face bbox
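
To make the ratios concrete, a quick illustrative calculation for a made-up 200×260 px face bbox (the numbers are purely for intuition):

```python
# Illustrative only: the globals above applied to a hypothetical 200x260 px face bbox.
face_w, face_h = 200, 260
ear_w = int(face_w * 0.18)         # 36 px wide exclusion box per ear
ear_h = int(face_h * 0.35)         # 91 px tall
ear_v_offset = int(face_h * 0.20)  # box starts 52 px below the top of the bbox
ear_overlap = int(face_w * 0.03)   # box reaches 6 px into the face bbox
print(ear_w, ear_h, ear_v_offset, ear_overlap)  # -> 36 91 52 6
```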

face_swapper.py

@@ -172,12 +172,41 @@ def swap_face(source_face: Face, target_face: Face, temp_frame: Frame) -> Frame:
         else:
             logging.error("No bbox or landmarks available for Poisson mask. Blending will be skipped.")
+        # Subtract ear regions if preserve_target_ears is enabled
+        if modules.globals.preserve_target_ears and np.any(face_mask_for_blending > 0):
+            mfx1, mfy1, mfx2, mfy2 = target_face.bbox.astype(int)
+            mfw = mfx2 - mfx1
+            mfh = mfy2 - mfy1
+            ear_w = int(mfw * modules.globals.ear_width_ratio)
+            ear_h = int(mfh * modules.globals.ear_height_ratio)
+            ear_v_offset = int(mfh * modules.globals.ear_vertical_offset_ratio)
+            ear_overlap = int(mfw * modules.globals.ear_horizontal_overlap_ratio)
+            # Person's right ear (image-left side of face bbox)
+            # This region in face_mask_for_blending will be set to 0
+            rex1 = max(0, mfx1 - ear_w + ear_overlap)
+            rey1 = max(0, mfy1 + ear_v_offset)
+            rex2 = min(temp_frame.shape[1], mfx1 + ear_overlap) # Extends slightly into face bbox for smoother transition
+            rey2 = min(temp_frame.shape[0], rey1 + ear_h)
+            if rex1 < rex2 and rey1 < rey2:
+                cv2.rectangle(face_mask_for_blending, (rex1, rey1), (rex2, rey2), 0, -1)
+            # Person's left ear (image-right side of face bbox)
+            lex1 = max(0, mfx2 - ear_overlap)
+            ley1 = max(0, mfy1 + ear_v_offset)
+            lex2 = min(temp_frame.shape[1], mfx2 + ear_w - ear_overlap)
+            ley2 = min(temp_frame.shape[0], ley1 + ear_h)
+            if lex1 < lex2 and ley1 < ley2:
+                cv2.rectangle(face_mask_for_blending, (lex1, ley1), (lex2, ley2), 0, -1)
         # Feather the mask to smooth edges for Poisson blending
-        feather_amount = modules.globals.poisson_blending_feather_amount
-        if feather_amount > 0:
-            # Ensure kernel size is odd
-            kernel_size = 2 * feather_amount + 1
-            face_mask_for_blending = cv2.GaussianBlur(face_mask_for_blending, (kernel_size, kernel_size), 0)
+        if np.any(face_mask_for_blending > 0): # Only feather if there's a mask
+            feather_amount = modules.globals.poisson_blending_feather_amount
+            if feather_amount > 0:
+                # Ensure kernel size is odd
+                kernel_size = 2 * feather_amount + 1
+                face_mask_for_blending = cv2.GaussianBlur(face_mask_for_blending, (kernel_size, kernel_size), 0)
         # Calculate the center of the target face bbox for seamlessClone
         if hasattr(target_face, 'bbox'):