Compare commits

..

No commits in common. "2f67e2f15933c9d2dea7ab9786c077aabaa5421f" and "e862ff1456be5cdbf2390f0e1caae307194acd00" have entirely different histories.

6 changed files with 39 additions and 51 deletions

View File

@@ -49,7 +49,7 @@ Users are expected to use this software responsibly and legally. If using a real
2. Select which camera to use
3. Press live!
## Features & Uses - Everything is in real-time
## Features & Uses - Everything is real-time
### Mouth Mask
@@ -85,7 +85,7 @@ Users are expected to use this software responsibly and legally. If using a real
### Memes
**Create Your Most Viral Meme Yet**
**Create Your most viral meme yet**
<p align="center">
<img src="media/meme.gif" alt="show" width="450">
@@ -93,13 +93,6 @@ Users are expected to use this software responsibly and legally. If using a real
<sub>Created using Many Faces feature in Deep-Live-Cam</sub>
</p>
### Omegle
**Surprise people on Omegle**
<p align="center">
<video src="https://github.com/user-attachments/assets/2e9b9b82-fa04-4b70-9f56-b1f68e7672d0" width="450" controls></video>
</p>
## Installation (Manual)
@@ -153,7 +146,7 @@ brew install python-tk@3.10
**CUDA Execution Provider (Nvidia)**
1. Install [CUDA Toolkit 12.1.1](https://developer.nvidia.com/cuda-12-1-1-download-archive)
1. Install [CUDA Toolkit 11.8](https://developer.nvidia.com/cuda-11-8-0-download-archive) or [CUDA Toolkit 12.1.1](https://developer.nvidia.com/cuda-12-1-1-download-archive)
2. Install dependencies:
```bash

View File

@@ -39,13 +39,13 @@ def get_many_faces(frame: Frame) -> Any:
return None
def has_valid_map() -> bool:
for map in modules.globals.source_target_map:
for map in modules.globals.souce_target_map:
if "source" in map and "target" in map:
return True
return False
def default_source_face() -> Any:
for map in modules.globals.source_target_map:
for map in modules.globals.souce_target_map:
if "source" in map:
return map['source']['face']
return None
@@ -53,7 +53,7 @@ def default_source_face() -> Any:
def simplify_maps() -> Any:
centroids = []
faces = []
for map in modules.globals.source_target_map:
for map in modules.globals.souce_target_map:
if "source" in map and "target" in map:
centroids.append(map['target']['face'].normed_embedding)
faces.append(map['source']['face'])
@@ -64,10 +64,10 @@ def simplify_maps() -> Any:
def add_blank_map() -> Any:
try:
max_id = -1
if len(modules.globals.source_target_map) > 0:
max_id = max(modules.globals.source_target_map, key=lambda x: x['id'])['id']
if len(modules.globals.souce_target_map) > 0:
max_id = max(modules.globals.souce_target_map, key=lambda x: x['id'])['id']
modules.globals.source_target_map.append({
modules.globals.souce_target_map.append({
'id' : max_id + 1
})
except ValueError:
@@ -75,14 +75,14 @@ def add_blank_map() -> Any:
def get_unique_faces_from_target_image() -> Any:
try:
modules.globals.source_target_map = []
modules.globals.souce_target_map = []
target_frame = cv2.imread(modules.globals.target_path)
many_faces = get_many_faces(target_frame)
i = 0
for face in many_faces:
x_min, y_min, x_max, y_max = face['bbox']
modules.globals.source_target_map.append({
modules.globals.souce_target_map.append({
'id' : i,
'target' : {
'cv2' : target_frame[int(y_min):int(y_max), int(x_min):int(x_max)],
@@ -96,7 +96,7 @@ def get_unique_faces_from_target_image() -> Any:
def get_unique_faces_from_target_video() -> Any:
try:
modules.globals.source_target_map = []
modules.globals.souce_target_map = []
frame_face_embeddings = []
face_embeddings = []
@@ -127,7 +127,7 @@ def get_unique_faces_from_target_video() -> Any:
face['target_centroid'] = closest_centroid_index
for i in range(len(centroids)):
modules.globals.source_target_map.append({
modules.globals.souce_target_map.append({
'id' : i
})
@@ -135,7 +135,7 @@ def get_unique_faces_from_target_video() -> Any:
for frame in tqdm(frame_face_embeddings, desc=f"Mapping frame embeddings to centroids-{i}"):
temp.append({'frame': frame['frame'], 'faces': [face for face in frame['faces'] if face['target_centroid'] == i], 'location': frame['location']})
modules.globals.source_target_map[i]['target_faces_in_frame'] = temp
modules.globals.souce_target_map[i]['target_faces_in_frame'] = temp
# dump_faces(centroids, frame_face_embeddings)
default_target_face()
@@ -144,7 +144,7 @@ def get_unique_faces_from_target_video() -> Any:
def default_target_face():
for map in modules.globals.source_target_map:
for map in modules.globals.souce_target_map:
best_face = None
best_frame = None
for frame in map['target_faces_in_frame']:

View File

@@ -9,7 +9,7 @@ file_types = [
("Video", ("*.mp4", "*.mkv")),
]
source_target_map = []
souce_target_map = []
simple_map = {}
source_path = None

View File

@@ -117,12 +117,12 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
if is_image(modules.globals.target_path):
if modules.globals.many_faces:
source_face = default_source_face()
for map in modules.globals.source_target_map:
for map in modules.globals.souce_target_map:
target_face = map["target"]["face"]
temp_frame = swap_face(source_face, target_face, temp_frame)
elif not modules.globals.many_faces:
for map in modules.globals.source_target_map:
for map in modules.globals.souce_target_map:
if "source" in map:
source_face = map["source"]["face"]
target_face = map["target"]["face"]
@@ -131,7 +131,7 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
elif is_video(modules.globals.target_path):
if modules.globals.many_faces:
source_face = default_source_face()
for map in modules.globals.source_target_map:
for map in modules.globals.souce_target_map:
target_frame = [
f
for f in map["target_faces_in_frame"]
@@ -143,7 +143,7 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
temp_frame = swap_face(source_face, target_face, temp_frame)
elif not modules.globals.many_faces:
for map in modules.globals.source_target_map:
for map in modules.globals.souce_target_map:
if "source" in map:
target_frame = [
f

View File

@@ -397,7 +397,7 @@ def analyze_target(start: Callable[[], None], root: ctk.CTk):
return
if modules.globals.map_faces:
modules.globals.source_target_map = []
modules.globals.souce_target_map = []
if is_image(modules.globals.target_path):
update_status("Getting unique faces")
@@ -406,8 +406,8 @@ def analyze_target(start: Callable[[], None], root: ctk.CTk):
update_status("Getting unique faces")
get_unique_faces_from_target_video()
if len(modules.globals.source_target_map) > 0:
create_source_target_popup(start, root, modules.globals.source_target_map)
if len(modules.globals.souce_target_map) > 0:
create_source_target_popup(start, root, modules.globals.souce_target_map)
else:
update_status("No faces found in target")
else:
@@ -696,21 +696,17 @@ def check_and_ignore_nsfw(target, destroy: Callable = None) -> bool:
def fit_image_to_size(image, width: int, height: int):
if width is None or height is None or width <= 0 or height <= 0:
if width is None and height is None:
return image
h, w, _ = image.shape
ratio_h = 0.0
ratio_w = 0.0
ratio_w = width / w
ratio_h = height / h
# Use the smaller ratio to ensure the image fits within the given dimensions
ratio = min(ratio_w, ratio_h)
# Compute new dimensions, ensuring they're at least 1 pixel
new_width = max(1, int(ratio * w))
new_height = max(1, int(ratio * h))
new_size = (new_width, new_height)
if width > height:
ratio_h = height / h
else:
ratio_w = width / w
ratio = max(ratio_w, ratio_h)
new_size = (int(ratio * w), int(ratio * h))
return cv2.resize(image, dsize=new_size)
@@ -791,9 +787,9 @@ def webcam_preview(root: ctk.CTk, camera_index: int):
return
create_webcam_preview(camera_index)
else:
modules.globals.source_target_map = []
modules.globals.souce_target_map = []
create_source_target_popup_for_webcam(
root, modules.globals.source_target_map, camera_index
root, modules.globals.souce_target_map, camera_index
)
@@ -1203,4 +1199,4 @@ def update_webcam_target(
target_label_dict_live[button_num] = target_image
else:
update_pop_live_status("Face could not be detected in last upload!")
return map
return map

View File

@@ -1,7 +1,6 @@
--extra-index-url https://download.pytorch.org/whl/cu118
--extra-index-url https://download.pytorch.org/whl/cu121
numpy>=1.23.5,<2
typing-extensions>=4.8.0
opencv-python==4.10.0.84
cv2_enumerate_cameras==1.1.15
onnx==1.16.0
@@ -10,13 +9,13 @@ psutil==5.9.8
tk==0.1.0
customtkinter==5.2.2
pillow==11.1.0
torch==2.5.1+cu118; sys_platform != 'darwin'
torch==2.5.1+cu118; sys_platform == 'darwin'
torchvision==0.20.1; sys_platform != 'darwin'
torchvision==0.20.1+cu118; sys_platform == 'darwin'
torch==2.0.1+cu118; sys_platform != 'darwin'
torch==2.0.1; sys_platform == 'darwin'
torchvision==0.15.2+cu121; sys_platform != 'darwin'
torchvision==0.15.2; sys_platform == 'darwin'
onnxruntime-silicon==1.16.3; sys_platform == 'darwin' and platform_machine == 'arm64'
onnxruntime-gpu==1.16.3; sys_platform != 'darwin'
tensorflow; sys_platform != 'darwin'
tensorflow==2.12.1; sys_platform != 'darwin'
opennsfw2==0.10.2
protobuf==4.23.2
tqdm==4.66.4