Compare commits

..

21 Commits

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Kenneth Estanislao | 2f67e2f159 | Update requirements.txt | 2025-02-09 14:17:49 +08:00 |
| Kenneth Estanislao | a3af249ea6 | Update requirements.txt | 2025-02-07 19:31:02 +08:00 |
| Kenneth Estanislao | 5bc3ada632 | Update requirements.txt | 2025-02-06 15:37:55 +08:00 |
| KRSHH | 650e89eb21 | Reduced File Size | 2025-02-06 10:40:32 +05:30 |
| Kenneth Estanislao | 4d2aea37b7 | Update requirements.txt | 2025-02-06 00:43:20 +08:00 |
| Kenneth Estanislao | 28c4b34db1 | Merge pull request #911 from nimishgautam/main: Fix cv2 size errors on first run in ui.py | 2025-02-05 12:51:39 +08:00 |
| Kenneth Estanislao | 49e8f78513 | Merge pull request #913 from soulee-dev/main: fix: typo souce_target_map → source_target_map | 2025-02-05 12:18:48 +08:00 |
| Kenneth Estanislao | d753f5d4b0 | Merge pull request #917 from carpusherw/patch-1: Fix requirements.txt | 2025-02-05 12:17:42 +08:00 |
| KRSHH | 4fb69476d8 | Change img dimensions | 2025-02-05 12:16:08 +08:00 |
| carpusherw | f3adfd194d | Fix requirements.txt | 2025-02-05 12:16:08 +08:00 |
| Kenneth Estanislao | e5f04cf917 | Revert "Update requirements.txt" (reverts commit d45dedc9a6) | 2025-02-05 12:08:19 +08:00 |
| Kenneth Estanislao | 67394a3157 | Revert "Update requirements.txt" (reverts commit f82cebf86e) | 2025-02-05 12:08:10 +08:00 |
| carpusherw | 186d155e1b | Fix requirements.txt | 2025-02-05 09:17:11 +08:00 |
| KRSHH | 87081e78d0 | Fixed typo | 2025-02-04 21:20:54 +05:30 |
| KRSHH | f79373d4db | Updated Features Section | 2025-02-04 21:08:36 +05:30 |
| Soul Lee | 513e413956 | fix: typo souce_target_map → source_target_map | 2025-02-03 20:33:44 +09:00 |
| Kenneth Estanislao | f82cebf86e | Update requirements.txt | 2025-02-03 18:03:27 +08:00 |
| Kenneth Estanislao | d45dedc9a6 | Update requirements.txt | 2025-02-03 16:38:18 +08:00 |
| Kenneth Estanislao | 2d489b57ec | Update README.md | 2025-02-03 13:13:56 +08:00 |
| Nimish Gåtam | ccc04983cf | Update ui.py: removed unnecessary code as per AI code review (which is a thing now because of course it is) | 2025-02-01 12:38:37 +01:00 |
| Nimish Gåtam | 2506c5a261 | Update ui.py: some checks for first run when models are missing, so it doesn't error out with inv_scale_x > 0 in cv2 | 2025-02-01 11:52:49 +01:00 |
6 changed files with 51 additions and 39 deletions

View File

@@ -49,7 +49,7 @@ Users are expected to use this software responsibly and legally. If using a real
 2. Select which camera to use
 3. Press live!
 
-## Features & Uses - Everything is real-time
+## Features & Uses - Everything is in real-time
 
 ### Mouth Mask
@@ -85,7 +85,7 @@ Users are expected to use this software responsibly and legally. If using a real
 ### Memes
 
-**Create Your most viral meme yet**
+**Create Your Most Viral Meme Yet**
 
 <p align="center">
   <img src="media/meme.gif" alt="show" width="450">
@@ -93,6 +93,13 @@ Users are expected to use this software responsibly and legally. If using a real
   <sub>Created using Many Faces feature in Deep-Live-Cam</sub>
 </p>
 
+### Omegle
+
+**Surprise people on Omegle**
+
+<p align="center">
+  <video src="https://github.com/user-attachments/assets/2e9b9b82-fa04-4b70-9f56-b1f68e7672d0" width="450" controls></video>
+</p>
 
 ## Installation (Manual)
@@ -146,7 +153,7 @@ brew install python-tk@3.10
 
 **CUDA Execution Provider (Nvidia)**
 
-1. Install [CUDA Toolkit 11.8](https://developer.nvidia.com/cuda-11-8-0-download-archive) or [CUDA Toolkit 12.1.1](https://developer.nvidia.com/cuda-12-1-1-download-archive)
+1. Install [CUDA Toolkit 12.1.1](https://developer.nvidia.com/cuda-12-1-1-download-archive)
 2. Install dependencies:
 ```bash

View File

@@ -39,13 +39,13 @@ def get_many_faces(frame: Frame) -> Any:
         return None
 
 def has_valid_map() -> bool:
-    for map in modules.globals.souce_target_map:
+    for map in modules.globals.source_target_map:
         if "source" in map and "target" in map:
             return True
     return False
 
 def default_source_face() -> Any:
-    for map in modules.globals.souce_target_map:
+    for map in modules.globals.source_target_map:
         if "source" in map:
             return map['source']['face']
     return None
@@ -53,7 +53,7 @@ def default_source_face() -> Any:
 def simplify_maps() -> Any:
     centroids = []
     faces = []
-    for map in modules.globals.souce_target_map:
+    for map in modules.globals.source_target_map:
         if "source" in map and "target" in map:
             centroids.append(map['target']['face'].normed_embedding)
             faces.append(map['source']['face'])
@@ -64,10 +64,10 @@ def simplify_maps() -> Any:
 def add_blank_map() -> Any:
     try:
         max_id = -1
-        if len(modules.globals.souce_target_map) > 0:
-            max_id = max(modules.globals.souce_target_map, key=lambda x: x['id'])['id']
+        if len(modules.globals.source_target_map) > 0:
+            max_id = max(modules.globals.source_target_map, key=lambda x: x['id'])['id']
-        modules.globals.souce_target_map.append({
+        modules.globals.source_target_map.append({
             'id' : max_id + 1
         })
     except ValueError:
@@ -75,14 +75,14 @@ def add_blank_map() -> Any:
 
 def get_unique_faces_from_target_image() -> Any:
     try:
-        modules.globals.souce_target_map = []
+        modules.globals.source_target_map = []
         target_frame = cv2.imread(modules.globals.target_path)
         many_faces = get_many_faces(target_frame)
         i = 0
 
         for face in many_faces:
             x_min, y_min, x_max, y_max = face['bbox']
-            modules.globals.souce_target_map.append({
+            modules.globals.source_target_map.append({
                 'id' : i,
                 'target' : {
                     'cv2' : target_frame[int(y_min):int(y_max), int(x_min):int(x_max)],
@@ -96,7 +96,7 @@ def get_unique_faces_from_target_image() -> Any:
 
 def get_unique_faces_from_target_video() -> Any:
     try:
-        modules.globals.souce_target_map = []
+        modules.globals.source_target_map = []
         frame_face_embeddings = []
         face_embeddings = []
@@ -127,7 +127,7 @@ def get_unique_faces_from_target_video() -> Any:
                 face['target_centroid'] = closest_centroid_index
 
         for i in range(len(centroids)):
-            modules.globals.souce_target_map.append({
+            modules.globals.source_target_map.append({
                 'id' : i
             })
@@ -135,7 +135,7 @@ def get_unique_faces_from_target_video() -> Any:
             for frame in tqdm(frame_face_embeddings, desc=f"Mapping frame embeddings to centroids-{i}"):
                 temp.append({'frame': frame['frame'], 'faces': [face for face in frame['faces'] if face['target_centroid'] == i], 'location': frame['location']})
 
-            modules.globals.souce_target_map[i]['target_faces_in_frame'] = temp
+            modules.globals.source_target_map[i]['target_faces_in_frame'] = temp
 
         # dump_faces(centroids, frame_face_embeddings)
         default_target_face()
@@ -144,7 +144,7 @@ def get_unique_faces_from_target_video() -> Any:
 
 def default_target_face():
-    for map in modules.globals.souce_target_map:
+    for map in modules.globals.source_target_map:
         best_face = None
         best_frame = None
         for frame in map['target_faces_in_frame']:

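The rename above touches every site that builds or reads the shared face-mapping list. For orientation, here is a rough sketch of what one entry in `modules.globals.source_target_map` appears to hold, based only on the keys visible in these hunks (`id`, `source`, `target`, `target_faces_in_frame`); the placeholder strings stand in for the real detected-face objects and frame data:

```python
# Hypothetical shape of a single source_target_map entry, inferred from the
# keys used in the diff above; values are illustrative stand-ins, not real objects.
example_entry = {
    "id": 0,
    "source": {"face": "<face chosen by the user>"},
    "target": {
        "cv2": "<numpy crop of the target frame (bbox region)>",
        "face": "<face detected in the target>",
    },
    # Present for video targets: per-frame faces assigned to this cluster.
    "target_faces_in_frame": [
        {"frame": "<frame index>", "faces": ["<face>"], "location": "<frame path>"},
    ],
}
```
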
View File

@@ -9,7 +9,7 @@ file_types = [
     ("Video", ("*.mp4", "*.mkv")),
 ]
 
-souce_target_map = []
+source_target_map = []
 simple_map = {}
 
 source_path = None

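Because the typo fix spans several modules, a repository-wide scan is a cheap way to confirm no stale references to the old name survive the rename. This is only a suggested check, not part of the change set:

```python
# Suggested verification (not part of the diff): list any remaining uses of the
# old, misspelled name under modules/.
import pathlib

stale = [
    (str(path), line_no)
    for path in pathlib.Path("modules").rglob("*.py")
    for line_no, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1)
    if "souce_target_map" in line
]
print(stale or "no stale references")
```
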
View File

@@ -117,12 +117,12 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
     if is_image(modules.globals.target_path):
         if modules.globals.many_faces:
             source_face = default_source_face()
-            for map in modules.globals.souce_target_map:
+            for map in modules.globals.source_target_map:
                 target_face = map["target"]["face"]
                 temp_frame = swap_face(source_face, target_face, temp_frame)
 
         elif not modules.globals.many_faces:
-            for map in modules.globals.souce_target_map:
+            for map in modules.globals.source_target_map:
                 if "source" in map:
                     source_face = map["source"]["face"]
                     target_face = map["target"]["face"]
@@ -131,7 +131,7 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
     elif is_video(modules.globals.target_path):
         if modules.globals.many_faces:
             source_face = default_source_face()
-            for map in modules.globals.souce_target_map:
+            for map in modules.globals.source_target_map:
                 target_frame = [
                     f
                     for f in map["target_faces_in_frame"]
@@ -143,7 +143,7 @@ def process_frame_v2(temp_frame: Frame, temp_frame_path: str = "") -> Frame:
                         temp_frame = swap_face(source_face, target_face, temp_frame)
 
         elif not modules.globals.many_faces:
-            for map in modules.globals.souce_target_map:
+            for map in modules.globals.source_target_map:
                 if "source" in map:
                     target_frame = [
                         f

View File

@@ -397,7 +397,7 @@ def analyze_target(start: Callable[[], None], root: ctk.CTk):
         return
 
     if modules.globals.map_faces:
-        modules.globals.souce_target_map = []
+        modules.globals.source_target_map = []
 
         if is_image(modules.globals.target_path):
             update_status("Getting unique faces")
@@ -406,8 +406,8 @@ def analyze_target(start: Callable[[], None], root: ctk.CTk):
             update_status("Getting unique faces")
             get_unique_faces_from_target_video()
 
-        if len(modules.globals.souce_target_map) > 0:
-            create_source_target_popup(start, root, modules.globals.souce_target_map)
+        if len(modules.globals.source_target_map) > 0:
+            create_source_target_popup(start, root, modules.globals.source_target_map)
         else:
             update_status("No faces found in target")
     else:
@@ -696,17 +696,21 @@ def check_and_ignore_nsfw(target, destroy: Callable = None) -> bool:
 
 def fit_image_to_size(image, width: int, height: int):
-    if width is None and height is None:
+    if width is None or height is None or width <= 0 or height <= 0:
         return image
     h, w, _ = image.shape
-    ratio_h = 0.0
-    ratio_w = 0.0
-    if width > height:
-        ratio_h = height / h
-    else:
-        ratio_w = width / w
-    ratio = max(ratio_w, ratio_h)
-    new_size = (int(ratio * w), int(ratio * h))
+    ratio_w = width / w
+    ratio_h = height / h
+    # Use the smaller ratio to ensure the image fits within the given dimensions
+    ratio = min(ratio_w, ratio_h)
+    # Compute new dimensions, ensuring they're at least 1 pixel
+    new_width = max(1, int(ratio * w))
+    new_height = max(1, int(ratio * h))
+    new_size = (new_width, new_height)
     return cv2.resize(image, dsize=new_size)
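
This is the fix behind the "inv_scale_x > 0" commit: the old guard only bailed out when both width and height were None, so a zero-sized widget on first run produced a zero ratio and a zero-sized resize target, which trips cv2's assertion; it also scaled by a single axis, so the other dimension could overflow the requested box. The new version computes both ratios, takes the smaller one so the result fits inside the box, and clamps to at least 1 px. A minimal standalone sketch of the same calculation, using a synthetic NumPy frame rather than the app's camera feed:

```python
# Standalone sketch of the updated fit_image_to_size logic (same math as the
# hunk above, lifted out of the ui.py context).
import cv2
import numpy as np


def fit_image_to_size(image, width, height):
    if width is None or height is None or width <= 0 or height <= 0:
        return image
    h, w, _ = image.shape
    ratio = min(width / w, height / h)  # smaller ratio keeps the image inside the box
    new_size = (max(1, int(ratio * w)), max(1, int(ratio * h)))
    return cv2.resize(image, dsize=new_size)


frame = np.zeros((1080, 1920, 3), dtype=np.uint8)
print(fit_image_to_size(frame, 960, 960).shape)  # -> (540, 960, 3)
```
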
@@ -787,9 +791,9 @@ def webcam_preview(root: ctk.CTk, camera_index: int):
             return
         create_webcam_preview(camera_index)
     else:
-        modules.globals.souce_target_map = []
+        modules.globals.source_target_map = []
         create_source_target_popup_for_webcam(
-            root, modules.globals.souce_target_map, camera_index
+            root, modules.globals.source_target_map, camera_index
         )
@@ -1199,4 +1203,4 @@ def update_webcam_target(
             target_label_dict_live[button_num] = target_image
         else:
             update_pop_live_status("Face could not be detected in last upload!")
-        return map
+        return map

View File

@@ -1,6 +1,7 @@
---extra-index-url https://download.pytorch.org/whl/cu121
+--extra-index-url https://download.pytorch.org/whl/cu118
 
 numpy>=1.23.5,<2
+typing-extensions>=4.8.0
 opencv-python==4.10.0.84
 cv2_enumerate_cameras==1.1.15
 onnx==1.16.0
@@ -9,13 +10,13 @@ psutil==5.9.8
 tk==0.1.0
 customtkinter==5.2.2
 pillow==11.1.0
-torch==2.0.1+cu118; sys_platform != 'darwin'
-torch==2.0.1; sys_platform == 'darwin'
-torchvision==0.15.2+cu121; sys_platform != 'darwin'
-torchvision==0.15.2; sys_platform == 'darwin'
+torch==2.5.1+cu118; sys_platform != 'darwin'
+torch==2.5.1+cu118; sys_platform == 'darwin'
+torchvision==0.20.1; sys_platform != 'darwin'
+torchvision==0.20.1+cu118; sys_platform == 'darwin'
 onnxruntime-silicon==1.16.3; sys_platform == 'darwin' and platform_machine == 'arm64'
 onnxruntime-gpu==1.16.3; sys_platform != 'darwin'
-tensorflow==2.12.1; sys_platform != 'darwin'
+tensorflow; sys_platform != 'darwin'
 opennsfw2==0.10.2
 protobuf==4.23.2
 tqdm==4.66.4
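
Given the pinned CUDA 11.8 wheels above, a quick post-install sanity check (a generic snippet, not something shipped in this repository) confirms that the GPU builds of torch and onnxruntime actually landed:

```python
# Generic post-install check; not part of the repository.
import torch
import onnxruntime as ort

print(torch.__version__)              # expect a +cu118 build on non-macOS
print(torch.cuda.is_available())      # True when the CUDA runtime and driver are usable
print(ort.get_available_providers())  # onnxruntime-gpu should list "CUDAExecutionProvider"
```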