mirror of
https://github.com/deepfakes/faceswap
synced 2025-06-07 19:05:02 -04:00
* Clearer requirements for each platform
* Refactoring of old plugins (Model_Original + Extract_Align) + Cleanups
* Adding GAN128
* Update GAN to v2
* Create instance_normalization.py
* Fix decoder output
* Revert "Fix decoder output"
This reverts commit 3a8ecb8957
.
* Fix convert
* Enable all options except perceptual_loss by default
* Disable instance norm
* Update Model.py
* Update Trainer.py
* Match GAN128 to shaoanlu's latest v2
* Add first_order to GAN128
* Disable `use_perceptual_loss`
* Fix call to `self.first_order`
* Switch to average loss in output
* Constrain average to last 100 iterations
* Fix math, constrain average to intervals of 100
* Fix math averaging again
* Remove math and simplify this damn averaging
* Add gan128 conversion
* Update convert.py
* Use non-warped images in masked preview
* Add K.set_learning_phase(1) to gan64
* Add K.set_learning_phase(1) to gan128
* Add missing keras import
* Use non-warped images in masked preview for gan128
* Exclude deleted faces from conversion
* --input-aligned-dir defaults to "{input_dir}/aligned"
* Simplify map operation
* port 'face_alignment' from PyTorch to Keras. It works x2 faster, but initialization takes 20secs.
2DFAN-4.h5 and mmod_human_face_detector.dat included in lib\FaceLandmarksExtractor
fixed dlib vs tensorflow conflict: dlib must do op first, then load keras model, otherwise CUDA OOM error
if the face location is not found by CNN, it tries to find it by HOG.
removed this:
- if face.landmarks == None:
- print("Warning! landmarks not found. Switching to crop!")
- return cv2.resize(face.image, (size, size))
because DetectedFace always has landmarks
* Enabled masked converter for GAN models
* Histogram matching, cli option for perceptual loss
* Fix init() positional args error
* Add backwards compatibility for aligned filenames
* Fix masked converter
* Remove GAN converters
56 lines
1.8 KiB
Python
56 lines
1.8 KiB
Python
import sys
|
|
from os.path import basename, exists
|
|
|
|
from pathlib import Path
|
|
from scandir import scandir
|
|
|
|
# Lower-case file extensions treated as images by get_image_paths()
image_extensions = [".jpg", ".jpeg", ".png", ".tif", ".tiff"]
|
|
|
|
def get_folder(path):
    """ Return ``path`` as a ``pathlib.Path``, creating the directory
    (including any missing parents) if it does not already exist. """
    folder = Path(path)
    folder.mkdir(parents=True, exist_ok=True)
    return folder
|
|
|
|
def get_image_paths(directory, exclude=None, debug=False):
    """ Return the paths of all image files found directly in ``directory``.

    Parameters
    ----------
    directory: str
        Folder to scan. Created (via ``get_folder``) if it does not exist.
    exclude: list, optional
        Paths whose base names should be skipped. For backwards
        compatibility with old aligned filenames, the final character of
        each excluded stem is dropped before comparison.
    debug: bool, optional
        If ``True``, print a message for every excluded file.

    Returns
    -------
    list
        Full paths of the matching image files.
    """
    exclude = exclude if exclude is not None else []  # avoid mutable default
    # Backwards compatibility for aligned filenames: strip the trailing
    # character of the stem before comparing names. Set gives O(1) lookups.
    exclude_names = {basename(Path(x).stem[:-1] + Path(x).suffix)
                     for x in exclude}

    if not exists(directory):
        # str() rather than the undocumented/removed ``Path.path`` attribute
        directory = str(get_folder(directory))

    extensions = tuple(image_extensions)  # hoist: endswith takes a tuple
    image_paths = []
    for entry in scandir(directory):
        if not entry.name.lower().endswith(extensions):
            continue
        if entry.name in exclude_names:
            if debug:
                print("Already processed %s" % entry.name)
            continue
        image_paths.append(entry.path)
    return image_paths
|
|
|
|
# From: https://stackoverflow.com/questions/7323664/python-generator-pre-fetch
|
|
import threading
|
|
import queue as Queue
|
|
class BackgroundGenerator(threading.Thread):
    """ Run a generator on a background thread, pre-fetching its items
    into a bounded queue so production overlaps with consumption.

    From: https://stackoverflow.com/questions/7323664/python-generator-pre-fetch
    """

    def __init__(self, generator, prefetch=1):
        # NB: a queue of size ``prefetch`` effectively buffers prefetch + 1
        # items — ``prefetch`` sitting in the queue plus one blocked in put().
        super().__init__()
        self.queue = Queue.Queue(prefetch)
        self.generator = generator
        self.daemon = True  # never keep the process alive for this thread
        self.start()        # start pre-fetching immediately

    def run(self):
        """ Thread body: drain the wrapped generator into the queue, then
        enqueue a ``None`` sentinel to signal exhaustion. """
        for produced in self.generator:
            self.queue.put(produced)
        self.queue.put(None)

    def iterator(self):
        """ Yield pre-fetched items until the ``None`` sentinel is seen. """
        while True:
            fetched = self.queue.get()
            if fetched is None:
                return
            yield fetched
|