Mirror of https://github.com/deepfakes/faceswap (synced 2025-06-07 10:43:27 -04:00)
Extraction - Speed improvements (#522)

* Initial Plugin restructure
* Detectors to plugins. Detector speed improvements
* Re-implement dlib aligner, remove models, FAN to TF. Parallel processing
* Update manual, update convert, implement parallel/serial switching
* Linting + fix CUDA check (setup.py). Requirements update: Keras 2.2.4
* Add extract size option. Fix dlib HOG init
* GUI: increase tooltip width
* Update alignment tool to support new DetectedFace
* Add skip existing faces option
* Fix sort tool to new plugin structure
* Remove old align plugin
* Fix convert -skip faces bug
* Fix convert skipping no faces frames
* Convert - draw onto transparent layer
* Fix blur threshold bug
* Fix skip_faces convert bug
* Fix training
71 lines · 2.5 KiB · Python
# Based on the original https://www.reddit.com/r/deepfakes/ code sample + contribs

from keras.layers import Input, Dense, Flatten, Reshape
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Conv2D
from keras.models import Model as KerasModel
from keras.optimizers import Adam
from keras.utils import multi_gpu_model

from lib.PixelShuffler import PixelShuffler

from .AutoEncoder import AutoEncoder

IMAGE_SHAPE = (64, 64, 3)
ENCODER_DIM = 1024


class Model(AutoEncoder):
    """ Original faceswap model: one shared encoder with separate decoders for A and B """

    def initModel(self):
        """ Build and compile the A and B autoencoders """
        optimizer = Adam(lr=5e-5, beta_1=0.5, beta_2=0.999)
        x = Input(shape=IMAGE_SHAPE)

        # Both autoencoders share the same encoder; only the decoders differ
        self.autoencoder_A = KerasModel(x, self.decoder_A(self.encoder(x)))
        self.autoencoder_B = KerasModel(x, self.decoder_B(self.encoder(x)))

        # Wrap the models for data-parallel training when more than one GPU is requested
        if self.gpus > 1:
            self.autoencoder_A = multi_gpu_model(self.autoencoder_A, self.gpus)
            self.autoencoder_B = multi_gpu_model(self.autoencoder_B, self.gpus)

        self.autoencoder_A.compile(optimizer=optimizer, loss='mean_absolute_error')
        self.autoencoder_B.compile(optimizer=optimizer, loss='mean_absolute_error')

    def converter(self, swap):
        """ Return the predict function of the autoencoder used for conversion
            (autoencoder_B by default, autoencoder_A when swap is True) """
        autoencoder = self.autoencoder_B if not swap else self.autoencoder_A
        return lambda img: autoencoder.predict(img)

    def conv(self, filters):
        """ Downscaling block: strided 5x5 convolution followed by LeakyReLU """
        def block(x):
            x = Conv2D(filters, kernel_size=5, strides=2, padding='same')(x)
            x = LeakyReLU(0.1)(x)
            return x
        return block

    def upscale(self, filters):
        """ Upscaling block: 3x3 convolution to 4x the filters, then a 2x pixel shuffle """
        def block(x):
            x = Conv2D(filters * 4, kernel_size=3, padding='same')(x)
            x = LeakyReLU(0.1)(x)
            x = PixelShuffler()(x)
            return x
        return block

    def Encoder(self):
        """ Shared encoder: 64x64x3 input -> dense bottleneck -> 8x8x512 feature map """
        input_ = Input(shape=IMAGE_SHAPE)
        x = input_
        x = self.conv(128)(x)                   # 64x64 -> 32x32
        x = self.conv(256)(x)                   # 32x32 -> 16x16
        x = self.conv(512)(x)                   # 16x16 -> 8x8
        x = self.conv(1024)(x)                  # 8x8 -> 4x4
        x = Dense(ENCODER_DIM)(Flatten()(x))    # bottleneck
        x = Dense(4 * 4 * 1024)(x)
        x = Reshape((4, 4, 1024))(x)
        x = self.upscale(512)(x)                # 4x4x1024 -> 8x8x512
        return KerasModel(input_, x)

    def Decoder(self):
        """ Per-identity decoder: 8x8x512 feature map -> 64x64x3 face """
        input_ = Input(shape=(8, 8, 512))
        x = input_
        x = self.upscale(256)(x)                # 8x8 -> 16x16
        x = self.upscale(128)(x)                # 16x16 -> 32x32
        x = self.upscale(64)(x)                 # 32x32 -> 64x64
        x = Conv2D(3, kernel_size=5, padding='same', activation='sigmoid')(x)
        return KerasModel(input_, x)
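# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original file): PixelShuffler is
# imported from lib.PixelShuffler and its implementation is not shown here.
# It is assumed to perform a depth-to-space rearrangement (sub-pixel
# upscaling) with a block size of 2, which is what the `filters * 4` channel
# expansion in upscale() implies. The NumPy helper below mimics that
# behaviour for NHWC tensors; the names and shapes are assumptions for
# illustration, not the library's API.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np

    def pixel_shuffle_nhwc(tensor, block=2):
        """Rearrange (N, H, W, C * block**2) into (N, H * block, W * block, C)."""
        n, h, w, c = tensor.shape
        out_c = c // (block * block)
        tensor = tensor.reshape(n, h, w, block, block, out_c)
        tensor = tensor.transpose(0, 1, 3, 2, 4, 5)   # interleave block factors with H and W
        return tensor.reshape(n, h * block, w * block, out_c)

    # Example: the 4x4x2048 output of upscale(512)'s Conv2D at the end of
    # Encoder() becomes the 8x8x512 feature map expected by Decoder().
    dummy = np.zeros((1, 4, 4, 2048), dtype="float32")
    print(pixel_shuffle_nhwc(dummy).shape)            # -> (1, 8, 8, 512)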