

【Intro to StyleGAN】Even that "man who never smiles" couldn't help but smile ♬

Posted at 2020-01-10

This time, I'm mostly going for laughs.
But I figured everyone would want to see it too, so I gave it a try.
Please enjoy m(__)m

Straight to the results. By the way, the source is the same as last time:
StyleGAN/mayuyu_smile.py
As for what I tweaked, well, just see the results below.
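The article does not show mayuyu_smile.py itself, so here is a minimal sketch of the kind of latent-direction smile editing it presumably performs, following the puzer/stylegan-encoder layout also used by the code at the end of this post. The latent file latent/inagaki.npy, the direction file ffhq_dataset/latent_directions/smile.npy, the coefficient range, and the helper move_and_save are all assumptions for illustration, not taken from the article.

# smile_sketch.py -- a hedged sketch, NOT the article's mayuyu_smile.py
import pickle
import numpy as np
import PIL.Image
import dnnlib.tflib as tflib
from encoder.generator_model import Generator

tflib.init_tf()
fpath = './weight_files/tensorflow/karras2019stylegan-ffhq-1024x1024.pkl'
with open(fpath, mode='rb') as f:
    generator_network, discriminator_network, Gs_network = pickle.load(f)
generator = Generator(Gs_network, batch_size=1, randomize_noise=False)

# dlatent of shape (18, 512) produced by encode_images.py, and the pre-computed
# "smile" direction that ships with stylegan-encoder (both paths are assumptions)
person = np.load('latent/inagaki.npy')
smile_direction = np.load('ffhq_dataset/latent_directions/smile.npy')

def move_and_save(latent, direction, coeff, out_path):
    # shift only the first 8 style layers, as in the stylegan-encoder example notebook
    new_latent = latent.copy()
    new_latent[:8] = (latent + coeff * direction)[:8]
    generator.set_dlatents(new_latent.reshape((1, 18, 512)))
    img = PIL.Image.fromarray(generator.generate_images()[0], 'RGB')
    img.save(out_path, 'PNG')

# Larger coeff => bigger smile; the three GIFs below simply use progressively
# larger coefficient ranges (the exact values in the article are not given).
for i, coeff in enumerate(np.linspace(0.0, 2.0, 10)):
    move_and_save(person, smile_direction, coeff, f'smile_{i:02d}.png')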

Just get him to smile, somehow

inagaki_smile19.gif
Somehow, I got his mouth moving!

Come on, give us a proper smile

inagaki_smile19.gif
He smiled, but it's a bit scary ( ノД`)

Let's have him laugh out loud

inagaki_smile19.gif
Yes! Nailed it!

Found a photo of his real smile...

inagaki_egao.jpg
His real smile is an even better look!

Summary

・I tried to make the "man who never smiles" smile
・Adjusting the parameters did get him to smile
・A natural-looking smile seems to need more tuning, and the video (GIF) needs more work too (see the sketch below)
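For reference, here is a rough sketch (not from the article) of how the per-coefficient frames from the earlier snippet could be stitched into an animated GIF like inagaki_smile19.gif using Pillow; the glob pattern, frame size, and timing are arbitrary choices.

# assemble the smile_XX.png frames from the sketch above into an animated GIF
import glob
import PIL.Image

frames = [PIL.Image.open(p) for p in sorted(glob.glob('smile_*.png'))]
frames = [f.resize((256, 256)) for f in frames]   # shrink to keep the GIF small
frames[0].save('inagaki_smile.gif', save_all=True,
               append_images=frames[1:], duration=200, loop=0)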

Bonus

As a bonus, here is the source of encode_images.py, modified to output images partway through training.
※ These intermediate outputs let you judge how far the training (latent optimization) has progressed.

# Usage: python encode_images.py img/ generated_images/ latent/

import os
import argparse
import pickle
from tqdm import tqdm
import PIL.Image
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import config
from encoder.generator_model import Generator
from encoder.perceptual_model import PerceptualModel

#URL_FFHQ = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ'  # karras2019stylegan-ffhq-1024x1024.pkl

def split_to_batches(l, n):
    for i in range(0, len(l), n):
        yield l[i:i + n]

def main():
    parser = argparse.ArgumentParser(description='Find latent representation of reference images using perceptual loss')
    parser.add_argument('src_dir', help='Directory with images for encoding')
    parser.add_argument('generated_images_dir', help='Directory for storing generated images')
    parser.add_argument('dlatent_dir', help='Directory for storing dlatent representations')

    # for now it's unclear if larger batch leads to better performance/quality
    parser.add_argument('--batch_size', default=1, help='Batch size for generator and perceptual model', type=int)

    # Perceptual model params
    parser.add_argument('--image_size', default=256, help='Size of images for perceptual model', type=int)
    parser.add_argument('--lr', default=1., help='Learning rate for perceptual model', type=float)
    parser.add_argument('--iterations', default=3001, help='Number of optimization steps for each batch', type=int)
    # Generator params
    parser.add_argument('--randomize_noise', default=False, help='Add noise to dlatents during optimization', type=bool)
    args, other_args = parser.parse_known_args()

    ref_images = [os.path.join(args.src_dir, x) for x in os.listdir(args.src_dir)]
    ref_images = list(filter(os.path.isfile, ref_images))
    if len(ref_images) == 0:
        raise Exception('%s is empty' % args.src_dir)

    os.makedirs(args.generated_images_dir, exist_ok=True)
    os.makedirs(args.dlatent_dir, exist_ok=True)

    # Initialize generator and perceptual model
    tflib.init_tf()
    fpath = './weight_files/tensorflow/karras2019stylegan-ffhq-1024x1024.pkl'
    with open(fpath, mode='rb') as f:
        generator_network, discriminator_network, Gs_network = pickle.load(f)

    generator = Generator(Gs_network, args.batch_size, randomize_noise=args.randomize_noise)
    perceptual_model = PerceptualModel(args.image_size, layer=9, batch_size=args.batch_size)
    perceptual_model.build_perceptual_model(generator.generated_image)
    Gs_network.print_layers()

    # Optimize (only) dlatents by minimizing perceptual loss between reference and generated images in feature space.
    # Modification: wrap the optimization in an outer loop of 20 passes and save the
    # images/dlatents after every pass, so you can see how far the encoding has progressed.
    for sk in range(20):
        for images_batch in tqdm(split_to_batches(ref_images, args.batch_size), total=len(ref_images)//args.batch_size):
            names = [os.path.splitext(os.path.basename(x))[0] for x in images_batch]
            print(sk)
            perceptual_model.set_reference_images(images_batch)
            op = perceptual_model.optimize(generator.dlatent_variable, iterations=args.iterations, learning_rate=args.lr)
            pbar = tqdm(op, leave=False, total=args.iterations)
            for loss in pbar:
                pbar.set_description(' '.join(names)+' Loss: %.2f' % loss)
            print(' '.join(names), ' loss:', loss)

            # Generate images from found dlatents and save them, prefixed with the pass number sk
            generated_images = generator.generate_images()
            generated_dlatents = generator.get_dlatents()
            for img_array, dlatent, img_name in zip(generated_images, generated_dlatents, names):
                img = PIL.Image.fromarray(img_array, 'RGB')
                img.save(os.path.join(args.generated_images_dir, str(sk)+f'{img_name}.png'), 'PNG')
                np.save(os.path.join(args.dlatent_dir, str(sk)+f'{img_name}.npy'), dlatent)
    # Reset only once, after all outer passes, so the dlatents keep improving from pass to pass
    generator.reset_dlatents()

if __name__ == "__main__":
    main()
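For an input such as img/inagaki.png (the name is just an example), this modified script writes 0inagaki.png, 1inagaki.png, ..., 19inagaki.png to generated_images/ and the matching .npy dlatents to latent/, one pair per outer pass, so you can see at a glance how far the encoding has converged. The last dlatent (the 19-prefixed one, which presumably gave the GIF above its name) is the one to feed into the smile editing sketched earlier.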