# -*- coding: utf-8 -*-
##------------------------------------------
## StyleGAN3 (PAMA) for movie Ver 0.03
## Progressive Attentional Manifold Alignment
##
## 2024.08.22 Masahiro Izutsu
##------------------------------------------
## pama_movie.py
## Ver 0.02 2024.09.21 CPU support
## Ver 0.03 2024.10.23 Added results list display
# Color escape codes
GREEN = '\033[1;32m'
RED = '\033[1;31m'
NOCOLOR = '\033[0m'
YELLOW = '\033[1;33m'
CYAN = '\033[1;36m'
BLUE = '\033[1;34m'
import warnings
warnings.simplefilter('ignore')
from torch.cuda import is_available
gpu_d = is_available()  # check whether a GPU is available
# Imports & initial settings
import os
import shutil
import argparse
import cv2
import ffmpeg
import my_logging
import my_imagetool
import my_videotool
import my_movieplay
# Constant definitions
DEF_VIDEO_DIR = './video'
DEF_STYLE_DIR = './style'
DEF_VIDEO = './video/open_house.mp4'
DEF_STYLE = './style/ani_004.jpg'
RESULT_PATH = './results'
DEF_CHECKPOINT = 'consistency'
TMP_FRAME_DIR = './tmp_frame'
TMP_STYLE_DIR = './style_movie'
OUT_MOVIE = './output.mp4'
OUT_AUDIO = './output.mp3'
DEF_THEME = 'BlueMono'
# Title
title = 'Progressive Attentional Manifold Alignment for movie Ver. 0.03'
# Parses arguments for the application
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--source_image", default='', help="path to source image")
parser.add_argument("--style_image", default='', help="path to style image")
parser.add_argument("--result_path", default=RESULT_PATH, help="path to output")
parser.add_argument('--checkpoint', type = str, default=DEF_CHECKPOINT, choices=['original', 'consistency', 'color', 'content'], help = 'original / consistency / color / content')
parser.add_argument("--remake", action="store_true", help="remake process image flag")
parser.add_argument('--log', metavar = 'LOG', default = '3', help = 'Log level(-1/0/1/2/3/4/5) Default value is \'3\'')
return parser
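# A minimal usage sketch (assumed invocation, using the sample paths defined above):
#   python pama_movie.py --source_image ./video/open_house.mp4 --style_image ./style/ani_004.jpg --checkpoint consistency
# If --source_image or --style_image is omitted, the thumbnail selection dialogs in the entry point are shown instead.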
# Display the basic settings
def display_info(args, title):
print('\n' + GREEN + title + ': Starting application...' + NOCOLOR)
print('\n - ' + YELLOW + 'source_image : ' + NOCOLOR, args.source_image)
print(' - ' + YELLOW + 'style_image : ' + NOCOLOR, args.style_image)
print(' - ' + YELLOW + 'result_path : ' + NOCOLOR, args.result_path)
print(' - ' + YELLOW + 'checkpoint : ' + NOCOLOR, args.checkpoint)
print(' - ' + YELLOW + 'remake : ' + NOCOLOR, args.remake)
print(' - ' + YELLOW + 'log : ' + NOCOLOR, args.log)
print(' ')
# Select and install the pretrained parameters (checkpoint)
def select_param(select):
# reset the checkpoints folder
if os.path.isdir('checkpoints'):
shutil.rmtree('checkpoints')
if select == 'original':
shutil.copytree('original_PAMA', 'checkpoints')
if select == 'consistency':
shutil.copytree('PAMA_without_color', 'checkpoints')
shutil.copy('original_PAMA/encoder.pth', 'checkpoints/encoder.pth')
if select == 'color':
shutil.copytree('PAMA_1.5_color', 'checkpoints')
shutil.copy('original_PAMA/encoder.pth', 'checkpoints/encoder.pth')
if select == 'content':
shutil.copytree('PAMA_1.5_content', 'checkpoints')
shutil.copy('original_PAMA/encoder.pth', 'checkpoints/encoder.pth')
print('checkpoint is '+select)
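# Expected layout (inferred from the copies above, not verified here): each of 'original_PAMA',
# 'PAMA_without_color', 'PAMA_1.5_color' and 'PAMA_1.5_content' holds a set of pretrained .pth
# weights, and 'original_PAMA/encoder.pth' is shared by the non-original variants.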
# Concatenate two images into a single tiled preview image
def edit_pic(file1, file2):
images = []
img1 = cv2.imread(file1)
img2 = cv2.imread(file2)
img_h, img_w = img2.shape[:2]
img1 = cv2.resize(img1, dsize = (img_w, img_h))
images.append(img1)
images.append(img2)
ds_images = my_imagetool.make_tileimage(images, xmax = 1024 * 2, ymax = 1024)
return ds_images
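# Illustrative sketch only (edit_pic is not called elsewhere in this script): it could be used to
# build a "before / after" preview of a single frame, assuming the styled frame in './ics' keeps
# the same zero-padded name as its source frame:
#   tile = edit_pic('./tmp_frame/000000.jpg', './ics/000000.jpg')
#   cv2.imwrite('./results/preview.jpg', tile)  # hypothetical output path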
# Build the output movie file name from the checkpoint, source and style names
def get_outfile(opt):
base_dir_pair = os.path.split(opt.source_image)
s_name, s_ext = os.path.splitext(base_dir_pair[1])
base_dir_pair = os.path.split(opt.style_image)
d_name, ext = os.path.splitext(base_dir_pair[1])
path = opt.result_path + '/' + opt.checkpoint + '_' + s_name + '_' + d_name + s_ext
return path
# Reset (delete and recreate) a folder
def reset_folder(path):
if os.path.isdir(path):
shutil.rmtree(path)
os.makedirs(path, exist_ok = True)
# Split a movie into still-image frames
def video_2_images(video_file, image_dir, image_file):
# Initial setting
i = 0
interval = 1
length = 3000  # maximum number of frames to extract
cap = cv2.VideoCapture(video_file)
fps = cap.get(cv2.CAP_PROP_FPS)  # get the frame rate
while cap.isOpened():
flag, frame = cap.read()
if not flag:
break
if i == length*interval:
break
if i % interval == 0:
cv2.imwrite(image_dir + '/' + image_file % str(int(i/interval)).zfill(6), frame)
i += 1
cap.release()
return fps, i, interval
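# Note: frames are written as '000000.jpg', '000001.jpg', ... (index zero-padded to 6 digits via
# zfill(6)); the returned fps, frame count and interval drive the ffmpeg re-encode in main().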
# ** main function **
def main(opt, logger):
# reset the temporary folders
reset_folder(TMP_FRAME_DIR)
reset_folder(TMP_STYLE_DIR)
reset_folder('ics')
# convert the source movie into still images
video_file = opt.source_image
image_dir = TMP_FRAME_DIR
image_file = '%s.jpg'
logger.info(f' video to image... {video_file} → {image_dir}/{image_file}')
fps, i, interval = video_2_images(video_file = video_file, image_dir = image_dir, image_file = image_file)
logger.debug(f' fps = {fps}, count = {i}, interval = {interval}')
# apply style transfer to every frame
base_dir_pair = os.path.split(opt.style_image)
path = TMP_STYLE_DIR + '/' + base_dir_pair[1]
shutil.copy(opt.style_image, path)
c_dir = TMP_FRAME_DIR + '/'
s_dir = TMP_STYLE_DIR + '/'
command = f'python main2.py eval --run_folder True --content {c_dir} --style {s_dir}'
logger.info(command)
os.system(command)
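# main2.py (PAMA eval) is assumed to write the stylized frames into './ics' using the same
# zero-padded numbering as the input frames; the ffmpeg step below relies on that naming.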
# build a movie from the styled frames in the 'ics' folder
if os.path.exists(OUT_MOVIE):  # remove any existing output file first
os.remove(OUT_MOVIE)
out_path = get_outfile(opt)
fps_n = fps/interval
logger.info(f' making movie... → {out_path}')
ffmpeg.input('ics/%06d.jpg', framerate = fps).output(OUT_MOVIE, vcodec = 'libx264', r = fps_n).run(quiet=True)
# extract the audio track from the source and attach it to the new movie
logger.info(f' preparation for sound... {video_file} → {OUT_MOVIE}')
my_videotool.add_audio(video_file, OUT_MOVIE, log_f = False)
# copy the movie into the result folder under its final name
shutil.copy(OUT_MOVIE, out_path)
# play the resulting movie
my_movieplay.movie_play(OUT_MOVIE, title = title)
# Entry point (start of execution)
if __name__ == '__main__':
import datetime
import my_thumbnail
parser = parse_args()
opt = parser.parse_args()
# application logging setup
module = os.path.basename(__file__)
module_name = os.path.splitext(module)[0]
logger = my_logging.get_module_logger_sel(module_name, int(opt.log))
if len(opt.source_image) == 0:
msg = 'Select a source movie: ' + os.getcwd() + DEF_VIDEO_DIR[1:]
opt.source_image = my_thumbnail.movie_dialog(file_path=DEF_VIDEO_DIR, title=msg, theme=DEF_THEME, xn=10, yn=4, thumb_size=128, gap=4, audio_f=True, logger=logger)
if len(opt.source_image) == 0:
exit(0)
if len(opt.style_image) == 0:
msg = 'Select a style image: ' + os.getcwd() + DEF_STYLE_DIR[1:]
opt.style_image = my_thumbnail.image_dialog(file_path=DEF_STYLE_DIR, title=msg, theme=DEF_THEME, xn=10, yn=4, thumb_size=128, gap=4, logger=logger)
if len(opt.style_image) == 0:
exit(0)
select_param(opt.checkpoint)  # select the pretrained parameters (checkpoint)
start_time = datetime.datetime.now()  # start timing
display_info(opt, title)
out_path = get_outfile(opt)
if opt.remake or not os.path.isfile(out_path):
main(opt, logger)
else:
my_movieplay.movie_play(out_path, title = title)
# elapsed time
end_time = datetime.datetime.now()
print(start_time.strftime('\nprocessing start >>\t %Y/%m/%d %H:%M:%S'))
print(end_time.strftime('processing end >>\t %Y/%m/%d %H:%M:%S'))
print('processing time >>\t', end_time - start_time)
msg = 'Processing results: ' + os.getcwd() + RESULT_PATH[1:]
my_thumbnail.movie_dialog(file_path=RESULT_PATH, title=msg, theme=DEF_THEME, xn=10, yn=4, thumb_size=128, gap=4, ret='Exit', audio_f=True, logger=logger)
logger.info('\nFinished.\n')