# -*- coding: utf-8 -*-
##--------------------------------------------------
## Stable Diffusion with diffusers(053) Ver 0.03
##
## 2025.08.14 Masahiro Izutsu
##--------------------------------------------------
## sd_053_test.py diffusers 統合版
##
## Ver 0.00 2025.07.08 sd_053.py
## Ver 0.01 2025.07.14 コマンドライン入力対応
## Ver 0.02 2025.08.12 統合 sd_042(pix2pix), sd0_44(inpaint), sd_046(outpaint), sd_047(scribble),
## sd_048(openpose), sd_081(txt2img), sd_091(img2img), sd_canny(canny)
## Ver 0.03 2025.08.14 sd_047 scribble 修正
# Title string shown by sdt.display_info at startup
title = 'Stable Diffusion with diffusers(053) Ver 0.03'
import warnings
warnings.simplefilter('ignore')  # silence library warnings globally (diffusers/torch are noisy)
# インポート&初期設定
import os
import torch
import numpy as np
import cv2
from PIL import Image
from diffusers.utils import load_image
from diffusers import ControlNetModel, logging
import my_logging
import my_imagetool
import sd_tools as sdt
logging.set_verbosity_error()
# Constant definitions
MODEL_DIR = '/StabilityMatrix/Data/Models/StableDiffusion'
# NOTE(review): 'MODEEL' is a typo, but the name is referenced elsewhere — kept for compatibility
CTRL_MODEEL_DIR = '/StabilityMatrix/Data/Models/ControlNet'
MODEL_BASE_BRAV5 = 'SD1.5/beautifulRealistic_brav5.safetensors'
MODEL_BASE_V15 = 'SD1.5/v1-5-pruned-emaonly.safetensors'
# Scheduler selector keywords
SCHEDULER_non = 'non'
SCHEDULER_euler = 'euler'
SCHEDULER_uni = 'uni'
SCHEDULER_DPM = 'DPM'
# Application mode keywords
MODE_canny = 'canny'
MODE_inpaint = 'inpaint'
MODE_outpaint = 'outpaint'
MODE_scribble = 'scribble'
MODE_openpose = 'openpose'
MODE_pix2pix = 'pix2pix'
MODE_txt2img = 'txt2img'
MODE_img2img = 'img2img'
MODE_lineart = 'lineart'
MODE_softedge = 'softedge'
MODE_shuffle = 'shuffle'
MODE_depth = 'depth'
MODE_seg = 'seg'
# ADetailer extension selectors
ADETAILER_boy = 'boy'
ADETAILER_girl = 'girl'
# Default source image per mode
DEF_IMAGE_canny = 'images/vermeer.png'
DEF_IMAGE_inpaint = 'images/sd_038_test.png'
DEF_IMAGE_outpaint = 'images/sd_046_test.png'
DEF_IMAGE_scribble = 'images/sd_047.png'
DEF_IMAGE_openpose = 'images/sd_048_test1.png'
DEF_IMAGE_pix2pix = 'images/sd_040_test.png'
DEF_IMAGE_img2img = 'images/StableDiffusion_247.png'
DEF_IMAGE_lineart = 'images/sd_040_test.png'
DEF_IMAGE_softedge = 'images/sd_040_test.png'
DEF_IMAGE_shuffle = 'images/sd_040_test.png'
DEF_IMAGE_depth = 'images/sd_040_test.png'
DEF_IMAGE_seg = 'images/sd_040_test.png'
# Default prompt per mode (Japanese prompts are deliberate runtime input — do not translate)
DEF_PROMPT_canny = '微笑んでいる女性'
DEF_PROMPT_inpaint = '微笑んでいる女性'
DEF_PROMPT_outpaint = '庭に立って微笑んでいる女性'
DEF_PROMPT_scribble = 'テーブル上の白いコーヒーカップ'
DEF_PROMPT_openpose = 'ダンスを踊る女性'
DEF_PROMPT_pix2pix = '浜辺の場面にする'
DEF_PROMPT_txt2img = '満開の蘭'
DEF_PROMPT_img2img = '黒髪で短い髪の女性'
DEF_PROMPT_lineart = '微笑んでいる女性'
DEF_PROMPT_softedge = '微笑んでいる女性'
DEF_PROMPT_depth = '微笑んでいる女性'
DEF_PROMPT_seg = '微笑んでいる女性'
DEF_PROMPT_shuffle = '微笑んでいる女性'
# Default negative prompt per mode
DEF_NPROMPT_canny = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_inpaint = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_outpaint = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_scribble = ''
DEF_NPROMPT_openpose = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_pix2pix = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_txt2img = ''
DEF_NPROMPT_img2img = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_lineart = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_softedge = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_shuffle = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_depth = '最悪の品質、おかしい人体構造'
DEF_NPROMPT_seg = '最悪の品質、おかしい人体構造'
# Command-line option definitions: [name, default, help]
# FIX: corrected typos in help texts ('Sourcs', 'rundom', 'gaidanse', 'filr',
# 'aplication') and added the missing 'seg' mode to the mode help string.
opt_list = [
    ['pros_sel','','sd_053'],                                                # 0
    ['result_image', 'results/image_053.png', 'path to output image file'],  # 1
    ['cpu', 'store_true', 'cpu mode'],                                       # 2
    ['log', '3', 'Log level(-1/0/1/2/3/4/5) Default value is \'3\''],        # 3
    ['model_dir', '', 'Model directory'],                                    # 4
    ['model_path', '', 'Model Path'],                                        # 5
    ['ctrl_model_dir', '', 'ControlNet Model directory'],                    # 6
    ['ctrl_model_path', '', 'ControlNet Model Path'],                        # 7
    ['image_path', '', 'Source image file path'],                            # 8
    ['ctrl_image_path', '', 'Control image file path'],                      # 9
    ['max_size', 0, 'image max size (0=source)'],                            # 10
    ['prompt', '', 'Prompt text'],                                           # 11
    ['seed', 12345678, 'Seed parameter (-1 = random)'],                      # 12
    ['width', 512, 'image size width'],                                      # 13
    ['height', 512, 'image size height'],                                    # 14
    ['step', 20, 'infer step'],                                              # 15
    ['scale', 7.0, 'guidance scale'],                                        # 16
    ['cc_scale', 1.0, 'controlnet conditioning scale'],                      # 17
    ['strength', 0.6, 'strength value'],                                     # 18
    ['neg_prompt', '', 'Negative Prompt text'],                              # 19
    ['ip_image_path', '', 'IP-Adapter image file path'],                     # 20
    ['ip_scale', 0.5, 'IP-Adapter scale'],                                   # 21
    ['scheduler', '', "Scheduler 'non/euler/uni/DPM'"],                      # 22
    ['ext', '', "Extensions (ADetailer) '' or 'girl' or 'boy'"],             # 23
    ['mode', '', "application mode 'canny/inpaint/outpaint/scribble/openpose/pix2pix/txt2img/img2img/lineart/softedge/shuffle/depth/seg'"],
]
class Pipeline:
    """Pipeline factory for the unified Stable Diffusion script.

    Resolves, from the application mode, which SD1.5 base checkpoint and
    which ControlNet checkpoint to use, and builds the matching diffusers
    pipeline, optional IP-Adapter and scheduler on demand.
    """
    # In-class constants: ControlNet checkpoint file names per mode
    MODEL_canny = 'control_v11p_sd15_canny_fp16.safetensors'
    MODEL_inpaint = 'control_v11p_sd15_inpaint_fp16.safetensors'
    MODEL_scribble = 'control_v11p_sd15_scribble_fp16.safetensors'
    MODEL_openpose = 'control_v11p_sd15_openpose_fp16.safetensors'
    MODEL_pix2pix = 'control_v11e_sd15_ip2p_fp16.safetensors'
    MODEL_lineart = 'control_v11p_sd15_lineart_fp16.safetensors'
    MODEL_softedge = 'control_v11p_sd15_softedge_fp16.safetensors'
    MODEL_shuffle = 'control_v11e_sd15_shuffle_fp16.safetensors'
    MODEL_depth = 'control_v11f1p_sd15_depth_fp16.safetensors'
    MODEL_seg = 'control_v11p_sd15_seg_fp16.safetensors'
    # IP-Adapter repository / weight file
    IP_CHECKPOINT_DIR = 'h94/IP-Adapter'
    IP_CHECKPOINT_SD15 = 'ip-adapter_sd15.bin'

    # Class initialization
    def __init__(self, mode = '', device = '', model_dir = '', ctrl_model_dir = '', scheduler = ''):
        """Select base and ControlNet models from *mode*; '' arguments fall back to defaults."""
        self.mode = MODE_canny if mode == '' else mode
        # FIX: default device string was the typo 'cou'; corrected to 'cpu'
        self.device = 'cpu' if device == '' else device
        self.model_dir = MODEL_DIR if model_dir == '' else model_dir
        self.ctrl_model_dir = CTRL_MODEEL_DIR if ctrl_model_dir == '' else ctrl_model_dir
        self.scheduler = SCHEDULER_non if scheduler == '' else scheduler
        # (base checkpoint, ControlNet checkpoint) per mode; '' means "no ControlNet"
        model_table = {
            MODE_canny:    (MODEL_BASE_BRAV5, self.MODEL_canny),
            MODE_inpaint:  (MODEL_BASE_BRAV5, self.MODEL_inpaint),
            MODE_outpaint: (MODEL_BASE_BRAV5, self.MODEL_inpaint),
            MODE_scribble: (MODEL_BASE_V15,   self.MODEL_scribble),
            MODE_openpose: (MODEL_BASE_BRAV5, self.MODEL_openpose),
            MODE_pix2pix:  (MODEL_BASE_BRAV5, self.MODEL_pix2pix),
            MODE_txt2img:  (MODEL_BASE_V15,   ''),
            MODE_img2img:  (MODEL_BASE_BRAV5, ''),
            MODE_lineart:  (MODEL_BASE_BRAV5, self.MODEL_lineart),
            MODE_softedge: (MODEL_BASE_BRAV5, self.MODEL_softedge),
            MODE_shuffle:  (MODEL_BASE_BRAV5, self.MODEL_shuffle),
            MODE_depth:    (MODEL_BASE_BRAV5, self.MODEL_depth),
            MODE_seg:      (MODEL_BASE_BRAV5, self.MODEL_seg),
        }
        if self.mode in model_table:
            base_model, ctrl_model = model_table[self.mode]
            self.select_base_model(base_model)
            if ctrl_model == '':
                self.ctrl_model_dir = ''    # txt2img / img2img: no ControlNet directory
            self.select_ctrl_model(ctrl_model)
        else:
            # Unknown mode: no ControlNet.  FIX: also select a default base model so
            # later reads of self.base_model cannot raise AttributeError.
            self.select_base_model(MODEL_BASE_BRAV5)
            self.ctrl_model_dir = ''
            self.select_ctrl_model('')

    def select_base_model(self, base_model):
        """Record the base checkpoint name and its full path (model_dir-prefixed when set)."""
        self.base_model = base_model
        self.model_path = base_model if self.model_dir == '' else self.model_dir + '/' + base_model

    def select_ctrl_model(self, ctrl_model):
        """Record the ControlNet checkpoint name and path; '' disables ControlNet."""
        if ctrl_model == '':
            self.ctrl_model = ''
            self.ctrl_model_path = ''
        else:
            self.ctrl_model = ctrl_model
            self.ctrl_model_path = ctrl_model if self.ctrl_model_dir == '' else self.ctrl_model_dir + '/' + ctrl_model

    # Create the pipeline
    def cleate_pipeline(self, logger = None):
        """Build and return the diffusers pipeline for the current mode.

        (Method name typo 'cleate' kept for backward compatibility with callers.)
        Uses fp16 weights unless running on CPU.
        """
        if self.mode == MODE_inpaint or self.mode == MODE_outpaint:
            from diffusers import StableDiffusionControlNetInpaintPipeline as StableDiffusionPipeline
        elif self.mode == MODE_txt2img:
            from diffusers import StableDiffusionPipeline
        elif self.mode == MODE_img2img:
            from diffusers import StableDiffusionImg2ImgPipeline as StableDiffusionPipeline
        else:
            # canny / scribble / openpose / pix2pix / lineart / softedge / shuffle / depth / seg
            # FIX: original left StableDiffusionPipeline unbound for unmatched modes (NameError)
            from diffusers import StableDiffusionControlNetPipeline as StableDiffusionPipeline
        # FIX: 'controlnet' was unbound when no ControlNet model is configured
        controlnet = None
        if self.device == 'cpu':
            if self.ctrl_model_path != '':
                controlnet = ControlNetModel.from_single_file(self.ctrl_model_path)
            pipeline = StableDiffusionPipeline.from_single_file(self.model_path, controlnet = controlnet)
        else:
            if self.ctrl_model_path != '':
                controlnet = ControlNetModel.from_single_file(self.ctrl_model_path, torch_dtype = torch.float16)
            pipeline = StableDiffusionPipeline.from_single_file(
                self.model_path,
                controlnet = controlnet,
                torch_dtype = torch.float16,
            )
        sdt.log_debug(f' ** model_path: {self.model_path}', logger)
        sdt.log_debug(f' ** select_ctrl_model: {self.ctrl_model_path}', logger)
        return pipeline

    # Load the IP-Adapter
    def load_ipadapter(self, pipeline, ip_image = None, ip_scale = 0.5):
        """Attach the SD1.5 IP-Adapter when an IP image is given; otherwise a no-op."""
        if ip_image is not None:
            pipeline.load_ip_adapter(self.IP_CHECKPOINT_DIR, subfolder = 'models', weight_name = self.IP_CHECKPOINT_SD15)
            pipeline.set_ip_adapter_scale(ip_scale)
            # NOTE(review): relies on the module-global 'logger' (set in __main__) — confirm
            sdt.log_debug(f' ** IP Adapter: {ip_image}', logger)
        return pipeline

    # Scheduler
    def set_scheduler(self, pipeline):
        """Swap the pipeline scheduler according to self.scheduler ('non' keeps the default)."""
        if self.scheduler == SCHEDULER_uni:
            from diffusers import UniPCMultistepScheduler
            pipeline.scheduler = UniPCMultistepScheduler.from_config(pipeline.scheduler.config)
        elif self.scheduler == SCHEDULER_euler:
            from diffusers import EulerAncestralDiscreteScheduler
            pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
        elif self.scheduler == SCHEDULER_DPM:
            from diffusers import DPMSolverMultistepScheduler
            pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
        # NOTE(review): relies on the module-global 'logger' — confirm
        sdt.log_debug(f' ** scheduler: {self.scheduler}', logger)
        return pipeline
# Pre-processing of the input image(s)
def pre_generation(opt, logger = None):
    """Prepare the images required by opt.mode before generation.

    Depending on the mode this creates/caches an edge, scribble, pose, lineart,
    softedge, shuffle, depth or segmentation control image, builds inpaint /
    outpaint masks, or simply loads the source image.  opt.image_path (and for
    mask modes opt.ctrl_image_path / opt.strength) may be mutated as a side effect.

    Returns:
        tuple: (src_image, msk_image, img_ctrl); each element may be None.
               (None, None, None) signals that inpaint mask creation was aborted.
    """
    device = sdt._get_device(opt, logger)  # NOTE(review): 'device' appears unused here — confirm
    image_path = sdt._get_source_image_path(opt, logger)
    work_path = sdt.get_work_path(logger)
    os.makedirs(work_path, exist_ok = True)  # create the work folder
    src_image = None
    msk_image = None
    img_ctrl = None
    if opt.mode == MODE_txt2img:  # == txt2img == (no input image required)
        return src_image, msk_image, img_ctrl
    image = sdt._get_source_image(opt, logger)
    if opt.mode == MODE_canny:  # == canny ==
        # Reuse a cached canny image when present, otherwise create and save one
        path = sdt.get_canny_path(image_path, logger)
        if os.path.isfile(path):
            src_image = Image.open(path)
        else:
            src_image = sdt.canny_preprocessor(image, 100, 200)  # Canny thresholds 100/200
            src_image.save(path)
        sdt.image_disp(path, path, wait_s = 1)
    elif opt.mode == MODE_inpaint:  # == inpaint ==
        _, mask_path = sdt.get_source_mask_path(image_path, logger)
        if not os.path.isfile(mask_path):
            # Let the user paint a mask interactively; abort if one still does not exist
            import sd_mask
            sd_mask.mask_paint(image_path, work_path, False, logger)
            if not os.path.isfile(mask_path):
                return None, None, None
        opt.ctrl_image_path = mask_path
        msk_image = sdt._get_control_image(opt, logger)
        src_image = sdt._get_source_image(opt, logger)
        img_ctrl = sdt.make_inpaint_condition(src_image, msk_image)
        sdt.image_disp(mask_path, mask_path, wait_s = 1)
    elif opt.mode == MODE_outpaint:  # == outpaint ==
        src_path, mask_path = sdt.get_source_mask_path(image_path, logger)
        size = 512  # working canvas size for outpainting
        img = cv2.imread(image_path)
        msk = sdt.mask_square(img, size)
        msk = my_imagetool.frame_resize(msk, size)
        my_imagetool.image_disp(msk, mask_path, True, mask_path, wait_s = 1)  # save the mask image
        img = my_imagetool.frame_square(img, (0, 0, 0))
        img = my_imagetool.frame_resize(img, size)
        my_imagetool.image_disp(img, src_path, True, src_path, wait_s = 1)  # save the source image
        opt.strength = None  # strength is not used for outpainting
        opt.image_path = src_path
        opt.ctrl_image_path = mask_path
        src_image = sdt._get_source_image(opt, logger)
        msk_image = sdt._get_control_image(opt, logger)
        img_ctrl = sdt.make_inpaint_condition(src_image, msk_image)
    elif opt.mode == MODE_scribble:  # == scribble ==
        # A 3-channel input is first converted to a scribble image and cached
        if sdt.get_image_channel(image_path) == 3:
            img = cv2.imread(image_path)
            msk = sdt.scribble_preprocessor(img)
            mask_path = sdt.get_scribble_path(image_path, logger)
            sdt.image_save2(msk, mask_path)
            opt.image_path = mask_path
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
        sdt.image_save2(src_image, dispname = image_path, wait_s = 1)
    elif opt.mode == MODE_openpose:  # == openpose ==
        # Detect the pose once and cache it next to the source image
        pose_path = sdt.get_pose_path(image_path, logger)
        if not os.path.isfile(pose_path):
            from controlnet_aux import OpenposeDetector
            src_image = sdt._get_resize_image(image_path, 0, None)
            openpose_detector = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
            openpose_image = openpose_detector(src_image)
            openpose_image.save(pose_path)
        opt.image_path = pose_path
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
        sdt.image_save2(src_image, dispname = image_path, wait_s = 1)
    elif opt.mode == MODE_pix2pix:  # == pix2pix == (source image used directly)
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
    elif opt.mode == MODE_img2img:  # == img2img == (source image used directly)
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
    elif opt.mode == MODE_lineart:  # == lineart ==
        if sdt.get_image_channel(image_path) == 3:
            img = cv2.imread(image_path)
            msk = sdt.lineart_preprocessor(img)
            mask_path = sdt.get_lineart_path(image_path, logger)
            sdt.image_save2(msk, mask_path)
            opt.image_path = mask_path
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
        sdt.image_save2(src_image, dispname = image_path, wait_s = 1)
    elif opt.mode == MODE_softedge:  # == softedge ==
        if sdt.get_image_channel(image_path) == 3:
            img = cv2.imread(image_path)
            msk = sdt.softedge_preprocessor(img)
            mask_path = sdt.get_softedge_path(image_path, logger)
            sdt.image_save2(msk, mask_path)
            opt.image_path = mask_path
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
        sdt.image_save2(src_image, dispname = image_path, wait_s = 1)
    elif opt.mode == MODE_shuffle:  # == shuffle ==
        if sdt.get_image_channel(image_path) == 3:
            img = cv2.imread(image_path)
            msk = sdt.shuffle_preprocessor(img)
            mask_path = sdt.get_shuffle_path(image_path, logger)
            sdt.image_save2(msk, mask_path)
            opt.image_path = mask_path
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
        sdt.image_save2(src_image, dispname = image_path, wait_s = 1)
    elif opt.mode == MODE_depth:  # == depth == (PIL input, unlike the cv2-based modes above)
        if sdt.get_image_channel(image_path) == 3:
            img = Image.open(image_path)
            msk = sdt.depth_preprocessor(img)
            mask_path = sdt.get_depth_path(image_path, logger)
            sdt.image_save2(msk, mask_path)
            opt.image_path = mask_path
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
        sdt.image_save2(src_image, dispname = image_path, wait_s = 1)
    elif opt.mode == MODE_seg:  # == seg == (no channel guard: always preprocessed)
        img = Image.open(image_path)
        msk = sdt.seg_preprocessor(img)
        mask_path = sdt.get_seg_path(image_path, logger)
        sdt.image_save2(msk, mask_path)
        opt.image_path = mask_path
        src_image = sdt._get_source_image(opt, logger)
        msk_image = None
        img_ctrl = None
        sdt.image_save2(src_image, dispname = image_path, wait_s = 1)
    return src_image, msk_image, img_ctrl
# Image generation
def image_generation(pl, ext, model_path, ctrl_model_path, src_image, msk_image, img_ctrl, prompt, seed, num_inference_steps=20, width=512, height=512, guidance_scale=7.0, cc_scale=1.0, strength=0.6, neg_prompt = '', ip_image=None, ip_scale=0.5, device='cpu'):
    """Run the diffusion pipeline and return the generated PIL image.

    Args:
        pl: Pipeline object (factory for the diffusers pipeline).
        ext: ADetailer extension selector ('', 'boy' or 'girl').
        src_image / msk_image / img_ctrl: outputs of pre_generation (may be None).
        strength: denoising strength; None (or a blank string) omits it from the call.
    Returns:
        PIL.Image or None (None when ADetailer finds no face).
    """
    # Blank-ish strings mean "not specified" (covers half- and full-width spaces)
    if isinstance(neg_prompt, str) and not neg_prompt.strip():
        neg_prompt = None
    if isinstance(strength, str) and not strength.strip():
        strength = None
    # Remember the original size so the result can be scaled back afterwards
    w = h = None
    if src_image is not None:
        w, h = src_image.size
        if w != width or h != height:
            src_image = src_image.resize((width, height), resample=Image.BICUBIC)
    # Create the pipeline
    pipeline = pl.cleate_pipeline(logger)
    # Load the IP-Adapter
    # FIX: ip_scale was previously ignored; forward it to the loader
    pipeline = pl.load_ipadapter(pipeline, ip_image, ip_scale)
    # Scheduler
    pipeline = pl.set_scheduler(pipeline)
    pipeline.enable_model_cpu_offload()
    # Create the Generator object for reproducible results
    generator = torch.Generator(device).manual_seed(seed)
    # Generate the image (single call; 'strength' added only when specified)
    call_args = dict(
        prompt = prompt,
        negative_prompt = neg_prompt,
        image = src_image,
        mask_image = msk_image,
        control_image = img_ctrl,
        num_inference_steps = num_inference_steps,
        width = width,
        height = height,
        guidance_scale = guidance_scale,  # FIX: was accepted but never forwarded
        controlnet_conditioning_scale = cc_scale,
        ip_adapter_image = ip_image,
        generator = generator,
    )
    if strength is not None:
        call_args['strength'] = strength
    image = pipeline(**call_args).images[0]
    # ADetailer post-processing (face refinement)
    if image is not None and (ext == ADETAILER_boy or ext == ADETAILER_girl):
        import sd_051
        prompt = sd_051.def_prompt_m if ext == ADETAILER_boy else sd_051.def_prompt
        image = sd_051.image_generation(image, device = device, prompt = prompt, model_path = model_path)
        sdt.log_debug(f' ** (Adetailer) prompt: {prompt}', logger)
    # Scale the result back to the source size
    # FIX: guard against image being None (ADetailer may return None)
    if image is not None and src_image is not None and (w != width or h != height):
        image = image.resize((w, h), resample=Image.BICUBIC)
    return image
# ** main function **
def main(opt, pl, logger = None):
    """Resolve all generation parameters, run pre-processing and generation, save the result."""
    # -- parameter resolution (all option access goes through sd_tools helpers) --
    device = sdt._get_device(opt, logger)
    result_image_path = sdt._get_result_image_path(opt, logger)
    result_path = sdt._get_result_path(opt, logger)
    prompt = sdt._get_prompt(opt, logger)
    image_path = sdt._get_source_image_path(opt, logger)
    model_path = sdt._get_model_path(opt, logger)
    ctrl_model_path = sdt._get_controlnet_model_path(opt, logger)
    height, width = sdt._get_image_size(opt, logger)
    seed = sdt._get_seed_value(opt, logger)
    steps = sdt._get_inference_steps(opt, logger)
    g_scale = sdt._get_guidance_scale(opt, logger)
    cc_scale = sdt._get_controlnet_conditioning_scale(opt, logger)
    neg_prompt = sdt._get_negative_prompt(opt, logger)
    ip_image = sdt._get_ip_image(opt, logger)
    ip_scale = sdt._get_ip_scale(opt, logger)
    # -- pre-process the input images for the selected mode --
    src_image, msk_image, img_ctrl = pre_generation(opt, logger)
    if opt.mode != MODE_txt2img and src_image is None:
        logger.info(f'{sdt.RED}Processing will be stopped !!{sdt.NOCOLOR}')
        return
    # strength is read after pre_generation, which may clear it (outpaint)
    strength = sdt._get_strength(opt, logger)
    # -- make sure the output folder exists --
    os.makedirs(result_path, exist_ok = True)
    # -- generate the image --
    image = image_generation(pl, opt.ext, model_path, ctrl_model_path, src_image, msk_image, img_ctrl, prompt, seed, steps, width, height, g_scale, cc_scale, strength, neg_prompt, ip_image, ip_scale, device)
    if image is None:
        logger.info(f'{sdt.RED}There is no face in the image !!{sdt.NOCOLOR}')
    else:
        # Save as <result-stem>-<source-stem>-<mode>[-<ip-stem>]<ext>
        root, suffix = os.path.splitext(result_image_path)
        stem = os.path.splitext(os.path.basename(image_path))[0]
        ip_tag = '' if ip_image is None else '-' + os.path.splitext(os.path.basename(opt.ip_image_path))[0]
        save_path = f'{root}-{stem}-{opt.mode}{ip_tag}{suffix}'
        sdt.image_save2(image, save_path, save_path)
        logger.info(f'result_file: {save_path}')
    return
# main entry point (execution starts here)
if __name__ == "__main__":
    parser = sdt.parse_args(None, opt_list)
    opt = parser.parse_args()
    device = sdt._get_device(opt)
    # -- per-mode defaults, table-driven (replaces the long if/elif chains) --
    # scheduler default and minimum inference-step count per mode (0 = no minimum)
    SCHEDULER_DEFAULTS = {
        MODE_canny:    (SCHEDULER_euler, 0),
        MODE_inpaint:  (SCHEDULER_euler, 0),
        MODE_outpaint: (SCHEDULER_euler, 0),
        MODE_scribble: (SCHEDULER_non,   0),
        MODE_openpose: (SCHEDULER_non,  50),
        MODE_pix2pix:  (SCHEDULER_euler, 0),
        MODE_txt2img:  (SCHEDULER_non,   0),
        MODE_img2img:  (SCHEDULER_DPM,   0),
        MODE_lineart:  (SCHEDULER_non,   0),
        MODE_softedge: (SCHEDULER_non,   0),
        MODE_shuffle:  (SCHEDULER_non,  50),
        MODE_depth:    (SCHEDULER_non,  50),
        MODE_seg:      (SCHEDULER_uni,   0),
    }
    IMAGE_DEFAULTS = {
        MODE_canny:    DEF_IMAGE_canny,
        MODE_inpaint:  DEF_IMAGE_inpaint,
        MODE_outpaint: DEF_IMAGE_outpaint,
        MODE_scribble: DEF_IMAGE_scribble,
        MODE_openpose: DEF_IMAGE_openpose,
        MODE_pix2pix:  DEF_IMAGE_pix2pix,
        MODE_txt2img:  '',          # txt2img needs no source image
        MODE_img2img:  DEF_IMAGE_img2img,
        MODE_lineart:  DEF_IMAGE_lineart,
        MODE_softedge: DEF_IMAGE_softedge,
        MODE_shuffle:  DEF_IMAGE_shuffle,
        MODE_depth:    DEF_IMAGE_depth,
        MODE_seg:      DEF_IMAGE_seg,
    }
    PROMPT_DEFAULTS = {
        MODE_canny:    DEF_PROMPT_canny,
        MODE_inpaint:  DEF_PROMPT_inpaint,
        MODE_outpaint: DEF_PROMPT_outpaint,
        MODE_scribble: DEF_PROMPT_scribble,
        MODE_openpose: DEF_PROMPT_openpose,
        MODE_pix2pix:  DEF_PROMPT_pix2pix,
        MODE_txt2img:  DEF_PROMPT_txt2img,
        MODE_img2img:  DEF_PROMPT_img2img,
        MODE_lineart:  DEF_PROMPT_lineart,
        MODE_softedge: DEF_PROMPT_softedge,
        MODE_shuffle:  DEF_PROMPT_shuffle,
        MODE_depth:    DEF_PROMPT_depth,
        MODE_seg:      DEF_PROMPT_seg,
    }
    NPROMPT_DEFAULTS = {
        MODE_canny:    DEF_NPROMPT_canny,
        MODE_inpaint:  DEF_NPROMPT_inpaint,
        MODE_outpaint: DEF_NPROMPT_outpaint,
        MODE_scribble: DEF_NPROMPT_scribble,
        MODE_openpose: DEF_NPROMPT_openpose,
        MODE_pix2pix:  DEF_NPROMPT_pix2pix,
        MODE_txt2img:  DEF_NPROMPT_txt2img,
        MODE_img2img:  DEF_NPROMPT_img2img,
        MODE_lineart:  DEF_NPROMPT_lineart,
        MODE_softedge: DEF_NPROMPT_softedge,
        MODE_shuffle:  DEF_NPROMPT_shuffle,  # FIX: shuffle branch was missing from the original chain
        MODE_depth:    DEF_NPROMPT_depth,
        MODE_seg:      DEF_NPROMPT_seg,
    }
    # -- initial settings: fill in unspecified options from the tables --
    if opt.scheduler == '' and opt.mode in SCHEDULER_DEFAULTS:
        opt.scheduler, min_step = SCHEDULER_DEFAULTS[opt.mode]
        if min_step and int(opt.step) < min_step:
            opt.step = min_step
    if opt.image_path == '':
        opt.image_path = IMAGE_DEFAULTS.get(opt.mode, '')
    if opt.prompt == '':
        opt.prompt = PROMPT_DEFAULTS.get(opt.mode, '')
    if opt.neg_prompt == '':
        opt.neg_prompt = NPROMPT_DEFAULTS.get(opt.mode, '')
    # -- pipeline object; mirror the resolved settings back into opt --
    pl = Pipeline(mode = opt.mode, device = device, model_dir = opt.model_dir, ctrl_model_dir = opt.ctrl_model_dir, scheduler = opt.scheduler)
    opt.scheduler = pl.scheduler
    opt.model_dir = pl.model_dir
    opt.ctrl_model_dir = pl.ctrl_model_dir
    if opt.model_path == '':
        opt.model_path = pl.base_model
    else:
        pl.select_base_model(opt.model_path)
    if opt.ctrl_model_path == '':
        opt.ctrl_model_path = pl.ctrl_model
    else:
        pl.select_ctrl_model(opt.ctrl_model_path)  # FIX: was 'pi.' (NameError)
    sdt.display_info(opt, title)
    # -- application log setup --
    module = os.path.basename(__file__)
    module_name = os.path.splitext(module)[0]
    logger = my_logging.get_module_logger_sel(module_name, int(opt.log))
    main(opt, pl, logger)
    logger.info('\nFinished.\n')
# NOTE: In the source as displayed, half-width '}' characters may appear as full-width '}' due to display constraints.