Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
Focal crop UI elements
commit db8ed5fe5c
parent 6629446a2f
modules/textual_inversion/preprocess.py

@@ -13,7 +13,7 @@ if cmd_opts.deepdanbooru:
     import modules.deepbooru as deepbooru


-def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_entropy_focus=False):
+def preprocess(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
     try:
         if process_caption:
             shared.interrogator.load()
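Every parameter added to preprocess() above is a keyword argument with a default, so existing callers are unaffected. For orientation only, a minimal sketch of a direct call that exercises the new focal-crop options; the directories and option values here are placeholders, not part of this commit:

# Illustrative call against the extended signature; the paths are placeholders.
from modules.textual_inversion.preprocess import preprocess

preprocess(
    process_src="train/raw",                 # placeholder source directory
    process_dst="train/processed",           # placeholder destination directory
    process_width=512,
    process_height=512,
    preprocess_txt_action="ignore",
    process_flip=False,
    process_split=False,
    process_caption=False,
    process_focal_crop=True,                 # enable the new auto focal point crop
    process_focal_crop_face_weight=0.9,      # defaults mirror the new UI sliders
    process_focal_crop_entropy_weight=0.3,
    process_focal_crop_edges_weight=0.5,
    process_focal_crop_debug=False,
)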
@@ -23,7 +23,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce
             db_opts[deepbooru.OPT_INCLUDE_RANKS] = False
             deepbooru.create_deepbooru_process(opts.interrogate_deepbooru_score_threshold, db_opts)

-        preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_entropy_focus)
+        preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru, split_threshold, overlap_ratio, process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug)

     finally:

@@ -35,7 +35,7 @@ def preprocess(process_src, process_dst, process_width, process_height, preproce



-def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_entropy_focus=False):
+def preprocess_work(process_src, process_dst, process_width, process_height, preprocess_txt_action, process_flip, process_split, process_caption, process_caption_deepbooru=False, split_threshold=0.5, overlap_ratio=0.2, process_focal_crop=False, process_focal_crop_face_weight=0.9, process_focal_crop_entropy_weight=0.3, process_focal_crop_edges_weight=0.5, process_focal_crop_debug=False):
     width = process_width
     height = process_height
     src = os.path.abspath(process_src)
@@ -139,27 +139,27 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre
             ratio = (img.height * width) / (img.width * height)
             inverse_xy = True

-        processing_option_ran = False
+        process_default_resize = True

         if process_split and ratio < 1.0 and ratio <= split_threshold:
             for splitted in split_pic(img, inverse_xy):
                 save_pic(splitted, index, existing_caption=existing_caption)
-            processing_option_ran = True
+            process_default_resize = False

-        if process_entropy_focus and img.height != img.width:
+        if process_focal_crop and img.height != img.width:
             autocrop_settings = autocrop.Settings(
                 crop_width = width,
                 crop_height = height,
-                face_points_weight = 0.9,
-                entropy_points_weight = 0.7,
-                corner_points_weight = 0.5,
-                annotate_image = False
+                face_points_weight = process_focal_crop_face_weight,
+                entropy_points_weight = process_focal_crop_entropy_weight,
+                corner_points_weight = process_focal_crop_edges_weight,
+                annotate_image = process_focal_crop_debug
             )
-            focal = autocrop.crop_image(img, autocrop_settings)
-            save_pic(focal, index, existing_caption=existing_caption)
-            processing_option_ran = True
+            for focal in autocrop.crop_image(img, autocrop_settings):
+                save_pic(focal, index, existing_caption=existing_caption)
+            process_default_resize = False

-        if not processing_option_ran:
+        if process_default_resize:
             img = images.resize_image(1, img, width, height)
             save_pic(img, index, existing_caption=existing_caption)

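The three weights introduced here are handed to autocrop.Settings, which balances face, entropy, and edge (corner) focal-point candidates when choosing the crop. As a rough, self-contained illustration of that weighting idea only, not the code in the webui's autocrop module; FocalPoint stands in for its internal detectors:

# Standalone sketch of a weighted focal-point crop, for illustration only.
# NOT the webui's autocrop implementation; FocalPoint stands in for the
# face/entropy/edge candidates that autocrop scores internally.
from dataclasses import dataclass
from PIL import Image


@dataclass
class FocalPoint:
    x: float
    y: float
    weight: float  # e.g. the face, entropy, or edges weight from the UI


def weighted_focal_crop(img: Image.Image, points: list, crop_w: int, crop_h: int) -> Image.Image:
    # Average the candidate points, weighted by their configured importance.
    total = sum(p.weight for p in points) or 1.0
    cx = sum(p.x * p.weight for p in points) / total
    cy = sum(p.y * p.weight for p in points) / total

    # Center the crop box on the focal point, clamped to the image bounds.
    left = min(max(int(cx - crop_w / 2), 0), img.width - crop_w)
    top = min(max(int(cy - crop_h / 2), 0), img.height - crop_h)
    return img.crop((left, top, left + crop_w, top + crop_h))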
modules/ui.py

@@ -1260,7 +1260,7 @@ def create_ui(wrap_gradio_gpu_call):
                     with gr.Row():
                         process_flip = gr.Checkbox(label='Create flipped copies')
                         process_split = gr.Checkbox(label='Split oversized images')
-                        process_entropy_focus = gr.Checkbox(label='Create auto focal point crop')
+                        process_focal_crop = gr.Checkbox(label='Auto focal point crop')
                         process_caption = gr.Checkbox(label='Use BLIP for caption')
                         process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True if cmd_opts.deepdanbooru else False)

@@ -1268,6 +1268,12 @@ def create_ui(wrap_gradio_gpu_call):
                         process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
                         process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05)

+                    with gr.Row(visible=False) as process_focal_crop_row:
+                        process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05)
+                        process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.3, minimum=0.0, maximum=1.0, step=0.05)
+                        process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
+                        process_focal_crop_debug = gr.Checkbox(label='Create debug image')
+
                     with gr.Row():
                         with gr.Column(scale=3):
                             gr.HTML(value="")
@@ -1281,6 +1287,12 @@ def create_ui(wrap_gradio_gpu_call):
                        outputs=[process_split_extra_row],
                    )

+                    process_focal_crop.change(
+                        fn=lambda show: gr_show(show),
+                        inputs=[process_focal_crop],
+                        outputs=[process_focal_crop_row],
+                    )
+
                 with gr.Tab(label="Train"):
                     gr.HTML(value="<p style='margin-bottom: 0.7em'>Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images <a href=\"https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Textual-Inversion\" style=\"font-weight:bold;\">[wiki]</a></p>")
                     with gr.Row():
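gr_show is a small webui helper that returns a visibility update for the target component. The same show/hide pattern in plain Gradio, with gr.update standing in for gr_show, looks roughly like this self-contained sketch; the component names echo the commit but the demo itself is illustrative:

# Minimal, standalone sketch of the checkbox-toggles-row pattern used above.
# gr.update stands in for the webui's gr_show helper.
import gradio as gr

with gr.Blocks() as demo:
    enable_crop = gr.Checkbox(label='Auto focal point crop')

    with gr.Row(visible=False) as crop_options_row:
        face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05)
        debug_image = gr.Checkbox(label='Create debug image')

    # Show the options row only while the checkbox is ticked.
    enable_crop.change(
        fn=lambda show: gr.update(visible=show),
        inputs=[enable_crop],
        outputs=[crop_options_row],
    )

if __name__ == "__main__":
    demo.launch()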
@@ -1368,7 +1380,11 @@ def create_ui(wrap_gradio_gpu_call):
                    process_caption_deepbooru,
                    process_split_threshold,
                    process_overlap_ratio,
-                    process_entropy_focus,
+                    process_focal_crop,
+                    process_focal_crop_face_weight,
+                    process_focal_crop_entropy_weight,
+                    process_focal_crop_edges_weight,
+                    process_focal_crop_debug,
                 ],
                 outputs=[
                     ti_output,
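Gradio passes the current values of the inputs components to the click handler positionally, so the five new entries above must stay in the same order as the five new parameters added to preprocess(). A small, self-contained demo of that positional mapping; the component names echo this commit, while the button and handler are illustrative:

# Standalone demo of Gradio's positional inputs-to-arguments mapping,
# the same mechanism the click wiring above relies on.
import gradio as gr

def report(focal_crop, face_weight, entropy_weight, edges_weight, debug):
    # Arguments arrive in the same order as the components listed in `inputs`.
    return f"crop={focal_crop} face={face_weight} entropy={entropy_weight} edges={edges_weight} debug={debug}"

with gr.Blocks() as demo:
    process_focal_crop = gr.Checkbox(label='Auto focal point crop')
    process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05)
    process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.3, minimum=0.0, maximum=1.0, step=0.05)
    process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05)
    process_focal_crop_debug = gr.Checkbox(label='Create debug image')
    result = gr.Textbox(label='Result')

    run = gr.Button('Run')
    run.click(
        fn=report,
        inputs=[process_focal_crop, process_focal_crop_face_weight, process_focal_crop_entropy_weight, process_focal_crop_edges_weight, process_focal_crop_debug],
        outputs=[result],
    )

if __name__ == "__main__":
    demo.launch()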