Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git (synced 2025-01-17 11:50:18 +08:00)

reformat file with uniform indentation

This commit is contained in: parent 03ee297aa2, commit d608926f81
@@ -27,7 +27,6 @@ def crop_image(im, settings):
         elif is_portrait(settings.crop_width, settings.crop_height):
             scale_by = settings.crop_height / im.height
 
-
     im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
     im_debug = im.copy()
 
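For context on the resize these lines wrap, here is a minimal standalone sketch (not part of this commit) of the scale-to-cover idea: pick the factor that makes the image at least as large as the target crop on both axes, then resize. Pillow and a local "input.png" are assumed; crop_width and crop_height stand in for the settings object.

from PIL import Image

crop_width, crop_height = 512, 512   # stand-ins for settings.crop_width / settings.crop_height
im = Image.open("input.png")

# cover scale: the larger of the two per-axis ratios
scale_by = max(crop_width / im.width, crop_height / im.height)

im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))
print(im.size)  # roughly covers the requested crop on both axes (int() may shave a pixel)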
@@ -71,6 +70,7 @@ def crop_image(im, settings):
 
     return results
 
+
 def focal_point(im, settings):
     corner_points = image_corner_points(im, settings) if settings.corner_points_weight > 0 else []
     entropy_points = image_entropy_points(im, settings) if settings.entropy_points_weight > 0 else []
@@ -112,7 +112,7 @@ def focal_point(im, settings):
         if corner_centroid is not None:
             color = BLUE
             box = corner_centroid.bounding(max_size * corner_centroid.weight)
-            d.text((box[0], box[1]-15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
+            d.text((box[0], box[1] - 15), f"Edge: {corner_centroid.weight:.02f}", fill=color)
             d.ellipse(box, outline=color)
             if len(corner_points) > 1:
                 for f in corner_points:
@@ -120,7 +120,7 @@ def focal_point(im, settings):
         if entropy_centroid is not None:
             color = "#ff0"
             box = entropy_centroid.bounding(max_size * entropy_centroid.weight)
-            d.text((box[0], box[1]-15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
+            d.text((box[0], box[1] - 15), f"Entropy: {entropy_centroid.weight:.02f}", fill=color)
             d.ellipse(box, outline=color)
             if len(entropy_points) > 1:
                 for f in entropy_points:
@@ -128,7 +128,7 @@ def focal_point(im, settings):
         if face_centroid is not None:
             color = RED
             box = face_centroid.bounding(max_size * face_centroid.weight)
-            d.text((box[0], box[1]-15), f"Face: {face_centroid.weight:.02f}", fill=color)
+            d.text((box[0], box[1] - 15), f"Face: {face_centroid.weight:.02f}", fill=color)
             d.ellipse(box, outline=color)
             if len(face_points) > 1:
                 for f in face_points:
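The three hunks above are the same annotation pattern repeated for the edge, entropy and face centroids. A minimal standalone sketch (not part of the commit) of that pattern with made-up values, assuming only Pillow:

from PIL import Image, ImageDraw

im_debug = Image.new("RGB", (256, 256), "black")
d = ImageDraw.Draw(im_debug)

color = "#f00"             # stands in for the script's RED constant
box = [96, 96, 160, 160]   # stands in for centroid.bounding(max_size * centroid.weight)
weight = 0.42

d.text((box[0], box[1] - 15), f"Face: {weight:.02f}", fill=color)  # label just above the box
d.ellipse(box, outline=color)                                      # circle the focal region
im_debug.save("debug.png")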
@@ -161,8 +161,8 @@ def image_face_points(im, settings):
                     PointOfInterest(
                         int(x + (w * 0.5)),  # face focus left/right is center
                         int(y + (h * 0.33)),  # face focus up/down is close to the top of the head
-                        size = w,
-                        weight = 1/len(faces[1])
+                        size=w,
+                        weight=1 / len(faces[1])
                     )
                 )
         return results
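A tiny worked example (values invented) of the face-to-focus-point arithmetic in the hunk above: the focus x is the horizontal center of the detected box, and the focus y sits about a third of the way down from its top.

x, y, w, h = 100, 80, 60, 90   # hypothetical detected face rectangle (left, top, width, height)
focus_x = int(x + (w * 0.5))   # 100 + 30.0 -> 130
focus_y = int(y + (h * 0.33))  # 80 + 29.7  -> 109
print(focus_x, focus_y)        # 130 109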
@@ -171,27 +171,29 @@ def image_face_points(im, settings):
     gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)
 
     tries = [
-        [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
-        [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
-        [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
-        [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
-        [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
-        [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
-        [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
-        [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
+        [f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01],
+        [f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05],
+        [f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05],
+        [f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05],
+        [f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05],
+        [f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05],
+        [f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05],
+        [f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05]
     ]
     for t in tries:
         classifier = cv2.CascadeClassifier(t[0])
         minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
         try:
             faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
-                minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
+                                                minNeighbors=7, minSize=(minsize, minsize),
+                                                flags=cv2.CASCADE_SCALE_IMAGE)
         except Exception:
             continue
 
         if faces:
             rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
-            return [PointOfInterest((r[0] +r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0]-r[2]), weight=1/len(rects)) for r in rects]
+            return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2]),
+                                    weight=1 / len(rects)) for r in rects]
     return []
 
 
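For readers unfamiliar with the OpenCV call being re-wrapped here, a standalone sketch (not from the commit) of a single cascade pass: load one bundled Haar cascade, run detectMultiScale on a grayscale image, and print the detected rectangles. opencv-python and a local "input.png" are assumed.

import cv2

im = cv2.imread("input.png")
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

classifier = cv2.CascadeClassifier(f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml')
minsize = int(min(im.shape[1], im.shape[0]) * 0.05)  # at least 5% of the smaller side

faces = classifier.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=7,
                                    minSize=(minsize, minsize),
                                    flags=cv2.CASCADE_SCALE_IMAGE)
for (x, y, w, h) in faces:
    print(x, y, x + w, y + h)  # left, top, right, bottom of each hit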
@@ -200,7 +202,7 @@ def image_corner_points(im, settings):
 
     # naive attempt at preventing focal points from collecting at watermarks near the bottom
     gd = ImageDraw.Draw(grayscale)
-    gd.rectangle([0, im.height*.9, im.width, im.height], fill="#999")
+    gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")
 
     np_im = np.array(grayscale)
 
@@ -208,7 +210,7 @@ def image_corner_points(im, settings):
         np_im,
         maxCorners=100,
         qualityLevel=0.04,
-        minDistance=min(grayscale.width, grayscale.height)*0.06,
+        minDistance=min(grayscale.width, grayscale.height) * 0.06,
         useHarrisDetector=False,
     )
 
@@ -218,7 +220,7 @@ def image_corner_points(im, settings):
     focal_points = []
     for point in points:
         x, y = point.ravel()
-        focal_points.append(PointOfInterest(x, y, size=4, weight=1/len(points)))
+        focal_points.append(PointOfInterest(x, y, size=4, weight=1 / len(points)))
 
     return focal_points
 
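The two hunks above touch the Shi-Tomasi corner pass. A standalone sketch (not from the commit) of the same flow: mask the bottom strip where watermarks tend to sit, ask OpenCV for up to 100 well-spread corners, and weight them uniformly. opencv-python, numpy, Pillow and a local "input.png" are assumed.

import cv2
import numpy as np
from PIL import Image, ImageDraw

grayscale = Image.open("input.png").convert("L")
gd = ImageDraw.Draw(grayscale)
gd.rectangle([0, grayscale.height * .9, grayscale.width, grayscale.height], fill="#999")

np_im = np.array(grayscale)
points = cv2.goodFeaturesToTrack(
    np_im,
    maxCorners=100,
    qualityLevel=0.04,
    minDistance=min(grayscale.width, grayscale.height) * 0.06,
    useHarrisDetector=False,
)

if points is not None:  # None when no corners pass the quality threshold
    for point in points:
        x, y = point.ravel()
        print(int(x), int(y), 1 / len(points))  # location plus a uniform weight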
@@ -249,8 +251,8 @@ def image_entropy_points(im, settings):
         crop_current[move_idx[0]] += 4
         crop_current[move_idx[1]] += 4
 
-    x_mid = int(crop_best[0] + settings.crop_width/2)
-    y_mid = int(crop_best[1] + settings.crop_height/2)
+    x_mid = int(crop_best[0] + settings.crop_width / 2)
+    y_mid = int(crop_best[1] + settings.crop_height / 2)
 
     return [PointOfInterest(x_mid, y_mid, size=25, weight=1.0)]
 
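The sliding-window search above compares candidate crops by an entropy score and reports the center of the best window. A rough sketch (not the commit's image_entropy helper) of that kind of score, the Shannon entropy of a crop's grayscale histogram, assuming numpy, Pillow and a local "input.png":

import numpy as np
from PIL import Image

def crop_entropy(im, box):
    """Shannon entropy (bits) of the grayscale histogram inside `box` (hypothetical helper)."""
    band = np.asarray(im.crop(box).convert("L"))
    hist, _ = np.histogram(band, bins=256, range=(0, 256))
    hist = hist[hist > 0]
    probs = hist / hist.sum()
    return float(-(probs * np.log2(probs)).sum())

im = Image.open("input.png")
print(crop_entropy(im, (0, 0, 128, 128)))              # top-left window
print(crop_entropy(im, (im.width - 128, im.height - 128,
                        im.width, im.height)))         # bottom-right window of the same size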