diff --git a/modules/dat_model.py b/modules/dat_model.py
index e256a5a32..298d160d1 100644
--- a/modules/dat_model.py
+++ b/modules/dat_model.py
@@ -1,7 +1,7 @@
 import os
 
 from modules import modelloader, errors
-from modules.shared import cmd_opts, opts, hf_endpoint
+from modules.shared import cmd_opts, opts
 from modules.upscaler import Upscaler, UpscalerData
 from modules.upscaler_utils import upscale_with_model
 
@@ -71,21 +71,21 @@ def get_dat_models(scaler):
     return [
         UpscalerData(
             name="DAT x2",
-            path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x2.pth",
+            path="https://huggingface.co/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x2.pth",
             scale=2,
             upscaler=scaler,
             sha256='7760aa96e4ee77e29d4f89c3a4486200042e019461fdb8aa286f49aa00b89b51',
         ),
         UpscalerData(
             name="DAT x3",
-            path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x3.pth",
+            path="https://huggingface.co/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x3.pth",
             scale=3,
             upscaler=scaler,
             sha256='581973e02c06f90d4eb90acf743ec9604f56f3c2c6f9e1e2c2b38ded1f80d197',
         ),
         UpscalerData(
             name="DAT x4",
-            path=f"{hf_endpoint}/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x4.pth",
+            path="https://huggingface.co/w-e-w/DAT/resolve/main/experiments/pretrained_models/DAT/DAT_x4.pth",
             scale=4,
             upscaler=scaler,
             sha256='391a6ce69899dff5ea3214557e9d585608254579217169faf3d4c353caff049e',
diff --git a/modules/models/sd3/sd3_cond.py b/modules/models/sd3/sd3_cond.py
index 6a43f569b..325c512d5 100644
--- a/modules/models/sd3/sd3_cond.py
+++ b/modules/models/sd3/sd3_cond.py
@@ -24,7 +24,7 @@ class SafetensorsMapping(typing.Mapping):
         return self.file.get_tensor(key)
 
 
-CLIPL_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_l.safetensors"
+CLIPL_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_l.safetensors"
 CLIPL_CONFIG = {
     "hidden_act": "quick_gelu",
     "hidden_size": 768,
@@ -33,7 +33,7 @@ CLIPL_CONFIG = {
     "num_hidden_layers": 12,
 }
 
-CLIPG_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_g.safetensors"
+CLIPG_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/clip_g.safetensors"
 CLIPG_CONFIG = {
     "hidden_act": "gelu",
     "hidden_size": 1280,
@@ -43,7 +43,7 @@ CLIPG_CONFIG = {
     "textual_inversion_key": "clip_g",
 }
 
-T5_URL = f"{shared.hf_endpoint}/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/t5xxl_fp16.safetensors"
+T5_URL = "https://huggingface.co/AUTOMATIC/stable-diffusion-3-medium-text-encoders/resolve/main/t5xxl_fp16.safetensors"
 T5_CONFIG = {
     "d_ff": 10240,
     "d_model": 4096,
diff --git a/modules/sd_disable_initialization.py b/modules/sd_disable_initialization.py
index 3750e85e9..273a7edd8 100644
--- a/modules/sd_disable_initialization.py
+++ b/modules/sd_disable_initialization.py
@@ -76,7 +76,7 @@ class DisableInitialization(ReplaceHelper):
         def transformers_utils_hub_get_file_from_cache(original, url, *args, **kwargs):
 
             # this file is always 404, prevent making request
-            if url == f'{shared.hf_endpoint}/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json':
+            if url == 'https://huggingface.co/openai/clip-vit-large-patch14/resolve/main/added_tokens.json' or url == 'openai/clip-vit-large-patch14' and args[0] == 'added_tokens.json':
                 return None
 
             try:
diff --git a/modules/sd_models.py b/modules/sd_models.py
index f4274ae42..64c8fdcfa 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -160,7 +160,7 @@ def list_models():
         model_url = None
         expected_sha256 = None
     else:
-        model_url = f"{shared.hf_endpoint}/stable-diffusion-v1-5/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
+        model_url = "https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
         expected_sha256 = '6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa'
 
     model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"], hash_prefix=expected_sha256)
diff --git a/modules/shared.py b/modules/shared.py
index 2a3787f99..d3c74470c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -91,4 +91,4 @@ refresh_checkpoints = shared_items.refresh_checkpoints
 list_samplers = shared_items.list_samplers
 reload_hypernetworks = shared_items.reload_hypernetworks
 
-hf_endpoint = os.getenv('HF_ENDPOINT', 'https://huggingface.co')
+hf_endpoint = os.getenv('HF_ENDPOINT', 'https://huggingface.co').rstrip('/')
diff --git a/modules/util.py b/modules/util.py
index f70097721..a7f63a6e2 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -254,6 +254,7 @@ def load_file_from_url(
         parts = urlparse(url)
         file_name = os.path.basename(parts.path)
 
+    url = str.replace(url, "https://huggingface.co", shared.hf_endpoint, 1)
     cached_file = os.path.abspath(os.path.join(model_dir, file_name))
     if re_download or not os.path.exists(cached_file):