diff --git a/modules/models/flux/flux.py b/modules/models/flux/flux.py
index 46fd568a0..42e9ea788 100644
--- a/modules/models/flux/flux.py
+++ b/modules/models/flux/flux.py
@@ -108,7 +108,7 @@ class FluxCond(torch.nn.Module):
             self.clip_l.transformer.load_state_dict(SafetensorsMapping(file), strict=False)
 
         if self.t5xxl and 'text_encoders.t5xxl.transformer.encoder.block.0.layer.0.SelfAttention.k.weight' not in state_dict:
-            t5_file = modelloader.load_file_from_url(T5_URL, model_dir=clip_path, file_name="t5xxl_fp16.safetensors")
+            t5_file = modelloader.load_file_from_url(T5_URL, model_dir=clip_path, file_name="t5xxl_fp8_e4m3fn.safetensors")
             with safetensors.safe_open(t5_file, framework="pt") as file:
                 self.t5xxl.transformer.load_state_dict(SafetensorsMapping(file), strict=False)
 
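For context: the hunk only swaps which T5-XXL checkpoint gets auto-downloaded (fp16 to fp8 e4m3fn, roughly half the download size); the surrounding `safe_open`/`load_state_dict` pattern is unchanged. Below is a minimal, self-contained sketch of that pattern, assuming a local `t5xxl_fp8_e4m3fn.safetensors` file and using a simplified stand-in for the webui's `SafetensorsMapping` helper (the real helper lives elsewhere in the repo):

```python
# Minimal sketch of the loading pattern in this hunk. This SafetensorsMapping
# is a simplified stand-in for the webui helper, not the real class.
from collections.abc import Mapping

import safetensors
import torch


class SafetensorsMapping(Mapping):
    """Expose an open safetensors handle as a state-dict-like mapping,
    so tensors are only read from disk when load_state_dict asks for them."""

    def __init__(self, file):
        self.file = file

    def __len__(self):
        return len(self.file.keys())

    def __iter__(self):
        yield from self.file.keys()

    def __getitem__(self, key):
        return self.file.get_tensor(key)


# Hypothetical module standing in for self.t5xxl.transformer.
model = torch.nn.Linear(8, 8)

# strict=False tolerates keys present in the file but absent from the module
# (and vice versa), which is why swapping the fp16 checkpoint for the fp8 one
# is a drop-in change as long as the tensor names match.
with safetensors.safe_open("t5xxl_fp8_e4m3fn.safetensors", framework="pt") as file:
    model.load_state_dict(SafetensorsMapping(file), strict=False)
```

Since `torch.nn.Module.load_state_dict` accepts any `Mapping`, the handle-backed mapping avoids materializing the whole checkpoint as a dict up front; tensors are fetched key by key as the load proceeds.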