[LoRA] change from info to warning when notifying the users about a LoRA no-op (#11044)
* Move to warning. * Test-related changes.
This commit is contained in:
parent
4ea9f89b8e
commit
20e4b6a628
@ -423,8 +423,12 @@ def _load_lora_into_text_encoder(
|
||||
# Unsafe code />
|
||||
|
||||
if prefix is not None and not state_dict:
|
||||
logger.info(
|
||||
f"No LoRA keys associated to {text_encoder.__class__.__name__} found with the {prefix=}. This is safe to ignore if LoRA state dict didn't originally have any {text_encoder.__class__.__name__} related params. Open an issue if you think it's unexpected: https://github.com/huggingface/diffusers/issues/new"
|
||||
logger.warning(
|
||||
f"No LoRA keys associated to {text_encoder.__class__.__name__} found with the {prefix=}. "
|
||||
"This is safe to ignore if LoRA state dict didn't originally have any "
|
||||
f"{text_encoder.__class__.__name__} related params. You can also try specifying `prefix=None` "
|
||||
"to resolve the warning. Otherwise, open an issue if you think it's unexpected: "
|
||||
"https://github.com/huggingface/diffusers/issues/new"
|
||||
)
|
||||
|
||||
|
||||
|
||||
@ -354,8 +354,12 @@ class PeftAdapterMixin:
|
||||
# Unsafe code />
|
||||
|
||||
if prefix is not None and not state_dict:
|
||||
logger.info(
|
||||
f"No LoRA keys associated to {self.__class__.__name__} found with the {prefix=}. This is safe to ignore if LoRA state dict didn't originally have any {self.__class__.__name__} related params. Open an issue if you think it's unexpected: https://github.com/huggingface/diffusers/issues/new"
|
||||
logger.warning(
|
||||
f"No LoRA keys associated to {self.__class__.__name__} found with the {prefix=}. "
|
||||
"This is safe to ignore if LoRA state dict didn't originally have any "
|
||||
f"{self.__class__.__name__} related params. You can also try specifying `prefix=None` "
|
||||
"to resolve the warning. Otherwise, open an issue if you think it's unexpected: "
|
||||
"https://github.com/huggingface/diffusers/issues/new"
|
||||
)
|
||||
|
||||
def save_lora_adapter(
|
||||
|
||||
@ -1961,7 +1961,7 @@ class PeftLoraLoaderMixinTests:
|
||||
|
||||
no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)}
|
||||
logger = logging.get_logger("diffusers.loaders.peft")
|
||||
logger.setLevel(logging.INFO)
|
||||
logger.setLevel(logging.WARNING)
|
||||
|
||||
with CaptureLogger(logger) as cap_logger:
|
||||
pipe.load_lora_weights(no_op_state_dict)
|
||||
@ -1981,7 +1981,7 @@ class PeftLoraLoaderMixinTests:
|
||||
prefix = "text_encoder_2"
|
||||
|
||||
logger = logging.get_logger("diffusers.loaders.lora_base")
|
||||
logger.setLevel(logging.INFO)
|
||||
logger.setLevel(logging.WARNING)
|
||||
|
||||
with CaptureLogger(logger) as cap_logger:
|
||||
self.pipeline_class.load_lora_into_text_encoder(
|
||||
|
||||
Loading…
Reference in New Issue
Block a user