From e173ecc5cd5f622aba4fc555cc6fec28455459f9 Mon Sep 17 00:00:00 2001
From: Ryan Dick
Date: Mon, 3 Jun 2024 15:30:58 -0400
Subject: [PATCH] Update docs to reflect changes to the LoRA extraction script.

---
 docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md | 8 ++++----
 .../model_merge/scripts/extract_lora_from_model_diff.py   | 1 -
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md b/docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md
index 6131d49d..07fde484 100644
--- a/docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md
+++ b/docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md
@@ -116,10 +116,10 @@ An alternative to using the finetuned UNet model directly is to compare it again
 To extract a LoRA model, run the following command:
 
 ```bash
-python src/invoke_training/model_merge/scripts/extract_lora_from_checkpoint.py \
-    --model-type sdxl \
-    --model-orig path/to/stable-diffusion-xl-base-1.0/unet \
-    --model-tuned output/robocats/sdxl_finetune/1715373799.3558652/checkpoints/checkpoint-epoch_00000500-step_00002000/unet \
+python src/invoke_training/model_merge/scripts/extract_lora_from_model_diff.py \
+    --model-type SDXL \
+    --model-orig path/to/stable-diffusion-xl-base-1.0 \
+    --model-tuned output/robocats/sdxl_finetune/1715373799.3558652/checkpoints/checkpoint-epoch_00000500-step_00002000 \
     --save-to robocats_lora_step_2000.safetensors \
     --lora-rank 32
 ```
diff --git a/src/invoke_training/model_merge/scripts/extract_lora_from_model_diff.py b/src/invoke_training/model_merge/scripts/extract_lora_from_model_diff.py
index 28c97a1a..cca4c96d 100644
--- a/src/invoke_training/model_merge/scripts/extract_lora_from_model_diff.py
+++ b/src/invoke_training/model_merge/scripts/extract_lora_from_model_diff.py
@@ -39,7 +39,6 @@ class StableDiffusionModel:
     """A helper class to store the submodels of a SD model that we are interested in for LoRA extraction."""
 
     unet: UNet2DConditionModel | None = None
-    # TODO(ryand): Figure out the actual type of these text encoders.
     text_encoder: CLIPTextModel | None = None
     text_encoder_2: CLIPTextModelWithProjection | None = None
 
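
Note: as a rough illustration of what a diff-based LoRA extraction does conceptually, low-rank factors can be obtained from a truncated SVD of the difference between tuned and original weights. The following is only a minimal PyTorch sketch under that assumption; the function name, tensor shapes, and rank handling are hypothetical and do not reflect the actual implementation in `extract_lora_from_model_diff.py`.

```python
import torch


def lora_factors_from_diff(w_orig: torch.Tensor, w_tuned: torch.Tensor, rank: int = 32):
    """Illustrative sketch: approximate (w_tuned - w_orig) as a rank-`rank` product up @ down."""
    diff = (w_tuned - w_orig).float()
    # Truncated SVD keeps only the `rank` strongest directions of the weight difference.
    u, s, vh = torch.linalg.svd(diff, full_matrices=False)
    up = u[:, :rank] * s[:rank]  # (out_features, rank)
    down = vh[:rank, :]          # (rank, in_features)
    return up, down


# Hypothetical 2D weight standing in for a layer from the original and tuned UNets.
w_orig = torch.randn(1280, 640)
w_tuned = w_orig + 0.01 * torch.randn(1280, 640)
up, down = lora_factors_from_diff(w_orig, w_tuned, rank=32)
print("max reconstruction error:", (up @ down - (w_tuned - w_orig)).abs().max().item())
```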