mirror of https://github.com/bmaltais/kohya_ss
Updates
parent
dfe847071f
commit
b68b2f9691
|
|
@ -0,0 +1,12 @@
|
||||||
|
# Two-phase SDXL LoRA training driver for kohya_ss sd-scripts.
# Phase 1 trains a fresh LoRA (cosine schedule, batch 4); phase 2 reloads the
# phase-1 weights and runs a short constant-LR polish pass (batch 1, 1 epoch).

# Base SDXL checkpoint to fine-tune.
$pretrainedModel = "D:\models\sdxl\nsfw_v1.0_00002_.safetensors"

# Training images (kohya folder layout: <repeats>_<name> subfolders).
$trainDataDir = "D:\dataset\harold\img"

# TensorBoard log directory.
$loggingDir = "D:\dataset\harold\lora\sdxl-logs"

# Name and destination of the trained LoRA.
$outputName = "harold_v1.0a"
$outputDir = "d:\lycoris\sdxl"

# Phase-1 output file; fed into phase 2 as its starting network weights.
$networkWeights = Join-Path -Path $outputDir -ChildPath "$outputName.safetensors"

# Phase-2 model name gets an "e2" suffix so it does not overwrite phase 1.
$outputName2 = "$outputName" + "e2"

# Phase 1: train the LoRA from scratch (UNet only, dim/alpha 256, bf16).
accelerate launch --num_cpu_threads_per_process=2 "./sdxl_train_network.py" --enable_bucket --pretrained_model_name_or_path="$pretrainedModel" --train_data_dir="$trainDataDir" --resolution="1024,1024" --output_dir="$outputDir" --logging_dir="$loggingDir" --network_alpha="256" --training_comment="trigger words: " --save_model_as=safetensors --network_module=networks.lora --unet_lr=1e-05 --network_train_unet_only --network_dim=256 --output_name="$outputName" --lr_scheduler_num_cycles="1" --scale_weight_norms="1" --network_dropout="0.1" --cache_text_encoder_outputs --no_half_vae --lr_scheduler="cosine" --train_batch_size="4" --max_train_steps="40" --save_every_n_epochs="10" --mixed_precision="bf16" --save_precision="bf16" --seed="17415" --caption_extension=".txt" --cache_latents --cache_latents_to_disk --optimizer_type="AdamW" --optimizer_args weight_decay=0.05 betas=0.9,0.98 --max_train_epochs="10" --max_data_loader_n_workers="0" --keep_tokens="1" --bucket_reso_steps=32 --min_snr_gamma=5 --gradient_checkpointing --xformers --bucket_no_upscale --noise_offset=0.0357 --adaptive_noise_scale=0.00357 --log_prefix=xl-loha

# Phase 2: resume from the phase-1 weights (--network_weights) for one extra
# epoch at constant LR, saving under the "$outputName2" name.
accelerate launch --num_cpu_threads_per_process=2 "./sdxl_train_network.py" --enable_bucket --pretrained_model_name_or_path="$pretrainedModel" --train_data_dir="$trainDataDir" --resolution="1024,1024" --output_dir="$outputDir" --logging_dir="$loggingDir" --network_alpha="256" --training_comment="trigger: portrait" --save_model_as=safetensors --network_module=networks.lora --unet_lr=1e-05 --network_train_unet_only --network_dim=256 --network_weights="$networkWeights" --output_name="$outputName2" --lr_scheduler_num_cycles="1" --scale_weight_norms="1" --network_dropout="0.1" --cache_text_encoder_outputs --no_half_vae --lr_scheduler="constant" --train_batch_size="1" --max_train_steps="16" --save_every_n_epochs="1" --mixed_precision="bf16" --save_precision="bf16" --seed="17415" --caption_extension=".txt" --cache_latents --cache_latents_to_disk --optimizer_type="AdamW" --optimizer_args weight_decay=0.05 betas=0.9,0.98 --max_train_epochs="1" --max_data_loader_n_workers="0" --keep_tokens="1" --bucket_reso_steps=32 --min_snr_gamma=5 --gradient_checkpointing --xformers --bucket_no_upscale --noise_offset=0.0357 --adaptive_noise_scale=0.00357 --log_prefix=xl-loha
|
||||||
|
|
@ -0,0 +1,92 @@
|
||||||
|
{
|
||||||
|
"LoRA_type": "Standard",
|
||||||
|
"adaptive_noise_scale": 0.00357,
|
||||||
|
"additional_parameters": "--log_prefix=xl-loha",
|
||||||
|
"block_alphas": "",
|
||||||
|
"block_dims": "",
|
||||||
|
"block_lr_zero_threshold": "",
|
||||||
|
"bucket_no_upscale": true,
|
||||||
|
"bucket_reso_steps": 32,
|
||||||
|
"cache_latents": true,
|
||||||
|
"cache_latents_to_disk": true,
|
||||||
|
"caption_dropout_every_n_epochs": 0.0,
|
||||||
|
"caption_dropout_rate": 0,
|
||||||
|
"caption_extension": ".txt",
|
||||||
|
"clip_skip": "1",
|
||||||
|
"color_aug": false,
|
||||||
|
"conv_alpha": 4,
|
||||||
|
"conv_alphas": "",
|
||||||
|
"conv_dim": 4,
|
||||||
|
"conv_dims": "",
|
||||||
|
"decompose_both": false,
|
||||||
|
"dim_from_weights": false,
|
||||||
|
"down_lr_weight": "",
|
||||||
|
"enable_bucket": true,
|
||||||
|
"epoch": 10,
|
||||||
|
"factor": -1,
|
||||||
|
"flip_aug": false,
|
||||||
|
"full_fp16": false,
|
||||||
|
"gradient_accumulation_steps": 1.0,
|
||||||
|
"gradient_checkpointing": true,
|
||||||
|
"keep_tokens": 1,
|
||||||
|
"learning_rate": 0.0,
|
||||||
|
"lora_network_weights": "",
|
||||||
|
"lr_scheduler": "cosine",
|
||||||
|
"lr_scheduler_num_cycles": "1",
|
||||||
|
"lr_scheduler_power": "",
|
||||||
|
"lr_warmup": 0,
|
||||||
|
"max_data_loader_n_workers": "0",
|
||||||
|
"max_resolution": "1024,1024",
|
||||||
|
"max_timestep": 1000,
|
||||||
|
"max_token_length": "75",
|
||||||
|
"max_train_epochs": "10",
|
||||||
|
"mem_eff_attn": false,
|
||||||
|
"mid_lr_weight": "",
|
||||||
|
"min_snr_gamma": 5,
|
||||||
|
"min_timestep": 0,
|
||||||
|
"mixed_precision": "bf16",
|
||||||
|
"module_dropout": 0,
|
||||||
|
"multires_noise_discount": 0,
|
||||||
|
"multires_noise_iterations": 0,
|
||||||
|
"network_alpha": 256,
|
||||||
|
"network_dim": 256,
|
||||||
|
"network_dropout": 0.1,
|
||||||
|
"no_token_padding": false,
|
||||||
|
"noise_offset": 0.0357,
|
||||||
|
"noise_offset_type": "Original",
|
||||||
|
"num_cpu_threads_per_process": 2,
|
||||||
|
"optimizer": "AdamW",
|
||||||
|
"optimizer_args": "weight_decay=0.05 betas=0.9,0.98",
|
||||||
|
"persistent_data_loader_workers": false,
|
||||||
|
"prior_loss_weight": 1.0,
|
||||||
|
"random_crop": false,
|
||||||
|
"rank_dropout": 0,
|
||||||
|
"save_every_n_epochs": 10,
|
||||||
|
"save_every_n_steps": 0,
|
||||||
|
"save_last_n_steps": 0,
|
||||||
|
"save_last_n_steps_state": 0,
|
||||||
|
"save_precision": "bf16",
|
||||||
|
"scale_v_pred_loss_like_noise_pred": false,
|
||||||
|
"scale_weight_norms": 1,
|
||||||
|
"sdxl": true,
|
||||||
|
"sdxl_cache_text_encoder_outputs": true,
|
||||||
|
"sdxl_no_half_vae": true,
|
||||||
|
"seed": "17415",
|
||||||
|
"shuffle_caption": false,
|
||||||
|
"stop_text_encoder_training_pct": 0,
|
||||||
|
"text_encoder_lr": 0.0,
|
||||||
|
"train_batch_size": 4,
|
||||||
|
"train_on_input": false,
|
||||||
|
"training_comment": "trigger words: ",
|
||||||
|
"unet_lr": 1e-05,
|
||||||
|
"unit": 1,
|
||||||
|
"up_lr_weight": "",
|
||||||
|
"use_cp": false,
|
||||||
|
"use_wandb": false,
|
||||||
|
"v2": false,
|
||||||
|
"v_parameterization": false,
|
||||||
|
"vae_batch_size": 0,
|
||||||
|
"wandb_api_key": "",
|
||||||
|
"weighted_captions": false,
|
||||||
|
"xformers": true
|
||||||
|
}
|
||||||
|
|
@ -0,0 +1,92 @@
|
||||||
|
{
|
||||||
|
"LoRA_type": "Standard",
|
||||||
|
"adaptive_noise_scale": 0.00357,
|
||||||
|
"additional_parameters": "--log_prefix=xl-loha",
|
||||||
|
"block_alphas": "",
|
||||||
|
"block_dims": "",
|
||||||
|
"block_lr_zero_threshold": "",
|
||||||
|
"bucket_no_upscale": true,
|
||||||
|
"bucket_reso_steps": 32,
|
||||||
|
"cache_latents": true,
|
||||||
|
"cache_latents_to_disk": true,
|
||||||
|
"caption_dropout_every_n_epochs": 0.0,
|
||||||
|
"caption_dropout_rate": 0,
|
||||||
|
"caption_extension": ".txt",
|
||||||
|
"clip_skip": "1",
|
||||||
|
"color_aug": false,
|
||||||
|
"conv_alpha": 4,
|
||||||
|
"conv_alphas": "",
|
||||||
|
"conv_dim": 4,
|
||||||
|
"conv_dims": "",
|
||||||
|
"decompose_both": false,
|
||||||
|
"dim_from_weights": false,
|
||||||
|
"down_lr_weight": "",
|
||||||
|
"enable_bucket": true,
|
||||||
|
"epoch": 1,
|
||||||
|
"factor": -1,
|
||||||
|
"flip_aug": false,
|
||||||
|
"full_fp16": false,
|
||||||
|
"gradient_accumulation_steps": 1.0,
|
||||||
|
"gradient_checkpointing": true,
|
||||||
|
"keep_tokens": 1,
|
||||||
|
"learning_rate": 0.0,
|
||||||
|
"lora_network_weights": "<replace with full path of LoRA model trained in phase 1>",
|
||||||
|
"lr_scheduler": "constant",
|
||||||
|
"lr_scheduler_num_cycles": "1",
|
||||||
|
"lr_scheduler_power": "",
|
||||||
|
"lr_warmup": 0,
|
||||||
|
"max_data_loader_n_workers": "0",
|
||||||
|
"max_resolution": "1024,1024",
|
||||||
|
"max_timestep": 1000,
|
||||||
|
"max_token_length": "75",
|
||||||
|
"max_train_epochs": "1",
|
||||||
|
"mem_eff_attn": false,
|
||||||
|
"mid_lr_weight": "",
|
||||||
|
"min_snr_gamma": 5,
|
||||||
|
"min_timestep": 0,
|
||||||
|
"mixed_precision": "bf16",
|
||||||
|
"module_dropout": 0,
|
||||||
|
"multires_noise_discount": 0,
|
||||||
|
"multires_noise_iterations": 0,
|
||||||
|
"network_alpha": 256,
|
||||||
|
"network_dim": 256,
|
||||||
|
"network_dropout": 0.1,
|
||||||
|
"no_token_padding": false,
|
||||||
|
"noise_offset": 0.0357,
|
||||||
|
"noise_offset_type": "Original",
|
||||||
|
"num_cpu_threads_per_process": 2,
|
||||||
|
"optimizer": "AdamW",
|
||||||
|
"optimizer_args": "weight_decay=0.05 betas=0.9,0.98",
|
||||||
|
"persistent_data_loader_workers": false,
|
||||||
|
"prior_loss_weight": 1.0,
|
||||||
|
"random_crop": false,
|
||||||
|
"rank_dropout": 0,
|
||||||
|
"save_every_n_epochs": 1,
|
||||||
|
"save_every_n_steps": 0,
|
||||||
|
"save_last_n_steps": 0,
|
||||||
|
"save_last_n_steps_state": 0,
|
||||||
|
"save_precision": "bf16",
|
||||||
|
"scale_v_pred_loss_like_noise_pred": false,
|
||||||
|
"scale_weight_norms": 1,
|
||||||
|
"sdxl": true,
|
||||||
|
"sdxl_cache_text_encoder_outputs": true,
|
||||||
|
"sdxl_no_half_vae": true,
|
||||||
|
"seed": "17415",
|
||||||
|
"shuffle_caption": false,
|
||||||
|
"stop_text_encoder_training_pct": 0,
|
||||||
|
"text_encoder_lr": 0.0,
|
||||||
|
"train_batch_size": 1,
|
||||||
|
"train_on_input": false,
|
||||||
|
"training_comment": "trigger: portrait",
|
||||||
|
"unet_lr": 1e-05,
|
||||||
|
"unit": 1,
|
||||||
|
"up_lr_weight": "",
|
||||||
|
"use_cp": false,
|
||||||
|
"use_wandb": false,
|
||||||
|
"v2": false,
|
||||||
|
"v_parameterization": false,
|
||||||
|
"vae_batch_size": 0,
|
||||||
|
"wandb_api_key": "",
|
||||||
|
"weighted_captions": false,
|
||||||
|
"xformers": true
|
||||||
|
}
|
||||||
|
|
@ -2,4 +2,7 @@
|
||||||
|
|
||||||
Run the following command to prepare new presets for release to users:
|
Run the following command to prepare new presets for release to users:
|
||||||
|
|
||||||
`python.exe .\tools\prepare_presets.py .\presets\lora\*.json`
|
```
|
||||||
|
activate.ps1
|
||||||
|
python.exe .\tools\prepare_presets.py .\presets\lora\*.json
|
||||||
|
```
|
||||||
|
|
@ -9,7 +9,7 @@ ftfy==6.1.1
|
||||||
gradio==3.36.1
|
gradio==3.36.1
|
||||||
huggingface-hub>=0.14.1
|
huggingface-hub>=0.14.1
|
||||||
lion-pytorch==0.0.6
|
lion-pytorch==0.0.6
|
||||||
lycoris_lora==1.8.0.dev6
|
lycoris_lora==1.8.0.dev9
|
||||||
# lycoris_lora==1.7.2
|
# lycoris_lora==1.7.2
|
||||||
open-clip-torch==2.20.0
|
open-clip-torch==2.20.0
|
||||||
opencv-python==4.7.0.68
|
opencv-python==4.7.0.68
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue