# Mirror of https://github.com/bmaltais/kohya_ss
# Copy this file and name it config.toml
# Edit the values to suit your needs
# Default folders location

[model]
models_dir = "./models" # Pretrained model name or path
output_name = "new model" # Trained model output name
train_data_dir = "./data" # Image folder (containing training images subfolders) / Image folder (containing training images)
dataset_config = "./test.toml" # Dataset config file (Optional. Select the toml configuration file to use for the dataset)
training_comment = "Some training comment" # Training comment
save_model_as = "safetensors" # Save model as (ckpt, safetensors, diffusers, diffusers_safetensors)
save_precision = "bf16" # Save model precision (fp16, bf16, float)
[folders]
output_dir = "./outputs" # Output directory for trained model
reg_data_dir = "./data/reg" # Regularisation directory
logging_dir = "./logs" # Logging directory
[configuration]
config_dir = "./presets" # Load/Save Config file
[advanced]
adaptive_noise_scale = 0 # Adaptive noise scale
additional_parameters = "" # Additional parameters
bucket_no_upscale = true # Don't upscale bucket resolution
bucket_reso_steps = 64 # Bucket resolution steps
caption_dropout_every_n_epochs = 0 # Caption dropout every n epochs
caption_dropout_rate = 0 # Caption dropout rate
color_aug = false # Color augmentation
clip_skip = 1 # Clip skip
debiased_estimation_loss = false # Debiased estimation loss
flip_aug = false # Flip augmentation
fp8_base = false # FP8 base training (experimental)
full_bf16 = false # Full bf16 training (experimental)
full_fp16 = false # Full fp16 training (experimental)
gradient_accumulation_steps = 1 # Gradient accumulation steps
gradient_checkpointing = false # Gradient checkpointing
ip_noise_gamma = 0 # IP noise gamma
ip_noise_gamma_random_strength = false # IP noise gamma random strength (true, false)
keep_tokens = 0 # Keep tokens
log_tracker_config_dir = "./logs" # Log tracker configs directory
log_tracker_name = "" # Log tracker name
masked_loss = false # Masked loss
max_data_loader_n_workers = "0" # Max data loader n workers (string)
max_timestep = 1000 # Max timestep
max_token_length = "150" # Max token length ("75", "150", "225")
mem_eff_attn = false # Memory efficient attention
min_snr_gamma = 0 # Min SNR gamma
min_timestep = 0 # Min timestep
multires_noise_iterations = 0 # Multires noise iterations
multires_noise_discount = 0 # Multires noise discount
no_token_padding = false # Disable token padding
noise_offset = 0 # Noise offset
noise_offset_random_strength = false # Noise offset random strength (true, false)
noise_offset_type = "Original" # Noise offset type ("Original", "Multires")
persistent_data_loader_workers = false # Persistent data loader workers
prior_loss_weight = 1.0 # Prior loss weight
random_crop = false # Random crop
save_every_n_steps = 0 # Save every n steps
save_last_n_steps = 0 # Save last n steps
save_last_n_steps_state = 0 # Save last n steps state
save_state = false # Save state
save_state_on_train_end = false # Save state on train end
scale_v_pred_loss_like_noise_pred = false # Scale v pred loss like noise pred
shuffle_caption = false # Shuffle captions
state_dir = "./outputs" # Resume from saved training state
use_wandb = false # Use wandb
vae_batch_size = 0 # VAE batch size
vae_dir = "./models/vae" # VAEs folder path
v_pred_like_loss = 0 # V pred like loss weight
wandb_api_key = "" # Wandb api key
wandb_run_name = "" # Wandb run name
weighted_captions = false # Weighted captions
xformers = "xformers" # CrossAttention (none, sdp, xformers)
# This next section can be used to set default values for the Dataset Preparation section
# The "Destination training directory" field will be equal to "train_data_dir" as specified above

[dataset_preparation]
instance_prompt = "instance"
class_prompt = "class"
images_folder = "/some/folder/where/images/are"
reg_images_folder = "/some/folder/where/reg/images/are"