- This is a significant update to how setup works across different platforms. It might be causing issues... especially for Linux environments like RunPod. If you encounter problems please report them in the issues so I can try to address them. You can revert to the previous release with `git checkout v21.7.10`

The setup solution is now much more modular and will simplify requirements support across different environments... hoping this will make it easier to run on different OSes.
pull/1058/head v21.7.11
bmaltais 2023-06-23 13:23:25 -04:00 committed by GitHub
parent 4f0e1f5f58
commit 4275c12014
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
35 changed files with 1370 additions and 619 deletions

2
.gitignore vendored
View File

@ -19,3 +19,5 @@ test/output
test/logs
test/*.json
test/ft
requirements_tmp_for_setup.txt
0.13.3

View File

@ -26,8 +26,8 @@ RUN python3 -m pip install wheel
## RUN python3 -m pip install -v -U git+https://github.com/facebookresearch/xformers.git@main#egg=xformers
# Install requirements
COPY requirements_unix.txt setup.py ./
RUN python3 -m pip install --use-pep517 -r requirements_unix.txt xformers
COPY requirements_linux.txt ./setup/setup.py ./
RUN python3 -m pip install --use-pep517 -r requirements_linux.txt xformers
# Replace pillow with pillow-simd
RUN python3 -m pip uninstall -y pillow && \

View File

@ -102,6 +102,8 @@ If you run on Linux, there is an alternative docker container port with less lim
venv support needs to be pre-installed. This can be done on Ubuntu 22.04 with `apt install python3.10-venv`
For Linux, make sure to install the cudaNN drivers following the instructions from: `https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64`
Make sure to use a version of python >= 3.10.6 and < 3.11.0
#### Setup
@ -353,6 +355,10 @@ This will store a backup file with your current locally installed pip packages a
## Change History
* 2023/06/23 (v21.7.11)
- This is a significant update to how setup works across different platforms. It might be causing issues... especially for Linux environments like RunPod. If you encounter problems please report them in the issues so I can try to address them. You can revert to the previous release with `git checkout v21.7.10`
The setup solution is now much more modular and will simplify requirements support across different environments... hoping this will make it easier to run on different OSes.
* 2023/06/19 (v21.7.10)
- Quick fix for Linux GUI startup where it would try to install darwin requirements on top of Linux. Ugly fix but it works. Hopefully some Linux user will improve it via a PR.
* 2023/06/18 (v21.7.9)

View File

@ -3,16 +3,16 @@
call .\venv\Scripts\deactivate.bat
:: Calling external python program to check for local modules
python .\tools\check_local_modules.py --no_question
python .\setup\check_local_modules.py --no_question
:: Activate the virtual environment
call .\venv\Scripts\activate.bat
set PATH=%PATH%;%~dp0venv\Lib\site-packages\torch\lib
:: Validate requirements
python.exe .\tools\validate_requirements.py
python.exe .\setup\validate_requirements.py
:: If the exit code is 0, run the kohya_gui.py script with the command-line arguments
if %errorlevel% equ 0 (
python.exe kohya_gui.py %*
cmd /k python.exe kohya_gui.py %*
)

View File

@ -29,10 +29,10 @@ if ($pipOutput) {
$env:PATH += ";$($MyInvocation.MyCommand.Path)\venv\Lib\site-packages\torch\lib"
# Debug info about system
# python.exe .\tools\debug_info.py
# python.exe .\setup\debug_info.py
# Validate the requirements and store the exit code
python.exe .\tools\validate_requirements.py
python.exe .\setup\validate_requirements.py
# If the exit code is 0, read arguments from gui_parameters.txt (if it exists)
# and run the kohya_gui.py script with the command-line arguments

10
gui.sh
View File

@ -17,6 +17,12 @@ cd "$SCRIPT_DIR"
source "$SCRIPT_DIR/venv/bin/activate"
# If the requirements are validated, run the kohya_gui.py script with the command-line arguments
if python "$SCRIPT_DIR"/tools/validate_requirements_unix.py -r "$SCRIPT_DIR"/requirements_unix.txt; then
python "$SCRIPT_DIR/kohya_gui.py" "$@"
if [[ "$OSTYPE" == "darwin"* ]]; then
if python "$SCRIPT_DIR"/setup/validate_requirements.py -r "$SCRIPT_DIR"/requirements_macos.txt; then
python "$SCRIPT_DIR/kohya_gui.py" "$@"
fi
else
if python "$SCRIPT_DIR"/setup/validate_requirements.py -r "$SCRIPT_DIR"/requirements_linux.txt; then
python "$SCRIPT_DIR/kohya_gui.py" "$@"
fi
fi

View File

@ -174,6 +174,10 @@ def gradio_convert_model_tab(headless=False):
gr.Markdown(
'This utility can be used to convert from one stable diffusion model format to another.'
)
model_ext = gr.Textbox(value='*.safetensors *.ckpt', visible=False)
model_ext_name = gr.Textbox(value='Model types', visible=False)
with gr.Row():
source_model_input = gr.Textbox(
label='Source model',
@ -198,7 +202,7 @@ def gradio_convert_model_tab(headless=False):
)
button_source_model_file.click(
get_file_path,
inputs=[source_model_input],
inputs=[source_model_input, model_ext, model_ext_name],
outputs=source_model_input,
show_progress=False,
)

View File

@ -1,6 +1,7 @@
import os
import logging
import time
import sys
from rich.theme import Theme
from rich.logging import RichHandler
@ -23,7 +24,10 @@ def setup_logging(clean=False, debug=False):
except:
pass
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(levelname)s | %(pathname)s | %(message)s', filename='setup.log', filemode='a', encoding='utf-8', force=True)
if sys.version_info >= (3, 9):
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(levelname)s | %(pathname)s | %(message)s', filename='setup.log', filemode='a', encoding='utf-8', force=True)
else:
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(levelname)s | %(pathname)s | %(message)s', filename='setup.log', filemode='a', force=True)
console = Console(log_time=True, log_time_format='%H:%M:%S-%f', theme=Theme({
"traceback.border": "black",

19
pyproject.toml Normal file
View File

@ -0,0 +1,19 @@
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "library"
version = "1.0.3"
description = "Libraries required to run kohya_ss GUI"
authors = ["Bernard Maltais <bernard@ducourier.com>"]
license = "Apache-2.0" # Apache Software License
[[tool.poetry.source]]
name = "library"
path = "library"
[tool.poetry.dependencies]
python = ">=3.9,<3.11"
[tool.poetry.dev-dependencies]

View File

@ -1,4 +1,3 @@
accelerate==0.19.0
albumentations==1.3.0
altair==4.2.2
bitsandbytes==0.35.0
@ -8,10 +7,8 @@ easygui==0.98.3
einops==0.6.0
fairscale==0.4.13
ftfy==6.1.1
gradio==3.23.0; sys_platform == 'darwin'
gradio==3.32.0; sys_platform != 'darwin'
huggingface-hub==0.13.3; sys_platform == 'darwin'
huggingface-hub==0.13.3; sys_platform != 'darwin'
gradio==3.33.1
huggingface-hub>=0.13.3
lion-pytorch==0.0.6
lycoris_lora==0.1.6
opencv-python==4.7.0.68
@ -19,9 +16,6 @@ prodigyopt==1.0
pytorch-lightning==1.9.0
rich==13.4.1
safetensors==0.2.6
tensorboard==2.10.1 ; sys_platform != 'darwin'
tensorboard==2.12.1 ; sys_platform == 'darwin'
tensorflow==2.10.1; sys_platform != 'darwin'
timm==0.6.12
tk==0.1.0
toml==0.10.2
@ -29,4 +23,4 @@ transformers==4.26.0
voluptuous==0.13.1
wandb==0.15.0
# for kohya_ss library
.
-e . # no_verify leave this to specify not checking this a verification stage

4
requirements_linux.txt Normal file
View File

@ -0,0 +1,4 @@
torch==2.0.1+cu118 torchvision==0.15.2+cu118 --extra-index-url https://download.pytorch.org/whl/cu118 # no_verify leave this to specify not checking this a verification stage
xformers==0.0.20
accelerate==0.19.0 tensorboard==2.12.1 tensorflow==2.12.0
-r requirements.txt

View File

@ -0,0 +1,31 @@
torch==2.0.0 torchvision==0.15.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html
xformers
accelerate==0.19.0 tensorflow-macos tensorboard==2.12.1
-r requirements.txt
# accelerate==0.15.0
# albumentations==1.3.0
# altair==4.2.2
# bitsandbytes==0.35.0
# dadaptation==3.1
# diffusers[torch]==0.10.2
# easygui==0.98.3
# einops==0.6.0
# fairscale==0.4.13
# ftfy==6.1.1
# gradio==3.23.0
# huggingface-hub==0.13.0
# lion-pytorch==0.0.6
# lycoris_lora==0.1.6
# opencv-python==4.7.0.68
# pytorch-lightning==1.9.0
# rich==13.4.1
# safetensors==0.2.6
# timm==0.6.12
# tk==0.1.0
# toml==0.10.2
# transformers==4.26.0
# voluptuous==0.13.1
# wandb==0.15.0
# # for kohya_ss library
# .

View File

@ -0,0 +1,31 @@
torch==2.0.0 torchvision==0.15.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html
xformers
accelerate==0.19.0 tensorflow-metal tensorboard==2.12.1
-r requirements.txt
# accelerate==0.15.0
# albumentations==1.3.0
# altair==4.2.2
# bitsandbytes==0.35.0
# dadaptation==3.1
# diffusers[torch]==0.10.2
# easygui==0.98.3
# einops==0.6.0
# fairscale==0.4.13
# ftfy==6.1.1
# gradio==3.23.0
# huggingface-hub==0.13.0
# lion-pytorch==0.0.6
# lycoris_lora==0.1.6
# opencv-python==4.7.0.68
# pytorch-lightning==1.9.0
# rich==13.4.1
# safetensors==0.2.6
# timm==0.6.12
# tk==0.1.0
# toml==0.10.2
# transformers==4.26.0
# voluptuous==0.13.1
# wandb==0.15.0
# # for kohya_ss library
# .

View File

@ -1,31 +0,0 @@
accelerate==0.15.0
albumentations==1.3.0
altair==4.2.2
bitsandbytes==0.35.0
dadaptation==3.1
diffusers[torch]==0.10.2
easygui==0.98.3
einops==0.6.0
fairscale==0.4.13
ftfy==6.1.1
gradio==3.23.0; sys_platform == 'darwin'
gradio==3.32.0; sys_platform != 'darwin'
huggingface-hub==0.13.0; sys_platform == 'darwin'
huggingface-hub==0.13.3; sys_platform != 'darwin'
lion-pytorch==0.0.6
lycoris_lora==0.1.6
opencv-python==4.7.0.68
pytorch-lightning==1.9.0
rich==13.4.1
safetensors==0.2.6
tensorboard==2.10.1 ; sys_platform != 'darwin'
tensorboard==2.12.1 ; sys_platform == 'darwin'
tensorflow==2.10.1; sys_platform != 'darwin'
timm==0.6.12
tk==0.1.0
toml==0.10.2
transformers==4.26.0
voluptuous==0.13.1
wandb==0.15.0
# for kohya_ss library
.

View File

@ -1,29 +1,31 @@
accelerate==0.15.0
albumentations==1.3.0
altair==4.2.2
bitsandbytes==0.35.0
dadaptation==3.1
diffusers[torch]==0.10.2
easygui==0.98.3
einops==0.6.0
fairscale==0.4.13
ftfy==6.1.1
gradio==3.32.0
huggingface-hub==0.13.3
lion-pytorch==0.0.6
lycoris_lora==0.1.6
opencv-python==4.7.0.68
prodigyopt==1.0
pytorch-lightning==1.9.0
rich==13.4.1
safetensors==0.2.6
tensorboard==2.10.1
tensorflow==2.10.1
timm==0.6.12
tk==0.1.0
toml==0.10.2
transformers==4.26.0
voluptuous==0.13.1
wandb==0.15.0
# for kohya_ss library
.
torch==1.12.1+cu116 torchvision==0.13.1+cu116 --index-url https://download.pytorch.org/whl/cu116 # no_verify
https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl -U -I --no-deps # no_verify
accelerate==0.15.0 tensorboard==2.10.1 tensorflow==2.10.1
-r requirements.txt
# albumentations==1.3.0
# altair==4.2.2
# bitsandbytes==0.35.0
# dadaptation==3.1
# diffusers[torch]==0.10.2
# easygui==0.98.3
# einops==0.6.0
# fairscale==0.4.13
# ftfy==6.1.1
# gradio==3.32.0
# huggingface-hub==0.13.3
# lion-pytorch==0.0.6
# lycoris_lora==0.1.6
# opencv-python==4.7.0.68
# prodigyopt==1.0
# pytorch-lightning==1.9.0
# rich==13.4.1
# safetensors==0.2.6
# timm==0.6.12
# tk==0.1.0
# toml==0.10.2
# transformers==4.26.0
# voluptuous==0.13.1
# wandb==0.15.0
# # for kohya_ss library
# .

View File

@ -1,30 +1,32 @@
accelerate==0.19.0
albumentations==1.3.0
altair==4.2.2
bitsandbytes==0.35.0
dadaptation==3.1
diffusers[torch]==0.10.2
easygui==0.98.3
einops==0.6.0
fairscale==0.4.13
ftfy==6.1.1
gradio==3.33.1
huggingface-hub==0.15.1
lion-pytorch==0.0.6
lycoris_lora==0.1.6
opencv-python==4.7.0.68
prodigyopt==1.0
pytorch-lightning==1.9.0
rich==13.4.1
safetensors==0.2.6
tensorboard==2.12.3
tensorflow==2.12.0
timm==0.6.12
tk==0.1.0
toml==0.10.2
transformers==4.26.0
voluptuous==0.13.1
wandb==0.15.0
torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118 # no_verify
xformers==0.0.20
# for kohya_ss library
.
accelerate==0.19.0 tensorboard==2.12.3 tensorflow==2.12.0
-r requirements.txt
# albumentations==1.3.0
# altair==4.2.2
# bitsandbytes==0.35.0
# dadaptation==3.1
# diffusers[torch]==0.10.2
# easygui==0.98.3
# einops==0.6.0
# fairscale==0.4.13
# ftfy==6.1.1
# gradio==3.33.1
# huggingface-hub==0.15.1
# lion-pytorch==0.0.6
# lycoris_lora==0.1.6
# opencv-python==4.7.0.68
# prodigyopt==1.0
# pytorch-lightning==1.9.0
# rich==13.4.1
# safetensors==0.2.6
# timm==0.6.12
# tk==0.1.0
# toml==0.10.2
# transformers==4.26.0
# voluptuous==0.13.1
# wandb==0.15.0
# xformers==0.0.20
# # for kohya_ss library
# .

View File

@ -20,11 +20,11 @@ mkdir ".\logs\setup" > nul 2>&1
call .\venv\Scripts\deactivate.bat
:: Calling external python program to check for local modules
python .\tools\check_local_modules.py
python .\setup\check_local_modules.py
call .\venv\Scripts\activate.bat
python .\tools\setup_windows.py
cmd /k python .\setup\setup_windows.py
:: Deactivate the virtual environment
call .\venv\Scripts\deactivate.bat

View File

@ -17,11 +17,11 @@ $null = New-Item -ItemType Directory -Force -Path ".\logs\setup"
& .\venv\Scripts\deactivate.bat
# Calling external python program to check for local modules
& .\venv\Scripts\python.exe .\tools\check_local_modules.py
& .\venv\Scripts\python.exe .\setup\check_local_modules.py
& .\venv\Scripts\activate.bat
& .\venv\Scripts\python.exe .\tools\setup_windows.py
& .\venv\Scripts\python.exe .\setup\setup_windows.py
# Deactivate the virtual environment
& .\venv\Scripts\deactivate.bat

View File

@ -1,10 +0,0 @@
from setuptools import setup, find_packages
import subprocess
import os
import sys
# Call the create_user_files.py script
script_path = os.path.join("tools", "create_user_files.py")
subprocess.run([sys.executable, script_path])
setup(name="library", version="1.0.3", packages=find_packages())

171
setup.sh
View File

@ -3,8 +3,6 @@
# This file will be the host environment setup file for all operating systems other than base Windows.
# Set the required package versions here.
# They will be appended to the requirements_unix.txt file in the installation directory.
TENSORFLOW_VERSION="2.12.0"
TENSORFLOW_MACOS_VERSION="2.12.0"
TENSORFLOW_METAL_VERSION="0.8.0"
@ -90,7 +88,7 @@ GIT_REPO="https://github.com/bmaltais/kohya_ss.git"
INTERACTIVE=false
PUBLIC=false
SKIP_SPACE_CHECK=false
SKIP_GIT_UPDATE=false
SKIP_GIT_UPDATE=true
SKIP_GUI=false
while getopts ":vb:d:g:inprus-:" opt; do
@ -100,6 +98,7 @@ while getopts ":vb:d:g:inprus-:" opt; do
OPTARG="${OPTARG#$opt}" # extract long option argument (may be empty)
OPTARG="${OPTARG#=}" # if long option argument, remove assigning `=`
fi
case $opt in
b | branch) BRANCH="$OPTARG" ;;
d | dir) DIR="$OPTARG" ;;
@ -194,28 +193,34 @@ size_available() {
# The expected usage is create_symlinks symlink target_file
create_symlinks() {
local symlink="$1"
local target_file="$2"
echo "Checking symlinks now."
# Next line checks for valid symlink
if [ -L "$1" ]; then
# Check if the symlink exists
if [ -L "$symlink" ]; then
# Check if the linked file exists and points to the expected file
if [ -e "$1" ] && [ "$(readlink "$1")" == "$2" ]; then
echo "$(basename "$1") symlink looks fine. Skipping."
if [ -e "$symlink" ] && [ "$(readlink "$symlink")" == "$target_file" ]; then
echo "$(basename "$symlink") symlink looks fine. Skipping."
else
if [ -f "$2" ]; then
echo "Broken symlink detected. Recreating $(basename "$1")."
rm "$1" &&
ln -s "$2" "$1"
if [ -f "$target_file" ]; then
echo "Broken symlink detected. Recreating $(basename "$symlink")."
rm "$symlink" && ln -s "$target_file" "$symlink"
else
echo "$2 does not exist. Nothing to link."
echo "$target_file does not exist. Nothing to link."
fi
fi
else
echo "Linking $(basename "$1")."
ln -s "$2" "$1"
echo "Linking $(basename "$symlink")."
ln -s "$target_file" "$symlink"
fi
}
install_python_dependencies() {
local TEMP_REQUIREMENTS_FILE
# Switch to local virtual env
echo "Switching to virtual Python environment."
if ! inDocker; then
@ -234,63 +239,52 @@ install_python_dependencies() {
fi
# Updating pip if there is one
echo "Checking for pip updates before Python operations."
pip install --upgrade pip >&3
# echo "Checking for pip updates before Python operations."
# pip install --upgrade pip
# echo "Installing python dependencies. This could take a few minutes as it downloads files."
# echo "If this operation ever runs too long, you can rerun this script in verbose mode to check."
echo "Installing python dependencies. This could take a few minutes as it downloads files."
echo "If this operation ever runs too long, you can rerun this script in verbose mode to check."
case "$OSTYPE" in
"linux-gnu"*) pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 \
--extra-index-url https://download.pytorch.org/whl/cu118 >&3 &&
pip install -U -I xformers==0.0.20 >&3 ;;
"darwin"*) pip install torch==2.0.0 torchvision==0.15.1 \
-f https://download.pytorch.org/whl/cpu/torch_stable.html >&3 ;;
"cygwin")
:
;;
"msys")
:
;;
"lin"*)
python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux.txt
;;
"darwin"*)
if [[ "$(uname -m)" == "arm64" ]]; then
python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_arm64.txt
else
python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_amd64.txt
fi
;;
esac
if [ "$RUNPOD" = true ]; then
echo "Installing tenssort."
pip install tensorrt >&3
pip install tensorrt
fi
# DEBUG ONLY (Update this version number to whatever PyCharm recommends)
# pip install pydevd-pycharm~=223.8836.43
#This will copy our requirements_unix.txt file out and make the khoya_ss lib a dynamic location then cleanup.
local TEMP_REQUIREMENTS_FILE="$DIR/requirements_tmp_for_setup.txt"
echo "Copying $DIR/requirements_unix.txt to $TEMP_REQUIREMENTS_FILE" >&3
echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3
awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_unix.txt" >"$TEMP_REQUIREMENTS_FILE"
# Create a temporary requirements file
# TEMP_REQUIREMENTS_FILE=$(mktemp)
# This will check if macOS is running then determine if M1+ or Intel CPU.
# It will append the appropriate packages to the requirements_unix.txt file.
# Other OSs won't be affected and the version variables are at the top of this file.
if [[ "$(uname)" == "Darwin" ]]; then
# Check if the processor is Apple Silicon (arm64)
if [[ "$(uname -m)" == "arm64" ]]; then
echo "tensorflow-macos==$TENSORFLOW_MACOS_VERSION" >>"$TEMP_REQUIREMENTS_FILE"
echo "tensorflow-metal==$TENSORFLOW_METAL_VERSION" >>"$TEMP_REQUIREMENTS_FILE"
# Check if the processor is Intel (x86_64)
elif [[ "$(uname -m)" == "x86_64" ]]; then
echo "tensorflow==$TENSORFLOW_VERSION" >>"$TEMP_REQUIREMENTS_FILE"
fi
fi
# if [[ "$OSTYPE" == "darwin"* ]]; then
# echo "Copying $DIR/requirements_macos.txt to $TEMP_REQUIREMENTS_FILE" >&3
# echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3
# awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_macos.txt" >"$TEMP_REQUIREMENTS_FILE"
# else
# echo "Copying $DIR/requirements_linux.txt to $TEMP_REQUIREMENTS_FILE" >&3
# echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3
# awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_linux.txt" >"$TEMP_REQUIREMENTS_FILE"
# fi
if [ $VERBOSITY == 2 ]; then
python -m pip install --quiet --use-pep517 --upgrade -r "$TEMP_REQUIREMENTS_FILE" >&3
else
python -m pip install --use-pep517 --upgrade -r "$TEMP_REQUIREMENTS_FILE" >&3
fi
echo "Removing the temp requirements file."
if [ -f "$TEMP_REQUIREMENTS_FILE" ]; then
rm -f "$TEMP_REQUIREMENTS_FILE"
fi
# # Install the Python dependencies from the temporary requirements file
# if [ $VERBOSITY == 2 ]; then
# python -m pip install --quiet --upgrade -r "$TEMP_REQUIREMENTS_FILE"
# else
# python -m pip install --upgrade -r "$TEMP_REQUIREMENTS_FILE"
# fi
if [ -n "$VIRTUAL_ENV" ] && ! inDocker; then
if command -v deactivate >/dev/null; then
@ -302,6 +296,7 @@ install_python_dependencies() {
fi
}
# Attempt to non-interactively install a default accelerate config file unless specified otherwise.
# Documentation for order of precedence locations for configuration file for automated installation:
# https://huggingface.co/docs/accelerate/basic_tutorials/launch#custom-configurations
@ -414,7 +409,7 @@ update_kohya_ss() {
}
# Start OS-specific detection and work
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
if [[ "$OSTYPE" == "lin"* ]]; then
# Check if root or sudo
root=false
if [ "$EUID" = 0 ]; then
@ -482,48 +477,62 @@ if [[ "$OSTYPE" == "linux-gnu"* ]]; then
echo "Raw detected distro string: $distro" >&4
echo "Raw detected distro family string: $family" >&4
echo "Installing Python TK if not found on the system."
if "$distro" | grep -qi "Ubuntu" || "$family" | grep -qi "Ubuntu"; then
echo "Ubuntu detected."
if [ $(dpkg-query -W -f='${Status}' python3-tk 2>/dev/null | grep -c "ok installed") = 0 ]; then
if [ "$root" = true ]; then
apt update -y >&3 && apt install -y python3-tk >&3
else
echo "This script needs to be run as root or via sudo to install packages."
# if [ "$root" = true ]; then
echo "This script needs you to install the missing python3-tk packages. Please install with:"
echo " "
echo "sudo apt update -y && sudo apt install -y python3-tk"
exit 1
fi
# else
# echo "This script needs to be run as root or via sudo to install packages."
# exit 1
# fi
else
echo "Python TK found! Skipping install!"
echo "Python TK found..."
fi
elif "$distro" | grep -Eqi "Fedora|CentOS|Redhat"; then
echo "Redhat or Redhat base detected."
if ! rpm -qa | grep -qi python3-tkinter; then
if [ "$root" = true ]; then
dnf install python3-tkinter -y >&3
else
echo "This script needs to be run as root or via sudo to install packages."
# if [ "$root" = true ]; then
echo "This script needs you to install the missing python3-tk packages. Please install with:\n\n"
echo "sudo dnf install python3-tkinter -y >&3"
exit 1
fi
# else
# echo "This script needs to be run as root or via sudo to install packages."
# exit 1
# fi
else
echo "Python TK found..."
fi
elif "$distro" | grep -Eqi "arch" || "$family" | grep -qi "arch"; then
echo "Arch Linux or Arch base detected."
if ! pacman -Qi tk >/dev/null; then
if [ "$root" = true ]; then
pacman --noconfirm -S tk >&3
else
echo "This script needs to be run as root or via sudo to install packages."
# if [ "$root" = true ]; then
echo "This script needs you to install the missing python3-tk packages. Please install with:\n\n"
echo "pacman --noconfirm -S tk >&3"
exit 1
fi
# else
# echo "This script needs to be run as root or via sudo to install packages."
# exit 1
# fi
else
echo "Python TK found..."
fi
elif "$distro" | grep -Eqi "opensuse" || "$family" | grep -qi "opensuse"; then
echo "OpenSUSE detected."
if ! rpm -qa | grep -qi python-tk; then
if [ "$root" = true ]; then
zypper install -y python-tk >&3
else
echo "This script needs to be run as root or via sudo to install packages."
# if [ "$root" = true ]; then
echo "This script needs you to install the missing python3-tk packages. Please install with:\n\n"
echo "zypper install -y python-tk >&3"
exit 1
fi
# else
# echo "This script needs to be run as root or via sudo to install packages."
# exit 1
# fi
else
echo "Python TK found..."
fi
elif [ "$distro" = "None" ] || [ "$family" = "None" ]; then
if [ "$distro" = "None" ]; then

View File

@ -1,5 +1,6 @@
import subprocess
import os
import re
import sys
import filecmp
import logging
@ -12,10 +13,6 @@ import pkg_resources
errors = 0 # Define the 'errors' variable before using it
log = logging.getLogger('sd')
# ANSI escape code for yellow color
YELLOW = '\033[93m'
RESET_COLOR = '\033[0m'
# setup console and file logging
def setup_logging(clean=False):
#
@ -91,7 +88,7 @@ def setup_logging(clean=False):
log.addHandler(rh)
def configure_accelerate():
def configure_accelerate(run_accelerate=False):
#
# This function was taken and adapted from code written by jstayco
#
@ -112,9 +109,12 @@ def configure_accelerate():
)
if not os.path.exists(source_accelerate_config_file):
log.info(
f'Could not find the accelerate configuration file in {source_accelerate_config_file}. Please configure accelerate manually by runningthe option in the menu.'
)
if run_accelerate:
run_cmd('accelerate config')
else:
log.error(
f'Could not find the accelerate configuration file in {source_accelerate_config_file}. Please configure accelerate manually by runningthe option in the menu.'
)
log.debug(
f'Source accelerate config location: {source_accelerate_config_file}'
@ -158,13 +158,23 @@ def configure_accelerate():
shutil.copyfile(
source_accelerate_config_file, target_config_location
)
log.debug(
log.info(
f'Copied accelerate config file to: {target_config_location}'
)
else:
if run_accelerate:
run_cmd('accelerate config')
else:
log.error(
'Could not automatically configure accelerate. Please manually configure accelerate with the option in the menu or with: accelerate config.'
)
else:
log.info(
'Could not place the accelerate configuration file. Please configure manually with: accelerate config.'
)
if run_accelerate:
run_cmd('accelerate config')
else:
log.error(
'Could not automatically configure accelerate. Please manually configure accelerate with the option in the menu or with: accelerate config.'
)
def check_torch():
@ -254,34 +264,13 @@ def git(arg: str, folder: str = None, ignore: bool = False):
errors += 1
log.error(f'Error running git: {folder} / {arg}')
if 'or stash them' in txt:
log.error(f'Local changes detected: check log for details: {log_file}')
log.error(f'Local changes detected: check log for details...')
log.debug(f'Git output: {txt}')
return txt
def cudann_install():
cudnn_src = os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..\cudnn_windows'
)
cudnn_dest = os.path.join(sysconfig.get_paths()['purelib'], 'torch', 'lib')
log.info(f'Checking for CUDNN files in {cudnn_dest}...')
if os.path.exists(cudnn_src):
if os.path.exists(cudnn_dest):
# check for different files
filecmp.clear_cache()
for file in os.listdir(cudnn_src):
src_file = os.path.join(cudnn_src, file)
dest_file = os.path.join(cudnn_dest, file)
# if dest file exists, check if it's different
if os.path.exists(dest_file):
shutil.copy2(src_file, cudnn_dest)
log.info('Copied CUDNN 8.6 files to destination')
else:
log.error(f'Installation Failed: "{cudnn_src}" could not be found. ')
def pip(arg: str, ignore: bool = False, quiet: bool = False):
arg = arg.replace('>=', '==')
# arg = arg.replace('>=', '==')
if not quiet:
log.info(f'Installing package: {arg.replace("install", "").replace("--upgrade", "").replace("--no-deps", "").replace("--force", "").replace(" ", " ").strip()}')
log.debug(f"Running pip: {arg}")
@ -302,8 +291,11 @@ def installed(package, friendly: str = None):
#
# This function was adapted from code written by vladimandic: https://github.com/vladmandic/automatic/commits/master
#
# Remove brackets and their contents from the line using regular expressions
# e.g., diffusers[torch]==0.10.2 becomes diffusers==0.10.2
package = re.sub(r'\[.*?\]', '', package)
ok = True
try:
if friendly:
pkgs = friendly.split()
@ -315,38 +307,40 @@ def installed(package, friendly: str = None):
]
pkgs = [
p.split('/')[-1] for p in pkgs
] # get only package name if installing from url
] # get only package name if installing from URL
for pkg in pkgs:
if '>=' in pkg:
p = pkg.split('>=')
pkg_name, pkg_version = [x.strip() for x in pkg.split('>=')]
elif '==' in pkg:
p = pkg.split('==')
pkg_name, pkg_version = [x.strip() for x in pkg.split('==')]
else:
p = [pkg]
spec = pkg_resources.working_set.by_key.get(
p[0], None
) # more reliable than importlib
pkg_name, pkg_version = pkg.strip(), None
spec = pkg_resources.working_set.by_key.get(pkg_name, None)
if spec is None:
spec = pkg_resources.working_set.by_key.get(
p[0].lower(), None
) # check name variations
spec = pkg_resources.working_set.by_key.get(pkg_name.lower(), None)
if spec is None:
spec = pkg_resources.working_set.by_key.get(
p[0].replace('_', '-'), None
) # check name variations
ok = ok and spec is not None
if ok:
version = pkg_resources.get_distribution(p[0]).version
log.debug(f'Package version found: {p[0]} {version}')
if len(p) > 1:
ok = ok and version == p[1]
spec = pkg_resources.working_set.by_key.get(pkg_name.replace('_', '-'), None)
if spec is not None:
version = pkg_resources.get_distribution(pkg_name).version
log.debug(f'Package version found: {pkg_name} {version}')
if pkg_version is not None:
if '>=' in pkg:
ok = version >= pkg_version
else:
ok = version == pkg_version
if not ok:
log.warning(
f'Package wrong version: {p[0]} {version} required {p[1]}'
)
log.warning(f'Package wrong version: {pkg_name} {version} required {pkg_version}')
return False
else:
log.debug(f'Package version not found: {p[0]}')
return ok
log.debug(f'Package version not found: {pkg_name}')
return False
return True
except ModuleNotFoundError:
log.debug(f'Package not installed: {pkgs}')
return False
@ -362,6 +356,9 @@ def install(
ignore: bool = False,
reinstall: bool = False,
):
# Remove anything after '#' in the package variable
package = package.split('#')[0].strip()
if reinstall:
global quick_allowed # pylint: disable=global-statement
quick_allowed = False
@ -369,6 +366,51 @@ def install(
pip(f'install --upgrade {package}', ignore=ignore)
def process_requirements_line(line):
    """Install one requirements line, passing a bracket-free alias for version checks.

    Extras specifiers such as ``diffusers[torch]==0.10.2`` are reduced to
    ``diffusers==0.10.2`` so the installed-package lookup matches pip metadata.
    """
    friendly_name = re.sub(r'\[[^\]]*\]', '', line)
    install(line, friendly_name)
def install_requirements(requirements_file, check_no_verify_flag=False):
    """Install (or verify) every requirement listed in ``requirements_file``.

    Lines beginning with ``-r`` reference another requirements file and are
    expanded recursively.  When ``check_no_verify_flag`` is true, lines tagged
    with ``no_verify`` are skipped so they are not re-checked at the
    verification stage.

    :param requirements_file: path to a pip-style requirements file.
    :param check_no_verify_flag: when True, only verify and skip ``no_verify`` lines.
    """
    if check_no_verify_flag:
        log.info(f'Verifying modules installation status from {requirements_file}...')
    else:
        log.info(f'Installing modules from {requirements_file}...')

    with open(requirements_file, 'r', encoding='utf8') as f:
        # Keep non-empty, non-comment lines; when verifying, also drop lines
        # tagged 'no_verify'.  (Single comprehension replaces the previous
        # duplicated pair; the redundant 'line is not None' test is removed —
        # readlines() never yields None.)
        lines = [
            line.strip()
            for line in f
            if line.strip() != ''
            and not line.startswith('#')
            and not (check_no_verify_flag and 'no_verify' in line)
        ]

    # Iterate over each line and install (or verify) the requirement.
    for line in lines:
        if line.startswith('-r'):
            # '-r path' includes another requirements file; expand it recursively.
            included_file = line[2:].strip()
            install_requirements(included_file, check_no_verify_flag=check_no_verify_flag)
        else:
            process_requirements_line(line)
def ensure_base_requirements():
try:
import rich # pylint: disable=unused-import
@ -378,7 +420,7 @@ def ensure_base_requirements():
def run_cmd(run_cmd):
try:
subprocess.run(run_cmd, check=True)
subprocess.run(run_cmd, shell=True, check=False, env=os.environ)
except subprocess.CalledProcessError as e:
print(f'Error occurred while running command: {run_cmd}')
print(f'Error: {e}')
@ -417,24 +459,6 @@ def delete_file(file_path):
os.remove(file_path)
def install_requirements(requirements_file):
#
# This function was adapted from code written by vladimandic: https://github.com/vladmandic/automatic/commits/master
#
log.info('Verifying requirements')
with open(requirements_file, 'r', encoding='utf8') as f:
lines = [
line.strip()
for line in f.readlines()
if line.strip() != ''
and not line.startswith('#')
and line is not None
]
for line in lines:
install(line)
def write_to_file(file_path, content):
try:
with open(file_path, 'w') as file:
@ -444,114 +468,6 @@ def write_to_file(file_path, content):
print(f'Error: {e}')
def sync_bits_and_bytes_files():
    # Copy patched bitsandbytes files from the repo's 'bitsandbytes_windows'
    # folder into the installed bitsandbytes package, skipping files that are
    # already identical.  Windows-only: returns early on any other OS.
    import filecmp

    """
    Check for "different" bitsandbytes Files and copy only if necessary.
    This function is specific for Windows OS.
    """

    # Only execute on Windows
    if os.name != 'nt':
        print('This function is only applicable to Windows OS.')
        return

    try:
        log.info(f'Copying bitsandbytes files...')
        # Define source and destination directories.
        # Source: <cwd>/bitsandbytes_windows — NOTE(review): assumes the
        # script is run from the repo root; verify against callers.
        source_dir = os.path.join(os.getcwd(), 'bitsandbytes_windows')
        dest_dir_base = os.path.join(
            sysconfig.get_paths()['purelib'], 'bitsandbytes'
        )

        # Clear file comparison cache so stale results don't suppress copies.
        filecmp.clear_cache()

        # Iterate over each file in source directory
        for file in os.listdir(source_dir):
            source_file_path = os.path.join(source_dir, file)

            # Decide the destination directory based on file name:
            # 'main.py' and 'paths.py' belong in the cuda_setup subpackage.
            if file in ('main.py', 'paths.py'):
                dest_dir = os.path.join(dest_dir_base, 'cuda_setup')
            else:
                dest_dir = dest_dir_base

            dest_file_path = os.path.join(dest_dir, file)

            # Compare the source file with the destination file; copy only
            # when missing or different (copy2 preserves file metadata).
            if os.path.exists(dest_file_path) and filecmp.cmp(
                source_file_path, dest_file_path
            ):
                log.debug(
                    f'Skipping {source_file_path} as it already exists in {dest_dir}'
                )
            else:
                # Copy file from source to destination, maintaining original file's metadata
                log.debug(f'Copy {source_file_path} to {dest_dir}')
                shutil.copy2(source_file_path, dest_dir)

    except FileNotFoundError as fnf_error:
        log.error(f'File not found error: {fnf_error}')
    except PermissionError as perm_error:
        log.error(f'Permission error: {perm_error}')
    except Exception as e:
        log.error(f'An unexpected error occurred: {e}')
def install_kohya_ss_torch1():
    """Install the Torch 1 variant of the kohya_ss GUI environment (Windows).

    Verifies the repo and Python version, upgrades pip, refuses to proceed
    when Torch 2 is already in the venv, then installs pinned
    torch/torchvision/xformers builds plus the Torch 1 requirements file.
    """
    check_repo_version()
    check_python()
    # Upgrade pip if needed
    install('--upgrade pip')
    # A venv already holding Torch 2 cannot be switched to Torch 1 in place.
    if check_torch() == 2:
        input(
            f'{YELLOW}\nTorch 2 is already installed in the venv. To install Torch 1 delete the venv and re-run setup.bat\n\nHit any key to acknowledge.{RESET_COLOR}'
        )
        return
    # Torch 1.12.1 + CUDA 11.6 wheels from the PyTorch index.
    install(
        'torch==1.12.1+cu116 torchvision==0.13.1+cu116 --index-url https://download.pytorch.org/whl/cu116',
        'torch torchvision'
    )
    # Prebuilt xformers wheel matching Torch 1 / Python 3.10 on Windows.
    install(
        'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl -U -I --no-deps',
        'xformers-0.0.14'
    )
    install_requirements('requirements_windows_torch1.txt')
    sync_bits_and_bytes_files()
    configure_accelerate()
    # run_cmd(f'accelerate config')
def install_kohya_ss_torch2():
    """Install the Torch 2 variant of the kohya_ss GUI environment (Windows).

    Verifies the repo and Python version, upgrades pip, refuses to proceed
    when Torch 1 is already in the venv, then installs pinned
    torch/torchvision builds plus the Torch 2 requirements file.
    """
    check_repo_version()
    check_python()
    # Upgrade pip if needed
    install('--upgrade pip')
    # A venv already holding Torch 1 cannot be switched to Torch 2 in place.
    if check_torch() == 1:
        input(
            f'{YELLOW}\nTorch 1 is already installed in the venv. To install Torch 2 delete the venv and re-run setup.bat\n\nHit any key to acknowledge.{RESET_COLOR}'
        )
        return
    # Torch 2.0.1 + CUDA 11.8 wheels from the PyTorch index.
    install(
        'torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118',
        'torch torchvision'
    )
    install_requirements('requirements_windows_torch2.txt')
    # install('https://huggingface.co/r4ziel/xformers_pre_built/resolve/main/triton-2.0.0-cp310-cp310-win_amd64.whl', 'triton', reinstall=reinstall)
    sync_bits_and_bytes_files()
    configure_accelerate()
    # run_cmd(f'accelerate config')
def clear_screen():
# Check the current operating system to execute the correct clear screen command
if os.name == 'nt': # If the operating system is Windows
@ -559,52 +475,3 @@ def clear_screen():
else: # If the operating system is Linux or Mac
os.system('clear')
def main_menu():
    """Interactive top-level setup menu (Windows-oriented)."""
    clear_screen()
    while True:
        print('\nKohya_ss GUI setup menu:\n')
        print('1. Install kohya_ss gui')
        print('2. Install cudann files')
        print('3. Manually configure accelerate')
        print('4. Start Kohya_ss GUI in browser')
        print('5. Quit')
        choice = input('\nEnter your choice: ')
        print('')
        if choice == '1':
            # Sub-menu: pick the Torch major version to install.
            while True:
                print('1. Torch 1')
                print('2. Torch 2')
                print('3. Cancel')
                choice_torch = input('\nEnter your choice: ')
                print('')
                if choice_torch == '1':
                    install_kohya_ss_torch1()
                    break
                elif choice_torch == '2':
                    install_kohya_ss_torch2()
                    break
                elif choice_torch == '3':
                    break
                else:
                    print('Invalid choice. Please enter a number between 1-3.')
        elif choice == '2':
            cudann_install()
        elif choice == '3':
            run_cmd('accelerate config')
        elif choice == '4':
            # FIX: raw string — '\g' in a normal literal is an invalid escape
            # sequence (DeprecationWarning); the raw form is unambiguous and
            # produces the same command string.
            subprocess.Popen(r'start cmd /c .\gui.bat --inbrowser', shell=True)
        elif choice == '5':
            print('Quitting the program.')
            break
        else:
            print('Invalid choice. Please enter a number between 1-5.')
if __name__ == '__main__':
    # Bootstrap order matters: base requirements (e.g. rich) must exist
    # before logging is configured, and logging before the menu runs.
    ensure_base_requirements()
    setup_logging()
    main_menu()

38
setup/setup_linux.py Normal file
View File

@ -0,0 +1,38 @@
import argparse
import logging
import setup_common
errors = 0 # Define the 'errors' variable before using it
# Logger shared by the setup scripts (name 'sd').
log = logging.getLogger('sd')
# ANSI escape code for yellow color
YELLOW = '\033[93m'
# ANSI escape code restoring the default terminal color.
RESET_COLOR = '\033[0m'
def install_kohya_ss(platform_requirements_file):
    """Install the kohya_ss GUI dependencies for the current platform.

    Args:
        platform_requirements_file: pip requirements file to install from.
    """
    setup_common.check_repo_version()
    setup_common.check_python()
    # Upgrade pip if needed
    setup_common.install('--upgrade pip')
    setup_common.install_requirements(platform_requirements_file, check_no_verify_flag=False)
    # Write a default accelerate config rather than launching `accelerate config`.
    setup_common.configure_accelerate(run_accelerate=False)
    # run_cmd(f'accelerate config')
def main_menu(platform_requirements_file):
    """Non-interactive entry point: log progress hints, then run the install.

    Args:
        platform_requirements_file: requirements file forwarded to
            install_kohya_ss().
    """
    log.info("Installing python dependencies. This could take a few minutes as it downloads files.")
    log.info("If this operation ever runs too long, you can rerun this script in verbose mode to check.")
    install_kohya_ss(platform_requirements_file)
if __name__ == '__main__':
    # Ensure logging prerequisites exist before configuring logging.
    setup_common.ensure_base_requirements()
    setup_common.setup_logging()
    # Allow callers (e.g. setup.sh) to point at a different requirements file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--platform-requirements-file', dest='platform_requirements_file', default='requirements_linux.txt', help='Path to the platform-specific requirements file')
    args = parser.parse_args()
    main_menu(args.platform_requirements_file)

202
setup/setup_windows.py Normal file
View File

@ -0,0 +1,202 @@
import subprocess
import os
import filecmp
import logging
import shutil
import sysconfig
import setup_common
errors = 0 # Define the 'errors' variable before using it
# Logger shared by the setup scripts (name 'sd').
log = logging.getLogger('sd')
# ANSI escape code for yellow color
YELLOW = '\033[93m'
# ANSI escape code restoring the default terminal color.
RESET_COLOR = '\033[0m'
def cudann_install():
    """Copy the bundled CUDNN 8.6 files into the venv's torch/lib directory.

    Source: <repo>/cudnn_windows (sibling of this setup directory).
    Destination: site-packages/torch/lib. A file is copied only when it is
    missing at the destination or differs from the bundled copy.
    """
    # FIX: build the path portably with os.path.join; the previous
    # '..\cudnn_windows' literal relied on '\c' not being a recognized
    # escape sequence (DeprecationWarning) and hard-coded the separator.
    cudnn_src = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), '..', 'cudnn_windows'
    )
    cudnn_dest = os.path.join(sysconfig.get_paths()['purelib'], 'torch', 'lib')

    log.info(f'Checking for CUDNN files in {cudnn_dest}...')
    if os.path.exists(cudnn_src):
        if os.path.exists(cudnn_dest):
            # check for different files
            filecmp.clear_cache()
            for file in os.listdir(cudnn_src):
                src_file = os.path.join(cudnn_src, file)
                dest_file = os.path.join(cudnn_dest, file)
                # if dest file exists, check if it's different
                if os.path.exists(dest_file):
                    if not filecmp.cmp(src_file, dest_file, shallow=False):
                        shutil.copy2(src_file, cudnn_dest)
                else:
                    shutil.copy2(src_file, cudnn_dest)
            log.info('Copied CUDNN 8.6 files to destination')
        else:
            log.warning(f'Destination directory {cudnn_dest} does not exist')
    else:
        log.error(f'Installation Failed: "{cudnn_src}" could not be found.')
def sync_bits_and_bytes_files():
    """
    Check for "different" bitsandbytes Files and copy only if necessary.
    This function is specific for Windows OS.
    """
    # FIX: the docstring above previously sat after a redundant local
    # `import filecmp` (filecmp is already imported at module top), which
    # made it a discarded string expression rather than the docstring.

    # Only execute on Windows
    if os.name != 'nt':
        print('This function is only applicable to Windows OS.')
        return
    try:
        log.info(f'Copying bitsandbytes files...')
        # Define source and destination directories
        source_dir = os.path.join(os.getcwd(), 'bitsandbytes_windows')
        dest_dir_base = os.path.join(
            sysconfig.get_paths()['purelib'], 'bitsandbytes'
        )
        # Clear file comparison cache
        filecmp.clear_cache()
        # Iterate over each file in source directory
        for file in os.listdir(source_dir):
            source_file_path = os.path.join(source_dir, file)
            # main.py / paths.py patch the CUDA detection logic, which lives
            # in the package's cuda_setup subdirectory.
            if file in ('main.py', 'paths.py'):
                dest_dir = os.path.join(dest_dir_base, 'cuda_setup')
            else:
                dest_dir = dest_dir_base
            dest_file_path = os.path.join(dest_dir, file)
            # Copy only when the destination is missing or differs.
            if os.path.exists(dest_file_path) and filecmp.cmp(
                source_file_path, dest_file_path
            ):
                log.debug(
                    f'Skipping {source_file_path} as it already exists in {dest_dir}'
                )
            else:
                # copy2 preserves the source file's metadata.
                log.debug(f'Copy {source_file_path} to {dest_dir}')
                shutil.copy2(source_file_path, dest_dir)
    except FileNotFoundError as fnf_error:
        log.error(f'File not found error: {fnf_error}')
    except PermissionError as perm_error:
        log.error(f'Permission error: {perm_error}')
    except Exception as e:
        log.error(f'An unexpected error occurred: {e}')
def install_kohya_ss_torch1():
    """Install the Torch 1 configuration of the kohya_ss GUI (Windows).

    The explicit torch/xformers installs are commented out below; the pins
    now come from requirements_windows_torch1.txt.
    """
    setup_common.check_repo_version()
    setup_common.check_python()
    # Upgrade pip if needed
    setup_common.install('--upgrade pip')
    # A venv already holding Torch 2 cannot be switched to Torch 1 in place.
    if setup_common.check_torch() == 2:
        input(
            f'{YELLOW}\nTorch 2 is already installed in the venv. To install Torch 1 delete the venv and re-run setup.bat\n\nHit enter to continue...{RESET_COLOR}'
        )
        return
    # setup_common.install(
    #     'torch==1.12.1+cu116 torchvision==0.13.1+cu116 --index-url https://download.pytorch.org/whl/cu116',
    #     'torch torchvision'
    # )
    # setup_common.install(
    #     'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl -U -I --no-deps',
    #     'xformers-0.0.14'
    # )
    setup_common.install_requirements('requirements_windows_torch1.txt', check_no_verify_flag=False)
    sync_bits_and_bytes_files()
    setup_common.configure_accelerate(run_accelerate=True)
    # run_cmd(f'accelerate config')
def install_kohya_ss_torch2():
    """Install the Torch 2 configuration of the kohya_ss GUI (Windows).

    The explicit torch/torchvision install is commented out below; the pins
    now come from requirements_windows_torch2.txt.
    """
    setup_common.check_repo_version()
    setup_common.check_python()
    # Upgrade pip if needed
    setup_common.install('--upgrade pip')
    # A venv already holding Torch 1 cannot be switched to Torch 2 in place.
    if setup_common.check_torch() == 1:
        # FIX: message aligned with install_kohya_ss_torch1(); input() only
        # resumes on Enter, so "Hit any key" was misleading.
        input(
            f'{YELLOW}\nTorch 1 is already installed in the venv. To install Torch 2 delete the venv and re-run setup.bat\n\nHit enter to continue...{RESET_COLOR}'
        )
        return
    # setup_common.install(
    #     'torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118',
    #     'torch torchvision'
    # )
    setup_common.install_requirements('requirements_windows_torch2.txt', check_no_verify_flag=False)
    # install('https://huggingface.co/r4ziel/xformers_pre_built/resolve/main/triton-2.0.0-cp310-cp310-win_amd64.whl', 'triton', reinstall=reinstall)
    sync_bits_and_bytes_files()
    setup_common.configure_accelerate(run_accelerate=True)
    # run_cmd(f'accelerate config')
def main_menu():
    """Interactive Windows setup menu for the kohya_ss GUI."""
    setup_common.clear_screen()
    while True:
        print('\nKohya_ss GUI setup menu:\n')
        print('1. Install kohya_ss gui')
        print('2. (Optional) Install cudann files')
        print('3. (Optional) Install bitsandbytes-windows')
        print('4. (Optional) Manually configure accelerate')
        print('5. (Optional) Start Kohya_ss GUI in browser')
        print('6. Quit')
        choice = input('\nEnter your choice: ')
        print('')
        if choice == '1':
            # Sub-menu: pick the Torch major version to install.
            while True:
                print('1. Torch 1')
                print('2. Torch 2')
                print('3. Cancel')
                choice_torch = input('\nEnter your choice: ')
                print('')
                if choice_torch == '1':
                    install_kohya_ss_torch1()
                    break
                elif choice_torch == '2':
                    install_kohya_ss_torch2()
                    break
                elif choice_torch == '3':
                    break
                else:
                    print('Invalid choice. Please enter a number between 1-3.')
        elif choice == '2':
            cudann_install()
        elif choice == '3':
            setup_common.install('--upgrade bitsandbytes-windows', reinstall=True)
        elif choice == '4':
            setup_common.run_cmd('accelerate config')
        elif choice == '5':
            # FIX: raw string — '\g' is an invalid escape sequence in a
            # normal literal (DeprecationWarning); same command string.
            subprocess.Popen(r'start cmd /k .\gui.bat --inbrowser', shell=True) # /k keep the terminal open on quit. /c would close the terminal instead
        elif choice == '6':
            print('Quitting the program.')
            break
        else:
            # FIX: the menu has six entries; the old message said 1-5.
            print('Invalid choice. Please enter a number between 1-6.')
if __name__ == '__main__':
    # Ensure logging prerequisites exist before configuring logging,
    # then present the interactive Windows setup menu.
    setup_common.ensure_base_requirements()
    setup_common.setup_logging()
    main_menu()

View File

@ -3,15 +3,14 @@ import re
import sys
import shutil
import argparse
import subprocess
from setup_windows import check_repo_version
import setup_common
# Get the absolute path of the current file's directory (Kohua_SS project directory)
project_directory = os.path.dirname(os.path.abspath(__file__))
# Check if the "tools" directory is present in the project_directory
if "tools" in project_directory:
# If the "tools" directory is present, move one level up to the parent directory
# Check if the "setup" directory is present in the project_directory
if "setup" in project_directory:
# If the "setup" directory is present, move one level up to the parent directory
project_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Add the project directory to the beginning of the Python search path
@ -22,7 +21,6 @@ from library.custom_logging import setup_logging
# Set up logging
log = setup_logging()
def check_torch():
# Check for nVidia toolkit or AMD toolkit
if shutil.which('nvidia-smi') is not None or os.path.exists(
@ -73,13 +71,8 @@ def check_torch():
sys.exit(1)
def install_requirements(requirements_file):
    """Install/upgrade all packages listed in *requirements_file* via pip."""
    log.info('Verifying requirements')
    # check=False: a failing install is reported by pip but does not abort.
    subprocess.run(f'"{sys.executable}" -m pip install -U -r "{requirements_file}"', shell=True, check=False, env=os.environ)
def main():
check_repo_version()
setup_common.check_repo_version()
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Validate that requirements are satisfied.'
@ -93,7 +86,15 @@ def main():
parser.add_argument('--debug', action='store_true', help='Debug on')
args = parser.parse_args()
install_requirements(args.requirements)
torch_ver = check_torch()
if args.requirements:
setup_common.install_requirements(args.requirements, check_no_verify_flag=True)
else:
if torch_ver == 1:
setup_common.install_requirements('requirements_windows_torch1.txt', check_no_verify_flag=True)
else:
setup_common.install_requirements('requirements_windows_torch2.txt', check_no_verify_flag=True)
if __name__ == '__main__':

661
setup_legacy.sh Executable file
View File

@ -0,0 +1,661 @@
#!/usr/bin/env bash
# This file will be the host environment setup file for all operating systems other than base Windows.
# Set the required package versions here.
TENSORFLOW_MACOS_VERSION="2.12.0"
TENSORFLOW_METAL_VERSION="0.8.0"
# Print the usage banner and option summary for this installer to stdout.
# NOTE: the heredoc body is user-facing output and must not be re-indented.
display_help() {
    cat <<EOF
Kohya_SS Installation Script for POSIX operating systems.
Usage:
# Specifies custom branch, install directory, and git repo
setup.sh -b dev -d /workspace/kohya_ss -g https://mycustom.repo.tld/custom_fork.git
# Same as example 1, but uses long options
setup.sh --branch=dev --dir=/workspace/kohya_ss --git-repo=https://mycustom.repo.tld/custom_fork.git
# Maximum verbosity, fully automated installation in a runpod environment skipping the runpod env checks
setup.sh -vvv --skip-space-check --runpod
Options:
-b BRANCH, --branch=BRANCH Select which branch of kohya to check out on new installs.
-d DIR, --dir=DIR The full path you want kohya_ss installed to.
-g REPO, --git_repo=REPO You can optionally provide a git repo to check out for runpod installation. Useful for custom forks.
-h, --help Show this screen.
-i, --interactive Interactively configure accelerate instead of using default config file.
-n, --no-git-update Do not update kohya_ss repo. No git pull or clone operations.
-p, --public Expose public URL in runpod mode. Won't have an effect in other modes.
-r, --runpod Forces a runpod installation. Useful if detection fails for any reason.
-s, --skip-space-check Skip the 10Gb minimum storage space check.
-u, --no-gui Skips launching the GUI.
-v, --verbose Increase verbosity levels up to 3.
EOF
}
# Checks to see if variable is set and non-empty.
# This is defined first, so we can use the function for some default variable values
# Succeed (exit status 0) when the variable whose NAME is passed as $1 is
# set to a non-empty value; fail (status 1) otherwise.
env_var_exists() {
    # ${!1} is indirect expansion: the value of the variable named by $1.
    # The [[ ]] test's own status is the function's return value.
    [[ -n "${!1}" ]]
}
# Need RUNPOD to have a default value before first access
RUNPOD=false
if env_var_exists RUNPOD_POD_ID || env_var_exists RUNPOD_API_KEY; then
    RUNPOD=true
fi

# This gets the directory the script is run from so pathing can work relative to the script where needed.
SCRIPT_DIR="$(cd -- $(dirname -- "$0") && pwd)"

# Variables defined before the getopts loop, so we have sane default values.
# Default installation locations based on OS and environment
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
    if [ "$RUNPOD" = true ]; then
        DIR="/workspace/kohya_ss"
    elif [ -d "$SCRIPT_DIR/.git" ]; then
        DIR="$SCRIPT_DIR"
    elif [ -w "/opt" ]; then
        DIR="/opt/kohya_ss"
    elif env_var_exists HOME; then
        DIR="$HOME/kohya_ss"
    else
        # The last fallback is simply PWD.
        # FIX: "$(PWD)" attempted to execute a command named PWD (which does
        # not exist), yielding an empty DIR; use the $PWD variable instead.
        DIR="$PWD"
    fi
else
    if [ -d "$SCRIPT_DIR/.git" ]; then
        DIR="$SCRIPT_DIR"
    elif env_var_exists HOME; then
        DIR="$HOME/kohya_ss"
    else
        # The last fallback is simply PWD (same FIX as above).
        DIR="$PWD"
    fi
fi

VERBOSITY=2    #Start counting at 2 so that any increase to this will result in a minimum of file descriptor 3. You should leave this alone.
MAXVERBOSITY=6 #The highest verbosity we use / allow to be displayed. Feel free to adjust.

BRANCH="master"
GIT_REPO="https://github.com/bmaltais/kohya_ss.git"
INTERACTIVE=false
PUBLIC=false
SKIP_SPACE_CHECK=false
# NOTE(review): git updates are skipped by default and -n can only set this
# to true as well — there is currently no flag that re-enables updates.
# Confirm this default is intentional.
SKIP_GIT_UPDATE=true
SKIP_GUI=false

while getopts ":vb:d:g:inprus-:" opt; do
    # support long options: https://stackoverflow.com/a/28466267/519360
    if [ "$opt" = "-" ]; then # long option: reformulate OPT and OPTARG
        opt="${OPTARG%%=*}"     # extract long option name
        OPTARG="${OPTARG#$opt}" # extract long option argument (may be empty)
        OPTARG="${OPTARG#=}"    # if long option argument, remove assigning `=`
    fi
    case $opt in
    b | branch) BRANCH="$OPTARG" ;;
    d | dir) DIR="$OPTARG" ;;
    g | git-repo) GIT_REPO="$OPTARG" ;;
    i | interactive) INTERACTIVE=true ;;
    n | no-git-update) SKIP_GIT_UPDATE=true ;;
    p | public) PUBLIC=true ;;
    r | runpod) RUNPOD=true ;;
    s | skip-space-check) SKIP_SPACE_CHECK=true ;;
    u | no-gui) SKIP_GUI=true ;;
    v) ((VERBOSITY = VERBOSITY + 1)) ;;
    h) display_help && exit 0 ;;
    *) display_help && exit 0 ;;
    esac
done
shift $((OPTIND - 1))

# Just in case someone puts in a relative path into $DIR,
# we're going to get the absolute path of that.
if [[ "$DIR" != /* ]] && [[ "$DIR" != ~* ]]; then
    DIR="$(
        cd "$(dirname "$DIR")" || exit 1
        pwd
    )/$(basename "$DIR")"
fi

for v in $( #Start counting from 3 since 1 and 2 are standards (stdout/stderr).
    seq 3 $VERBOSITY
); do
    (("$v" <= "$MAXVERBOSITY")) && eval exec "$v>&2" #Don't change anything higher than the maximum verbosity allowed.
done

for v in $( #From the verbosity level one higher than requested, through the maximum;
    seq $((VERBOSITY + 1)) $MAXVERBOSITY
); do
    (("$v" > "2")) && eval exec "$v>/dev/null" #Redirect these to bitbucket, provided that they don't match stdout and stderr.
done

# Example of how to use the verbosity levels.
# printf "%s\n" "This message is seen at verbosity level 1 and above." >&3
# printf "%s\n" "This message is seen at verbosity level 2 and above." >&4
# printf "%s\n" "This message is seen at verbosity level 3 and above." >&5

# Debug variable dump at max verbosity
echo "BRANCH: $BRANCH
DIR: $DIR
GIT_REPO: $GIT_REPO
INTERACTIVE: $INTERACTIVE
PUBLIC: $PUBLIC
RUNPOD: $RUNPOD
SKIP_SPACE_CHECK: $SKIP_SPACE_CHECK
VERBOSITY: $VERBOSITY
Script directory is ${SCRIPT_DIR}." >&5

# This must be set after the getopts loop to account for $DIR changes.
PARENT_DIR="$(dirname "${DIR}")"
VENV_DIR="$DIR/venv"

if [ -w "$PARENT_DIR" ] && [ ! -d "$DIR" ]; then
    echo "Creating install folder ${DIR}."
    mkdir "$DIR"
fi

if [ ! -w "$DIR" ]; then
    echo "We cannot write to ${DIR}."
    echo "Please ensure the install directory is accurate and you have the correct permissions."
    exit 1
fi
# Shared functions
# This checks for free space on the installation drive and returns that in Gb.
size_available() {
    # Echo the free space (whole GB) on the drive that will hold the install.
    local folder
    if [ -d "$DIR" ]; then
        folder="$DIR"
    elif [ -d "$PARENT_DIR" ]; then
        folder="$PARENT_DIR"
    elif [ -d "$(echo "$DIR" | cut -d "/" -f2)" ]; then
        # Fall back to the first path component of $DIR.
        folder="$(echo "$DIR" | cut -d "/" -f2)"
    else
        echo "We are assuming a root drive install for space-checking purposes."
        folder='/'
    fi
    local FREESPACEINKB
    # df -Pk gives POSIX output in 1K blocks; column 4 is available space.
    FREESPACEINKB="$(df -Pk "$folder" | sed 1d | grep -v used | awk '{ print $4 "\t" }')"
    echo "Detected available space in Kb: $FREESPACEINKB" >&5
    local FREESPACEINGB
    FREESPACEINGB=$((FREESPACEINKB / 1024 / 1024))
    echo "$FREESPACEINGB"
}
# The expected usage is create_symlinks symlink target_file
create_symlinks() {
    # Usage: create_symlinks SYMLINK TARGET_FILE
    # Creates SYMLINK pointing at TARGET_FILE, recreating it when it exists
    # but is broken or points elsewhere.
    local symlink="$1"
    local target_file="$2"
    echo "Checking symlinks now."
    # Check if the symlink exists
    if [ -L "$symlink" ]; then
        # Check if the linked file exists and points to the expected file
        if [ -e "$symlink" ] && [ "$(readlink "$symlink")" == "$target_file" ]; then
            echo "$(basename "$symlink") symlink looks fine. Skipping."
        else
            if [ -f "$target_file" ]; then
                echo "Broken symlink detected. Recreating $(basename "$symlink")."
                rm "$symlink" && ln -s "$target_file" "$symlink"
            else
                echo "$target_file does not exist. Nothing to link."
            fi
        fi
    else
        echo "Linking $(basename "$symlink")."
        ln -s "$target_file" "$symlink"
    fi
}
# Create/activate the project venv (outside docker), install the platform's
# torch stack, then install the requirements file with `.` rewritten to $DIR.
install_python_dependencies() {
    local TEMP_REQUIREMENTS_FILE
    # Switch to local virtual env
    echo "Switching to virtual Python environment."
    if ! inDocker; then
        if command -v python3.10 >/dev/null; then
            python3.10 -m venv "$DIR/venv"
        elif command -v python3 >/dev/null; then
            python3 -m venv "$DIR/venv"
        else
            echo "Valid python3 or python3.10 binary not found."
            echo "Cannot proceed with the python steps."
            return 1
        fi
        # Activate the virtual environment
        source "$DIR/venv/bin/activate"
    fi
    # Updating pip if there is one
    echo "Checking for pip updates before Python operations."
    pip install --upgrade pip
    echo "Installing python dependencies. This could take a few minutes as it downloads files."
    echo "If this operation ever runs too long, you can rerun this script in verbose mode to check."
    case "$OSTYPE" in
    "linux-gnu"*)
        pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 \
            --extra-index-url https://download.pytorch.org/whl/cu118
        pip install --upgrade xformers==0.0.20
        ;;
    "darwin"*)
        pip install torch==2.0.0 torchvision==0.15.1 \
            -f https://download.pytorch.org/whl/cpu/torch_stable.html
        # Check if the processor is Apple Silicon (arm64)
        # FIX: the version variables were swapped, pinning tensorflow-metal
        # to the macOS TF version (2.12.0) and tensorflow-macos to the metal
        # plugin version (0.8.0). Each package now uses its own constant.
        if [[ "$(uname -m)" == "arm64" ]]; then
            pip install tensorflow-metal=="$TENSORFLOW_METAL_VERSION"
        else
            pip install tensorflow-macos=="$TENSORFLOW_MACOS_VERSION"
        fi
        ;;
    esac
    if [ "$RUNPOD" = true ]; then
        # FIX: corrected "tenssort" typo in the user-facing message.
        echo "Installing tensorrt."
        pip install tensorrt
    fi
    # DEBUG ONLY (Update this version number to whatever PyCharm recommends)
    # pip install pydevd-pycharm~=223.8836.43

    # Create a temporary requirements file
    TEMP_REQUIREMENTS_FILE=$(mktemp)
    if [[ "$OSTYPE" == "darwin"* ]]; then
        echo "Copying $DIR/requirements_macos.txt to $TEMP_REQUIREMENTS_FILE" >&3
        echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3
        awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_macos.txt" >"$TEMP_REQUIREMENTS_FILE"
    else
        echo "Copying $DIR/requirements_linux.txt to $TEMP_REQUIREMENTS_FILE" >&3
        echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3
        awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_linux.txt" >"$TEMP_REQUIREMENTS_FILE"
    fi
    # Install the Python dependencies from the temporary requirements file
    if [ $VERBOSITY == 2 ]; then
        python -m pip install --quiet --upgrade -r "$TEMP_REQUIREMENTS_FILE"
    else
        python -m pip install --upgrade -r "$TEMP_REQUIREMENTS_FILE"
    fi
    if [ -n "$VIRTUAL_ENV" ] && ! inDocker; then
        if command -v deactivate >/dev/null; then
            echo "Exiting Python virtual environment."
            deactivate
        else
            echo "deactivate command not found. Could still be in the Python virtual environment."
        fi
    fi
}
# Attempt to non-interactively install a default accelerate config file unless specified otherwise.
# Documentation for order of precedence locations for configuration file for automated installation:
# https://huggingface.co/docs/accelerate/basic_tutorials/launch#custom-configurations
configure_accelerate() {
    # Non-interactively place the default accelerate config, honoring the
    # documented precedence of config locations (HF_HOME, XDG_CACHE_HOME,
    # then ~/.cache):
    # https://huggingface.co/docs/accelerate/basic_tutorials/launch#custom-configurations
    echo "Source accelerate config location: $DIR/config_files/accelerate/default_config.yaml" >&3
    if [ "$INTERACTIVE" = true ]; then
        accelerate config
    else
        if env_var_exists HF_HOME; then
            if [ ! -f "$HF_HOME/accelerate/default_config.yaml" ]; then
                mkdir -p "$HF_HOME/accelerate/" &&
                    echo "Target accelerate config location: $HF_HOME/accelerate/default_config.yaml" >&3
                cp "$DIR/config_files/accelerate/default_config.yaml" "$HF_HOME/accelerate/default_config.yaml" &&
                    echo "Copied accelerate config file to: $HF_HOME/accelerate/default_config.yaml"
            fi
        elif env_var_exists XDG_CACHE_HOME; then
            # FIX: test the config *file* (the old code ran -f against the
            # directory path), and report the real target location (the old
            # debug message omitted the /huggingface/ component).
            if [ ! -f "$XDG_CACHE_HOME/huggingface/accelerate/default_config.yaml" ]; then
                mkdir -p "$XDG_CACHE_HOME/huggingface/accelerate" &&
                    echo "Target accelerate config location: $XDG_CACHE_HOME/huggingface/accelerate/default_config.yaml" >&3
                cp "$DIR/config_files/accelerate/default_config.yaml" "$XDG_CACHE_HOME/huggingface/accelerate/default_config.yaml" &&
                    echo "Copied accelerate config file to: $XDG_CACHE_HOME/huggingface/accelerate/default_config.yaml"
            fi
        elif env_var_exists HOME; then
            # FIX: same here — check the destination file itself so a missing
            # config is actually created and an existing one left alone.
            if [ ! -f "$HOME/.cache/huggingface/accelerate/default_config.yaml" ]; then
                mkdir -p "$HOME/.cache/huggingface/accelerate" &&
                    echo "Target accelerate config location: $HOME/.cache/huggingface/accelerate/default_config.yaml" >&3
                cp "$DIR/config_files/accelerate/default_config.yaml" "$HOME/.cache/huggingface/accelerate/default_config.yaml" &&
                    echo "Copying accelerate config file to: $HOME/.cache/huggingface/accelerate/default_config.yaml"
            fi
        else
            echo "Could not place the accelerate configuration file. Please configure manually."
            sleep 2
            accelerate config
        fi
    fi
}
# Offer a warning and opportunity to cancel the installation if < 10Gb of Free Space detected
check_storage_space() {
    # Warn (with a 10 second cancel window) when less than 10Gb is free,
    # unless the user passed --skip-space-check.
    if [ "$SKIP_SPACE_CHECK" = false ]; then
        if [ "$(size_available)" -lt 10 ]; then
            echo "You have less than 10Gb of free space. This installation may fail."
            MSGTIMEOUT=10 # In seconds
            MESSAGE="Continuing in..."
            echo "Press control-c to cancel the installation."
            # Countdown gives the user a chance to abort before continuing.
            for ((i = MSGTIMEOUT; i >= 0; i--)); do
                printf "\r${MESSAGE} %ss. " "${i}"
                sleep 1
            done
        fi
    fi
}
isContainerOrPod() {
    # PID 1's cgroup mentions docker or kubepods inside containers/pods.
    local cgroup=/proc/1/cgroup
    test -f $cgroup && (grep -qE ':cpuset:/(docker|kubepods)' $cgroup || grep -q ':/docker/' $cgroup)
}
isDockerBuildkit() {
    # Detects a docker buildkit build environment via PID 1's cgroup.
    local cgroup=/proc/1/cgroup
    test -f $cgroup && grep -q ':cpuset:/docker/buildkit' $cgroup
}
isDockerContainer() {
    # Docker creates /.dockerenv at the container filesystem root.
    [ -e /.dockerenv ]
}
# True (exit 0) when any of the container heuristics match: PID 1 cgroups
# showing docker/kubepods, a buildkit cgroup, or the /.dockerenv marker.
inDocker() {
    # The ||-chain's status is the function's return value.
    isContainerOrPod || isDockerBuildkit || isDockerContainer
}
# These are the git operations that will run to update or clone the repo
# Clone or update the kohya_ss repo in $DIR unless git operations are skipped.
update_kohya_ss() {
    if [ "$SKIP_GIT_UPDATE" = false ]; then
        if command -v git >/dev/null; then
            # First, we make sure there are no changes that need to be made in git, so no work is lost.
            # NOTE(review): the && chain prints the dirty-tree debug output as
            # part of evaluating the condition itself.
            if [ "$(git -C "$DIR" status --porcelain=v1 2>/dev/null | wc -l)" -gt 0 ] &&
                echo "These files need to be committed or discarded: " >&4 &&
                git -C "$DIR" status >&4; then
                echo "There are changes that need to be committed or discarded in the repo in $DIR."
                echo "Commit those changes or run this script with -n to skip git operations entirely."
                exit 1
            fi
            echo "Attempting to clone $GIT_REPO."
            if [ ! -d "$DIR/.git" ]; then
                # Fresh install: clone the requested branch into $DIR.
                echo "Cloning and switching to $GIT_REPO:$BRANCH" >&4
                git -C "$PARENT_DIR" clone -b "$BRANCH" "$GIT_REPO" "$(basename "$DIR")" >&3
                git -C "$DIR" switch "$BRANCH" >&4
            else
                echo "git repo detected. Attempting to update repository instead."
                echo "Updating: $GIT_REPO"
                git -C "$DIR" pull "$GIT_REPO" "$BRANCH" >&3
                # Create the branch locally when switching to it fails.
                if ! git -C "$DIR" switch "$BRANCH" >&4; then
                    echo "Branch $BRANCH did not exist. Creating it." >&4
                    git -C "$DIR" switch -c "$BRANCH" >&4
                fi
            fi
        else
            echo "You need to install git."
            echo "Rerun this after installing git or run this script with -n to skip the git operations."
        fi
    else
        echo "Skipping git operations."
    fi
}
# Start OS-specific detection and work
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
# Check if root or sudo
root=false
if [ "$EUID" = 0 ]; then
root=true
elif command -v id >/dev/null && [ "$(id -u)" = 0 ]; then
root=true
elif [ "$UID" = 0 ]; then
root=true
fi
get_distro_name() {
    # Echo the distro ID (e.g. "ubuntu") from /etc/os-release, falling back
    # to `python -mplatform` output, or "None" (status 1) when undetectable.
    local line
    if [ -f /etc/os-release ]; then
        # We search for the line starting with ID=
        # Then we remove the ID= prefix to get the name itself
        line="$(grep -Ei '^ID=' /etc/os-release)"
        echo "Raw detected os-release distro line: $line" >&5
        line=${line##*=}
        echo "$line"
        return 0
    elif command -v python >/dev/null; then
        line="$(python -mplatform)"
        echo "$line"
        return 0
    elif command -v python3 >/dev/null; then
        line="$(python3 -mplatform)"
        echo "$line"
        return 0
    else
        line="None"
        echo "$line"
        return 1
    fi
}
# We search for the line starting with ID_LIKE=
# Then we remove the ID_LIKE= prefix to get the name itself
# This is the "type" of distro. For example, Ubuntu returns "debian".
get_distro_family() {
    # Echo the distro family from ID_LIKE= in /etc/os-release (e.g. Ubuntu
    # reports "debian"); echo "None" and return 1 when undetectable.
    local line
    if [ -f /etc/os-release ]; then
        if grep -Eiq '^ID_LIKE=' /etc/os-release >/dev/null; then
            line="$(grep -Ei '^ID_LIKE=' /etc/os-release)"
            echo "Raw detected os-release distro family line: $line" >&5
            line=${line##*=}
            echo "$line"
            return 0
        else
            line="None"
            echo "$line"
            return 1
        fi
    else
        line="None"
        echo "$line"
        return 1
    fi
}
check_storage_space
update_kohya_ss

# FIX: command substitution is required here — the old `distro=get_distro_name`
# assigned the literal function name instead of its output.
distro=$(get_distro_name)
family=$(get_distro_family)
echo "Raw detected distro string: $distro" >&4
echo "Raw detected distro family string: $family" >&4

# FIX: the old `"$distro" | grep ...` tried to *execute* the distro name as a
# command; echo the value into grep instead.
if echo "$distro" | grep -qi "Ubuntu" || echo "$family" | grep -qi "Ubuntu"; then
    echo "Ubuntu detected."
    if [ $(dpkg-query -W -f='${Status}' python3-tk 2>/dev/null | grep -c "ok installed") = 0 ]; then
        echo "This script needs you to install the missing python3-tk packages. Please install with:"
        echo " "
        echo "sudo apt update -y && sudo apt install -y python3-tk"
        exit 1
    else
        echo "Python TK found..."
    fi
elif echo "$distro" | grep -Eqi "Fedora|CentOS|Redhat"; then
    echo "Redhat or Redhat base detected."
    if ! rpm -qa | grep -qi python3-tkinter; then
        echo "This script needs you to install the missing python3-tk packages. Please install with:\n\n"
        echo "sudo dnf install python3-tkinter -y >&3"
        exit 1
    else
        echo "Python TK found..."
    fi
elif echo "$distro" | grep -Eqi "arch" || echo "$family" | grep -qi "arch"; then
    echo "Arch Linux or Arch base detected."
    if ! pacman -Qi tk >/dev/null; then
        echo "This script needs you to install the missing python3-tk packages. Please install with:\n\n"
        echo "pacman --noconfirm -S tk >&3"
        exit 1
    else
        echo "Python TK found..."
    fi
elif echo "$distro" | grep -Eqi "opensuse" || echo "$family" | grep -qi "opensuse"; then
    echo "OpenSUSE detected."
    if ! rpm -qa | grep -qi python-tk; then
        echo "This script needs you to install the missing python3-tk packages. Please install with:\n\n"
        echo "zypper install -y python-tk >&3"
        exit 1
    else
        echo "Python TK found..."
    fi
elif [ "$distro" = "None" ] || [ "$family" = "None" ]; then
    if [ "$distro" = "None" ]; then
        echo "We could not detect your distribution of Linux. Please file a bug report on github with the contents of your /etc/os-release file."
    fi
    if [ "$family" = "None" ]; then
        echo "We could not detect the family of your Linux distribution. Please file a bug report on github with the contents of your /etc/os-release file."
    fi
fi
install_python_dependencies

# We need just a little bit more setup for non-interactive environments
if [ "$RUNPOD" = true ]; then
    if inDocker; then
        # We get the site-packages from python itself, then cut the string, so no other code changes required.
        VENV_DIR=$(python -c "import site; print(site.getsitepackages()[0])")
        VENV_DIR="${VENV_DIR%/lib/python3.10/site-packages}"
    fi
    # Symlink paths
    libnvinfer_plugin_symlink="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer_plugin.so.7"
    libnvinfer_symlink="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer.so.7"
    libcudart_symlink="$VENV_DIR/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/libcudart.so.11.0"
    # Target file paths
    libnvinfer_plugin_target="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer_plugin.so.8"
    libnvinfer_target="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer.so.8"
    libcudart_target="$VENV_DIR/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12"
    echo "Checking symlinks now."
    create_symlinks "$libnvinfer_plugin_symlink" "$libnvinfer_plugin_target"
    create_symlinks "$libnvinfer_symlink" "$libnvinfer_target"
    create_symlinks "$libcudart_symlink" "$libcudart_target"
    if [ -d "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" ]; then
        export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${VENV_DIR}/lib/python3.10/site-packages/tensorrt/"
    else
        echo "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/ not found; not linking library."
    fi
    # FIX: this branch appends the cuda_runtime lib directory to
    # LD_LIBRARY_PATH, so it must test *that* directory — the old code
    # re-tested the tensorrt directory.
    if [ -d "${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/" ]; then
        export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/"
    else
        echo "${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/ not found; not linking library."
    fi
    configure_accelerate
    # This is a non-interactive environment, so just directly call gui.sh after all setup steps are complete.
    if [ "$SKIP_GUI" = false ]; then
        if command -v bash >/dev/null; then
            if [ "$PUBLIC" = false ]; then
                bash "$DIR"/gui.sh
                exit 0
            else
                bash "$DIR"/gui.sh --share
                exit 0
            fi
        else
            # This shouldn't happen, but we're going to try to help.
            if [ "$PUBLIC" = false ]; then
                sh "$DIR"/gui.sh
                exit 0
            else
                sh "$DIR"/gui.sh --share
                exit 0
            fi
        fi
    fi
fi
echo -e "Setup finished! Run \e[0;92m./gui.sh\e[0m to start."
echo "Please note if you'd like to expose your public server you need to run ./gui.sh --share"
# macOS setup branch.
elif [[ "$OSTYPE" == "darwin"* ]]; then
# The initial setup script to prep the environment on macOS
# xformers has been omitted as that is for Nvidia GPUs only
# Homebrew is required: python@3.10 and python-tk@3.10 are installed via brew below.
if ! command -v brew >/dev/null; then
echo "Please install homebrew first. This is a requirement for the remaining setup."
echo "You can find that here: https://brew.sh"
#shellcheck disable=SC2016
echo 'The "brew" command should be in $PATH to be detected.'
exit 1
fi
check_storage_space
# Install base python packages
echo "Installing Python 3.10 if not found."
if ! brew ls --versions python@3.10 >/dev/null; then
echo "Installing Python 3.10."
# NOTE(review): >&3 presumably routes brew output to the script's display
# stream — confirm fd 3 is opened earlier in the file.
brew install python@3.10 >&3
else
echo "Python 3.10 found!"
fi
echo "Installing Python-TK 3.10 if not found."
if ! brew ls --versions python-tk@3.10 >/dev/null; then
echo "Installing Python TK 3.10."
brew install python-tk@3.10 >&3
else
echo "Python Tkinter 3.10 found!"
fi
update_kohya_ss
# install_python_dependencies returns non-zero when the python env is unusable.
if ! install_python_dependencies; then
echo "You may need to install Python. The command for this is brew install python@3.10."
fi
configure_accelerate
echo -e "Setup finished! Run ./gui.sh to start."
elif [[ "$OSTYPE" == "cygwin" ]]; then
# Cygwin is a standalone suite of Linux utilities on Windows
echo "This hasn't been validated on cygwin yet."
elif [[ "$OSTYPE" == "msys" ]]; then
# MinGW has the msys environment which is a standalone suite of Linux utilities on Windows
# "git bash" on Windows may also be detected as msys.
echo "This hasn't been validated in msys (mingw) on Windows yet."
fi

View File

@ -0,0 +1,73 @@
{
"adaptive_noise_scale": 0,
"additional_parameters": "",
"bucket_no_upscale": true,
"bucket_reso_steps": 64,
"cache_latents": true,
"cache_latents_to_disk": false,
"caption_dropout_every_n_epochs": 0.0,
"caption_dropout_rate": 0.05,
"caption_extension": "",
"clip_skip": 2,
"color_aug": false,
"enable_bucket": true,
"epoch": 1,
"flip_aug": false,
"full_fp16": false,
"gradient_accumulation_steps": 1.0,
"gradient_checkpointing": false,
"keep_tokens": "0",
"learning_rate": 1.0,
"logging_dir": "./test/logs",
"lr_scheduler": "constant",
"lr_warmup": 0,
"max_data_loader_n_workers": "0",
"max_resolution": "512,512",
"max_token_length": "75",
"max_train_epochs": "",
"mem_eff_attn": false,
"min_snr_gamma": 0,
"mixed_precision": "bf16",
"model_list": "runwayml/stable-diffusion-v1-5",
"multires_noise_discount": 0,
"multires_noise_iterations": 0,
"no_token_padding": false,
"noise_offset": "0.05",
"noise_offset_type": "Original",
"num_cpu_threads_per_process": 2,
"optimizer": "DAdaptation",
"optimizer_args": "",
"output_dir": "./test/output",
"output_name": "db",
"persistent_data_loader_workers": false,
"pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5",
"prior_loss_weight": 1.0,
"random_crop": false,
"reg_data_dir": "",
"resume": "",
"sample_every_n_epochs": 0,
"sample_every_n_steps": 25,
"sample_prompts": "a painting of a gas mask , by darius kawasaki",
"sample_sampler": "euler_a",
"save_every_n_epochs": 1,
"save_every_n_steps": 0,
"save_last_n_steps": 0,
"save_last_n_steps_state": 0,
"save_model_as": "safetensors",
"save_precision": "fp16",
"save_state": false,
"scale_v_pred_loss_like_noise_pred": false,
"seed": "1234",
"shuffle_caption": false,
"stop_text_encoder_training": 0,
"train_batch_size": 4,
"train_data_dir": "./test/img",
"use_wandb": false,
"v2": false,
"v_parameterization": false,
"vae": "",
"vae_batch_size": 0,
"wandb_api_key": "",
"weighted_captions": false,
"xformers": true
}

View File

@ -1,26 +0,0 @@
import filecmp
import os
import shutil
import sys
import sysconfig

# Copy the bundled cuDNN 8.6 DLLs over the ones shipped with torch (Windows only).
if os.name == "nt":
    python = sys.executable
    # Raw string for the Windows path fragment; the original "..\cudnn_windows"
    # only worked because \c happens not to be a recognized escape sequence.
    cudnn_src = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), r"..\cudnn_windows"
    )
    cudnn_dest = os.path.join(sysconfig.get_paths()["purelib"], "torch", "lib")

    print(f"Checking for CUDNN files in {cudnn_dest}")
    if os.path.exists(cudnn_src):
        if os.path.exists(cudnn_dest):
            # Check for "different" files and copy only if necessary.
            filecmp.clear_cache()
            for file in os.listdir(cudnn_src):
                src_file = os.path.join(cudnn_src, file)
                dest_file = os.path.join(cudnn_dest, file)
                # Only overwrite files torch already ships, and only when they
                # actually differ — the original cleared the filecmp cache but
                # never compared, so it recopied identical files on every run.
                if os.path.exists(dest_file) and not filecmp.cmp(
                    src_file, dest_file, shallow=False
                ):
                    shutil.copy2(src_file, cudnn_dest)
            print("Copied CUDNN 8.6 files to destination")
    else:
        print(f"Installation Failed: \"{cudnn_src}\" could not be found. ")

View File

@ -20,7 +20,7 @@ class ImageProcessor:
self.pad = pad
self.caption = caption
self.caption_ext = caption_ext
self.image_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.webp')
self.image_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.webp', '.tiff')
def get_image_paths(self):
images = []

View File

@ -1,122 +0,0 @@
import os
import re
import sys
import shutil
import argparse

from setup_windows import install, check_repo_version

# Absolute path of the directory containing this file (the Kohya_SS project
# directory, or its "tools" subdirectory when the script lives there).
project_directory = os.path.dirname(os.path.abspath(__file__))

# If this file sits inside the "tools" directory, move one level up to the
# project root. Compare the final path component, not a substring: the
# original `"tools" in project_directory` also matched unrelated paths such
# as /home/tools-user/kohya_ss.
if os.path.basename(project_directory) == "tools":
    project_directory = os.path.dirname(project_directory)

# Put the project root first on the Python search path so `library` resolves.
sys.path.insert(0, project_directory)

from library.custom_logging import setup_logging

# Set up logging
log = setup_logging()
def check_torch():
    """Detect the GPU toolkit, import torch, log backend/GPU details, and
    return torch's major version (e.g. ``1`` or ``2``).

    Exits the process with status 1 if torch cannot be imported.
    """
    # Check for an nVidia toolkit (nvidia-smi) or an AMD toolkit (rocminfo).
    if shutil.which('nvidia-smi') is not None or os.path.exists(
        os.path.join(
            os.environ.get('SystemRoot') or r'C:\Windows',
            'System32',
            'nvidia-smi.exe',
        )
    ):
        log.info('nVidia toolkit detected')
    elif shutil.which('rocminfo') is not None or os.path.exists(
        '/opt/rocm/bin/rocminfo'
    ):
        log.info('AMD toolkit detected')
    else:
        log.info('Using CPU-only Torch')

    try:
        import torch

        log.info(f'Torch {torch.__version__}')

        # Check if CUDA is available
        if not torch.cuda.is_available():
            log.warning('Torch reports CUDA not available')
        else:
            if torch.version.cuda:
                # Log nVidia CUDA and cuDNN versions
                log.info(
                    f'Torch backend: nVidia CUDA {torch.version.cuda} cuDNN {torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else "N/A"}'
                )
            elif torch.version.hip:
                # Log AMD ROCm HIP version
                log.info(f'Torch backend: AMD ROCm HIP {torch.version.hip}')
            else:
                log.warning('Unknown Torch backend')

            # Log information about detected GPUs
            for device in [
                torch.cuda.device(i) for i in range(torch.cuda.device_count())
            ]:
                log.info(
                    f'Torch detected GPU: {torch.cuda.get_device_name(device)} VRAM {round(torch.cuda.get_device_properties(device).total_memory / 1024 / 1024)} Arch {torch.cuda.get_device_capability(device)} Cores {torch.cuda.get_device_properties(device).multi_processor_count}'
                )

        # Parse the major version robustly: the original `__version__[0]`
        # would break for a two-digit major and ignores local suffixes
        # like '2.0.1+cu118'.
        return int(torch.__version__.split('.')[0])
    except Exception as e:
        log.error(f'Could not load torch: {e}')
        sys.exit(1)
def install_requirements(requirements_file):
    """Install every package listed in *requirements_file*.

    Blank lines, ``#`` comments and local-path lines (starting with ``.``)
    are skipped. Bracketed extras are stripped from the package name passed
    to ``install`` (e.g. ``diffusers[torch]==0.10.2`` -> ``diffusers==0.10.2``).
    """
    log.info('Verifying requirements')
    with open(requirements_file, 'r', encoding='utf8') as f:
        # readlines() never yields None, so the original's extra
        # `line is not None` test was redundant and has been dropped.
        lines = [
            line.strip()
            for line in f.readlines()
            if line.strip() != ''
            and not line.startswith('#')
            and not line.startswith('.')
        ]

    # Install each requirement individually so failures surface per line.
    for line in lines:
        # Remove brackets and their contents from the display/package name
        # while keeping the full spec for installation.
        package_name = re.sub(r'\[.*?\]', '', line)
        install(line, package_name)
def main():
    """Entry point: pick a requirements file (explicit, or chosen by the
    installed torch major version) and install everything it lists."""
    check_repo_version()

    # Parse command line arguments
    arg_parser = argparse.ArgumentParser(
        description='Validate that requirements are satisfied.'
    )
    arg_parser.add_argument(
        '-r',
        '--requirements',
        type=str,
        help='Path to the requirements file.',
    )
    arg_parser.add_argument('--debug', action='store_true', help='Debug on')
    opts = arg_parser.parse_args()

    if opts.requirements:
        # An explicitly supplied requirements file always wins.
        install_requirements(opts.requirements)
    else:
        # Otherwise choose the file matching the installed torch version.
        chosen = (
            'requirements_windows_torch1.txt'
            if check_torch() == 1
            else 'requirements_windows_torch2.txt'
        )
        install_requirements(chosen)


if __name__ == '__main__':
    main()

View File

@ -13,4 +13,4 @@ git pull
call .\venv\Scripts\activate.bat
:: Validate requirements
python.exe .\tools\validate_requirements.py
python.exe .\setup\validate_requirements.py

View File

@ -1,16 +0,0 @@
#!/bin/bash
# Upgrade helper: refuse to run with uncommitted changes, pull the latest
# code, then upgrade the venv's packages to match requirements_unix.txt.

# Check if there are any changes that need to be committed
# (the grep matches porcelain status lines other than untracked '??' entries).
if git status --short | grep -q "^[^ ?][^?]*"; then
echo "There are changes that need to be committed. Please stash or undo your changes before running this script."
exit 1
fi
# Pull the latest changes from the remote repository
git pull
# Activate the virtual environment
source venv/bin/activate
# Upgrade the required packages
# --use-pep517 forces modern build isolation for source distributions.
pip install --use-pep517 --upgrade -r requirements_unix.txt