mirror of https://github.com/vladmandic/automatic
prompt-enhance add qwen3
Signed-off-by: Vladimir Mandic <mandic00@live.com>pull/3914/head
parent
7a9c3e3bee
commit
8b0f5f687a
|
|
@ -1,5 +1,10 @@
|
|||
# Change Log for SD.Next
|
||||
|
||||
## Update for 2025-04-28
|
||||
|
||||
- Prompt-Enhance: add **Qwen3** 0.6B/1.7B/4B models
|
||||
- Docker: pre-install `ffmpeg`
|
||||
|
||||
## Highlights for 2025-04-28
|
||||
|
||||
Another major release with *over 120 commits*!
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ LABEL org.opencontainers.image.version="latest"
|
|||
|
||||
# minimum install
|
||||
RUN ["apt-get", "-y", "update"]
|
||||
RUN ["apt-get", "-y", "install", "git", "build-essential", "google-perftools", "curl"]
|
||||
RUN ["apt-get", "-y", "install", "git", "build-essential", "google-perftools", "curl", "ffmpeg"]
|
||||
# optional if full cuda-dev is required by some downstream library
|
||||
# RUN ["apt-get", "-y", "nvidia-cuda-toolkit"]
|
||||
RUN ["/usr/sbin/ldconfig"]
|
||||
|
|
|
|||
|
|
@ -16,6 +16,12 @@ class Options:
|
|||
models = {
|
||||
'google/gemma-3-1b-it': {},
|
||||
'google/gemma-3-4b-it': {},
|
||||
'Qwen/Qwen3-0.6B-FP8': {},
|
||||
'Qwen/Qwen3-1.7B-FP8': {},
|
||||
'Qwen/Qwen3-4B-FP8': {},
|
||||
'Qwen/Qwen3-0.6B': {},
|
||||
'Qwen/Qwen3-1.7B': {},
|
||||
'Qwen/Qwen3-4B': {},
|
||||
'Qwen/Qwen2.5-0.5B-Instruct': {},
|
||||
'Qwen/Qwen2.5-1.5B-Instruct': {},
|
||||
'Qwen/Qwen2.5-3B-Instruct': {},
|
||||
|
|
@ -223,6 +229,7 @@ class Script(scripts.Script):
|
|||
inputs = self.tokenizer.apply_chat_template(
|
||||
chat_template,
|
||||
add_generation_prompt=True,
|
||||
enable_thinking=False,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
|
|
|
|||
Loading…
Reference in New Issue