fix(ui): set prefill text to empty by default

pull/4448/head
CalamitousFelicitousness 2025-12-04 00:42:24 +00:00
parent 0d88fcd396
commit 4df6aa7944
1 changed file with 1 addition and 1 deletion

View File

@ -73,7 +73,7 @@ def create_ui():
vlm_keep_thinking = gr.Checkbox(label='Keep Thinking Trace', value=shared.opts.interrogate_vlm_keep_thinking, elem_id='vlm_keep_thinking')
vlm_keep_prefill = gr.Checkbox(label='Keep Prefill', value=shared.opts.interrogate_vlm_keep_prefill, elem_id='vlm_keep_prefill')
with gr.Row():
vlm_prefill = gr.Textbox(label='Prefill Text', value=vqa.vlm_prefill, lines=1, elem_id='vlm_prefill', placeholder='Optional prefill text for model to continue from')
vlm_prefill = gr.Textbox(label='Prefill Text', value='', lines=1, elem_id='vlm_prefill', placeholder='Optional prefill text for model to continue from')
vlm_max_tokens.change(fn=update_vlm_params, inputs=[vlm_max_tokens, vlm_num_beams, vlm_temperature, vlm_do_sample, vlm_top_k, vlm_top_p, vlm_keep_prefill, vlm_keep_thinking], outputs=[])
vlm_num_beams.change(fn=update_vlm_params, inputs=[vlm_max_tokens, vlm_num_beams, vlm_temperature, vlm_do_sample, vlm_top_k, vlm_top_p, vlm_keep_prefill, vlm_keep_thinking], outputs=[])
vlm_temperature.change(fn=update_vlm_params, inputs=[vlm_max_tokens, vlm_num_beams, vlm_temperature, vlm_do_sample, vlm_top_k, vlm_top_p, vlm_keep_prefill, vlm_keep_thinking], outputs=[])