diff --git a/iz_helpers/helpers.py b/iz_helpers/helpers.py index aff0835..92e8840 100644 --- a/iz_helpers/helpers.py +++ b/iz_helpers/helpers.py @@ -102,6 +102,10 @@ def validatePromptJson_throws(data): schema = json.load(s) validate(instance=data, schema=schema) +def recalcPromptKeys(data): + prompts_keys = process_keys(data) + return prompts_keys[0] + def putPrompts(files): try: with open(files.name, "r") as f: diff --git a/iz_helpers/prompt_util.py b/iz_helpers/prompt_util.py index 2ad6892..d3022bf 100644 --- a/iz_helpers/prompt_util.py +++ b/iz_helpers/prompt_util.py @@ -99,7 +99,7 @@ def completeOptionals(j): j["lastFrames"]= 0 if "blendMode" not in j: - j["blendMode"]= "None" + j["blendMode"]= "Not Used" if "blendColor" not in j: j["blendColor"]= "#ffff00" diff --git a/iz_helpers/run.py b/iz_helpers/run.py index 3e9ffea..65c32ee 100644 --- a/iz_helpers/run.py +++ b/iz_helpers/run.py @@ -127,7 +127,7 @@ class InfZoomer: processed = self.fnOutpaintMainFrames() - if self.C.lut_filename is not None: + if self.C.lut_filename != "": try: #processed = apply_lut(processed, self.C.lut_filename) self.main_frames = [apply_lut(frame, self.C.lut_filename) for frame in self.main_frames] @@ -137,7 +137,7 @@ class InfZoomer: #trim frames that are blended or luma wiped self.start_frames = self.main_frames[:2] self.end_frames = self.main_frames[(len(self.main_frames) - 2):] - if (self.C.blend_mode != 0): + if (self.C.blend_mode != 0) and ((int(self.C.video_start_frame_dupe_amount) > 0) or (int(self.C.video_last_frame_dupe_amount) > 0)): #trim first and last frames only from main_frames, store 2 frames in each start_frames and end_frames for blending self.main_frames = self.main_frames[1:(len(self.main_frames) - 1)] print(f"Trimmed Blending Mode frames: start_frames:{len(self.start_frames)} end_frames:{len(self.end_frames)} main_frames:{len(self.main_frames)}") @@ -170,7 +170,7 @@ class InfZoomer: self.fnInterpolateFrames() # changes main_frame and writes to video - if 
self.C.audio_filename is not None: + if (self.C.audio_filename is not None) and (len(self.C.audio_filename) > 0 ): self.out_config["video_filename"] = add_audio_to_video(self.out_config["video_filename"], self.C.audio_filename, str.replace(self.out_config["video_filename"], ".mp4", "_audio.mp4"), self.C.audio_volume, find_ffmpeg_binary()) print("Video saved in: " + os.path.join(script_path, self.out_config["video_filename"])) @@ -178,9 +178,10 @@ class InfZoomer: return ( self.out_config["video_filename"], self.main_frames, + self.C.seed, processed.js(), plaintext_to_html(processed.info), - plaintext_to_html(""), + plaintext_to_html(""), ) def doUpscaling(self): @@ -212,7 +213,7 @@ class InfZoomer: else: if self.prompt_images[min(k for k in self.prompt_images.keys() if k >= 0)] == "": load_model_from_setting("infzoom_txt2img_model", self.C.progress, "Loading Model for txt2img: ") - processed, self.current_seed = self.renderFirstFrame() + processed, self.C.seed = self.renderFirstFrame() if len(processed.images) > 0: current_image = processed.images[0] self.save2Collect(current_image, f"init_txt2img.png") @@ -268,8 +269,12 @@ class InfZoomer: alpha_mask = self.getAlphaMask(*current_image.size, i + 1) #keyframes are not outpainted - paste_previous_image = not self.prompt_image_is_keyframe[(i + 1)] - print(f"paste_prev_image: {paste_previous_image} {i + 1}") + try: + paste_previous_image = not self.prompt_image_is_keyframe[(i + 1)] + print(f"paste_prev_image: {paste_previous_image} {i + 1}") + except KeyError: + print(f"Your Prompt List is missing key {i + 1}") + break if self.C.custom_exit_image and ((i + 1) == outpaint_steps): current_image = cv2_to_pil(cv2.resize( @@ -297,7 +302,7 @@ class InfZoomer: expanded_image.paste(current_image, (self.mask_width,self.mask_height)) pr = self.prompts[max(k for k in self.prompts.keys() if k <= i)] - processed, newseed = renderImg2Img( + processed, self.C.seed = renderImg2Img( 
f"{self.C.common_prompt_pre}\n{pr}\n{self.C.common_prompt_suf}".strip(), self.C.negative_prompt, self.C.sampler, @@ -394,8 +399,12 @@ class InfZoomer: # create mask (black image with white mask_width width edges) #keyframes are not inpainted - paste_previous_image = not self.prompt_image_is_keyframe[(i + 1)] - print(f"paste_prev_image: {paste_previous_image} {i} {i + 1}") + try: + paste_previous_image = not self.prompt_image_is_keyframe[(i + 1)] + print(f"paste_prev_image: {paste_previous_image} {i} {i + 1}") + except KeyError: + print(f"Your Prompt List is missing key {i + 1}") + break if self.C.custom_exit_image and ((i + 1) == outpaint_steps): current_image = cv2_to_pil( @@ -413,7 +422,7 @@ class InfZoomer: if self.prompt_images[max(k for k in self.prompt_images.keys() if k <= (i + 1))] == "": pr = self.prompts[max(k for k in self.prompts.keys() if k <= i)] - processed, seed = renderImg2Img( + processed, self.C.seed = renderImg2Img( f"{self.C.common_prompt_pre}\n{pr}\n{self.C.common_prompt_suf}".strip(), self.C.negative_prompt, self.C.sampler, @@ -563,6 +572,8 @@ class InfZoomer: self.C.blend_gradient_size, hex_to_rgba(self.C.blend_color)) + if len(self.main_frames) <2: + raise ValueError("Not enough frames for interpolation, possibly disable Luma Wipes") # Build interpolation frames # last frame skip interpolation for i in range(1, len(self.main_frames)): diff --git a/iz_helpers/static_variables.py b/iz_helpers/static_variables.py index 27223a5..8b99143 100644 --- a/iz_helpers/static_variables.py +++ b/iz_helpers/static_variables.py @@ -8,10 +8,11 @@ default_cfg_scale = 8 default_mask_blur = 48 default_gradient_size = 61 default_overmask = 8 -default_total_outpaints = 5 +default_total_outpaints = 3 default_outpaint_amount = 128 promptTableHeaders = ["Outpaint Steps", "Prompt", "image location", "blend mask", "is keyframe"], ["number", "str", "str", "str", "bool"] default_lut_example_img = "extensions\\infinite-zoom-automatic1111-webui\\LUT\\daisy.jpg" 
+default_Luma_wipe_img = "extensions\\infinite-zoom-automatic1111-webui\\Wipes\\clock.png" default_prompt = """ { @@ -22,12 +23,13 @@ default_prompt = """ [0, "Huge spectacular Waterfall in a dense tropical forest,epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1), divine,cinematic,(tropical forest:1.4),(river:1.3)mythology,india, volumetric lighting, Hindu ,epic, Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2) ","C:\\\\path\\\\to\\\\image.png", "extensions\\\\infinite-zoom-automatic1111-webui\\\\blends\\\\sun-square.png", true], [1, "a Lush jungle","","",false], [2, "a Thick rainforest","","",false], + [3, "a crashed UFO stuck in the ground","","",false], [4, "a Verdant canopy","","",false] ] }, "postPrompt": "epic perspective,(vegetation overgrowth:1.3)(intricate, ornamentation:1.1),(baroque:1.1), fantasy, (realistic:1) digital painting , (magical,mystical:1.2) , (wide angle shot:1.4), (landscape composed:1.2)(medieval:1.1),(tropical forest:1.4),(river:1.3) volumetric lighting ,epic, style by Alex Horley Wenjun Lin greg rutkowski Ruan Jia (Wayne Barlowe:1.2)", "negPrompt": "frames, border, edges, borderline, text, character, duplicate, error, out of frame, watermark, low quality, ugly, deformed, blur, bad-artist", - "audioFileName": None, + "audioFileName": "", "seed":-1, "width": 512, "height": 512, @@ -38,13 +40,13 @@ default_prompt = """ "outpaintAmount": 128, "maskBlur": 48, "overmask": 8.0, - "outpaintStrategy": "Corner", + "outpaintStrategy": "Corners", "zoomMode": "Zoom-out", "fps": 30, "zoomSpeed": 1.0, "startFrames": 0, "lastFrames": 0, - "blendMode": "None", + "blendMode": "Not Used", "blendColor": "#ffff00", "blendGradient": 61, "blendInvert": false @@ -52,7 +54,7 @@ default_prompt = """ """ empty_prompt = ( - 
'{"prompts":{"data":[0,"","","",false],"headers":["Outpaintg Steps","prompt","image location", "blend mask location", "is keyframe"]},"negPrompt":"", "prePrompt":"", "postPrompt":"", "audioFileName":None, "seed":-1, "width": 512, "height": 512, "sampler": "DDIM", "guidanceScale": 8.0, "steps": 35, "lutFileName": "", "outpaintAmount": 128, "maskBlur": 48, "overmask": 8, "outpaintStrategy": "Corner", "zoomMode": "Zoom-out", "fps": 30, "zoomSpeed": 1, "startFrames": 0, "lastFrames": 0, "blendMode": "None", "blendColor": "#ffff00", "blendGradient": 61, "blendInvert": false}' + '{"prompts":{"data":[0,"","","",false],"headers":["Outpaintg Steps","prompt","image location", "blend mask location", "is keyframe"]},"negPrompt":"", "prePrompt":"", "postPrompt":"", "audioFileName":"", "seed":-1, "width": 512, "height": 512, "sampler": "DDIM", "guidanceScale": 8.0, "steps": 35, "lutFileName": "", "outpaintAmount": 128, "maskBlur": 48, "overmask": 8, "outpaintStrategy": "Corners", "zoomMode": "Zoom-out", "fps": 30, "zoomSpeed": 1, "startFrames": 0, "lastFrames": 0, "blendMode": "Not Used", "blendColor": "#ffff00", "blendGradient": 61, "blendInvert": false}' ) invalid_prompt = { @@ -63,7 +65,7 @@ invalid_prompt = { "negPrompt": "Invalid prompt-json", "prePrompt": "Invalid prompt", "postPrompt": "Invalid prompt", - "audioFileName": None, + "audioFileName": "", "seed":-1, "width": 512, "height": 512, @@ -74,13 +76,13 @@ invalid_prompt = { "outpaintAmount": 128, "maskBlur": 48, "overmask": 8, - "outpaintStrategy": "Corner", + "outpaintStrategy": "Corners", "zoomMode": "Zoom-out", "fps": 30, "zoomSpeed": 1.0, "startFrames": 0, "lastFrames": 0, - "blendMode": "None", + "blendMode": "Not Used", "blendColor": "#ffff00", "blendGradient": 61, "blendInvert": False diff --git a/iz_helpers/ui.py b/iz_helpers/ui.py index 2e3df58..a3108c5 100644 --- a/iz_helpers/ui.py +++ b/iz_helpers/ui.py @@ -20,8 +20,9 @@ from .static_variables import ( default_gradient_size, default_outpaint_amount, 
default_lut_example_img, + default_Luma_wipe_img, ) -from .helpers import validatePromptJson_throws, putPrompts, clearPrompts, renumberDataframe, closest_upper_divisible_by_eight +from .helpers import validatePromptJson_throws, putPrompts, clearPrompts, renumberDataframe, closest_upper_divisible_by_eight, recalcPromptKeys from .prompt_util import readJsonPrompt from .static_variables import promptTableHeaders from PIL import Image @@ -102,6 +103,8 @@ def on_ui_tabs(): precision=0, interactive=True ) + gr.Button('\U0001f3b2\ufe0f').style(full_width=False).click(fn=lambda: -1, outputs=[main_seed], queue=False) + reuse_seed = gr.Button('\u267b\ufe0f').style(full_width=False) main_sampler = gr.Dropdown( label="Sampler", choices=available_samplers, @@ -208,12 +211,13 @@ You might give multiple options in one line. ) with gr.Accordion("Blend settings"): with gr.Row(): - blend_image = gr.Image(type="pil", label="Custom in/out Blend Image") + blend_image = gr.Image(type="pil", label="Custom in/out Blend Image", value=default_Luma_wipe_img) blend_mode = gr.Radio( label="Blend Mode", - choices=["None", "Simple Blend", "Alpha Composite", "Luma Wipe"], - value=jpr["blendMode"], + choices=["Not Used", "Simple Blend", "Alpha Composite", "Luma Wipe"], + value="Not Used", #jpr["blendMode"] + type="index", + elem_id="infzoom_blend_mode", ) blend_invert_do = gr.Checkbox(jpr["blendInvert"], label="Reverse Blend/Wipe") with gr.Row(): 
) blend_color = gr.ColorPicker( label='Blend Edge Color', - default=jpr["blendColor"] + value=jpr["blendColor"] ) + main_prompts.change(recalcPromptKeys,inputs=[main_prompts], outputs=[main_outpaint_steps]) video_zoom_speed.change(calc_est_video_length,inputs=[blend_mode,video_zoom_speed, video_start_frame_dupe_amount,video_last_frame_dupe_amount,video_frame_rate,main_outpaint_steps],outputs=[video_est_length]) main_outpaint_steps.change(calc_est_video_length,inputs=[blend_mode,video_zoom_speed, video_start_frame_dupe_amount,video_last_frame_dupe_amount,video_frame_rate,main_outpaint_steps],outputs=[video_est_length]) video_frame_rate.change(calc_est_video_length,inputs=[blend_mode,video_zoom_speed, video_start_frame_dupe_amount,video_last_frame_dupe_amount,video_frame_rate,main_outpaint_steps],outputs=[video_est_length]) @@ -345,8 +350,7 @@ Depending on amount of frames and which upscaler you choose it might took a long Our best experience and trade-off is the R-ERSGAn4x upscaler. """ ) - - + # these buttons will be moved using JS under the dataframe view as small ones exportPrompts_button = gr.Button( value="Export prompts", @@ -484,9 +488,10 @@ Our best experience and trade-off is the R-ERSGAn4x upscaler. ) = create_output_panel( "infinite-zoom", shared.opts.outdir_img2img_samples ) + seed_used = gr.Number(label='Seed used', value=-1, interactive=False) generate_btn.click( - fn=wrap_gradio_gpu_call(createZoom, extra_outputs=[None, "", ""]), + fn=wrap_gradio_gpu_call(createZoom, extra_outputs=[None, None, -1, "", ""]), inputs=[ main_common_prompt_pre, main_prompts, @@ -525,13 +530,13 @@ Our best experience and trade-off is the R-ERSGAn4x upscaler. 
audio_filename, audio_volume, ], - outputs=[output_video, out_image, generation_info, html_info, html_log], + outputs=[output_video, out_image, seed_used, generation_info, html_info, html_log], ) main_prompts.change( fn=checkPrompts, inputs=[main_prompts], outputs=[generate_btn] ) - + reuse_seed.click(fn=lambda x: x, inputs=[seed_used], outputs=[main_seed], queue=False) interrupt.click(fn=lambda: shared.state.interrupt(), inputs=[], outputs=[]) infinite_zoom_interface.queue() return [(infinite_zoom_interface, "Infinite Zoom", "iz_interface")] diff --git a/iz_helpers/video.py b/iz_helpers/video.py index 2155e53..aee9022 100644 --- a/iz_helpers/video.py +++ b/iz_helpers/video.py @@ -120,6 +120,9 @@ class ContinuousVideoWriter: """ # Duplicate the exit frames if blend_type != 0: + if blend_image is None: + blend_image = draw_gradient_ellipse(*exitframe.size, blend_gradient_size) + if blend_type == 1: end_frames = blend_images(next_to_last_frame, exitframe, math.ceil(last_frame_dupe_amount), blend_invert) elif blend_type == 2: