{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "c442uQJ_gUgy" }, "source": [ "# **Deforum Stable Diffusion v0.6**\n", "[Stable Diffusion](https://github.com/CompVis/stable-diffusion) by Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, Björn Ommer and the [Stability.ai](https://stability.ai/) Team. [K Diffusion](https://github.com/crowsonkb/k-diffusion) by [Katherine Crowson](https://twitter.com/RiversHaveWings). You need to get the ckpt file and put it on your Google Drive first to use this. It can be downloaded from [HuggingFace](https://huggingface.co/CompVis/stable-diffusion).\n", "\n", "Notebook by [deforum](https://discord.gg/upmXXsrwZc)" ] }, { "cell_type": "code", "metadata": { "cellView": "form", "id": "2g-f7cQmf2Nt" }, "source": [ "#@markdown **NVIDIA GPU**\n", "import subprocess, os, sys\n", "sub_p_res = subprocess.run(['nvidia-smi', '--query-gpu=name,memory.total,memory.free', '--format=csv,noheader'], stdout=subprocess.PIPE).stdout.decode('utf-8')\n", "print(f\"{sub_p_res[:-1]}\")" ], "outputs": [], "execution_count": null }, { "cell_type": "markdown", "metadata": { "id": "T4knibRpAQ06" }, "source": [ "# Setup" ] }, { "cell_type": "code", "metadata": { "cellView": "form", "id": "TxIOPT0G5Lx1" }, "source": [ "#@markdown **Model and Output Paths**\n", "def get_model_output_paths():\n", "\n", " models_path = \"models\" #@param {type:\"string\"}\n", " output_path = \"output\" #@param {type:\"string\"}\n", "\n", " #@markdown **Google Drive Path Variables (Optional)**\n", " mount_google_drive = True #@param {type:\"boolean\"}\n", " force_remount = False\n", "\n", " try:\n", " ipy = get_ipython()\n", " except:\n", " ipy = 'could not get_ipython'\n", "\n", " if 'google.colab' in str(ipy):\n", " if mount_google_drive:\n", " from google.colab import drive # type: ignore\n", " try:\n", " drive_path = \"/content/drive\"\n", " drive.mount(drive_path,force_remount=force_remount)\n", " models_path_gdrive = \"/content/drive/MyDrive/AI/models\" #@param {type:\"string\"}\n", " output_path_gdrive = \"/content/drive/MyDrive/AI/StableDiffusion\" #@param {type:\"string\"}\n", " models_path = models_path_gdrive\n", " output_path = output_path_gdrive\n", " except:\n", " print(\"..error mounting drive or with drive path variables\")\n", " print(\"..reverting to default path variables\")\n", "\n", " models_path = os.path.abspath(models_path)\n", " output_path = os.path.abspath(output_path)\n", " os.makedirs(models_path, exist_ok=True)\n", " os.makedirs(output_path, exist_ok=True)\n", "\n", " print(f\"models_path: {models_path}\")\n", " print(f\"output_path: {output_path}\")\n", "\n", " return models_path, output_path\n", "\n", "models_path, output_path = get_model_output_paths()\n", "\n", "def setup_environment():\n", "\n", " print_subprocess = False\n", "\n", " try:\n", " ipy = get_ipython()\n", " except:\n", " ipy = 'could not get_ipython'\n", "\n", " if 'google.colab' in str(ipy):\n", " import subprocess, time\n", " print(\"Setting up environment...\")\n", " start_time = time.time()\n", " all_process = [\n", " ['pip', 'install', 'torch==1.12.1+cu113', 'torchvision==0.13.1+cu113', '--extra-index-url', 'https://download.pytorch.org/whl/cu113'],\n", " ['pip', 'install', 'omegaconf==2.2.3', 'einops==0.4.1', 'pytorch-lightning==1.7.4', 'torchmetrics==0.9.3', 'torchtext==0.13.1', 'transformers==4.21.2', 'kornia==0.6.7'],\n", " ['git', 'clone', '-b', 'local', 'https://github.com/deforum/stable-diffusion'],\n", " ['pip', 'install', 'accelerate', 'ftfy', 'jsonmerge', 'matplotlib', 
'resize-right', 'timm', 'torchdiffeq'],\n", " ]\n", " for process in all_process:\n", " running = subprocess.run(process,stdout=subprocess.PIPE).stdout.decode('utf-8')\n", " if print_subprocess:\n", " print(running)\n", "\n", " with open('stable-diffusion/src/k_diffusion/__init__.py', 'w') as f:\n", " f.write('')\n", "\n", " sys.path.extend([\n", " 'stable-diffusion/',\n", " 'stable-diffusion/src',\n", " ])\n", "\n", " end_time = time.time()\n", " print(f\"Environment set up in {end_time-start_time:.0f} seconds\")\n", " \n", " else:\n", "\n", " sys.path.extend([\n", " 'src'\n", " ])\n", "\n", " return\n", "\n", "setup_environment()\n", "\n", "# import\n", "import torch\n", "import gc\n", "import time\n", "import random\n", "from types import SimpleNamespace\n", "\n", "from helpers.save_images import get_output_folder\n", "from helpers.settings import load_args\n", "from helpers.render import render_animation, render_input_video, render_image_batch, render_interpolation\n", "\n", "#@markdown **Select and Load Model**\n", "\n", "def load_model():\n", "\n", " import requests\n", " import torch\n", " from ldm.util import instantiate_from_config\n", " from omegaconf import OmegaConf\n", " from transformers import logging\n", " logging.set_verbosity_error()\n", "\n", " model_config = \"v1-inference.yaml\" #@param [\"custom\",\"v1-inference.yaml\"]\n", " model_checkpoint = \"sd-v1-4.ckpt\" #@param [\"custom\",\"sd-v1-4-full-ema.ckpt\",\"sd-v1-4.ckpt\",\"sd-v1-3-full-ema.ckpt\",\"sd-v1-3.ckpt\",\"sd-v1-2-full-ema.ckpt\",\"sd-v1-2.ckpt\",\"sd-v1-1-full-ema.ckpt\",\"sd-v1-1.ckpt\", \"robo-diffusion-v1.ckpt\",\"wd-v1-3-float16.ckpt\"]\n", "\n", " custom_config_path = \"\" #@param {type:\"string\"}\n", " custom_checkpoint_path = \"\" #@param {type:\"string\"}\n", "\n", " load_on_run_all = True\n", " half_precision = True\n", " check_sha256 = True\n", "\n", " try:\n", " ipy = get_ipython()\n", " except:\n", " ipy = 'could not get_ipython'\n", "\n", " if 'google.colab' in str(ipy):\n", " path_extend = \"stable-diffusion\"\n", " else:\n", " path_extend = \"\"\n", "\n", " model_map = {\n", " \"sd-v1-4-full-ema.ckpt\": {\n", " 'sha256': '14749efc0ae8ef0329391ad4436feb781b402f4fece4883c7ad8d10556d8a36a',\n", " 'url': 'https://huggingface.co/CompVis/stable-diffusion-v-1-2-original/blob/main/sd-v1-4-full-ema.ckpt',\n", " 'requires_login': True,\n", " },\n", " \"sd-v1-4.ckpt\": {\n", " 'sha256': 'fe4efff1e174c627256e44ec2991ba279b3816e364b49f9be2abc0b3ff3f8556',\n", " 'url': 'https://huggingface.co/CompVis/stable-diffusion-v-1-4-original/resolve/main/sd-v1-4.ckpt',\n", " 'requires_login': True,\n", " },\n", " \"sd-v1-3-full-ema.ckpt\": {\n", " 'sha256': '54632c6e8a36eecae65e36cb0595fab314e1a1545a65209f24fde221a8d4b2ca',\n", " 'url': 'https://huggingface.co/CompVis/stable-diffusion-v-1-3-original/blob/main/sd-v1-3-full-ema.ckpt',\n", " 'requires_login': True,\n", " },\n", " \"sd-v1-3.ckpt\": {\n", " 'sha256': '2cff93af4dcc07c3e03110205988ff98481e86539c51a8098d4f2236e41f7f2f',\n", " 'url': 'https://huggingface.co/CompVis/stable-diffusion-v-1-3-original/resolve/main/sd-v1-3.ckpt',\n", " 'requires_login': True,\n", " },\n", " \"sd-v1-2-full-ema.ckpt\": {\n", " 'sha256': 'bc5086a904d7b9d13d2a7bccf38f089824755be7261c7399d92e555e1e9ac69a',\n", " 'url': 'https://huggingface.co/CompVis/stable-diffusion-v-1-2-original/blob/main/sd-v1-2-full-ema.ckpt',\n", " 'requires_login': True,\n", " },\n", " \"sd-v1-2.ckpt\": {\n", " 'sha256': '3b87d30facd5bafca1cbed71cfb86648aad75d1c264663c0cc78c7aea8daec0d',\n", " 'url': 
'https://huggingface.co/CompVis/stable-diffusion-v-1-2-original/resolve/main/sd-v1-2.ckpt',\n", " 'requires_login': True,\n", " },\n", " \"sd-v1-1-full-ema.ckpt\": {\n", " 'sha256': 'efdeb5dc418a025d9a8cc0a8617e106c69044bc2925abecc8a254b2910d69829',\n", " 'url':'https://huggingface.co/CompVis/stable-diffusion-v-1-1-original/resolve/main/sd-v1-1-full-ema.ckpt',\n", " 'requires_login': True,\n", " },\n", " \"sd-v1-1.ckpt\": {\n", " 'sha256': '86cd1d3ccb044d7ba8db743d717c9bac603c4043508ad2571383f954390f3cea',\n", " 'url': 'https://huggingface.co/CompVis/stable-diffusion-v-1-1-original/resolve/main/sd-v1-1.ckpt',\n", " 'requires_login': True,\n", " },\n", " \"robo-diffusion-v1.ckpt\": {\n", " 'sha256': '244dbe0dcb55c761bde9c2ac0e9b46cc9705ebfe5f1f3a7cc46251573ea14e16',\n", " 'url': 'https://huggingface.co/nousr/robo-diffusion/resolve/main/models/robo-diffusion-v1.ckpt',\n", " 'requires_login': False,\n", " },\n", " \"wd-v1-3-float16.ckpt\": {\n", " 'sha256': '4afab9126057859b34d13d6207d90221d0b017b7580469ea70cee37757a29edd',\n", " 'url': 'https://huggingface.co/hakurei/waifu-diffusion-v1-3/resolve/main/wd-v1-3-float16.ckpt',\n", " 'requires_login': False,\n", " },\n", " }\n", "\n", " # config path\n", " ckpt_config_path = custom_config_path if model_config == \"custom\" else os.path.join(models_path, model_config)\n", " if os.path.exists(ckpt_config_path):\n", " print(f\"{ckpt_config_path} exists\")\n", " else:\n", " ckpt_config_path = os.path.join(path_extend,\"configs\",\"v1-inference.yaml\")\n", " \n", " ckpt_config_path = os.path.abspath(ckpt_config_path)\n", "\n", " # checkpoint path or download\n", " ckpt_path = custom_checkpoint_path if model_checkpoint == \"custom\" else os.path.join(models_path, model_checkpoint)\n", " ckpt_valid = True\n", " if os.path.exists(ckpt_path):\n", " pass\n", " elif 'url' in model_map[model_checkpoint]:\n", " url = model_map[model_checkpoint]['url']\n", "\n", " # CLI dialogue to authenticate download\n", " if model_map[model_checkpoint]['requires_login']:\n", " print(\"This model requires an authentication token\")\n", " print(\"Please ensure you have accepted the terms of service before continuing.\")\n", "\n", " username = input(\"[What is your huggingface username?]: \")\n", " token = input(\"[What is your huggingface token?]: \")\n", "\n", " _, path = url.split(\"https://\")\n", "\n", " url = f\"https://{username}:{token}@{path}\"\n", "\n", " # contact server for model\n", " print(f\"..attempting to download {model_checkpoint}...this may take a while\")\n", " ckpt_request = requests.get(url)\n", " request_status = ckpt_request.status_code\n", "\n", " # inform user of errors\n", " if request_status == 403:\n", " raise ConnectionRefusedError(\"You have not accepted the license for this model.\")\n", " elif request_status == 404:\n", " raise ConnectionError(\"Could not make contact with server\")\n", " elif request_status != 200:\n", " raise ConnectionError(f\"Some other error has ocurred - response code: {request_status}\")\n", "\n", " # write to model path\n", " with open(os.path.join(models_path, model_checkpoint), 'wb') as model_file:\n", " model_file.write(ckpt_request.content)\n", " else:\n", " print(f\"Please download model checkpoint and place in {os.path.join(models_path, model_checkpoint)}\")\n", " ckpt_valid = False\n", " \n", " print(f\"config_path: {ckpt_config_path}\")\n", " print(f\"ckpt_path: {ckpt_path}\")\n", "\n", " if check_sha256 and model_checkpoint != \"custom\" and ckpt_valid:\n", " import hashlib\n", " print(\"..checking 
sha256\")\n", " with open(ckpt_path, \"rb\") as f:\n", " bytes = f.read() \n", " hash = hashlib.sha256(bytes).hexdigest()\n", " del bytes\n", " if model_map[model_checkpoint][\"sha256\"] == hash:\n", " print(\"..hash is correct\")\n", " else:\n", " print(\"..hash in not correct\")\n", " ckpt_valid = False\n", "\n", " def load_model_from_config(config, ckpt, verbose=False, device='cuda', half_precision=True,print_flag=False):\n", " map_location = \"cuda\" # [\"cpu\", \"cuda\"]\n", " print(f\"..loading model\")\n", " pl_sd = torch.load(ckpt, map_location=map_location)\n", " if \"global_step\" in pl_sd:\n", " if print_flag:\n", " print(f\"Global Step: {pl_sd['global_step']}\")\n", " sd = pl_sd[\"state_dict\"]\n", " model = instantiate_from_config(config.model)\n", " m, u = model.load_state_dict(sd, strict=False)\n", " if print_flag:\n", " if len(m) > 0 and verbose:\n", " print(\"missing keys:\")\n", " print(m)\n", " if len(u) > 0 and verbose:\n", " print(\"unexpected keys:\")\n", " print(u)\n", "\n", " if half_precision:\n", " model = model.half().to(device)\n", " else:\n", " model = model.to(device)\n", " model.eval()\n", " return model\n", "\n", " if load_on_run_all and ckpt_valid:\n", " local_config = OmegaConf.load(f\"{ckpt_config_path}\")\n", " model = load_model_from_config(local_config, f\"{ckpt_path}\", half_precision=half_precision)\n", " device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n", " model = model.to(device)\n", "\n", " return model, device\n", "\n", "model, device = load_model()" ], "outputs": [], "execution_count": null }, { "cell_type": "markdown", "metadata": { "id": "ov3r4RD1tzsT" }, "source": [ "# Settings" ] }, { "cell_type": "markdown", "metadata": { "id": "0j7rgxvLvfay" }, "source": [ "### Animation Settings" ] }, { "cell_type": "code", "metadata": { "cellView": "form", "id": "8HJN2TE3vh-J" }, "source": [ "def DeforumAnimArgs():\n", "\n", " #@markdown ####**Animation:**\n", " animation_mode = 'None' #@param ['None', '2D', '3D', 'Video Input', 'Interpolation'] {type:'string'}\n", " max_frames = 1000 #@param {type:\"number\"}\n", " border = 'replicate' #@param ['wrap', 'replicate'] {type:'string'}\n", "\n", " #@markdown ####**Motion Parameters:**\n", " angle = \"0:(0)\"#@param {type:\"string\"}\n", " zoom = \"0:(1.04)\"#@param {type:\"string\"}\n", " translation_x = \"0:(10*sin(2*3.14*t/10))\"#@param {type:\"string\"}\n", " translation_y = \"0:(0)\"#@param {type:\"string\"}\n", " translation_z = \"0:(10)\"#@param {type:\"string\"}\n", " rotation_3d_x = \"0:(0)\"#@param {type:\"string\"}\n", " rotation_3d_y = \"0:(0)\"#@param {type:\"string\"}\n", " rotation_3d_z = \"0:(0)\"#@param {type:\"string\"}\n", " flip_2d_perspective = False #@param {type:\"boolean\"}\n", " perspective_flip_theta = \"0:(0)\"#@param {type:\"string\"}\n", " perspective_flip_phi = \"0:(t%15)\"#@param {type:\"string\"}\n", " perspective_flip_gamma = \"0:(0)\"#@param {type:\"string\"}\n", " perspective_flip_fv = \"0:(53)\"#@param {type:\"string\"}\n", " noise_schedule = \"0: (0.02)\"#@param {type:\"string\"}\n", " strength_schedule = \"0: (0.65)\"#@param {type:\"string\"}\n", " contrast_schedule = \"0: (1.0)\"#@param {type:\"string\"}\n", "\n", " #@markdown ####**Coherence:**\n", " color_coherence = 'Match Frame 0 LAB' #@param ['None', 'Match Frame 0 HSV', 'Match Frame 0 LAB', 'Match Frame 0 RGB'] {type:'string'}\n", " diffusion_cadence = '1' #@param ['1','2','3','4','5','6','7','8'] {type:'string'}\n", "\n", " #@markdown ####**3D Depth Warping:**\n", " 
use_depth_warping = True #@param {type:\"boolean\"}\n", " midas_weight = 0.3#@param {type:\"number\"}\n", " near_plane = 200\n", " far_plane = 10000\n", " fov = 40#@param {type:\"number\"}\n", " padding_mode = 'border'#@param ['border', 'reflection', 'zeros'] {type:'string'}\n", " sampling_mode = 'bicubic'#@param ['bicubic', 'bilinear', 'nearest'] {type:'string'}\n", " save_depth_maps = False #@param {type:\"boolean\"}\n", "\n", " #@markdown ####**Video Input:**\n", " video_init_path ='/content/video_in.mp4'#@param {type:\"string\"}\n", " extract_nth_frame = 1#@param {type:\"number\"}\n", " overwrite_extracted_frames = True #@param {type:\"boolean\"}\n", " use_mask_video = False #@param {type:\"boolean\"}\n", " video_mask_path ='/content/video_in.mp4'#@param {type:\"string\"}\n", "\n", " #@markdown ####**Interpolation:**\n", " interpolate_key_frames = False #@param {type:\"boolean\"}\n", " interpolate_x_frames = 4 #@param {type:\"number\"}\n", " \n", " #@markdown ####**Resume Animation:**\n", " resume_from_timestring = False #@param {type:\"boolean\"}\n", " resume_timestring = \"20220829210106\" #@param {type:\"string\"}\n", "\n", " return locals()" ], "outputs": [], "execution_count": null }, { "cell_type": "markdown", "metadata": { "id": "63UOJvU3xdPS" }, "source": [ "### Prompts\n", "`animation_mode: None` batches on list of *prompts*. `animation_mode: 2D` uses *animation_prompts* key frame sequence" ] }, { "cell_type": "code", "metadata": { "id": "2ujwkGZTcGev" }, "source": [ "prompts = [\n", " \"a beautiful forest by Asher Brown Durand, trending on Artstation\", # the first prompt I want\n", " \"a beautiful portrait of a woman by Artgerm, trending on Artstation\", # the second prompt I want\n", " #\"this prompt I don't want it I commented it out\",\n", " #\"a nousr robot, trending on Artstation\", # use \"nousr robot\" with the robot diffusion model (see model_checkpoint setting)\n", " #\"touhou 1girl komeiji_koishi portrait, green hair\", # waifu diffusion prompts can use danbooru tag groups (see model_checkpoint)\n", " #\"this prompt has weights if prompt weighting enabled:2 can also do negative:-2\", # (see prompt_weighting)\n", "]\n", "\n", "animation_prompts = {\n", " 0: \"a beautiful apple, trending on Artstation\",\n", " 20: \"a beautiful banana, trending on Artstation\",\n", " 30: \"a beautiful coconut, trending on Artstation\",\n", " 40: \"a beautiful durian, trending on Artstation\",\n", "}" ], "outputs": [], "execution_count": null }, { "cell_type": "markdown", "metadata": { "id": "s8RAo2zI-vQm" }, "source": [ "# Run" ] }, { "cell_type": "code", "metadata": { "cellView": "form", "id": "qH74gBWDd2oq" }, "source": [ "#@markdown **Load Settings**\n", "override_settings_with_file = False #@param {type:\"boolean\"}\n", "custom_settings_file = \"/content/drive/MyDrive/Settings.txt\"#@param {type:\"string\"}\n", "\n", "def Root():\n", " return locals()\n", "\n", "def DeforumArgs():\n", " #@markdown **Image Settings**\n", " W = 512 #@param\n", " H = 512 #@param\n", " W, H = map(lambda x: x - x % 64, (W, H)) # resize to integer multiple of 64\n", "\n", " #@markdown **Sampling Settings**\n", " seed = -1 #@param\n", " sampler = 'klms' #@param [\"klms\",\"dpm2\",\"dpm2_ancestral\",\"heun\",\"euler\",\"euler_ancestral\",\"plms\", \"ddim\"]\n", " steps = 50 #@param\n", " scale = 7 #@param\n", " ddim_eta = 0.0 #@param\n", " dynamic_threshold = None\n", " static_threshold = None \n", "\n", " #@markdown **Save & Display Settings**\n", " save_samples = True #@param {type:\"boolean\"}\n", " 
save_settings = True #@param {type:\"boolean\"}\n", " display_samples = True #@param {type:\"boolean\"}\n", " save_sample_per_step = False #@param {type:\"boolean\"}\n", " show_sample_per_step = False #@param {type:\"boolean\"}\n", "\n", " #@markdown **Prompt Settings**\n", " prompt_weighting = False #@param {type:\"boolean\"}\n", " normalize_prompt_weights = True #@param {type:\"boolean\"}\n", " log_weighted_subprompts = False #@param {type:\"boolean\"}\n", "\n", " #@markdown **Batch Settings**\n", " n_batch = 1 #@param\n", " batch_name = \"StableFun\" #@param {type:\"string\"}\n", " filename_format = \"{timestring}_{index}_{prompt}.png\" #@param [\"{timestring}_{index}_{seed}.png\",\"{timestring}_{index}_{prompt}.png\"]\n", " seed_behavior = \"iter\" #@param [\"iter\",\"fixed\",\"random\"]\n", " make_grid = False #@param {type:\"boolean\"}\n", " grid_rows = 2 #@param \n", " outdir = get_output_folder(output_path, batch_name)\n", "\n", " #@markdown **Init Settings**\n", " use_init = False #@param {type:\"boolean\"}\n", " strength = 0.0 #@param {type:\"number\"}\n", " strength_0_no_init = True # Set the strength to 0 automatically when no init image is used\n", " init_image = \"https://cdn.pixabay.com/photo/2022/07/30/13/10/green-longhorn-beetle-7353749_1280.jpg\" #@param {type:\"string\"}\n", " # Whiter areas of the mask are areas that change more\n", " use_mask = False #@param {type:\"boolean\"}\n", " use_alpha_as_mask = False # use the alpha channel of the init image as the mask\n", " mask_file = \"https://www.filterforge.com/wiki/images/archive/b/b7/20080927223728%21Polygonal_gradient_thumb.jpg\" #@param {type:\"string\"}\n", " invert_mask = False #@param {type:\"boolean\"}\n", " # Adjust mask image, 1.0 is no adjustment. Should be positive numbers.\n", " mask_brightness_adjust = 1.0 #@param {type:\"number\"}\n", " mask_contrast_adjust = 1.0 #@param {type:\"number\"}\n", " # Overlay the masked image at the end of the generation so it does not get degraded by encoding and decoding\n", " overlay_mask = True # {type:\"boolean\"}\n", " # Blur edges of final overlay mask, if used. 
Minimum = 0 (no blur)\n", " mask_overlay_blur = 5 # {type:\"number\"}\n", "\n", " n_samples = 1 # doesn't do anything\n", " precision = 'autocast' \n", " C = 4\n", " f = 8\n", "\n", " prompt = \"\"\n", " timestring = \"\"\n", " init_latent = None\n", " init_sample = None\n", " init_c = None\n", "\n", " return locals()\n", "\n", "root = Root()\n", "args_dict = DeforumArgs()\n", "anim_args_dict = DeforumAnimArgs()\n", "\n", "if override_settings_with_file:\n", " load_args(args_dict,anim_args_dict,custom_settings_file)\n", "\n", "root = SimpleNamespace(**root)\n", "args = SimpleNamespace(**args_dict)\n", "anim_args = SimpleNamespace(**anim_args_dict)\n", "\n", "args.timestring = time.strftime('%Y%m%d%H%M%S')\n", "args.strength = max(0.0, min(1.0, args.strength))\n", "\n", "root.model = model\n", "root.device = device\n", "root.models_path = models_path\n", "root.output_path = output_path\n", "root.half_precision = True\n", "\n", "if args.seed == -1:\n", " args.seed = random.randint(0, 2**32 - 1)\n", "if not args.use_init:\n", " args.init_image = None\n", "if args.sampler == 'plms' and (args.use_init or anim_args.animation_mode != 'None'):\n", " print(f\"Init images aren't supported with PLMS yet, switching to KLMS\")\n", " args.sampler = 'klms'\n", "if args.sampler != 'ddim':\n", " args.ddim_eta = 0\n", "\n", "if anim_args.animation_mode == 'None':\n", " anim_args.max_frames = 1\n", "elif anim_args.animation_mode == 'Video Input':\n", " args.use_init = True\n", "\n", "# clean up unused memory\n", "gc.collect()\n", "torch.cuda.empty_cache()\n", "\n", "# dispatch to appropriate renderer\n", "if anim_args.animation_mode == '2D' or anim_args.animation_mode == '3D':\n", " render_animation(args, anim_args, animation_prompts, root)\n", "elif anim_args.animation_mode == 'Video Input':\n", " render_input_video(args, anim_args, animation_prompts, root)\n", "elif anim_args.animation_mode == 'Interpolation':\n", " render_interpolation(args, anim_args, animation_prompts, root)\n", "else:\n", " render_image_batch(args, prompts, root) " ], "outputs": [], "execution_count": null }, { "cell_type": "markdown", "metadata": { "id": "4zV0J_YbMCTx" }, "source": [ "# Create video from frames" ] }, { "cell_type": "code", "metadata": { "cellView": "form", "id": "no2jP8HTMBM0" }, "source": [ "skip_video_for_run_all = True #@param {type: 'boolean'}\n", "fps = 12 #@param {type:\"number\"}\n", "#@markdown **Manual Settings**\n", "use_manual_settings = False #@param {type:\"boolean\"}\n", "image_path = \"/content/drive/MyDrive/AI/StableDiffusion/2022-09/20220903000939_%05d.png\" #@param {type:\"string\"}\n", "mp4_path = \"/content/drive/MyDrive/AI/StableDiffusion/2022-09/20220903000939.mp4\" #@param {type:\"string\"}\n", "render_steps = False #@param {type: 'boolean'}\n", "path_name_modifier = \"x0_pred\" #@param [\"x0_pred\",\"x\"]\n", "\n", "\n", "if skip_video_for_run_all == True:\n", " print('Skipping video creation, uncheck skip_video_for_run_all if you want to run it')\n", "else:\n", " import os\n", " import subprocess\n", " from base64 import b64encode\n", " from IPython import display\n", "\n", " print(f\"{image_path} -> {mp4_path}\")\n", "\n", " if use_manual_settings:\n", " max_frames = \"200\" #@param {type:\"string\"}\n", " else:\n", " if render_steps: # render steps from a single image\n", " fname = f\"{path_name_modifier}_%05d.png\"\n", " all_step_dirs = [os.path.join(args.outdir, d) for d in os.listdir(args.outdir) if os.path.isdir(os.path.join(args.outdir,d))]\n", " newest_dir = 
max(all_step_dirs, key=os.path.getmtime)\n", " image_path = os.path.join(newest_dir, fname)\n", " print(f\"Reading images from {image_path}\")\n", " mp4_path = os.path.join(newest_dir, f\"{args.timestring}_{path_name_modifier}.mp4\")\n", " max_frames = str(args.steps)\n", " else: # render images for a video\n", " image_path = os.path.join(args.outdir, f\"{args.timestring}_%05d.png\")\n", " mp4_path = os.path.join(args.outdir, f\"{args.timestring}.mp4\")\n", " max_frames = str(anim_args.max_frames)\n", "\n", " # make video\n", " cmd = [\n", " 'ffmpeg',\n", " '-y',\n", " '-vcodec', 'png',\n", " '-r', str(fps),\n", " '-start_number', str(0),\n", " '-i', image_path,\n", " '-frames:v', max_frames,\n", " '-c:v', 'libx264',\n", " '-vf',\n", " f'fps={fps}',\n", " '-pix_fmt', 'yuv420p',\n", " '-crf', '17',\n", " '-preset', 'veryfast',\n", " '-pattern_type', 'sequence',\n", " mp4_path\n", " ]\n", " process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n", " stdout, stderr = process.communicate()\n", " if process.returncode != 0:\n", " print(stderr)\n", " raise RuntimeError(stderr)\n", "\n", " mp4 = open(mp4_path,'rb').read()\n", " data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n", " display.display( display.HTML(f'<video controls loop><source src=\"{data_url}\" type=\"video/mp4\"></video>') )" ], "outputs": [], "execution_count": null }, { "cell_type": "markdown", "source": [ "# Disconnect when finished\n" ], "metadata": { "id": "XccAk0RoRme0" } }, { "cell_type": "code", "source": [ "skip_disconnect_for_run_all = True #@param {type: 'boolean'}\n", "\n", "if skip_disconnect_for_run_all == True:\n", " print('Skipping disconnect, uncheck skip_disconnect_for_run_all if you want to run it')\n", "else:\n", " from google.colab import runtime\n", " runtime.unassign()" ], "metadata": { "cellView": "form", "id": "_x6obwPURfSm" }, "execution_count": null, "outputs": [] } ], "metadata": { "accelerator": "GPU", "colab": { "collapsed_sections": [], "private_outputs": true, "provenance": [] }, "gpuClass": "standard", "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.13" } }, "nbformat": 4, "nbformat_minor": 0 }