Added a download collections script
Also included the https://github.com/Mossbraker/mj_reference_wildcards/ collection
parent
ef527674fb
commit
e1b395cb49
|
|
@ -0,0 +1,171 @@
|
|||
from __future__ import annotations

import argparse
import json
import os
import shutil
import sys
import tempfile
import urllib.error
import urllib.request
import zipfile
from pathlib import Path
|
||||
|
||||
|
||||
def load_repositories(filename: str = "repositories.json") -> list[dict[str, str]]:
    """Load the collection repository definitions from a JSON file.

    Args:
        filename: Name of the JSON file inside the ``_tools`` directory.

    Returns:
        A list of repository records; per repositories.json each has
        ``name``, ``url``, ``subdirectory`` and ``target_subdirectory`` keys.
    """
    tools_dir = get_tools_dir()
    repositories_path = tools_dir / filename

    # Explicit encoding so the JSON parses identically on every platform.
    with open(repositories_path, encoding="utf-8") as f:
        return json.load(f)
|
||||
|
||||
|
||||
def get_collections_dir() -> Path:
    """Locate the ``collections`` directory relative to the repository root.

    Works whether the script is run from the repository root or from inside
    ``_tools``. Exits the process with status 1 if the directory is missing.
    """
    current_path = Path.cwd()
    # Allow running the script from inside _tools as well as from the root.
    if current_path.name == "_tools":
        current_path = current_path.parent

    collections_dir = current_path / "collections"

    if not collections_dir.exists():
        print(
            "Could not find the collections directory. You should run this from the root of the repository",
        )
        # sys.exit instead of the interactive-only exit() builtin.
        sys.exit(1)

    return collections_dir
|
||||
|
||||
|
||||
def get_tools_dir() -> Path:
    """Locate the ``_tools`` directory relative to the repository root.

    Works whether the script is run from the repository root or from inside
    ``_tools``. Exits the process with status 1 if the directory is missing.
    """
    current_path = Path.cwd()
    # Allow running the script from inside _tools as well as from the root.
    if current_path.name == "_tools":
        current_path = current_path.parent

    tools_dir = current_path / "_tools"

    if not tools_dir.exists():
        print(
            "Could not find the _tools directory. You should run this from the root of the repository",
        )
        # sys.exit instead of the interactive-only exit() builtin.
        sys.exit(1)

    return tools_dir
|
||||
|
||||
|
||||
def download_pantry(url: str, filename: str):
    """Download a single pantry file into the collections directory.

    Args:
        url: Direct URL of the file to download.
        filename: Name to save the file under inside ``collections``.
    """
    collections_dir = get_collections_dir()
    filepath = collections_dir / filename

    try:
        with urllib.request.urlopen(url) as response:
            with open(filepath, "wb") as f:
                shutil.copyfileobj(response, f)
        # was: f"Successfully downloaded (unknown)" — broken placeholder.
        print(f"Successfully downloaded {filename}")
    except urllib.error.HTTPError as e:
        print(f"Error downloading {filename}: {e.code} {e.reason}")
    except urllib.error.URLError as e:
        # Network-level failures (DNS, refused connection, ...) carry no
        # HTTP status code — previously they escaped as a traceback.
        print(f"Error downloading {filename}: {e.reason}")
|
||||
|
||||
|
||||
def download_directory(url: str, subdirectory: str = "", target_subdirectory: str = ""):
    """Download a zip archive and copy one subdirectory into ``collections``.

    Args:
        url: URL of the zip archive.
        subdirectory: Name of the directory inside the archive to copy.
        target_subdirectory: Directory name to create under ``collections``.
    """
    with urllib.request.urlopen(url) as response:
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            shutil.copyfileobj(response, tmp_file)
            tmp_file_path = tmp_file.name

    # try/finally so the downloaded archive is removed even when the user
    # declines the overwrite prompt (early return) or an exception is raised;
    # previously both paths leaked the temp file.
    try:
        with tempfile.TemporaryDirectory() as tmp_dir:
            with zipfile.ZipFile(tmp_file_path, "r") as zip_ref:
                zip_ref.extractall(tmp_dir)

            found_subdirectory = False
            for root, dirs, _ in os.walk(tmp_dir):
                if subdirectory in dirs:
                    found_subdirectory = True
                    source_dir = os.path.join(root, subdirectory)
                    collections_dir = get_collections_dir()
                    target_dir = collections_dir / target_subdirectory
                    if target_dir.exists():
                        overwrite = input(
                            f"The directory {target_dir} already exists. Overwrite? (y/n) ",
                        )
                        if overwrite.strip().lower() != "y":
                            print("Skipping directory copy.")
                            return
                        else:
                            shutil.rmtree(target_dir)
                    shutil.copytree(source_dir, target_dir)
                    print(f"Copied {source_dir} to {target_dir}")
                    break

            if not found_subdirectory:
                print(
                    f"Could not find the '{subdirectory}' subdirectory in the downloaded archive.",
                )
    finally:
        os.remove(tmp_file_path)
|
||||
|
||||
|
||||
def show_menu(repositories: list[dict[str, str]]) -> tuple[str, str, str]:
    """Print a numbered menu of collections and prompt until a valid choice.

    Args:
        repositories: Repository records loaded from ``repositories.json``.

    Returns:
        The ``(url, subdirectory, target_subdirectory)`` of the chosen entry.
    """
    for index, repo in enumerate(repositories, start=1):
        print(f"{index}. {repo['name']}")
    while True:
        try:
            choice = int(input("Select a collection to download: "))
            if 1 <= choice <= len(repositories):
                selected = repositories[choice - 1]
                # Look fields up by key instead of unpacking row.values()
                # positionally — the previous code silently depended on the
                # key order of each JSON object.
                return (
                    selected["url"],
                    selected["subdirectory"],
                    selected["target_subdirectory"],
                )
            else:
                print("Invalid choice, please try again.")
        except ValueError:
            print("Invalid choice, please try again.")
|
||||
|
||||
|
||||
def download_and_copy(url: str, subdirectory: str = "", target_subdirectory: str = ""):
    """Dispatch a download based on the URL's file extension.

    Single pantry files (``.json``/``.yaml``/``.yml``) are saved directly;
    zip archives are extracted and the requested subdirectory is copied into
    ``collections``. Anything else is reported as unsupported.
    """
    # Also accept the common ".yml" spelling of YAML files.
    if url.endswith((".json", ".yaml", ".yml")):
        filename = url.split("/")[-1]
        download_pantry(url, filename)
    elif url.endswith(".zip"):
        download_directory(url, subdirectory, target_subdirectory)
    else:
        print(f"Unsupported file type: {url}")
|
||||
|
||||
|
||||
def parse_args(argv: list[str] | None = None):
    """Parse command-line options.

    Args:
        argv: Optional explicit argument list; defaults to ``sys.argv[1:]``.
              Added with a backward-compatible default so the parser can be
              exercised without touching the real command line.

    Returns:
        The parsed ``argparse.Namespace`` with a ``name`` attribute.
    """
    parser = argparse.ArgumentParser(
        description="Download collections from a given list.",
    )
    parser.add_argument(
        "--name",
        help="Specify the collection name to bypass the menu.",
        type=str,
    )
    return parser.parse_args(argv)
|
||||
|
||||
|
||||
def download_by_name(collection_name: str):
    """Resolve a collection name to its download parameters.

    Args:
        collection_name: The ``name`` field of an entry in repositories.json.

    Returns:
        The ``(url, subdirectory, target_subdirectory)`` tuple for the entry.
        Exits the process with status 1 if the name is unknown.
    """
    repositories = load_repositories()
    repo = next(
        (repo for repo in repositories if repo["name"] == collection_name),
        None,
    )
    # Guard clause: bail out on the error path, then return unconditionally.
    if repo is None:
        print(f"Collection '{collection_name}' not found.")
        # sys.exit instead of the interactive-only exit() builtin.
        sys.exit(1)
    return repo["url"], repo["subdirectory"], repo["target_subdirectory"]
|
||||
|
||||
|
||||
if __name__ == "__main__":
    args = parse_args()

    if args.name:
        # Non-interactive mode: --name resolves the collection directly
        # (download_by_name loads repositories.json itself).
        url, subdirectory, target_subdirectory = download_by_name(args.name)
    else:
        # Interactive mode: show the menu built from repositories.json.
        # Loaded here rather than unconditionally, since the --name path
        # does not need it.
        repositories = load_repositories()
        url, subdirectory, target_subdirectory = show_menu(repositories)

    # Announce the source being fetched (replaces a bare debug print of url).
    print(f"Downloading from {url}")
    download_and_copy(url, subdirectory, target_subdirectory)
|
||||
|
|
@ -1,87 +1,5 @@
|
|||
"""
|
||||
Import collections/nsp from https://github.com/WASasquatch/noodle-soup-prompts.
|
||||
|
||||
This script is intended to be run from the root of the repository;
|
||||
it's best to delete the existing collections/nsp folder first to avoid
|
||||
duplicate entries.
|
||||
|
||||
You would then import these via the WebUI to your wildcards collection.
|
||||
"""
|
||||
import collections
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
import requests
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_tag_group(tag):
    """Return the part of *tag* before the first ``-`` (the whole tag if none)."""
    group_name, _sep, _rest = tag.partition("-")
    return group_name
|
||||
|
||||
|
||||
def get_grouped_tags():
    """Fetch the NSP pantry and group its tags by their prefix.

    Returns:
        A ``defaultdict(list)`` mapping each tag-group name (the part of the
        tag before the first ``-``) to a list of ``(tag, entries)`` pairs.
    """
    pantry_url = "https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json"
    # Timeout so a stalled connection cannot hang the script indefinitely
    # (requests.get without a timeout waits forever by default).
    resp = requests.get(pantry_url, timeout=30)
    resp.raise_for_status()
    # Normalize tags to lowercase so grouping is case-insensitive.
    pantry = {tag.lower(): entries for (tag, entries) in resp.json().items()}
    grouped_tags = collections.defaultdict(list)
    for tag, entries in pantry.items():
        grouped_tags[get_tag_group(tag)].append((tag, entries))
    return grouped_tags
|
||||
|
||||
|
||||
def main():
    """Download the NSP pantry and write each tag's entries to wildcard files.

    Creates one ``collections/nsp/<group>/<tag>.txt`` file per tag, asking at
    most once whether existing files may be overwritten.
    """
    count_files = 0
    current_path = Path.cwd()
    # Allow running from inside _tools as well as from the repository root.
    if current_path.name == "_tools":
        current_path = current_path.parent

    # None = not asked yet; True/False after the first conflict prompt.
    can_overwrite = None

    grouped_tags = get_grouped_tags()
    collections_dir = current_path / "collections"

    if not collections_dir.exists():
        print(
            "Could not find the collections directory. You should run this from the root of the repository",
        )
    else:
        for tag_group_name, tags_in_group in sorted(grouped_tags.items()):
            # Singleton groups go straight into "nsp"; larger groups get
            # their own "nsp-<group>" subdirectory.
            tag_group_name = (
                "nsp" if len(tags_in_group) == 1 else f"nsp-{tag_group_name}"
            )
            for tag, entries in sorted(tags_in_group):
                filename = collections_dir / f"./nsp/{tag_group_name}/{tag}.txt"
                filename.parent.mkdir(parents=True, exist_ok=True)

                if can_overwrite is None and filename.exists():
                    # was: "Skipping (unknown) ..." — broken placeholder;
                    # interpolate the actual path.
                    answer = input(
                        f"Skipping {filename} as it already exists. Should we overwrite existing files? (y/n)",
                    )
                    if answer.strip().lower() == "y":
                        can_overwrite = True
                    else:
                        can_overwrite = False

                if can_overwrite is None or can_overwrite is True:
                    count_files += 1
                    with filename.open("w", encoding="utf-8") as f:
                        for entry in sorted(entries):
                            try:
                                f.write(f"{entry}\n")
                            except UnicodeEncodeError:
                                # was: "... to (unknown)" — broken placeholder.
                                logger.warning(f"Error writing {entry} to {filename}")

                    # was: f"(unknown): ..." — broken placeholder.
                    print(f"{filename}: {len(entries)} entries")

    print("")
    print(f"{count_files} files copied to {collections_dir}")
    if count_files > 0:
        print(
            "You should now import these via the WebUI to your wildcards collection using the Wildcards Manager tab.",
        )
|
||||
|
||||
# NOTE(review): mid-file import — works because Python executes it on module
# load, but imports conventionally belong at the top of the file.
from download_collections import download_and_copy, download_by_name

if __name__ == "__main__":
    # Legacy path: fetch the pantry and write per-tag wildcard files directly.
    main()
    # New path: fetch the same pantry via the shared repositories.json
    # machinery from download_collections.
    # NOTE(review): running both looks redundant — confirm main() is still
    # meant to run here.
    url, subdirectory, target_subdirectory = download_by_name("Noodlesoup Prompts")
    download_and_copy(url, subdirectory, target_subdirectory)
|
||||
|
|
|
|||
|
|
@ -0,0 +1,14 @@
|
|||
[
|
||||
{
|
||||
"name": "Noodlesoup Prompts",
|
||||
"url": "https://raw.githubusercontent.com/WASasquatch/noodle-soup-prompts/main/nsp_pantry.json",
|
||||
"subdirectory": "",
|
||||
"target_subdirectory": "noodle_soup_prompts"
|
||||
},
|
||||
{
|
||||
"name": "MJ Reference Wildcards",
|
||||
"url": "https://github.com/Mossbraker/mj_reference_wildcards/archive/refs/heads/main.zip",
|
||||
"subdirectory": "v4",
|
||||
"target_subdirectory": "mj_reference_wildcards"
|
||||
}
|
||||
]
|
||||
Loading…
Reference in New Issue