added compare tests

pull/756/head
Jingyi 2024-05-08 18:14:50 +08:00
parent 20ec97e8e9
commit 13515f71ec
8 changed files with 1161 additions and 8 deletions

View File

@ -0,0 +1,486 @@
{
"number": "1",
"prompt": {
"3": {
"inputs": {
"seed": 156680208700286,
"steps": 20,
"cfg": 8,
"sampler_name": "euler",
"scheduler": "normal",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "v1-5-pruned-emaonly.ckpt"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "text, watermark",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
},
"prompt_id": "3de9ce5b-f687-4626-9c2b-8595e3835c45",
"extra_data": {
"extra_pnginfo": {
"workflow": {
"last_node_id": 9,
"last_link_id": 9,
"nodes": [
{
"id": 7,
"type": "CLIPTextEncode",
"pos": [
413,
389
],
"size": {
"0": 425.27801513671875,
"1": 180.6060791015625
},
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 5
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
6
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"text, watermark"
]
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
415,
186
],
"size": {
"0": 422.84503173828125,
"1": 164.31304931640625
},
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 3
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [
4
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": [
"beautiful scenery nature glass bottle landscape, , purple galaxy bottle,"
]
},
{
"id": 5,
"type": "EmptyLatentImage",
"pos": [
473,
609
],
"size": {
"0": 315,
"1": 106
},
"flags": {},
"order": 0,
"mode": 0,
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
2
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
512,
512,
1
]
},
{
"id": 3,
"type": "KSampler",
"pos": [
863,
186
],
"size": {
"0": 315,
"1": 262
},
"flags": {},
"order": 4,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 1
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 4
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 6
},
{
"name": "latent_image",
"type": "LATENT",
"link": 2
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [
7
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
156680208700286,
"randomize",
20,
8,
"euler",
"normal",
1
]
},
{
"id": 8,
"type": "VAEDecode",
"pos": [
1209,
188
],
"size": {
"0": 210,
"1": 46
},
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 7
},
{
"name": "vae",
"type": "VAE",
"link": 8
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [
9
],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 9,
"type": "SaveImage",
"pos": [
1451,
189
],
"size": {
"0": 210,
"1": 58
},
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 9
}
],
"properties": {},
"widgets_values": [
"ComfyUI"
]
},
{
"id": 4,
"type": "CheckpointLoaderSimple",
"pos": [
26,
474
],
"size": {
"0": 315,
"1": 98
},
"flags": {},
"order": 1,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [
1
],
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [
3,
5
],
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": [
8
],
"slot_index": 2
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"widgets_values": [
"v1-5-pruned-emaonly.ckpt"
]
}
],
"links": [
[
1,
4,
0,
3,
0,
"MODEL"
],
[
2,
5,
0,
3,
3,
"LATENT"
],
[
3,
4,
1,
6,
0,
"CLIP"
],
[
4,
6,
0,
3,
1,
"CONDITIONING"
],
[
5,
4,
1,
7,
0,
"CLIP"
],
[
6,
7,
0,
3,
2,
"CONDITIONING"
],
[
7,
3,
0,
8,
0,
"LATENT"
],
[
8,
4,
2,
8,
1,
"VAE"
],
[
9,
8,
0,
9,
0,
"IMAGE"
]
],
"groups": [],
"config": {},
"extra": {},
"version": 0.4
}
},
"client_id": "6cbc8da8bb4a4012b9e40b1eae406556"
},
"endpoint_name": "comfy-async-mutil-gpus",
"need_prepare": false,
"need_sync": false,
"multi_async": true
}

View File

@ -0,0 +1,311 @@
{
"prompt": "beautiful scenery nature glass bottle landscape, , purple galaxy bottle,",
"negative_prompt": "text, watermark",
"styles": [],
"seed": 156680208700286,
"subseed": -1,
"subseed_strength": 0.0,
"seed_resize_from_h": -1,
"seed_resize_from_w": -1,
"sampler_name": "Euler a",
"batch_size": 1,
"n_iter": 1,
"steps": 20,
"cfg_scale": 8.0,
"width": 512,
"height": 512,
"restore_faces": null,
"tiling": null,
"do_not_save_samples": false,
"do_not_save_grid": false,
"eta": null,
"denoising_strength": 0.7,
"s_min_uncond": 0.0,
"s_churn": 0.0,
"s_tmax": Infinity,
"s_tmin": 0.0,
"s_noise": 1.0,
"override_settings": {},
"override_settings_restore_afterwards": true,
"refiner_checkpoint": null,
"refiner_switch_at": null,
"disable_extra_networks": false,
"firstpass_image": null,
"comments": {},
"enable_hr": false,
"firstphase_width": 0,
"firstphase_height": 0,
"hr_scale": 2.0,
"hr_upscaler": "Latent",
"hr_second_pass_steps": 0,
"hr_resize_x": 0,
"hr_resize_y": 0,
"hr_checkpoint_name": null,
"hr_sampler_name": null,
"hr_prompt": "",
"hr_negative_prompt": "",
"force_task_id": null,
"sampler_index": "Euler a",
"script_name": null,
"script_args": [],
"send_images": true,
"save_images": false,
"alwayson_scripts": {
"extra options": {
"args": []
},
"hypertile": {
"args": []
},
"tiled diffusion": {
"args": [
false,
"MultiDiffusion",
false,
true,
1024,
1024,
96,
96,
48,
4,
"None",
2,
false,
10,
1,
1,
64,
false,
false,
false,
false,
false,
0.4,
0.4,
0.2,
0.2,
"",
"",
"Background",
0.2,
-1.0,
false,
0.4,
0.4,
0.2,
0.2,
"",
"",
"Background",
0.2,
-1.0,
false,
0.4,
0.4,
0.2,
0.2,
"",
"",
"Background",
0.2,
-1.0,
false,
0.4,
0.4,
0.2,
0.2,
"",
"",
"Background",
0.2,
-1.0,
false,
0.4,
0.4,
0.2,
0.2,
"",
"",
"Background",
0.2,
-1.0,
false,
0.4,
0.4,
0.2,
0.2,
"",
"",
"Background",
0.2,
-1.0,
false,
0.4,
0.4,
0.2,
0.2,
"",
"",
"Background",
0.2,
-1.0,
false,
0.4,
0.4,
0.2,
0.2,
"",
"",
"Background",
0.2,
-1.0
]
},
"tiled vae": {
"args": [
false,
3072,
192,
true,
true,
true,
false
]
},
"controlnet": {
"args": [
{
"enabled": false,
"module": "none",
"model": "None",
"weight": 1,
"image": null,
"resize_mode": "Crop and Resize",
"low_vram": false,
"processor_res": -1,
"threshold_a": -1,
"threshold_b": -1,
"guidance_start": 0,
"guidance_end": 1,
"pixel_perfect": false,
"control_mode": "Balanced",
"inpaint_crop_input_image": false,
"hr_option": "Both",
"save_detected_map": true,
"advanced_weighting": null,
"is_ui": true,
"input_mode": "simple",
"batch_images": "",
"output_dir": "",
"loopback": false
},
{
"enabled": false,
"module": "none",
"model": "None",
"weight": 1,
"image": null,
"resize_mode": "Crop and Resize",
"low_vram": false,
"processor_res": -1,
"threshold_a": -1,
"threshold_b": -1,
"guidance_start": 0,
"guidance_end": 1,
"pixel_perfect": false,
"control_mode": "Balanced",
"inpaint_crop_input_image": false,
"hr_option": "Both",
"save_detected_map": true,
"advanced_weighting": null,
"is_ui": true,
"input_mode": "simple",
"batch_images": "",
"output_dir": "",
"loopback": false
},
{
"enabled": false,
"module": "none",
"model": "None",
"weight": 1,
"image": null,
"resize_mode": "Crop and Resize",
"low_vram": false,
"processor_res": -1,
"threshold_a": -1,
"threshold_b": -1,
"guidance_start": 0,
"guidance_end": 1,
"pixel_perfect": false,
"control_mode": "Balanced",
"inpaint_crop_input_image": false,
"hr_option": "Both",
"save_detected_map": true,
"advanced_weighting": null,
"is_ui": true,
"input_mode": "simple",
"batch_images": "",
"output_dir": "",
"loopback": false
}
]
},
"reactor": {
"args": [
null,
false,
"0",
"0",
"inswapper_128.onnx",
"CodeFormer",
1,
true,
"None",
1,
1,
false,
true,
1,
0,
0,
false,
0.5,
true,
false,
"CUDA",
false,
0,
"None",
"",
null,
false,
false,
0.5,
0
]
},
"comments": {
"args": []
},
"refiner": {
"args": [
false,
"",
0.8
]
},
"seed": {
"args": [
156680208700286,
false,
-1,
0,
0,
0
]
}
},
"infotext": null
}

View File

@ -145,7 +145,7 @@ class TestEndpointCreateE2E:
resp = self.api.create_endpoint(headers=headers, data=data)
assert "Cannot create already existing model" in resp.json()["message"]
def test_3_create_confy_endpoint_async(self):
def test_3_create_comfy_endpoint_async(self):
headers = {
"x-api-key": config.api_key,
"username": config.username

View File

@ -49,7 +49,7 @@ class TestComfySingleGpuEpCreateE2E:
else:
break
def test_3_create_confy_endpoint_async(self):
def test_3_create_comfy_endpoint_async(self):
headers = {
"x-api-key": config.api_key,
"username": config.username

View File

@ -50,7 +50,7 @@ class TestComfyMutilGpusEndpointCreateE2E:
else:
break
def test_3_create_confy_endpoint_async(self):
def test_3_create_comfy_endpoint_async(self):
headers = {
"x-api-key": config.api_key,
"username": config.username

View File

@ -0,0 +1,124 @@
from __future__ import print_function
import logging
import time
from datetime import datetime
from datetime import timedelta
import config as config
import pytest
from utils.api import Api
from utils.helper import endpoints_wait_for_in_service
logger = logging.getLogger(__name__)
@pytest.mark.skipif(not config.is_local, reason="local test only")
class TestLatencyEndpointCreate:
    """Provision the endpoints used by the latency-comparison suite.

    Ordered pytest flow: wipe all existing endpoints, create one comfy async
    endpoint and one sd async endpoint (both named 'latency'), then poll until
    everything is in service.
    """

    def setup_class(self):
        # pytest calls setup_class on the class object, so attributes set here
        # are shared by every test method.
        self.api = Api(config)
        self.api.feat_oas_schema()

    @classmethod
    def teardown_class(self):
        pass

    def test_1_clean_all_endpoints(self):
        """Delete every existing endpoint so the suite starts from a clean slate."""
        headers = {
            "x-api-key": config.api_key,
            "username": config.username
        }
        resp = self.api.list_endpoints(headers=headers)
        endpoints = resp.json()['data']['endpoints']
        for endpoint in endpoints:
            endpoint_name = endpoint['endpoint_name']
            # Retry until the API stops answering 400 (endpoint busy / still deleting).
            # NOTE(review): no upper bound — a permanently stuck endpoint loops forever.
            while True:
                data = {
                    "endpoint_name_list": [
                        endpoint_name
                    ],
                }
                resp = self.api.delete_endpoints(headers=headers, data=data)
                time.sleep(5)
                if resp.status_code == 400:
                    logger.info(resp.json()['message'])
                    continue
                else:
                    break

    def _create_async_endpoint(self, service_type, role):
        """Create the 'latency' async endpoint for *service_type* and assert it enters 'Creating'.

        Shared by test_2 and test_3, which differed only in service_type and role.
        """
        headers = {
            "x-api-key": config.api_key,
            "username": config.username
        }
        data = {
            "endpoint_name": 'latency',
            "service_type": service_type,
            "endpoint_type": "Async",
            "instance_type": 'ml.g5.2xlarge',
            "initial_instance_count": 1,
            "autoscaling_enabled": False,
            "assign_to_roles": [role],
            "creator": config.username
        }
        if config.custom_docker_image_uri:
            data["custom_docker_image_uri"] = config.custom_docker_image_uri
        resp = self.api.create_endpoint(headers=headers, data=data)
        assert 'data' in resp.json(), resp.dumps()
        assert resp.json()["data"]["endpoint_status"] == "Creating", resp.dumps()

    def test_2_create_comfy_endpoint_async(self):
        """Start creation of the comfy async endpoint."""
        self._create_async_endpoint("comfy", config.role_comfy_async)

    def test_3_create_sd_endpoint_async(self):
        """Start creation of the sd async endpoint."""
        self._create_async_endpoint("sd", config.role_sd_async)

    def test_4_list_endpoints_status(self):
        """Poll for up to 20 minutes until the endpoints report in-service."""
        headers = {
            "x-api-key": config.api_key,
            "username": config.username
        }
        params = {
            "username": config.username
        }
        resp = self.api.list_endpoints(headers=headers, params=params)
        assert resp.status_code == 200, resp.dumps()
        endpoints = resp.json()['data']["endpoints"]
        # len() can never be negative; this only verifies the field parsed as a sequence.
        assert len(endpoints) >= 0
        timeout = datetime.now() + timedelta(minutes=20)
        while datetime.now() < timeout:
            result = endpoints_wait_for_in_service(self.api)
            if result:
                break
            time.sleep(10)
        else:
            # while/else: reached only when the loop expired without `break`.
            raise Exception("Create Endpoint timed out after 20 minutes.")

View File

@ -0,0 +1,177 @@
from __future__ import print_function
import logging
import os
import threading
import time
import uuid
import pytest
import config as config
from utils.api import Api
from utils.helper import wget_file, comfy_execute_create, get_endpoint_comfy_async, get_endpoint_sd_async, \
sd_inference_create
# Module-level logger; INFO level so per-task progress lines show in test output.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Shared auth headers; several methods below rebuild their own identical copies.
headers = {
"x-api-key": config.api_key,
"username": config.username
}
# One prepare/sync id per test session, generated at import time.
# NOTE(review): this shadows the builtin `id` for the whole module.
id = str(uuid.uuid4())
@pytest.mark.skipif(not config.is_local, reason="local test only")
class TestLatencyCompareTasks:
    """Drive identical workloads through a comfy and an sd async endpoint to compare latency.

    Ordered pytest flow: stage the checkpoint on the comfy endpoint, wipe old
    execute/inference records, ensure the 'api' user has the needed roles, then
    fire both batch drivers concurrently.  (There is no test_5 — numbering gap
    kept to preserve execution order of the existing methods.)
    """

    def setup_class(self):
        self.api = Api(config)
        self.api.feat_oas_schema()
        self.endpoint_name = get_endpoint_comfy_async(self.api)
        self.endpoint_name_sd = get_endpoint_sd_async(self.api)

    @classmethod
    def teardown_class(self):
        pass

    def test_1_download_file(self):
        """Fetch the SD 1.5 checkpoint into the local comfy model tree."""
        # Original used an f-string with no placeholders; plain literal is equivalent.
        local_path = "./data/comfy/models/checkpoints/v1-5-pruned-emaonly.ckpt"
        wget_file(
            local_path,
            'https://huggingface.co/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.ckpt'
        )

    def test_2_sync_files_to_comfy_endpoint(self):
        """Upload local comfy models to this session's S3 prepare prefix via s5cmd."""
        local = "'./data/comfy/models/*'"
        # `id` is the module-level prepare id generated at import time.
        target = f"'s3://{config.bucket}/comfy/{self.endpoint_name}/{id}/models/'"
        logger.info(f"Syncing {local} to {target}")
        os.system("rm -rf ./s5cmd")
        # Fix: original ran `wget -q ./ URL`, passing a bogus './' argument;
        # -O pins the output file explicitly.
        os.system("wget -q -O ./s5cmd https://raw.githubusercontent.com/elonniu/s5cmd/main/s5cmd")
        os.system("chmod +x ./s5cmd")
        os.system(f"./s5cmd sync {local} {target}")

    def test_3_comfy_sync_files(self):
        """Ask the platform to sync the prepared models onto the comfy endpoint and reboot it."""
        headers = {
            "x-api-key": config.api_key,
            "username": config.username
        }
        data = {"endpoint_name": f"{self.endpoint_name}",
                "need_reboot": True,
                "prepare_id": id,
                "prepare_type": "models"}
        resp = self.api.prepare(data=data, headers=headers)
        assert resp.status_code == 200, resp.dumps()
        logger.info(resp.json())
        logger.info("wait 20s for endpoint sync files...")
        time.sleep(20)

    def test_4_clean_all_executes(self):
        """Delete all comfy execute records, 20 at a time, retrying on 400."""
        headers = {
            "x-api-key": config.api_key,
            "username": config.username
        }
        while True:
            resp = self.api.list_executes(headers=headers, params={"limit": 20})
            executes = resp.json()['data']['executes']
            if len(executes) == 0:
                break
            execute_id_list = []
            for i, execute in enumerate(executes, start=1):
                prompt_id = execute['prompt_id']
                execute_id_list.append(prompt_id)
                logger.info(f"delete execute {i} {prompt_id}")
            data = {
                "execute_id_list": execute_id_list,
            }
            resp = self.api.delete_executes(headers=headers, data=data)
            if resp.status_code == 400:
                # Busy/locked records: wait and re-list from the top.
                logger.info(resp.json()['message'])
                time.sleep(5)
                continue

    def test_6_clean_all_inferences(self):
        """Delete all sd inference records one by one, retrying on 400."""
        headers = {
            "x-api-key": config.api_key,
            "username": config.username
        }
        while True:
            resp = self.api.list_inferences(headers=headers)
            inferences = resp.json()['data']['inferences']
            if len(inferences) == 0:
                break
            for inference in inferences:
                inference_id = inference['InferenceJobId']
                data = {
                    "inference_id_list": [
                        inference_id
                    ],
                }
                resp = self.api.delete_inferences(headers=headers, data=data)
                logger.info(f"delete inference {inference_id}")
                if resp.status_code == 400:
                    logger.info(resp.json()['message'])
                    time.sleep(5)
                    continue

    def test_7_update_api_roles(self):
        """(Re)create the 'api' user with every role this compare run needs."""
        headers = {
            "x-api-key": config.api_key,
            "username": config.username,
        }
        # NOTE(review): hard-coded credentials; tolerable only because this
        # suite is gated to local runs by the skipif above.
        data = {
            "username": "api",
            "password": "admin",
            "creator": "api",
            "roles": [
                'IT Operator',
                'byoc',
                config.role_sd_real_time,
                config.role_sd_async,
                config.role_comfy_async,
                config.role_comfy_real_time,
            ],
        }
        resp = self.api.create_user(headers=headers, data=data)
        assert resp.status_code == 201, resp.dumps()
        assert resp.json()["statusCode"] == 201

    def test_8_latency_compare_start(self):
        """Run both batch drivers concurrently: comfy executes vs sd inferences.

        Renamed from ``test_8_lantency_compare_start`` (typo), matching this
        change-set's other test-name spelling fixes.
        """
        threads = []
        batch = 1000
        thread = threading.Thread(target=create_batch_executes, args=(batch, self.api, self.endpoint_name))
        threads.append(thread)
        thread = threading.Thread(target=create_batch_inferences, args=(batch, self.api, self.endpoint_name_sd))
        threads.append(thread)
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
def create_batch_executes(n, api, endpoint_name):
    """Submit n comfy executions sequentially, waiting for each to succeed."""
    workflow_path = './data/api_params/latency-comfy.json'
    for index in range(n):
        comfy_execute_create(
            n=index,
            api=api,
            endpoint_name=endpoint_name,
            wait_succeed=True,
            workflow=workflow_path,
        )
def create_batch_inferences(n, api, endpoint_name):
    """Submit n sd async inferences sequentially against endpoint_name."""
    workflow_path = './data/api_params/latency-sd.json'
    for index in range(n):
        sd_inference_create(
            n=index,
            api=api,
            endpoint_name=endpoint_name,
            workflow=workflow_path,
        )

View File

@ -15,7 +15,7 @@ import requests
import config as config
from utils.api import Api
from utils.enums import InferenceStatus
from utils.enums import InferenceStatus, InferenceType
logger = logging.getLogger(__name__)
@ -200,8 +200,9 @@ class DecimalEncoder(json.JSONEncoder):
return json.JSONEncoder.default(self, obj)
def comfy_execute_create(n, api, endpoint_name, wait_succeed=True):
with open('./data/api_params/comfy_workflow.json', 'r') as f:
def comfy_execute_create(n, api, endpoint_name, wait_succeed=True,
workflow: str = './data/api_params/comfy_workflow.json'):
with open(workflow, 'r') as f:
headers = {
"x-api-key": config.api_key,
}
@ -225,7 +226,7 @@ def comfy_execute_create(n, api, endpoint_name, wait_succeed=True):
resp = api.get_execute_job(headers=headers, prompt_id=prompt_id)
if resp.status_code == 404:
init_status = "not found"
logger.info(f"{n} {endpoint_name} {prompt_id} is {init_status}")
logger.info(f"comfy {n} {endpoint_name} {prompt_id} is {init_status}")
continue
assert resp.status_code == 200, resp.dumps()
@ -234,7 +235,7 @@ def comfy_execute_create(n, api, endpoint_name, wait_succeed=True):
status = resp.json()["data"]["status"]
if init_status != status:
logger.info(f"Thread {n} {endpoint_name} {prompt_id} is {status}")
logger.info(f"comfy {n} {endpoint_name} {prompt_id} is {status}")
init_status = status
if status == 'success':
@ -248,6 +249,60 @@ def comfy_execute_create(n, api, endpoint_name, wait_succeed=True):
raise Exception(f"{n} {endpoint_name} {prompt_id} timed out after 5 minutes.")
# Create one async txt2img inference, upload its api params, start it, and
# poll until it succeeds/fails or 2 minutes elapse.  `n` only labels log lines.
def sd_inference_create(n, api, endpoint_name: str, workflow: str = './data/api_params/sd.json'):
# NOTE(review): `f` is unused in the visible body; the params file is uploaded
# by path via upload_with_put below — confirm the handle is needed at all.
with open(workflow, 'r') as f:
headers = {
"x-api-key": config.api_key,
"username": config.username
}
data = {
"inference_type": "Async",
"task_type": InferenceType.TXT2IMG.value,
"models": {
"Stable-diffusion": [config.default_model_id],
"embeddings": []
},
}
# Create the inference job record; the API returns a presigned upload URL.
resp = api.create_inference(headers=headers, data=data)
assert resp.status_code == 201, resp.dumps()
inference_data = resp.json()['data']["inference"]
inference_id = inference_data["id"]
assert resp.json()["statusCode"] == 201
assert inference_data["type"] == InferenceType.TXT2IMG.value
assert len(inference_data["api_params_s3_upload_url"]) > 0
# Upload the workflow/params file to the presigned S3 URL.
upload_with_put(inference_data["api_params_s3_upload_url"], workflow)
resp = api.get_inference_job(headers=headers, job_id=inference_data["id"])
assert resp.status_code == 200, resp.dumps()
# Kick off the job; it must report InProgress immediately.
resp = api.start_inference_job(job_id=inference_id, headers=headers)
assert resp.status_code == 202, resp.dumps()
assert resp.json()['data']["inference"]["status"] == InferenceStatus.INPROGRESS.value
# Poll every 4s for up to 2 minutes.
timeout = datetime.now() + timedelta(minutes=2)
while datetime.now() < timeout:
status = get_inference_job_status(
api_instance=api,
job_id=inference_id
)
logger.info(f"sd {n} {endpoint_name} {inference_id} is {status}")
if status == InferenceStatus.SUCCEED.value:
break
if status == InferenceStatus.FAILED.value:
# NOTE(review): logs the creation payload, not the failure detail from the poll.
logger.error(inference_data)
break
time.sleep(4)
else:
# while/else: reached only when the loop expired without `break`.
raise Exception(f"Inference {inference_id} timed out after 2 minutes.")
def get_endpoint_comfy_async(api):
return get_endpoint_by_prefix(api, "comfy-async-")