Remove time from the logging format to match the current sdwui logging indentation; band-aid for remainder handling.

pull/15/head
unknown 2023-06-05 06:12:10 -05:00
parent 1fba29ea9f
commit da2e32242e
No known key found for this signature in database
GPG Key ID: CA376082283AF69A
3 changed files with 14 additions and 18 deletions

View File

@@ -388,7 +388,7 @@ class Script(scripts.Script):
payload_temp['batch_size'] = job.batch_size
payload_temp['subseed'] += prior_images
payload_temp['seed'] += prior_images if payload_temp['subseed_strength'] == 0 else 0
logger.debug(f"{job.worker.uuid}' job given starting seed is {payload_temp['seed']} with {prior_images} coming before it")
logger.debug(f"'{job.worker.uuid}' job's given starting seed is {payload_temp['seed']} with {prior_images} coming before it")
if job.worker.loaded_model != name or job.worker.loaded_vae != vae:
sync = True

View File

@@ -114,11 +114,7 @@ class World:
"""the amount of images/total images requested that a worker would compute if conditions were perfect and
each worker generated at the same speed"""
quotient, remainder = divmod(self.total_batch_size, self.get_world_size())
chosen = quotient if quotient > remainder else remainder
per_worker_batch_size = math.ceil(chosen)
return per_worker_batch_size
return self.total_batch_size // self.get_world_size()
def get_world_size(self) -> int:
"""
@@ -452,21 +448,21 @@ class World:
# *if that hasn't already been filled by complementary fill or the requirement that master's batch size be >= 1
remainder_images = self.total_batch_size - self.get_current_output_size()
if remainder_images >= 1:
logger.debug(f"The requested number of images({self.total_batch_size}) was not cleanly divisible by the number of realtime nodes({len(self.realtime_jobs())}) and complementary jobs did not provide this missing image.")
logger.debug(f"The requested number of images({self.total_batch_size}) was not cleanly divisible by the number of realtime nodes({len(self.realtime_jobs())}) resulting in {remainder_images} that will be redistributed")
# Gets the fastest job that has been assigned the least amount of images
laziest_realtime_job = None
for job in self.realtime_jobs():
if laziest_realtime_job is None:
laziest_realtime_job = job
elif laziest_realtime_job.batch_size > job.batch_size:
laziest_realtime_job = job
laziest_realtime_job.batch_size += remainder_images
realtime_jobs = self.realtime_jobs()
realtime_jobs.sort(key=lambda x: x.batch_size)
# round-robin distribute the remaining images
while remainder_images >= 1:
for job in realtime_jobs:
if remainder_images < 1:
break
job.batch_size += 1
remainder_images -= 1
logger.info("Job distribution:")
iterations = payload['n_iter']
logger.info(f"{iterations} iteration(s)")
logger.info(f"{self.total_batch_size} * {iterations} iteration(s): {self.total_batch_size * iterations} images")
for job in self.jobs:
logger.info(f"worker '{job.worker.uuid}' - {job.batch_size * iterations} images")
print()

View File

@@ -4,7 +4,7 @@ from modules.shared import cmd_opts
log_level = 'DEBUG' if cmd_opts.distributed_debug else 'INFO'
logger = logging.getLogger("rich")
logger.addHandler(RichHandler(rich_tracebacks=True, markup=True))
logger.addHandler(RichHandler(rich_tracebacks=True, markup=True, show_time=False))
logger.setLevel(log_level)
warmup_samples = 2 # number of samples to do before recording a valid benchmark sample