# textdiffuser_multiGPUs.py
# Dispatches textdiffuser_singleGPU.py across multiple GPUs by splitting the
# evaluation JSON into per-GPU chunks.
import os
import shutil
import copy
import argparse
import pathlib
import json


def load(file_path: str):
    """Load a data file, dispatching on its filename suffix.

    Currently only ``.json`` is supported (delegates to :func:`load_json`).

    Args:
        file_path: Path to the file to load.

    Returns:
        The deserialized content.

    Raises:
        ValueError: If the suffix has no registered loader.
            (Was an ``assert``, which is silently stripped under ``python -O``.)
    """
    file_path = pathlib.Path(file_path)
    loaders = {'.json': load_json}
    if file_path.suffix not in loaders:
        raise ValueError(f'unsupported file suffix: {file_path.suffix!r}')
    return loaders[file_path.suffix](file_path)


def load_json(file_path: str):
    """Read a UTF-8 encoded JSON file and return the parsed object."""
    with open(file_path, 'r', encoding='utf8') as handle:
        return json.load(handle)


def save(data, file_path):
    """Save *data* to a file, dispatching on the filename suffix.

    Currently only ``.json`` is supported (delegates to :func:`save_json`).

    Args:
        data: The object to serialize.
        file_path: Destination path; its suffix selects the writer.

    Returns:
        Whatever the underlying writer returns (``None`` for JSON).

    Raises:
        ValueError: If the suffix has no registered writer.
            (Was an ``assert``, which is silently stripped under ``python -O``.)
    """
    file_path = pathlib.Path(file_path)
    writers = {'.json': save_json}
    if file_path.suffix not in writers:
        raise ValueError(f'unsupported file suffix: {file_path.suffix!r}')
    return writers[file_path.suffix](data, file_path)


def save_json(data, file_path):
    """Write *data* as pretty-printed, non-ASCII-escaped JSON to *file_path*."""
    with open(file_path, 'w', encoding='utf-8') as out:
        json.dump(data, out, ensure_ascii=False, indent=4)


def parse_args(argv=None):
    """Parse command-line options for multi-GPU textdiffuser inference.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv[1:]`` — fully backward
            compatible with the original zero-argument call, while making
            the parser testable and reusable programmatically.

    Returns:
        argparse.Namespace with ``model_path``, ``gpus``, ``output_dir``,
        ``glyph_dir`` and ``json_path`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        default='textdiffuser-ckpt/diffusion_backbone',
        help='path to model'
    )
    parser.add_argument(
        "--gpus",
        type=str,
        default='0,1,2,3,4,5,6,7',
        help='gpus for inference'
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default='./textdiffuser_laion_generated/',
        help="output path"
    )
    parser.add_argument(
        "--glyph_dir",
        type=str,
        default='/data/vdb/yuxiang.tyx/AIGC/data/laion_word/glyph_laion',
        help="path of glyph images from anytext evaluation dataset"
    )
    parser.add_argument(
        "--json_path",
        type=str,
        default='/data/vdb/yuxiang.tyx/AIGC/data/laion_word/test1k.json',
        help="json path for evaluation dataset"
    )
    args = parser.parse_args(argv)
    return args


if __name__ == "__main__":
    args = parse_args()
    output_dir = args.output_dir

    # Scratch directory for the per-GPU JSON shards written below.
    tmp_dir = './tmp_dir'
    # Worker script run once per GPU; each worker consumes one shard.
    exec_path = './textdiffuser_singleGPU.py'
    continue_gen = True  # if True, not clear output_dir, and generate rest images.

    # Always start from a fresh shard directory.
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    os.makedirs(tmp_dir)

    # Output directory: wipe and recreate unless resuming a previous run.
    if not continue_gen:
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)
        os.makedirs(output_dir)
    else:
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

    # NOTE(review): shelling out to `sleep` — presumably to let the
    # filesystem settle between steps; time.sleep(1) would be the usual form.
    os.system('sleep 1')
    gpu_ids = [int(i) for i in args.gpus.split(',')]
    nproc = len(gpu_ids)
    all_lines = load(args.json_path)
    split_file = []
    # Even split of the dataset; the last worker absorbs the remainder.
    length = len(all_lines['data_list']) // nproc
    cmds = []
    for i in range(nproc):
        start, end = i*length, (i+1)*length
        if i == nproc - 1:
            end = len(all_lines['data_list'])
        # Deep copy so each shard keeps all top-level keys but only its
        # own slice of 'data_list'.
        temp_lines = copy.deepcopy(all_lines)
        temp_lines['data_list'] = temp_lines['data_list'][start:end]
        tmp_file = os.path.join(tmp_dir, f'tmp_list_{i}.json')
        save(temp_lines, tmp_file)
        os.system('sleep 1')
        # One shell command per GPU: pin CUDA_VISIBLE_DEVICES, run the
        # single-GPU worker on that shard.
        cmds += [f'export CUDA_VISIBLE_DEVICES={gpu_ids[i]}  && python {exec_path}  --json_path {tmp_file}  --output_dir {output_dir} --model_path {args.model_path}  --glyph_dir {args.glyph_dir} && echo proc-{i} done!']
    # ' & ' backgrounds every worker in one shell invocation; os.system
    # returns when the shell does, after all foregrounded chains complete.
    cmds = ' & '.join(cmds)
    os.system(cmds)
    print('Done.')
    os.system('sleep 2')
    # Shards are no longer needed once the workers have been launched/finished.
    shutil.rmtree(tmp_dir)


'''
command to kill the task after running:
$ps -ef | grep singleGPU | awk '{ print $2 }' | xargs kill -9  &&  ps -ef | grep multiproce | awk '{ print $2 }' | xargs kill -9
'''