diff --git a/BaiHu/Image/C22/style_5.png b/BaiHu/Image/C22/style_5.png
new file mode 100644
index 00000000..2d54b397
Binary files /dev/null and b/BaiHu/Image/C22/style_5.png differ
diff --git a/BaiHu/JSON/22_1.json b/BaiHu/JSON/22_1.json
index bf06f8e5..e1cb6e46 100644
--- a/BaiHu/JSON/22_1.json
+++ b/BaiHu/JSON/22_1.json
@@ -2,9 +2,15 @@
     "source_images": [
         "49,inputs,image"
     ],
-    "other_images": [
-        "./Image/C22/pose_1.png",
-        "./Image/C22/style_1.png"
+    "style_pose_images": [
+        {
+            "key": "118,inputs,image",
+            "value": "./Image/C22/style_1.png"
+        },
+        {
+            "key": "98,inputs,image",
+            "value": "./Image/C22/pose_1.png"
+        }
     ],
     "3": {
         "inputs": {
@@ -211,7 +217,7 @@
     },
     "98": {
         "inputs": {
-            "image": "style_1.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
@@ -249,7 +255,7 @@
     },
     "118": {
         "inputs": {
-            "image": "pose_1.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
diff --git a/BaiHu/JSON/22_2.json b/BaiHu/JSON/22_2.json
index a4ceca74..953f3d74 100644
--- a/BaiHu/JSON/22_2.json
+++ b/BaiHu/JSON/22_2.json
@@ -2,9 +2,15 @@
     "source_images": [
         "49,inputs,image"
     ],
-    "other_images": [
-        "./Image/C22/pose_2.png",
-        "./Image/C22/style_2.png"
+    "style_pose_images": [
+        {
+            "key": "118,inputs,image",
+            "value": "./Image/C22/style_2.png"
+        },
+        {
+            "key": "98,inputs,image",
+            "value": "./Image/C22/pose_2.png"
+        }
     ],
     "3": {
         "inputs": {
@@ -211,7 +217,7 @@
     },
     "98": {
         "inputs": {
-            "image": "style_2.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
@@ -249,7 +255,7 @@
     },
     "118": {
         "inputs": {
-            "image": "pose_2.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
diff --git a/BaiHu/JSON/22_3.json b/BaiHu/JSON/22_3.json
index 9e7b057b..1e84c738 100644
--- a/BaiHu/JSON/22_3.json
+++ b/BaiHu/JSON/22_3.json
@@ -2,9 +2,15 @@
     "source_images": [
         "49,inputs,image"
     ],
-    "other_images": [
-        "./Image/C22/pose_3.png",
-        "./Image/C22/style_3.png"
+    "style_pose_images": [
+        {
+            "key": "118,inputs,image",
+            "value": "./Image/C22/style_3.png"
+        },
+        {
+            "key": "98,inputs,image",
+            "value": "./Image/C22/pose_3.png"
+        }
     ],
     "3": {
         "inputs": {
@@ -211,7 +217,7 @@
     },
     "98": {
         "inputs": {
-            "image": "style_3.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
@@ -249,7 +255,7 @@
     },
     "118": {
         "inputs": {
-            "image": "pose_3.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
diff --git a/BaiHu/JSON/22_4.json b/BaiHu/JSON/22_4.json
index 0adff56f..7dca324b 100644
--- a/BaiHu/JSON/22_4.json
+++ b/BaiHu/JSON/22_4.json
@@ -2,9 +2,15 @@
     "source_images": [
         "49,inputs,image"
     ],
-    "other_images": [
-        "./Image/C22/pose_4.png",
-        "./Image/C22/style_4.png"
+    "style_pose_images": [
+        {
+            "key": "118,inputs,image",
+            "value": "./Image/C22/style_4.png"
+        },
+        {
+            "key": "98,inputs,image",
+            "value": "./Image/C22/pose_4.png"
+        }
     ],
     "3": {
         "inputs": {
@@ -211,7 +217,7 @@
     },
     "98": {
         "inputs": {
-            "image": "style_4.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
@@ -249,7 +255,7 @@
     },
     "118": {
         "inputs": {
-            "image": "pose_4.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
diff --git a/BaiHu/JSON/22_5.json b/BaiHu/JSON/22_5.json
index 05f73e3d..aa1fa613 100644
--- a/BaiHu/JSON/22_5.json
+++ b/BaiHu/JSON/22_5.json
@@ -2,9 +2,15 @@
     "source_images": [
         "49,inputs,image"
     ],
-    "other_images": [
-        "./Image/C22/pose_1.png",
-        "./Image/C22/style_2.png"
+    "style_pose_images": [
+        {
+            "key": "118,inputs,image",
+            "value": "./Image/C22/style_5.png"
+        },
+        {
+            "key": "98,inputs,image",
+            "value": "./Image/C22/pose_5.png"
+        }
     ],
     "3": {
         "inputs": {
@@ -211,7 +217,7 @@
     },
     "98": {
         "inputs": {
-            "image": "style_2.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
@@ -249,7 +255,7 @@
     },
     "118": {
         "inputs": {
-            "image": "pose_1.png",
+            "image": "string",
             "upload": "image"
         },
         "class_type": "LoadImage",
diff --git a/BaiHu/Test/22.json b/BaiHu/Test/22.json
new file mode 100644
index 00000000..198da0be
--- /dev/null
+++ b/BaiHu/Test/22.json
@@ -0,0 +1,376 @@
+{
+    "3": {
+        "inputs": {
+            "ckpt_name": "xxmix9realistic_v40.safetensors",
+            "vae_name": "vae-ft-mse-840000-ema-pruned.safetensors",
+            "clip_skip": -2,
+            "lora_name": "blindbox_大概是盲盒_blindbox_v1_mix.safetensors",
+            "lora_model_strength": 0.8,
+            "lora_clip_strength": 0.8,
+            "positive": [
+                "131",
+                0
+            ],
+            "negative": "NSFW,blurry,low quality,watermark,monochrome,badhandv4,easynegative,ng_deepnegative_v1_75t,bad proportions,mutated hands and fingers,poorly drawn face,extra limb,missing limb,malformed limbs,disconnected limbs,ugly,floating limbs,extra legs,mutation,bad body,long neck,cross-eyed,text,cleavage",
+            "token_normalization": "length",
+            "weight_interpretation": "A1111",
+            "empty_latent_width": 448,
+            "empty_latent_height": 576,
+            "batch_size": 1,
+            "cnet_stack": [
+                "102",
+                0
+            ]
+        },
+        "class_type": "Efficient Loader",
+        "_meta": {
+            "title": "Efficient Loader"
+        }
+    },
+    "10": {
+        "inputs": {
+            "upscale_type": "latent",
+            "hires_ckpt_name": "(use same)",
+            "latent_upscaler": "bilinear",
+            "pixel_upscaler": "RealESRGAN_x4plus.pth",
+            "upscale_by": 2,
+            "use_same_seed": true,
+            "seed": -1,
+            "hires_steps": 15,
+            "denoise": 0.35000000000000003,
+            "iterations": 1,
+            "use_controlnet": false,
+            "control_net_name": "annotator\\downloads\\clip_vision\\clip_g.pth",
+            "strength": 1,
+            "preprocessor": "CannyEdgePreprocessor",
+            "preprocessor_imgs": false
+        },
+        "class_type": "HighRes-Fix Script",
+        "_meta": {
+            "title": "HighRes-Fix Script"
+        }
+    },
+    "11": {
+        "inputs": {
+            "seed": 356375061015696,
+            "steps": 30,
+            "cfg": 7,
+            "sampler_name": "euler",
+            "scheduler": "normal",
+            "denoise": 1,
+            "preview_method": "auto",
+            "vae_decode": "true",
+            "model": [
+                "124",
+                0
+            ],
+            "positive": [
+                "3",
+                1
+            ],
+            "negative": [
+                "3",
+                2
+            ],
+            "latent_image": [
+                "3",
+                3
+            ],
+            "optional_vae": [
+                "3",
+                4
+            ],
+            "script": [
+                "10",
+                0
+            ]
+        },
+        "class_type": "KSampler (Efficient)",
+        "_meta": {
+            "title": "KSampler (Efficient)"
+        }
+    },
+    "49": {
+        "inputs": {
+            "image": "964e422209280e579a7744797384e097ee3637da00f684d400419fbb43f78929.png",
+            "upload": "image"
+        },
+        "class_type": "LoadImage",
+        "_meta": {
+            "title": "Load Image"
+        }
+    },
+    "50": {
+        "inputs": {
+            "guide_size": 256,
+            "guide_size_for": true,
+            "max_size": 768,
+            "seed": 913473477013880,
+            "steps": 30,
+            "cfg": 5,
+            "sampler_name": "euler",
+            "scheduler": "normal",
+            "denoise": 0.45,
+            "feather": 6,
+            "noise_mask": true,
+            "force_inpaint": true,
+            "bbox_threshold": 0.5,
+            "bbox_dilation": 10,
+            "bbox_crop_factor": 3,
+            "sam_detection_hint": "center-1",
+            "sam_dilation": 0,
+            "sam_threshold": 0.93,
+            "sam_bbox_expansion": 0,
+            "sam_mask_hint_threshold": 0.7,
+            "sam_mask_hint_use_negative": "False",
+            "drop_size": 10,
+            "wildcard": "masterpiece,best quality,(makeup),",
+            "cycle": 1,
+            "inpaint_model": false,
+            "noise_mask_feather": 0,
+            "image": [
+                "11",
+                5
+            ],
+            "model": [
+                "11",
+                0
+            ],
+            "clip": [
+                "3",
+                5
+            ],
+            "vae": [
+                "11",
+                4
+            ],
+            "positive": [
+                "11",
+                1
+            ],
+            "negative": [
+                "11",
+                2
+            ],
+            "bbox_detector": [
+                "52",
+                0
+            ],
+            "sam_model_opt": [
+                "51",
+                0
+            ],
+            "segm_detector_opt": [
+                "52",
+                1
+            ]
+        },
+        "class_type": "FaceDetailer",
+        "_meta": {
+            "title": "FaceDetailer"
+        }
+    },
+    "51": {
+        "inputs": {
+            "model_name": "sam_vit_b_01ec64.pth",
+            "device_mode": "AUTO"
+        },
+        "class_type": "SAMLoader",
+        "_meta": {
+            "title": "SAMLoader (Impact)"
+        }
+    },
+    "52": {
+        "inputs": {
+            "model_name": "bbox/face_yolov8m.pt"
+        },
+        "class_type": "UltralyticsDetectorProvider",
+        "_meta": {
+            "title": "UltralyticsDetectorProvider"
+        }
+    },
+    "62": {
+        "inputs": {
+            "filename_prefix": "ComfyUI",
+            "images": [
+                "50",
+                0
+            ]
+        },
+        "class_type": "SaveImage",
+        "_meta": {
+            "title": "Save Image"
+        }
+    },
+    "98": {
+        "inputs": {
+            "image": "image (93).png",
+            "upload": "image"
+        },
+        "class_type": "LoadImage",
+        "_meta": {
+            "title": "Load Image"
+        }
+    },
+    "102": {
+        "inputs": {
+            "strength": 1,
+            "start_percent": 0,
+            "end_percent": 1,
+            "control_net": [
+                "103",
+                0
+            ],
+            "image": [
+                "98",
+                0
+            ]
+        },
+        "class_type": "Control Net Stacker",
+        "_meta": {
+            "title": "Control Net Stacker"
+        }
+    },
+    "103": {
+        "inputs": {
+            "control_net_name": "control_v11p_sd15_openpose.pth"
+        },
+        "class_type": "ControlNetLoader",
+        "_meta": {
+            "title": "Load ControlNet Model"
+        }
+    },
+    "118": {
+        "inputs": {
+            "image": "pose-456x576-2 (1).jpg",
+            "upload": "image"
+        },
+        "class_type": "LoadImage",
+        "_meta": {
+            "title": "Load Image"
+        }
+    },
+    "119": {
+        "inputs": {
+            "transparency": false,
+            "model": "u2net",
+            "post_processing": false,
+            "only_mask": false,
+            "alpha_matting": false,
+            "alpha_matting_foreground_threshold": 240,
+            "alpha_matting_background_threshold": 2,
+            "alpha_matting_erode_size": 2,
+            "background_color": "white",
+            "images": [
+                "118",
+                0
+            ]
+        },
+        "class_type": "Image Rembg (Remove Background)",
+        "_meta": {
+            "title": "Image Rembg (Remove Background)"
+        }
+    },
+    "121": {
+        "inputs": {
+            "images": [
+                "119",
+                0
+            ]
+        },
+        "class_type": "PreviewImage",
+        "_meta": {
+            "title": "Preview Image"
+        }
+    },
+    "122": {
+        "inputs": {
+            "weight": 0.75,
+            "weight_type": "reverse in-out",
+            "combine_embeds": "concat",
+            "start_at": 0,
+            "end_at": 1,
+            "embeds_scaling": "V only",
+            "model": [
+                "128",
+                0
+            ],
+            "ipadapter": [
+                "128",
+                1
+            ],
+            "image": [
+                "119",
+                0
+            ]
+        },
+        "class_type": "IPAdapterAdvanced",
+        "_meta": {
+            "title": "IPAdapter Advanced"
+        }
+    },
+    "124": {
+        "inputs": {
+            "weight": 1,
+            "weight_faceidv2": 1,
+            "weight_type": "linear",
+            "combine_embeds": "concat",
+            "start_at": 0,
+            "end_at": 1,
+            "embeds_scaling": "V only",
+            "model": [
+                "125",
+                0
+            ],
+            "ipadapter": [
+                "125",
+                1
+            ],
+            "image": [
+                "49",
+                0
+            ]
+        },
+        "class_type": "IPAdapterFaceID",
+        "_meta": {
+            "title": "IPAdapter FaceID"
+        }
+    },
+    "125": {
+        "inputs": {
+            "preset": "FACEID PLUS V2",
+            "lora_strength": 0.85,
+            "provider": "CUDA",
+            "model": [
+                "122",
+                0
+            ]
+        },
+        "class_type": "IPAdapterUnifiedLoaderFaceID",
+        "_meta": {
+            "title": "IPAdapter Unified Loader FaceID"
+        }
+    },
+    "128": {
+        "inputs": {
+            "preset": "PLUS (high strength)",
+            "model": [
+                "3",
+                0
+            ]
+        },
+        "class_type": "IPAdapterUnifiedLoader",
+        "_meta": {
+            "title": "IPAdapter Unified Loader"
+        }
+    },
+    "131": {
+        "inputs": {
+            "String": "1girl, solo, long hair, breasts, looking at viewer, smile, brown hair, brown eyes, medium breasts, standing, full body, boots, belt, chibi, transparent background,4k,high-res,masterpiece,best quality,finely detailed skin,sharp focus,(cinematic lighting),soft lighting,[:(detailed face:1.2):0.2], street,"
+        },
+        "class_type": "KepStringLiteral",
+        "_meta": {
+            "title": "String"
+        }
+    }
+}
\ No newline at end of file
diff --git a/BaiHu/Test/Test22_New.py b/BaiHu/Test/Test22_New.py
new file mode 100644
index 00000000..362e155d
--- /dev/null
+++ b/BaiHu/Test/Test22_New.py
@@ -0,0 +1,79 @@
+import json
+import os
+import uuid
+
+from Util import ConfigUtil
+from Util.ComfyUIUtil import *
+
+
+def C22(prompt_data, img):
+    # Style reference image
+    input_image_style = '../Image/C22/style_1.png'
+    with open(input_image_style, "rb") as f:
+        image_style = upload_file(server_address, f, "", True)
+    prompt_data["118"]["inputs"]["image"] = image_style
+
+    # Pose reference image
+    input_image_pose = '../Image/C22/pose_1.png'
+    with open(input_image_pose, "rb") as f:
+        image_pose = upload_file(server_address, f, "", True)
+    prompt_data["98"]["inputs"]["image"] = image_pose
+
+    # Source image
+    prompt_data["49"]["inputs"]["image"] = img[0]
+
+    # Filter: which output nodes return images we actually want
+    myfilter = []
+    # Walk every node in prompt_data
+    for key, value in prompt_data.items():
+        if 'inputs' in value:
+            if 'filename_prefix' in value['inputs']:
+                if value['inputs']['filename_prefix'] == 'ComfyUI':
+                    myfilter.append(key)
+
+    return prompt_data, myfilter
+
+
+def runComfyUI(json_file, model_id, task_type_code, input_image):
+    # Create the output directory
+    output_path = "../Out/Images/" + task_type_code + "/" + str(model_id) + "/"
+    if not os.path.exists(output_path):
+        os.makedirs(output_path)
+
+    # Generate a unique client ID
+    client_id = str(uuid.uuid4())
+
+    # Upload the input images
+    img = []
+    for x in input_image:
+        with open(x, "rb") as f:
+            y = upload_file(server_address, f, "", True)
+            img.append(y)
+
+    with open(json_file, 'r', encoding="utf-8") as fi:
+        prompt_data = json.load(fi)
+
+    prompt_data, myfilter = C22(prompt_data, img)
+    # Generate
+    files = generate_clip(server_address, prompt_data, client_id, output_path, myfilter)
+    print(files)
+
+
+if __name__ == '__main__':
+    # Config file
+    config = ConfigUtil.getConfig()
+    server_address = config.get('comfyui', 'server_address')
+
+    # Prompt tokens to replace
+    source_code = ['']
+    # Replacement prompt tokens
+    target_code = ['']
+    # Which workflow file to use
+    json_file = './22.json'
+    input_image = ['../Image/wife.jpg']
+
+    # Run the request
+    runComfyUI(json_file=json_file,
+               model_id=22,
+               task_type_code='User',
+               input_image=input_image)
diff --git a/BaiHu/doTask.py b/BaiHu/doTask.py
index 0dfb3897..a8109123 100644
--- a/BaiHu/doTask.py
+++ b/BaiHu/doTask.py
@@ -148,15 +148,6 @@ def getMyFilter(prompt_data):
 
 # Fill the input image list into the corresponding prompt_data nodes
 def fill_input(prompt_data, file_array):
-    '''
-    Example:
-    "source_images": [
-        "22,inputs,image"
-    ],
-    prompt_data["98"]["inputs"]["image"] = image_pose
-    # source image
-    prompt_data["49"]["inputs"]["image"] = img[0]
-    '''
     # Bail out when the config section is missing
    if "source_images" not in prompt_data:
         print("The config file has no source_images section; cannot continue!")
@@ -176,13 +167,19 @@
 
     if "source_images" in prompt_data:
         del prompt_data["source_images"]
 
-    if 'other_images' in prompt_data:
-        for x in prompt_data['other_images']:
-            with open(x, "rb") as f:
+    # Handle the style/pose reference images
+    if 'style_pose_images' in prompt_data:
+        for x in prompt_data['style_pose_images']:
+            key = x['key']
+            value = x['value']
+            with open(value, "rb") as f:
                 y = upload_file(server_address, f, "", True)
-
-    if "other_images" in prompt_data:
-        del prompt_data["other_images"]
+                # Write the uploaded name into the node path the key addresses
+                array = key.split(",")
+                prompt_data[array[0]][array[1]][array[2]] = y
+
+    if "style_pose_images" in prompt_data:
+        del prompt_data["style_pose_images"]
 
     return prompt_data
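
A note on the workflow in BaiHu/Test/22.json: it has exactly three LoadImage inputs — node 49 (the user's source face, fed to IPAdapterFaceID via node 124), node 98 (the pose sheet, fed to the openpose ControlNet via nodes 102/103), and node 118 (the style reference, background-removed by node 119 before IPAdapterAdvanced node 122). A minimal sketch, standard library only, assuming the JSON above is saved as 22.json in the working directory, that lists those nodes so a driver script knows which inputs to overwrite:

    import json

    # Enumerate every LoadImage node in the exported workflow; these are
    # exactly the inputs a driver script must replace before queueing.
    with open("22.json", "r", encoding="utf-8") as fi:
        workflow = json.load(fi)

    for node_id, node in sorted(workflow.items(), key=lambda kv: int(kv[0])):
        if node.get("class_type") == "LoadImage":
            print(node_id, "->", node["inputs"]["image"])
    # Prints nodes 49, 98 and 118 with their placeholder file names.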
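
The reworked fill_input() in BaiHu/doTask.py treats each style_pose_images "key" as a comma-separated node path, so "118,inputs,image" resolves to prompt_data["118"]["inputs"]["image"]; that is why the JSON templates can now ship the placeholder value "string" in nodes 98 and 118. A usage sketch, under the assumptions that doTask.py is importable without side effects, the working directory is BaiHu/ (so the relative ./Image/C22/ paths resolve), and "user_upload.png" is a hypothetical already-uploaded source image name for the source_images entry:

    import json

    from doTask import fill_input

    with open("JSON/22_1.json", "r", encoding="utf-8") as fi:
        prompt_data = json.load(fi)

    # fill_input() fills node 49 from file_array, uploads the style/pose
    # files named in style_pose_images, and writes the returned server-side
    # names into the node paths their "key" strings address.
    prompt_data = fill_input(prompt_data, ["user_upload.png"])

    print(prompt_data["98"]["inputs"]["image"])   # pose image, no longer "string"
    print(prompt_data["118"]["inputs"]["image"])  # style image, no longer "string"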