Magi, The Manga Whisperer
To-do:
- Upload the Magiv2 model.
- Provide new annotations.
- Open-source the PopCharacters dataset.
- Release evaluation scripts.
Magiv1
- The model is available in the 🤗 HuggingFace model repository.
- You can try it yourself via this 🤗 HuggingFace Spaces demo (no GPU, so it is slow).
- Basic model usage is shown below. For more details, see this file.
v1 Usage
from transformers import AutoModel
import numpy as np
from PIL import Image
import torch

images = [
    "path_to_image_1.jpg",
    "path_to_image_2.png",
]

def read_image_as_np_array(image_path):
    # Read the page as grayscale, then convert back to RGB as the model expects.
    with open(image_path, "rb") as file:
        image = Image.open(file).convert("L").convert("RGB")
        image = np.array(image)
    return image

images = [read_image_as_np_array(image) for image in images]

model = AutoModel.from_pretrained("ragavsachdeva/magi", trust_remote_code=True).cuda()

with torch.no_grad():
    # Detect panels, text boxes and characters, and associate them.
    results = model.predict_detections_and_associations(images)
    # Run OCR on the detected text boxes.
    text_bboxes_for_all_images = [x["texts"] for x in results]
    ocr_results = model.predict_ocr(images, text_bboxes_for_all_images)

for i in range(len(images)):
    # Save an annotated page image and a plain-text transcript for each page.
    model.visualise_single_image_prediction(images[i], results[i], filename=f"image_{i}.png")
    model.generate_transcript_for_single_image(results[i], ocr_results[i], filename=f"transcript_{i}.txt")
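A quick way to sanity-check the outputs: the "texts" key of each per-image result is the same list of text boxes fed to OCR above, so its length gives a per-page detection count. This is a minimal sketch that relies only on that key; the results may expose other keys not shown here.

for i, result in enumerate(results):
    # "texts" is the per-page list of text boxes passed to predict_ocr above.
    print(f"Image {i}: {len(result['texts'])} text boxes detected")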
Magiv2
- The model is available in the 🤗 HuggingFace model repository.
- You can try it yourself via this 🤗 HuggingFace Spaces demo (with GPU support, thanks to the HF team!).
- Basic model usage is shown below. For more details, see this file.
v2 Usage
from PIL import Image
import numpy as np
from transformers import AutoModel
import torch

# Load Magiv2 and put it in eval mode for inference.
model = AutoModel.from_pretrained("ragavsachdeva/magiv2", trust_remote_code=True).cuda().eval()

def read_image(path_to_image):
    # Read the image as grayscale, then convert back to RGB as the model expects.
    with open(path_to_image, "rb") as file:
        image = Image.open(file).convert("L").convert("RGB")
        image = np.array(image)
    return image

# Chapter pages in reading order, plus a bank of reference crops and names,
# one entry per character.
chapter_pages = ["page_1.png", "page_2.png", "page_3.png" ...]
character_bank = {
    "images": ["char_1.png", "char_2.png", "char_3.png", "char_4.png" ...],
    "names": ["Luffy", "Sanji", "Zoro", "Usopp" ...]
}

chapter_pages = [read_image(x) for x in chapter_pages]
character_bank["images"] = [read_image(x) for x in character_bank["images"]]

with torch.no_grad():
    # Run detection, character matching, and OCR across the whole chapter.
    per_page_results = model.do_chapter_wide_prediction(chapter_pages, character_bank, use_tqdm=True, do_ocr=True)

transcript = []
for i, (image, page_result) in enumerate(zip(chapter_pages, per_page_results)):
    model.visualise_single_image_prediction(image, page_result, f"page_{i}.png")
    # Map each text box index to the name of its predicted speaker.
    speaker_name = {
        text_idx: page_result["character_names"][char_idx]
        for text_idx, char_idx in page_result["text_character_associations"]
    }
    for j in range(len(page_result["ocr"])):
        # Keep only essential (story-relevant) text in the transcript.
        if not page_result["is_essential_text"][j]:
            continue
        name = speaker_name.get(j, "unknown")
        transcript.append(f"<{name}>: {page_result['ocr'][j]}")

with open("transcript.txt", "w") as fh:
    for line in transcript:
        fh.write(line + "\n")
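The per-page results can also be aggregated chapter-wide. Below is a minimal sketch that tallies how many essential lines are attributed to each named speaker, using only the keys already consumed above ("ocr", "is_essential_text", "character_names", "text_character_associations"); collections.Counter is from the standard library.

from collections import Counter

line_counts = Counter()
for page_result in per_page_results:
    # Same text-to-speaker mapping as in the transcript loop above.
    speaker_name = {
        text_idx: page_result["character_names"][char_idx]
        for text_idx, char_idx in page_result["text_character_associations"]
    }
    for j in range(len(page_result["ocr"])):
        if page_result["is_essential_text"][j]:
            line_counts[speaker_name.get(j, "unknown")] += 1

print(line_counts.most_common())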
License and Citation
The provided models and datasets are available for unrestricted use in personal, research, non-commercial, and not-for-profit projects. For any other usage scenario, please contact me via email with a detailed description of your requirements so that a tailored licensing arrangement can be established. My contact information is available on my website.
@InProceedings{magiv1,
    author    = {Sachdeva, Ragav and Zisserman, Andrew},
    title     = {The Manga Whisperer: Automatically Generating Transcriptions for Comics},
    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
    month     = {June},
    year      = {2024},
    pages     = {12967-12976}
}
@misc{magiv2,
    title         = {Tails Tell Tales: Chapter-Wide Manga Transcriptions with Character Names},
    author        = {Ragav Sachdeva and Gyungin Shin and Andrew Zisserman},
    year          = {2024},
    eprint        = {2408.00298},
    archivePrefix = {arXiv},
    primaryClass  = {cs.CV},
    url           = {https://arxiv.org/abs/2408.00298},
}