# ocr-scripts / deepseek-ocr2.py
# Uploaded by j4xfu2mm via huggingface_hub (commit bc43a9e, verified)
# /// script
# requires-python = ">=3.11"
# dependencies = [
# "datasets>=4.0.0",
# "huggingface-hub",
# "pillow",
# "torch",
# "torchvision",
# "transformers>=5.0.0",
# "tqdm",
# "accelerate",
# "addict",
# "matplotlib",
# ]
# ///
"""Convert document images to markdown using DeepSeek-OCR-2 via transformers."""
import argparse
import io
import json
import os
import sys
import tempfile
from datetime import datetime

import torch
from datasets import Dataset, load_dataset
from huggingface_hub import login
from PIL import Image
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer
# Prompt passed to the model's infer() helper. The <|grounding|> tag presumably
# switches the model into layout-grounded markdown output — confirm against the
# DeepSeek-OCR model card. Trailing space is kept verbatim.
PROMPT = "<image>\n<|grounding|>Convert the document to markdown. "
def main(input_dataset: str, output_dataset: str, split: str = "train",
         max_samples: int | None = None, image_column: str = "image"):
    """OCR every image in *input_dataset* with DeepSeek-OCR-2 and push to the Hub.

    Args:
        input_dataset: Hub id of the source dataset; must contain ``image_column``.
        output_dataset: Hub id the results dataset is pushed to (public).
        split: Split of the source dataset to process.
        max_samples: Optional cap on the number of rows processed.
        image_column: Column holding the image (PIL image or datasets
            ``{"bytes": ...}`` dict).

    Exits the process with status 1 when no CUDA device is available.
    """
    if not torch.cuda.is_available():
        print("ERROR: CUDA not available. GPU required.")
        sys.exit(1)
    print(f"GPU: {torch.cuda.get_device_name(0)}")
    # Authenticate only when a token is provided; public datasets work without.
    token = os.environ.get("HF_TOKEN")
    if token:
        login(token=token)
    print("Loading model deepseek-ai/DeepSeek-OCR-2...")
    tokenizer = AutoTokenizer.from_pretrained(
        "deepseek-ai/DeepSeek-OCR-2", trust_remote_code=True
    )
    model = AutoModel.from_pretrained(
        "deepseek-ai/DeepSeek-OCR-2",
        trust_remote_code=True,
        use_safetensors=True,
        # transformers v5 (pinned in the script metadata) renamed
        # `torch_dtype=` to `dtype=`; the old kwarg is gone in >=5.0.0.
        dtype=torch.bfloat16,
    ).cuda()
    model.eval()  # inference only — disable dropout etc.
    print(f"Loading dataset {input_dataset}...")
    ds = load_dataset(input_dataset, split=split)
    if max_samples:
        ds = ds.select(range(min(max_samples, len(ds))))
    print(f"Processing {len(ds)} samples...")
    results = []
    with tempfile.TemporaryDirectory() as tmpdir:
        for i, row in enumerate(tqdm(ds)):
            img = row[image_column]
            if isinstance(img, dict):
                # Undecoded datasets Image feature: raw bytes under "bytes".
                img = Image.open(io.BytesIO(img["bytes"]))
            # JPEG cannot encode alpha/palette modes (RGBA, P, LA, ...);
            # normalize to RGB so .save() does not raise on PNG-sourced data.
            if img.mode != "RGB":
                img = img.convert("RGB")
            img_path = os.path.join(tmpdir, f"img_{i}.jpg")
            img.save(img_path, format="JPEG", quality=95)
            try:
                out = model.infer(
                    tokenizer,
                    prompt=PROMPT,
                    image_file=img_path,
                    output_path=tmpdir,
                    base_size=1024,
                    image_size=768,
                    crop_mode=True,
                    save_results=False,
                )
                if i == 0:
                    # One-off peek at the remote-code return type.
                    print(f"[DEBUG] out type={type(out)}, value={repr(out)[:200]}")
                markdown = out if isinstance(out, str) else str(out)
            except Exception as e:
                # Best-effort batch job: record the failure, keep going.
                print(f"Error on sample {i}: {e}")
                markdown = ""
            results.append({
                "image": row[image_column],
                "gt_json": row.get("gt_json", ""),
                "markdown": markdown,
                "inference_info": json.dumps([{
                    "column_name": "markdown",
                    "model_id": "deepseek-ai/DeepSeek-OCR-2",
                    "processing_date": datetime.now().strftime("%Y-%m-%d"),
                    "backend": "transformers",
                }]),
            })
    print(f"Pushing to {output_dataset}...")
    Dataset.from_list(results).push_to_hub(output_dataset, private=False)
    print(f"Done → https://huggingface.co/datasets/{output_dataset}")
if __name__ == "__main__":
    # CLI wiring: positional source/destination dataset ids plus
    # optional split / sample-cap / image-column overrides.
    cli = argparse.ArgumentParser()
    cli.add_argument("input_dataset")
    cli.add_argument("output_dataset")
    cli.add_argument("--split", default="train")
    cli.add_argument("--max-samples", type=int, default=None)
    cli.add_argument("--image-column", default="image")
    ns = cli.parse_args()
    main(
        ns.input_dataset,
        ns.output_dataset,
        ns.split,
        ns.max_samples,
        ns.image_column,
    )