"""Hugging Face Space app: image -> 3D mesh generation with Hunyuan3D.

Pipeline per request: NSFW check -> background removal -> shape generation
(Hunyuan3DDiTFlowMatchingPipeline) -> mesh cleanup/face reduction -> optional
texture generation -> GLB export + HTML model-viewer preview served via a
FastAPI static mount.
"""
import os
import torch
import subprocess
import sys
import spaces
import random
import time
import shutil
import gradio as gr
from glob import glob
from pathlib import Path
import uuid
import argparse
import uvicorn
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
import trimesh
from transformers import AutoProcessor, AutoModelForImageClassification
from PIL import Image
import shlex

# Install the prebuilt custom rasterizer wheel shipped with the Space.
# Done at startup because the Space build environment cannot compile it.
print('install custom')
subprocess.run(shlex.split("pip install custom_rasterizer-0.1-cp310-cp310-linux_x86_64.whl"), check=True)

parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str, default='tencent/Hunyuan3D-2mini')
parser.add_argument("--subfolder", type=str, default='hunyuan3d-dit-v2-mini-turbo')
parser.add_argument("--texgen_model_path", type=str, default='tencent/Hunyuan3D-2')
parser.add_argument('--port', type=int, default=7860)
parser.add_argument('--host', type=str, default='0.0.0.0')
parser.add_argument('--device', type=str, default='cuda')
parser.add_argument('--mc_algo', type=str, default='mc')
parser.add_argument('--cache_path', type=str, default='gradio_cache')
parser.add_argument('--enable_t23d', action='store_true')
parser.add_argument('--disable_tex', action='store_true')
parser.add_argument('--enable_flashvdm', action='store_true')
parser.add_argument('--compile', action='store_true')
parser.add_argument('--low_vram_mode', action='store_true')
args = parser.parse_args()
# FlashVDM is forced on regardless of the CLI flag.
args.enable_flashvdm = True

SAVE_DIR = args.cache_path
os.makedirs(SAVE_DIR, exist_ok=True)

CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
HTML_HEIGHT = 500
HTML_WIDTH = 500

# FIX: was `MAX_SEED = 1e7` defined far below its first use site.  An int is
# required: random.randint() rejects non-integral float bounds on modern
# Python, and gr.Slider(step=1) expects an integer maximum.
MAX_SEED = 10_000_000

# -------------------- NSFW detection model --------------------
nsfw_processor = AutoProcessor.from_pretrained("Falconsai/nsfw_image_detection")
nsfw_model = AutoModelForImageClassification.from_pretrained("Falconsai/nsfw_image_detection").to(args.device)
# --------------------------------------------------------------


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    """Return a fresh random seed in [0, MAX_SEED] if requested, else `seed`."""
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed


def gen_save_folder(max_size: int = 200) -> str:
    """Create and return a new UUID-named output folder under SAVE_DIR.

    When SAVE_DIR already holds `max_size` folders, the one with the oldest
    ctime is removed first to bound disk usage.
    """
    os.makedirs(SAVE_DIR, exist_ok=True)
    dirs = [f for f in Path(SAVE_DIR).iterdir() if f.is_dir()]
    if len(dirs) >= max_size:
        # Evict the oldest folder (by creation time) before adding a new one.
        oldest_dir = min(dirs, key=lambda x: x.stat().st_ctime)
        shutil.rmtree(oldest_dir)
        print(f"Removed the oldest folder: {oldest_dir}")
    new_folder = os.path.join(SAVE_DIR, str(uuid.uuid4()))
    os.makedirs(new_folder, exist_ok=True)
    print(f"Created new folder: {new_folder}")
    return new_folder


def export_mesh(mesh, save_folder, textured=False, type='glb'):
    """Export `mesh` into `save_folder` and return the written file path.

    `textured` selects the file stem (textured_mesh vs white_mesh) and, for
    glb/obj, whether normals are included.  NOTE: `type` shadows the builtin;
    kept for caller compatibility.
    """
    if textured:
        path = os.path.join(save_folder, f'textured_mesh.{type}')
    else:
        path = os.path.join(save_folder, f'white_mesh.{type}')
    if type not in ['glb', 'obj']:
        mesh.export(path)
    else:
        mesh.export(path, include_normals=textured)
    return path


def build_model_viewer_html(save_folder, height=660, width=790, textured=False):
    """Render the model-viewer HTML for the mesh in `save_folder`.

    Fills the size/src placeholders of the bundled template, writes the
    result next to the mesh, and returns an HTML snippet for gr.HTML.
    """
    if textured:
        related_path = f"./textured_mesh.glb"
        template_name = './assets/modelviewer-textured-template.html'
        output_html_path = os.path.join(save_folder, f'textured_mesh.html')
    else:
        related_path = f"./white_mesh.glb"
        template_name = './assets/modelviewer-template.html'
        output_html_path = os.path.join(save_folder, f'white_mesh.html')
    # Textured template needs more vertical chrome, hence the larger offset.
    offset = 50 if textured else 10
    with open(os.path.join(CURRENT_DIR, template_name), 'r', encoding='utf-8') as f:
        template_html = f.read()
    with open(output_html_path, 'w', encoding='utf-8') as f:
        template_html = template_html.replace('#height#', f'{height - offset}')
        template_html = template_html.replace('#width#', f'{width}')
        # NOTE(review): trailing slash after the .glb path looks suspicious —
        # confirm against the template's '#src#' usage.
        template_html = template_html.replace('#src#', f'{related_path}/')
        f.write(template_html)
    rel_path = os.path.relpath(output_html_path, SAVE_DIR)
    # NOTE(review): iframe_tag is empty in the source as seen here — presumably
    # the actual <iframe ...> markup was lost; verify against the repo.
    iframe_tag = f''
    print(
        f'Find html file {output_html_path}, {os.path.exists(output_html_path)}, relative HTML path is /static/{rel_path}')
    return f"""
{iframe_tag}
"""


HAS_TEXTUREGEN = False
try:
    from hy3dgen.texgen import Hunyuan3DPaintPipeline
    texgen_worker = Hunyuan3DPaintPipeline.from_pretrained(args.texgen_model_path)
    if args.low_vram_mode:
        texgen_worker.enable_model_cpu_offload()
    HAS_TEXTUREGEN = True
except Exception as e:
    # Texture generation is optional: fall back to white (untextured) meshes.
    print(e)
    print("Failed to load texture generator.")
    print('Please try to install requirements by following README.md')
    HAS_TEXTUREGEN = False

from hy3dgen.shapegen import FaceReducer, FloaterRemover, DegenerateFaceRemover, MeshSimplifier, \
    Hunyuan3DDiTFlowMatchingPipeline
from hy3dgen.shapegen.pipelines import export_to_trimesh
from hy3dgen.rembg import BackgroundRemover

rmbg_worker = BackgroundRemover()
i23d_worker = Hunyuan3DDiTFlowMatchingPipeline.from_pretrained(
    args.model_path,
    subfolder=args.subfolder,
    use_safetensors=True,
    device=args.device,
)
if args.enable_flashvdm:
    # FlashVDM only supports plain marching cubes on cpu/mps devices.
    mc_algo = 'mc' if args.device in ['cpu', 'mps'] else args.mc_algo
    i23d_worker.enable_flashvdm(mc_algo=mc_algo)
if args.compile:
    i23d_worker.compile()

floater_remove_worker = FloaterRemover()
degenerate_face_remove_worker = DegenerateFaceRemover()
face_reduce_worker = FaceReducer()


def detect_nsfw(image: Image.Image, threshold: float = 0.5) -> bool:
    """Return True if `image` is classified as NSFW above `threshold`."""
    inputs = nsfw_processor(images=image, return_tensors="pt").to(args.device)
    with torch.no_grad():
        outputs = nsfw_model(**inputs)
    probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
    nsfw_score = probs[0][1].item()  # label 1 = NSFW
    return nsfw_score > threshold


# NOTE(review): module-level Progress instance shared by all requests —
# gradio normally injects a per-event Progress; confirm this is intended.
progress = gr.Progress()


def get_duration(
    image=None,
    steps=50,
    guidance_scale=7.5,
    seed=1234,
    octree_resolution=256,
    num_chunks=200000,
    target_face_num=10000,
    randomize_seed: bool = False,
):
    """Estimate the ZeroGPU reservation (seconds) for one generation call.

    Used as the `duration` callable of @spaces.GPU; the estimate scales with
    the number of diffusion steps and the target face count.
    """
    if image is None:
        return 10
    nsfw = 1
    bgrm = 2
    mesh = 3 + 0.3 * (steps - 5)
    reduce = 20
    texture = 15 + 10 * ((target_face_num - 10000) / 10000)
    if texture < 15:
        texture = 15
    print(f'mesh duration: {mesh}')
    print(f'texture duration: {texture}')
    duration = nsfw + bgrm + int(mesh) + reduce + int(texture)
    print(f'function duration: {duration}')
    return duration


@spaces.GPU(duration=get_duration)
def _gen_shape_on_gpu(
    image=None,
    steps=50,
    guidance_scale=7.5,
    seed=1234,
    octree_resolution=256,
    num_chunks=200000,
    target_face_num=10000,
    randomize_seed: bool = False,
):
    """Run the full generation pipeline on GPU.

    Returns a 4-tuple (viewer_html, download_update, glb_url, info) where
    info["status"] is "success" or "failed"; on failure the first three
    elements are None and info["error"] holds the message.
    """
    start_time_0 = time.time()
    progress(0, desc="Starting")

    def callback(step_idx, timestep, outputs):
        # Shape generation occupies the first 40% (with texturing) or 50%
        # (without) of the progress bar.
        if HAS_TEXTUREGEN:
            progress_value = ((step_idx + 1.0) / steps) * (0.4 / 1.0)
        else:
            progress_value = ((step_idx + 1.0) / steps) * (0.5 / 1.0)
        progress(progress_value, desc=f"Mesh generating, {step_idx + 1}/{steps} steps")

    # FIX: all failure paths previously returned a 5-tuple
    # (None, None, None, None, error_info) while the caller gen_shape()
    # unpacks exactly 4 values, so every error raised ValueError instead of
    # the intended gr.Error.  Failure paths now return 4 elements.
    if image is None:
        error_info = {
            "error": "Please provide either a caption or an image.",
            "status": "failed",
        }
        return None, None, None, error_info

    rgbImage = image.convert('RGB')

    start_time = time.time()
    # NSFW screening of the raw input image.
    if nsfw_model and nsfw_processor:
        if detect_nsfw(rgbImage):
            error_info = {
                "error": "The input image contains NSFW content and cannot be used. Please provide a different image and try again.",
                "status": "failed",
            }
            return None, None, None, error_info
    # FIX: time.time() deltas are seconds, not milliseconds — labels corrected.
    print(f'NSFW checker cost: {time.time() - start_time} s')

    start_time = time.time()
    seed = int(randomize_seed_fn(seed, randomize_seed))
    octree_resolution = int(octree_resolution)
    save_folder = gen_save_folder()
    # Remove the background before shape generation.
    image = rmbg_worker(rgbImage)
    print(f'Background remover cost: {time.time() - start_time} s')

    start_time = time.time()
    # Generate the mesh.
    generator = torch.Generator()
    generator = generator.manual_seed(int(seed))
    outputs = i23d_worker(
        image=image,
        num_inference_steps=steps,
        guidance_scale=guidance_scale,
        generator=generator,
        octree_resolution=octree_resolution,
        num_chunks=num_chunks,
        output_type='mesh',
        callback=callback,
        callback_steps=1
    )
    print(f'num_chunks: {num_chunks}')
    print(f'steps: {steps}')
    print(f'octree_resolution: {octree_resolution}')
    print(f'Mesh generator cost: {time.time() - start_time} s')
    mesh = export_to_trimesh(outputs)[0]
    path = export_mesh(mesh, save_folder, textured=False)
    if args.low_vram_mode:
        torch.cuda.empty_cache()
    if path is None:
        error_info = {
            "error": "'Please generate a mesh first.'",
            "status": "failed",
        }
        return None, None, None, error_info

    # Mesh simplification: drop floaters/degenerate faces, then reduce faces.
    print(f'exporting {path}')
    print(f'reduce face to {target_face_num}')
    start_time = time.time()
    mesh = trimesh.load(path)
    if HAS_TEXTUREGEN:
        progress(0.4, desc="Optimizing mesh")
    else:
        progress(0.5, desc="Optimizing mesh")
    mesh = floater_remove_worker(mesh)
    mesh = degenerate_face_remove_worker(mesh)
    if HAS_TEXTUREGEN:
        progress(0.5, desc="Reducing mesh faces")
    else:
        progress(0.6, desc="Reducing mesh faces")
    mesh = face_reduce_worker(mesh, target_face_num)
    print(f'target_face_num: {target_face_num}')
    print(f'Mesh Reducing cost: {time.time() - start_time} s')

    start_time = time.time()
    if HAS_TEXTUREGEN:
        progress(0.7, desc="Texture generating")
        textured_mesh = texgen_worker(mesh, image)
        print(f'Texture generator cost: {time.time() - start_time} s')

    progress(0.9, desc="Converting format")
    start_time = time.time()
    # Export the final mesh into a fresh folder for the preview/download.
    save_folder = gen_save_folder()
    if HAS_TEXTUREGEN:
        sourceGlbPath = export_mesh(textured_mesh, save_folder, textured=True)
    else:
        sourceGlbPath = export_mesh(mesh, save_folder, textured=False)
    model_viewer_html = build_model_viewer_html(
        save_folder, height=HTML_HEIGHT, width=HTML_WIDTH, textured=HAS_TEXTUREGEN)
    if HAS_TEXTUREGEN:
        glbPath = os.path.join(save_folder, f'textured_mesh.glb')
    else:
        glbPath = os.path.join(save_folder, f'white_mesh.glb')
    # Public URL under the FastAPI /static mount of SAVE_DIR.
    rel_glbPath = os.path.relpath(glbPath, SAVE_DIR)
    glbPath = "/static/" + rel_glbPath
    if args.low_vram_mode:
        torch.cuda.empty_cache()
    print(f'Export cost: {time.time() - start_time} s')

    progress(1, desc="Complete")
    info = {
        "status": "success"
    }
    print(f'All cost: {time.time() - start_time_0} s')
    return model_viewer_html, gr.update(value=sourceGlbPath, interactive=True), glbPath, info


def gen_shape(
    image=None,
    steps=50,
    guidance_scale=7.5,
    seed=1234,
    octree_resolution=256,
    num_chunks=200000,
    target_face_num=10000,
    randomize_seed: bool = False,
):
    """UI entry point: delegate to the GPU worker and surface errors to Gradio."""
    html_export_mesh, file_export, glbPath_output, info = _gen_shape_on_gpu(
        image, steps, guidance_scale, seed,
        octree_resolution, num_chunks, target_face_num, randomize_seed
    )
    # Convert pipeline failures into a user-visible Gradio error.
    if info["status"] == "failed":
        raise gr.Error(info["error"])
    print(f'file_export: {file_export}')
    print(f'glbPath_output: {glbPath_output}')
    return html_export_mesh, file_export, glbPath_output


def get_example_img_list():
    """Return the sorted list of bundled example image paths."""
    print('Loading example img list ...')
    return sorted(glob('./assets/example_images/**/*.png', recursive=True))


example_imgs = get_example_img_list()

HTML_OUTPUT_PLACEHOLDER = f"""

No mesh here.

"""

title = "## AI 3D Model Generator"
description = "Our Image-to-3D Generator transforms your 2D photos into stunning, AI generated 3D models—ready for games, AR/VR, or 3D printing. Our AI 3D Modeling is based on Hunyuan 2.0. Check more in [imgto3d.ai](https://www.imgto3d.ai)."

with gr.Blocks().queue() as demo:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Row():
        with gr.Column(scale=3):
            gr.Markdown("#### Image Prompt")
            image = gr.Image(sources=["upload"], label='Image', type='pil', image_mode='RGBA', height=290)
            gen_button = gr.Button(value='Generate Shape', variant='primary')
            with gr.Accordion("Advanced Options", open=False):
                with gr.Column():
                    seed = gr.Slider(
                        label="Seed",
                        minimum=0,
                        maximum=MAX_SEED,
                        step=1,
                        value=1234,
                        min_width=100,
                    )
                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                with gr.Column():
                    num_steps = gr.Slider(maximum=100, minimum=1, value=5, step=1, label='Inference Steps')
                    octree_resolution = gr.Slider(maximum=512, minimum=16, value=256, label='Octree Resolution')
                with gr.Column():
                    cfg_scale = gr.Slider(maximum=20.0, minimum=1.0, value=5.5, step=0.1, label='Guidance Scale')
                    num_chunks = gr.Slider(maximum=5000000, minimum=1000, value=8000, label='Number of Chunks')
                    target_face_num = gr.Slider(maximum=1000000, minimum=100, value=10000, label='Target Face Number')
        with gr.Column(scale=6):
            gr.Markdown("#### Generated Mesh")
            html_export_mesh = gr.HTML(HTML_OUTPUT_PLACEHOLDER, label='Output')
            file_export = gr.DownloadButton(label="Download", variant='primary', interactive=False)
            with gr.Row():
                glbPath_output = gr.Text(label="Glb Path", interactive=False)
        with gr.Column(scale=3):
            gr.Markdown("#### Image Examples")
            gr.Examples(examples=example_imgs, inputs=[image], label=None, examples_per_page=18)

    gen_button.click(
        fn=gen_shape,
        inputs=[image, num_steps, cfg_scale, seed, octree_resolution, num_chunks, target_face_num, randomize_seed],
        outputs=[html_export_mesh, file_export, glbPath_output]
    )

if __name__ == "__main__":
    # https://discuss.huggingface.co/t/how-to-serve-an-html-file/33921/2
    # Wrap the Gradio app in FastAPI so generated meshes/HTML under SAVE_DIR
    # can be served at /static.
    app = FastAPI()
    static_dir = Path(SAVE_DIR).absolute()
    static_dir.mkdir(parents=True, exist_ok=True)
    app.mount("/static", StaticFiles(directory=static_dir, html=True), name="static")
    # Environment maps used by the model viewer must live under the mount.
    shutil.copytree('./assets/env_maps', os.path.join(static_dir, 'env_maps'), dirs_exist_ok=True)
    if args.low_vram_mode:
        torch.cuda.empty_cache()
    app = gr.mount_gradio_app(app, demo, path="/")
    # ZeroGPU startup handshake must run before serving requests.
    from spaces import zero
    zero.startup()
    uvicorn.run(app, host=args.host, port=args.port)