| import requests |
| from PIL import Image |
| import torch |
| from transformers import AutoProcessor, LlavaForConditionalGeneration |
|
|
| |
# LLaVA-1.5 checkpoint on the Hugging Face Hub.
model_id = "llava-hf/llava-1.5-7b-hf"


# Prefer GPU when available; fall back to CPU otherwise.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


# Load the multimodal model in half precision to halve memory usage;
# low_cpu_mem_usage avoids materializing a full fp32 copy during loading.
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
).to(device)


# Processor bundles the tokenizer and the image preprocessor.
processor = AutoProcessor.from_pretrained(model_id)


# Chat-style conversation; the {"type": "image"} entry is a placeholder
# marking where the image features are spliced into the prompt.
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What are these?"},
            {"type": "image"},
        ],
    },
]


# Render the conversation with the model's chat template, appending the
# assistant-turn marker so generation continues from there.
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)


# Fetch the demo image (COCO val2017). A timeout prevents hanging forever,
# and raise_for_status fails fast instead of handing an error page to PIL.
image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
response = requests.get(image_url, stream=True, timeout=30)
response.raise_for_status()
raw_image = Image.open(response.raw)


# Tokenize the prompt and preprocess the image into model-ready tensors.
inputs = processor(images=raw_image, text=prompt, return_tensors='pt')
# BatchFeature.to casts only floating-point tensors (pixel_values) to
# float16 and merely moves integer tensors (input_ids, attention_mask) to
# the device. The previous dict comprehension cast *every* tensor to
# float16, corrupting the integer token ids.
inputs = inputs.to(device, torch.float16)


# Greedy decoding (do_sample=False), capped at 200 new tokens.
output = model.generate(**inputs, max_new_tokens=200, do_sample=False)


# generate() returns prompt + continuation; slice off the prompt by its
# actual token length so only the newly generated answer is printed.
# (The old hard-coded [2:] slice still echoed nearly the whole prompt.)
generated = output[0][inputs["input_ids"].shape[1]:]
print(processor.decode(generated, skip_special_tokens=True))
|
|