Commit 2b89113
Parent(s): 9ec77d0
Update README: Add model card metadata, ImageNet-1k metrics, and LiteRT usage example

README.md (CHANGED)
---
library_name: litert
pipeline_tag: image-classification
tags:
- vision
- image-classification
- google
- computer-vision
datasets:
- imagenet-1k
model-index:
- name: litert-community/convnext_base
  results:
  - task:
      type: image-classification
      name: Image Classification
    dataset:
      name: ImageNet-1k
      type: imagenet-1k
      config: default
      split: validation
    metrics:
    - name: Top 1 Accuracy (Full Precision)
      type: accuracy
      value: 0.8405
    - name: Top 5 Accuracy (Full Precision)
      type: accuracy
      value: 0.9689
---

# ConvNeXt_Base

ConvNeXt_Base is a convolutional neural network pre-trained on the ImageNet-1k dataset. The architecture was introduced by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, and Saining Xie in the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545).

## Model description

The model was converted from a PyTorch Vision (torchvision) checkpoint.

The original model has:

- acc@1 (on ImageNet-1K): 84.06%
- acc@5 (on ImageNet-1K): 96.87%
- num_params: 88,591,464
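
For reference, the metrics above match torchvision's `ConvNeXt_Base_Weights.IMAGENET1K_V1` weights, which are presumably the source checkpoint. A minimal sketch for inspecting that original checkpoint and its preprocessing, assuming torchvision >= 0.13 is installed:

```python
import torchvision.models as models

# Presumed source checkpoint: torchvision's ImageNet-1k ConvNeXt_Base weights.
weights = models.ConvNeXt_Base_Weights.IMAGENET1K_V1
model = models.convnext_base(weights=weights).eval()

# Parameter count of the original checkpoint (expected: 88,591,464).
print(sum(p.numel() for p in model.parameters()))

# The matching preprocessing preset: resize shorter side to 232, center-crop
# to 224, normalize with ImageNet mean/std.
print(weights.transforms())
```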

## Intended uses & limitations

The model files were converted from pretrained weights from PyTorch Vision. The models may have their own licenses or terms and conditions derived from PyTorch Vision and the dataset used for training. It is your responsibility to determine whether you have permission to use the models for your use case.

## Use

The snippet below downloads the LiteRT model from this repository and runs single-image ImageNet classification with `ai_edge_litert`:

```python
#!/usr/bin/env python3
import argparse, json
import numpy as np
from PIL import Image
from huggingface_hub import hf_hub_download
from ai_edge_litert.compiled_model import CompiledModel

def preprocess(img: Image.Image) -> np.ndarray:
    # Resize the shorter side to 232 px, center-crop to 224x224,
    # then normalize with the ImageNet mean/std.
    img = img.convert("RGB")
    w, h = img.size
    s = 232
    if w < h:
        img = img.resize((s, int(round(h * s / w))), Image.BILINEAR)
    else:
        img = img.resize((int(round(w * s / h)), s), Image.BILINEAR)
    left = (img.size[0] - 224) // 2
    top = (img.size[1] - 224) // 2
    img = img.crop((left, top, left + 224, top + 224))

    x = np.asarray(img, dtype=np.float32) / 255.0
    x = (x - np.array([0.485, 0.456, 0.406], dtype=np.float32)) / np.array(
        [0.229, 0.224, 0.225], dtype=np.float32
    )
    return x

def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("--image", required=True)
    args = ap.parse_args()

    # Download the .tflite model and the ImageNet-1k id-to-label mapping.
    model_path = hf_hub_download("litert-community/convnext_base", "convnext_base.tflite")
    labels_path = hf_hub_download(
        "huggingface/label-files", "imagenet-1k-id2label.json", repo_type="dataset"
    )
    with open(labels_path, "r", encoding="utf-8") as f:
        id2label = {int(k): v for k, v in json.load(f).items()}

    img = Image.open(args.image)
    x = preprocess(img)

    # Compile the model and allocate input/output buffers for signature 0.
    model = CompiledModel.from_file(model_path)
    inp = model.create_input_buffers(0)
    out = model.create_output_buffers(0)

    # Run inference.
    inp[0].write(x)
    model.run_by_index(0, inp, out)

    # Read the class scores back from the output buffer.
    req = model.get_output_buffer_requirements(0, 0)
    y = out[0].read(req["buffer_size"] // np.dtype(np.float32).itemsize, np.float32)

    pred = int(np.argmax(y))
    label = id2label.get(pred, f"class_{pred}")

    print(f"Top-1 class index: {pred}")
    print(f"Top-1 label: {label}")

if __name__ == "__main__":
    main()
```
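
Assuming the snippet is saved as, say, `classify.py`, it can be run with `python classify.py --image path/to/image.jpg`; it requires the `ai-edge-litert`, `huggingface_hub`, `pillow`, and `numpy` packages.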

### BibTeX entry and citation info

```bibtex
@inproceedings{liu2022convnet,
  title={A ConvNet for the 2020s},
  author={Liu, Zhuang and Mao, Hanzi and Wu, Chao-Yuan and Feichtenhofer, Christoph and Darrell, Trevor and Xie, Saining},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={11976--11986},
  year={2022}
}
```