From 549ca7204f1bed03a3e11532d20f08fdca4863b0 Mon Sep 17 00:00:00 2001
From: Junyang Lin
Date: Wed, 9 Nov 2022 08:32:22 +0000
Subject: [PATCH] Update README.md

---
 README.md | 114 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 114 insertions(+)

diff --git a/README.md b/README.md
index 154df82..393240a 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,117 @@
---
license: apache-2.0
---

# Chinese-CLIP-Base

## Introduction
This is the base version of Chinese CLIP, a simple implementation of CLIP trained on a large-scale dataset of around 200 million Chinese image-text pairs. For more details, please refer to our technical report (https://arxiv.org/abs/2211.01335) and our official GitHub repository (https://github.com/OFA-Sys/Chinese-CLIP).

## How to use
We provide a simple code snippet below showing how to use the Chinese-CLIP API. To get started, first install cn_clip:
```bash
# install the latest stable release
pip install cn_clip

# or install from the source code
cd Chinese-CLIP/
pip install -e .
```
After installation, use Chinese CLIP as shown below:
```python
import torch
from PIL import Image

import cn_clip.clip as clip
from cn_clip.clip import load_from_name, available_models
print("Available models:", available_models())
# Available models: ['ViT-B-16', 'ViT-L-14', 'ViT-L-14-336', 'ViT-H-14', 'RN50']

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = load_from_name("ViT-B-16", device=device, download_root='./')
model.eval()
image = preprocess(Image.open("examples/pokemon.jpeg")).unsqueeze(0).to(device)
# Candidate labels: Squirtle, Bulbasaur, Charmander, Pikachu
text = clip.tokenize(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]).to(device)

with torch.no_grad():
    image_features = model.encode_image(image)
    text_features = model.encode_text(text)
    # Normalize the features. Please use the normalized features for downstream tasks.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)

    logits_per_image, logits_per_text = model.get_similarity(image, text)
    probs = logits_per_image.softmax(dim=-1).cpu().numpy()

print("Label probs:", probs)  # [[1.268734e-03 5.436878e-02 6.795761e-04 9.436829e-01]]
```
If you are not satisfied with only using the API, feel free to check our GitHub repository (https://github.com/OFA-Sys/Chinese-CLIP) for more details about training and inference.
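The normalized features from the snippet above can be reused directly for retrieval. Below is a minimal text-to-image retrieval sketch built on the same API; the gallery file names and the query string are hypothetical placeholders, so substitute your own images and text:
```python
import torch
from PIL import Image

import cn_clip.clip as clip
from cn_clip.clip import load_from_name

device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = load_from_name("ViT-B-16", device=device, download_root='./')
model.eval()

# Hypothetical gallery paths; replace them with your own images.
gallery_paths = ["examples/pokemon.jpeg", "examples/cat.jpeg", "examples/dog.jpeg"]
images = torch.stack([preprocess(Image.open(p)) for p in gallery_paths]).to(device)
query = clip.tokenize(["皮卡丘"]).to(device)  # "Pikachu"

with torch.no_grad():
    image_features = model.encode_image(images)
    text_features = model.encode_text(query)
    # L2-normalize so that the dot product below is cosine similarity.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    text_features /= text_features.norm(dim=-1, keepdim=True)
    similarity = (text_features @ image_features.T).squeeze(0)

# Rank the gallery from the most to the least similar image.
for idx in similarity.argsort(descending=True).tolist():
    print(f"{gallery_paths[idx]}: {similarity[idx].item():.4f}")
```
Because the features are L2-normalized, the dot product is exactly the cosine similarity, so ranking by it is consistent with the model's contrastive training objective.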

## Results

### MUGE Text-to-Image Retrieval

<table border="1" width="100%">
    <tr align="center">
        <th>Setup</th><th colspan="4">Zero-shot</th><th colspan="4">Finetune</th>
    </tr>
    <tr align="center">
        <td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td><td>R@1</td><td>R@5</td><td>R@10</td><td>MR</td>
    </tr>
    <tr align="center">
        <td>Wukong<sub>ViT-B</sub></td><td>33.4</td><td>59.3</td><td>69.7</td><td>54.1</td><td>39.2</td><td>66.9</td><td>77.4</td><td>61.2</td>
    </tr>
    <tr align="center">
        <td>R2D2<sub>ViT-B</sub></td><td>-</td><td>-</td><td>-</td><td>-</td><td>47.4</td><td>75.1</td><td>83.5</td><td>68.7</td>
    </tr>
    <tr align="center">
        <td>CN-CLIP<sub>ViT-B</sub></td><td>52.1</td><td>76.7</td><td>84.4</td><td>71.1</td><td>58.4</td><td>83.6</td><td>90.0</td><td>77.4</td>
    </tr>
</table>
### Flickr30K-CN Retrieval

<table border="1" width="100%">
    <tr align="center">
        <th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
    </tr>
    <tr align="center">
        <th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
    </tr>
    <tr align="center">
        <td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
    </tr>
    <tr align="center">
        <td>Wukong<sub>ViT-B</sub></td><td>45.7</td><td>73.8</td><td>82.2</td><td>67.6</td><td>89.6</td><td>94.2</td><td>66.2</td><td>88.7</td><td>94.3</td><td>83.9</td><td>97.6</td><td>99.0</td>
    </tr>
    <tr align="center">
        <td>R2D2<sub>ViT-B</sub></td><td>-</td><td>-</td><td>-</td><td>78.3</td><td>94.6</td><td>97.0</td><td>-</td><td>-</td><td>-</td><td>92.6</td><td>99.1</td><td>99.8</td>
    </tr>
    <tr align="center">
        <td>CN-CLIP<sub>ViT-B</sub></td><td>62.7</td><td>86.9</td><td>92.8</td><td>79.1</td><td>94.8</td><td>97.4</td><td>74.6</td><td>93.5</td><td>97.1</td><td>93.5</td><td>99.0</td><td>99.5</td>
    </tr>
</table>
### COCO-CN Retrieval

<table border="1" width="100%">
    <tr align="center">
        <th>Task</th><th colspan="6">Text-to-Image</th><th colspan="6">Image-to-Text</th>
    </tr>
    <tr align="center">
        <th>Setup</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th><th colspan="3">Zero-shot</th><th colspan="3">Finetune</th>
    </tr>
    <tr align="center">
        <td>Metric</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td><td>R@1</td><td>R@5</td><td>R@10</td>
    </tr>
    <tr align="center">
        <td>Wukong<sub>ViT-B</sub></td><td>49.2</td><td>79.4</td><td>87.9</td><td>67.0</td><td>91.4</td><td>96.7</td><td>48.3</td><td>77.8</td><td>88.8</td><td>65.8</td><td>90.3</td><td>96.6</td>
    </tr>
    <tr align="center">
        <td>R2D2<sub>ViT-B</sub></td><td>-</td><td>-</td><td>-</td><td>75.1</td><td>94.2</td><td>98.1</td><td>-</td><td>-</td><td>-</td><td>76.1</td><td>95.3</td><td>98.5</td>
    </tr>
    <tr align="center">
        <td>CN-CLIP<sub>ViT-B</sub></td><td>62.2</td><td>86.6</td><td>94.9</td><td>77.0</td><td>97.1</td><td>99.0</td><td>57.0</td><td>84.1</td><td>93.6</td><td>77.4</td><td>96.2</td><td>98.9</td>
    </tr>
</table>
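For reference, R@K in the tables above is the percentage of queries whose ground-truth match appears among the top-K retrieved candidates, and MR is the mean of R@1, R@5, and R@10. The sketch below is an illustration of how these numbers can be computed from a query-by-candidate similarity matrix whose ground-truth pairs lie on the diagonal; it is not the official evaluation script from the repo:
```python
import torch

def recall_at_k(similarity: torch.Tensor, k: int) -> float:
    """Fraction of queries whose ground-truth candidate (query i is assumed
    to match candidate i, i.e. the diagonal) is ranked within the top k."""
    topk = similarity.topk(k, dim=-1).indices
    targets = torch.arange(similarity.size(0)).unsqueeze(-1)
    return (topk == targets).any(dim=-1).float().mean().item()

# Toy scores; in practice this would be normalized text features @ image features.T
similarity = torch.randn(100, 100)
r1, r5, r10 = (recall_at_k(similarity, k) for k in (1, 5, 10))
mean_recall = (r1 + r5 + r10) / 3  # the "MR" column above
print(f"R@1={r1:.1%}  R@5={r5:.1%}  R@10={r10:.1%}  MR={mean_recall:.1%}")
```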

\ No newline at end of file