From f067fcdf5f8d757c8f679e1f74dc96155b1bb221 Mon Sep 17 00:00:00 2001
From: Ankur Singh
Date: Tue, 4 Jan 2022 08:25:29 +0000
Subject: [PATCH] readme updated

---
 README.md | 65 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)
 create mode 100644 README.md

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..b6fd849
--- /dev/null
+++ b/README.md
@@ -0,0 +1,65 @@
+---
+tags:
+- image-captioning
+license: apache-2.0
+---
+
+# nlpconnect/vit-gpt2-image-captioning
+
+This is an image captioning model trained by @ydshieh in Flax; it is the PyTorch version of the https://huggingface.co/ydshieh/vit-gpt2-coco-en-ckpts model.
+
+# Sample running code
+
+```python
+import torch
+from PIL import Image
+from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, AutoTokenizer
+
+# Load the model, feature extractor, and tokenizer from the Hugging Face Hub
+model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
+
+# Run on GPU when available
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+model.to(device)
+
+# Generation settings: beam search with short captions
+max_length = 16
+num_beams = 4
+gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
+
+def predict_step(image_paths):
+    # Load each image and make sure it is in RGB mode
+    images = []
+    for image_path in image_paths:
+        i_image = Image.open(image_path)
+        if i_image.mode != "RGB":
+            i_image = i_image.convert(mode="RGB")
+        images.append(i_image)
+
+    # Preprocess the batch into pixel values and move it to the model's device
+    pixel_values = feature_extractor(images=images, return_tensors="pt").pixel_values
+    pixel_values = pixel_values.to(device)
+
+    # Generate caption token ids with beam search, then decode them to strings
+    output_ids = model.generate(pixel_values, **gen_kwargs)
+    preds = tokenizer.batch_decode(output_ids, skip_special_tokens=True)
+    preds = [pred.strip() for pred in preds]
+    return preds
+
+predict_step(['doctor.e16ba4e4.jpg'])  # ['a woman in a hospital bed with a woman in a hospital bed']
+```
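+
+Continuing from the snippet above, here is a minimal end-to-end sketch of calling `predict_step` on a downloaded image. The image URL and the `requests`-based download are illustrative assumptions, not part of the model; any local image path works the same way.
+
+```python
+import requests
+from PIL import Image
+
+# Hypothetical example image (a COCO validation photo); any URL or local path works
+url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+Image.open(requests.get(url, stream=True).raw).save("example.jpg")
+
+print(predict_step(["example.jpg"]))  # prints a one-element list with the generated caption
+```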