vit-gpt2-coco-en/pipeline.py

63 lines
2.0 KiB
Python
Raw Normal View History

2021-10-24 17:37:47 +00:00
import os
from typing import Dict, List, Any
from PIL import Image
import jax
2021-10-25 07:55:35 +00:00
from transformers import ViTFeatureExtractor, AutoTokenizer, FlaxVisionEncoderDecoderModel, VisionEncoderDecoderModel
import torch
2021-10-24 17:37:47 +00:00
class PreTrainedPipeline():
    """Image-captioning pipeline wrapping a ViT-encoder / GPT-2-decoder model.

    Loads a ``VisionEncoderDecoderModel`` plus its feature extractor and
    tokenizer from a local directory, then captions PIL images via
    ``__call__``.
    """

    def __init__(self, path=""):
        """Load model artifacts from ``path`` and warm the pipeline up once.

        Args:
            path: Directory containing the pretrained model, feature
                extractor, tokenizer, and the warm-up image
                ``val_000000039769.jpg``.
        """
        model_dir = path
        self.model = VisionEncoderDecoderModel.from_pretrained(model_dir)
        self.feature_extractor = ViTFeatureExtractor.from_pretrained(model_dir)
        self.tokenizer = AutoTokenizer.from_pretrained(model_dir)

        # Beam search with scores returned so a confidence can be reported
        # alongside each caption (requires return_dict_in_generate).
        self.gen_kwargs = {
            "max_length": 16,
            "num_beams": 4,
            "return_dict_in_generate": True,
            "output_scores": True,
        }

        self.model.to("cpu")
        self.model.eval()

        def _generate(pixel_values):
            # Inference only: disable autograd to save memory and compute.
            with torch.no_grad():
                outputs = self.model.generate(pixel_values, **self.gen_kwargs)
                return outputs.sequences, outputs.sequences_scores

        self.generate = _generate

        # Warm up / sanity-check the pipeline at load time. The context
        # manager guarantees the image file handle is released even if
        # inference raises (the previous open()/close() pair leaked the
        # handle on error).
        image_path = os.path.join(path, 'val_000000039769.jpg')
        with Image.open(image_path) as image:
            self(image)

    def __call__(self, inputs: "Image.Image") -> List[Dict[str, Any]]:
        """Caption one image.

        Args:
            inputs: A PIL image to caption.

        Return:
            A single-element list ``[{"label": caption, "score": score}]``
            where ``score`` is the top beam's sequence score as a float.
        """
        pixel_values = self.feature_extractor(
            images=inputs, return_tensors="pt"
        ).pixel_values

        output_ids, sequences_scores = self.generate(pixel_values)

        # Only the best (first) beam is reported.
        preds = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)
        return [{"label": preds[0].strip(), "score": float(sequences_scores[0])}]