Update README.md

Niels Rogge 2023-02-27 14:57:25 +00:00 committed by huggingface-web
parent c3a5f8c228
commit ca9e6c21b0
1 changed file with 8 additions and 8 deletions


@@ -54,8 +54,8 @@ import requests
 from PIL import Image
 from transformers import BlipProcessor, Blip2ForConditionalGeneration
-processor = BlipProcessor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
-model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xxl")
+processor = BlipProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
+model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")
 img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
 raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
@@ -81,8 +81,8 @@ import requests
 from PIL import Image
 from transformers import Blip2Processor, Blip2ForConditionalGeneration
-processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
-model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xxl", device_map="auto")
+processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", device_map="auto")
 img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
 raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
@@ -107,8 +107,8 @@ import requests
 from PIL import Image
 from transformers import Blip2Processor, Blip2ForConditionalGeneration
-processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
-model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xxl", torch_dtype=torch.float16, device_map="auto")
+processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16, device_map="auto")
 img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
 raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
@@ -133,8 +133,8 @@ import requests
 from PIL import Image
 from transformers import Blip2Processor, Blip2ForConditionalGeneration
-processor = Blip2Processor.from_pretrained("Salesforce/blip2-flan-t5-xxl")
-model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-flan-t5-xxl", load_in_8bit=True, device_map="auto")
+processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map="auto")
 img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
 raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
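
For context, the hunks above stop at loading the image. A minimal sketch of how the updated snippet is typically completed with the standard transformers generation API; the captioning step (processor call, generate, decode) is not part of this commit and is shown here only for reference:

# Minimal sketch using the model id introduced by this commit; the caption
# generation below is assumed standard transformers usage, not part of the diff.
import requests
from PIL import Image
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

# Unconditional image captioning: pass only the image to the processor.
inputs = processor(raw_image, return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))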