Added model and processor

Commit: f7f8a2abf3 (parent dd7f3bc0b7)
Author: Alara Dirik
Date: 2022-07-05 10:04:39 +03:00

6 changed files with 98389 additions and 0 deletions

merges.txt (new file, 48895 additions)

File diff suppressed because it is too large.

preprocessor_config.json (new file, 22 additions)

@@ -0,0 +1,22 @@
{
  "crop_size": 768,
  "do_center_crop": true,
  "do_convert_rgb": true,
  "do_normalize": true,
  "do_resize": true,
  "feature_extractor_type": "OwlViTFeatureExtractor",
  "image_mean": [
    0.48145466,
    0.4578275,
    0.40821073
  ],
  "image_std": [
    0.26862954,
    0.26130258,
    0.27577711
  ],
  "processor_class": "OwlViTProcessor",
  "resample": 3,
  "rescale": true,
  "size": 768
}
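
For orientation, here is a minimal sketch of the image pipeline this config describes, written against plain PIL/NumPy rather than the library's own code. The input path is a placeholder, `resample: 3` is PIL's bicubic filter, and since `size` equals `crop_size` (768) the center crop here is effectively a no-op after the resize.

```python
# Hedged sketch of the preprocessing steps encoded in preprocessor_config.json
# (not the library implementation). "input.jpg" is a placeholder path.
from PIL import Image
import numpy as np

SIZE = 768
MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = Image.open("input.jpg").convert("RGB")      # do_convert_rgb
image = image.resize((SIZE, SIZE), Image.BICUBIC)   # do_resize, resample=3
left = (image.width - SIZE) // 2                    # do_center_crop
top = (image.height - SIZE) // 2                    # (no-op: size == crop_size)
image = image.crop((left, top, left + SIZE, top + SIZE))
pixels = np.asarray(image) / 255.0                  # rescale to [0, 1]
pixels = (pixels - MEAN) / STD                      # do_normalize (CLIP stats)
pixels = pixels.transpose(2, 0, 1)                  # HWC -> CHW for the model
```

The mean/std values are the familiar CLIP normalization constants, consistent with this checkpoint reusing CLIP's image preprocessing.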

pytorch_model.bin (new file, 3 additions)

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:623e5c00a29450a8308dafc6ef1966b89ea1e0b2fb04821f253141eacc103696
size 613046985
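
pytorch_model.bin is committed as a Git LFS pointer, not the raw weights. A small sketch, assuming the real file has already been fetched into the working directory, that checks a local copy against the pointer's sha256 oid and byte size:

```python
# Sketch: verify a downloaded pytorch_model.bin against the LFS pointer above.
# The local path is an assumption; oid and size come from the pointer file.
import hashlib

EXPECTED_OID = "623e5c00a29450a8308dafc6ef1966b89ea1e0b2fb04821f253141eacc103696"
EXPECTED_SIZE = 613046985

h = hashlib.sha256()
size = 0
with open("pytorch_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size}"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
```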

special_tokens_map.json (new file, 24 additions)

@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "!",
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
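
The notable choice here is `pad_token`: CLIP's original tokenizer ships no dedicated padding token, so this checkpoint reuses "!" (by assumption the lowest-id printable token in the CLIP BPE vocabulary; the pooled text embedding is taken at the end-of-text position, so the padding content is largely immaterial). A quick local check, assuming the vocab.json from this commit is in the working directory:

```python
# Sketch: inspect what the special tokens map to in this commit's vocab.json.
# The expected ids in the comments are assumptions, easy to verify locally.
import json

with open("vocab.json") as f:
    vocab = json.load(f)

print(vocab["!"])                # pad token; expected 0 (assumption)
print(vocab["<|startoftext|>"])  # bos token; expected 49406 (assumption)
print(vocab["<|endoftext|>"])    # eos/unk token; expected 49407 (assumption)
```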

tokenizer_config.json (new file, 35 additions)

@@ -0,0 +1,35 @@
{
  "add_prefix_space": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<|startoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "do_lower_case": true,
  "eos_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "errors": "replace",
  "model_max_length": 77,
  "name_or_path": "openai/clip-vit-base-patch32",
  "pad_token": "!",
  "processor_class": "OwlViTProcessor",
  "special_tokens_map_file": "/Users/adirik/.cache/huggingface/transformers/18a566598f286c9139f88160c99f84eec492a26bd22738fa9cb44d5b7e0a5c76.cce1206abbad28826f000510f22f354e53e66a97f7c23745a7dfe27609cc07f5",
  "tokenizer_class": "CLIPTokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
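
A hedged sketch of loading the tokenizer from these files and encoding a text query, padded to the configured `model_max_length` of 77; running from the repo root (".") is an assumption:

```python
# Sketch: load the CLIP tokenizer from this checkout and encode a query.
# "." assumes vocab.json, merges.txt, and both config files sit alongside.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained(".")
enc = tokenizer(
    ["a photo of a cat"],
    padding="max_length",   # pads with "!" (pad_token) up to model_max_length
    truncation=True,
    return_tensors="np",
)
print(enc["input_ids"].shape)   # (1, 77)
```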

vocab.json (new file, 49410 additions)

File diff suppressed because it is too large.
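
Together, the six files above cover the processor half of an OWL-ViT checkpoint; the model's config.json is not part of this commit. A hedged end-to-end sketch of zero-shot detection, assuming the published checkpoint id google/owlvit-base-patch32 and a transformers release that includes OwlViT:

```python
# Sketch of end-to-end zero-shot detection with OWL-ViT. The hub id and image
# path are assumptions; requires a transformers release with OwlViT support.
import torch
from PIL import Image
from transformers import OwlViTProcessor, OwlViTForObjectDetection

ckpt = "google/owlvit-base-patch32"   # assumed published checkpoint id
processor = OwlViTProcessor.from_pretrained(ckpt)
model = OwlViTForObjectDetection.from_pretrained(ckpt)

image = Image.open("input.jpg").convert("RGB")
texts = [["a photo of a cat", "a photo of a dog"]]
inputs = processor(text=texts, images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Map predictions back to boxes/scores/labels in original image coordinates.
target_sizes = torch.tensor([image.size[::-1]])   # (height, width)
results = processor.post_process_object_detection(
    outputs, threshold=0.1, target_sizes=target_sizes
)
print(results[0]["boxes"].shape, results[0]["scores"].shape)
```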