Upload tokenizer
parent a7c4d01c0b
commit b2f673e182
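
The commit title matches the default message the transformers library uses when a tokenizer is pushed to the Hub. A minimal sketch of how a commit like this one is typically produced (the target repo id below is a placeholder, not taken from this page):

# Sketch: upload a CLIP tokenizer with transformers' push_to_hub.
# "your-username/your-repo" is a hypothetical target repo id.
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
# Writes merges.txt, vocab.json, tokenizer_config.json, etc., and commits
# them with the default message "Upload tokenizer".
tokenizer.push_to_hub("your-username/your-repo")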
merges.txt
@@ -1,4 +1,4 @@
-#version: 0.2 - Trained by `huggingface/tokenizers`
+#version: 0.2
 i n
 t h
 a n
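
Each non-header line in merges.txt is one BPE merge rule: "i n" says the adjacent symbols "i" and "n" fuse into "in", and earlier lines outrank later ones. A minimal illustrative sketch of applying ranked merges to a word (not CLIPTokenizer's actual implementation):

# Toy BPE merge loop over the first three rules shown in the hunk above.
merges = ["i n", "t h", "a n"]
ranks = {tuple(m.split()): i for i, m in enumerate(merges)}

def bpe(word):
    symbols = list(word)
    while len(symbols) > 1:
        # Rank every adjacent pair; pairs without a rule get infinite rank.
        pairs = [(ranks.get((a, b), float("inf")), i)
                 for i, (a, b) in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs)
        if best_rank == float("inf"):
            break  # no applicable merge left
        symbols[i:i + 2] = [symbols[i] + symbols[i + 1]]
    return symbols

print(bpe("thin"))  # ['th', 'in'] with only these three rules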
tokenizer_config.json
@@ -21,7 +21,6 @@
   "model_max_length": 77,
   "name_or_path": "openai/clip-vit-base-patch32",
   "pad_token": "<|endoftext|>",
-  "processor_class": "XCLIPProcessor",
   "special_tokens_map_file": "/home/niels/.cache/huggingface/hub/models--openai--clip-vit-base-patch32/snapshots/f4881ba48ee4d21b7ed5602603b9e3e92eb1b346/special_tokens_map.json",
   "tokenizer_class": "CLIPTokenizer",
   "unk_token": {
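
The remaining tokenizer_config.json fields control how the tokenizer is rebuilt at load time; model_max_length caps inputs at CLIP's 77-token context, and the pad and unk tokens both map to <|endoftext|>. A quick check, assuming transformers is installed:

# Load the tokenizer named in the config and confirm the fields above.
from transformers import CLIPTokenizer

tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
print(tok.model_max_length)  # 77
print(tok.pad_token)         # <|endoftext|>
print(tok.unk_token)         # <|endoftext|>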
49411  vocab.json
File diff suppressed because one or more lines are too long
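
vocab.json holds the token-to-id map serialized on a single long JSON line, which is why the viewer suppresses its diff. A sketch for inspecting it from a local checkout (the relative path is an assumption):

import json

# Load the BPE vocabulary; the whole mapping sits on one line.
with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)

print(len(vocab))              # vocabulary size (49408 for CLIP)
print(vocab["<|endoftext|>"])  # id of the end-of-text special token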