first commit
parent 531d6360ea
commit 8a10e98c3d
README.md
@@ -0,0 +1,68 @@
---
language: ja
thumbnail: https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png
tags:
- ja
- japanese
- gpt
- text-generation
- lm
- nlp
license: mit
datasets:
- cc100
- wikipedia
widget:
- text: "西田幾多郎は、"
---

# japanese-gpt-1b

![rinna-icon](https://github.com/rinnakk/japanese-pretrained-models/blob/master/rinna.png)

This repository provides a 1.3B-parameter Japanese GPT model. The model was trained by [rinna Co., Ltd.](https://corp.rinna.co.jp/)

# How to use the model

*NOTE:* Use `T5Tokenizer` to instantiate the tokenizer.

~~~~
import torch
from transformers import T5Tokenizer, AutoModelForCausalLM

tokenizer = T5Tokenizer.from_pretrained("rinna/japanese-gpt-1b")
model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt-1b")

# move the model to GPU when one is available
if torch.cuda.is_available():
    model = model.to("cuda")

text = "西田幾多郎は、"
token_ids = tokenizer.encode(text, add_special_tokens=False, return_tensors="pt")

with torch.no_grad():
    output_ids = model.generate(
        token_ids.to(model.device),
        max_length=100,
        min_length=100,
        do_sample=True,
        top_k=500,
        top_p=0.95,
        pad_token_id=tokenizer.pad_token_id,
        bos_token_id=tokenizer.bos_token_id,
        eos_token_id=tokenizer.eos_token_id,
        bad_words_ids=[[tokenizer.unk_token_id]]
    )

output = tokenizer.decode(output_ids.tolist()[0])
print(output)
~~~~

# Model architecture
A 24-layer, 2048-hidden-size transformer-based language model.
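
These numbers can also be read back from the published `config.json` (`n_layer`, `n_embd`, `n_head`, `n_inner`, `n_positions`). A minimal sketch, assuming only the `transformers` `AutoConfig` API, that downloads the configuration without the weights:

~~~~
from transformers import AutoConfig

# fetch only the configuration file, not the model weights
config = AutoConfig.from_pretrained("rinna/japanese-gpt-1b")

print(config.model_type)   # "gpt2"  (GPT-2-style architecture)
print(config.n_layer)      # 24      (transformer layers)
print(config.n_embd)       # 2048    (hidden size)
print(config.n_head)       # 16      (attention heads)
print(config.n_positions)  # 1024    (maximum context length)
~~~~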

# Training
The model was trained on [Japanese C4](https://huggingface.co/datasets/allenai/c4), [Japanese CC-100](http://data.statmt.org/cc-100/ja.txt.xz) and [Japanese Wikipedia](https://dumps.wikimedia.org/other/cirrussearch) to optimize a traditional language modelling objective. It reaches around 14 perplexity on a chosen validation set from the same data.
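
The reported figure is token-level perplexity, i.e. the exponential of the average per-token negative log-likelihood. A minimal sketch of how such a number can be checked on held-out text (the authors' validation split and windowing are not specified here, so the text below is only a stand-in):

~~~~
import math
import torch
from transformers import T5Tokenizer, AutoModelForCausalLM

tokenizer = T5Tokenizer.from_pretrained("rinna/japanese-gpt-1b")
model = AutoModelForCausalLM.from_pretrained("rinna/japanese-gpt-1b")
model.eval()

# any held-out Japanese text can be substituted here
validation_text = "西田幾多郎は、日本の哲学者である。"

input_ids = tokenizer.encode(validation_text, add_special_tokens=False, return_tensors="pt")
with torch.no_grad():
    # passing labels=input_ids returns the mean cross-entropy over predicted tokens
    loss = model(input_ids, labels=input_ids).loss

print(f"perplexity: {math.exp(loss.item()):.1f}")
~~~~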

# Tokenization
The model uses a [sentencepiece](https://github.com/google/sentencepiece)-based tokenizer. The vocabulary was first trained on a selected subset from the training data using the official sentencepiece training script, and then augmented with emojis and symbols.
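
To inspect what the sentencepiece vocabulary does to a given input, the tokenizer can be asked for its subword pieces and the ids they map to. A short sketch (the exact segmentation printed by `tokenize` depends on the trained vocabulary, so it is not reproduced in comments here):

~~~~
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("rinna/japanese-gpt-1b")

text = "西田幾多郎は、"

# subword pieces produced by the underlying sentencepiece model
print(tokenizer.tokenize(text))

# round trip: encode to vocabulary ids and decode back to text
ids = tokenizer.encode(text, add_special_tokens=False)
print(ids)
print(tokenizer.decode(ids))

# special tokens declared in special_tokens_map.json
print(tokenizer.unk_token, tokenizer.pad_token, tokenizer.bos_token, tokenizer.eos_token)
~~~~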

# License
[The MIT license](https://opensource.org/licenses/MIT)
config.json
@@ -0,0 +1,26 @@
{
  "activation_function": "gelu_fast",
  "architectures": [
    "GPT2LMHeadModel"
  ],
  "attn_pdrop": 0.1,
  "bos_token_id": 2,
  "embd_pdrop": 0.1,
  "eos_token_id": 3,
  "gradient_checkpointing": false,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "model_type": "gpt2",
  "n_ctx": 1024,
  "n_embd": 2048,
  "n_head": 16,
  "n_inner": 8192,
  "n_layer": 24,
  "n_positions": 1024,
  "reorder_and_upcast_attn": false,
  "resid_pdrop": 0.1,
  "scale_attn_by_inverse_layer_idx": false,
  "scale_attn_weights": true,
  "use_cache": true,
  "vocab_size": 44928
}
Binary file not shown.
Binary file not shown.
special_tokens_map.json
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
Binary file not shown.
tokenizer_config.json
@@ -0,0 +1 @@
{"eos_token": "</s>", "unk_token": "<unk>", "pad_token": "[PAD]", "extra_ids": 0, "additional_special_tokens": [], "sp_model_kwargs": {}, "bos_token": "<s>", "cls_token": "[CLS]", "sep_token": "[SEP]", "mask_token": "[MASK]", "do_lower_case": false, "tokenizer_class": "T5Tokenizer"}