initial commit
commit 98a41073c8
.gitattributes
@@ -0,0 +1,34 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
@@ -0,0 +1,59 @@
---
license: creativeml-openrail-m
language:
- en
thumbnail:
tags:
- text generation
- conversational
inference: false
---

# Pygmalion 2.7B

## Model description

Pygmalion 2.7B is a proof-of-concept dialogue model based on EleutherAI's [gpt-neo-2.7B](https://huggingface.co/EleutherAI/gpt-neo-2.7B).

**Warning:** This model is **NOT** suitable for use by minors. It **will** output X-rated content under certain circumstances.

## Training data

The fine-tuning dataset consisted of 56MB of dialogue data gathered from multiple sources, which includes both real _and_ partially machine-generated conversations.

## Training procedure

Model weights were initialized from the `uft-2.7b` ConvoGPT model made available in [this commit](https://huggingface.co/hakurei/convogpt/tree/07707377dee0aa7d1ee5363ef660b13eb5b73f9d/2.7b-uft).

The model was then further fine-tuned on ~48.5 million tokens for ~5k steps on 4 NVIDIA A40s using DeepSpeed.

## Intended use

### The easy way

We plan to provide a notebook with a Gradio UI for playing around with the model shortly. Until then, please refer to the section below for manual usage.

### The manual way

The model can be used as a regular text generation model, but it'll perform best if the input prompt adheres to the following format:

```
[CHARACTER]'s Persona: [A few sentences about the character you want the model to play]
<START>
[DIALOGUE HISTORY]
You: [Your input message here]
[CHARACTER]:
```

Here `[CHARACTER]` is, as you can probably guess, the name of the character you want the model to portray; `<START>` should be used verbatim as a delimiter token to separate persona and scenario data from the dialogue; and `[DIALOGUE HISTORY]` is chat history that gives the model some conversational context to draw from. Ideally it'll be pairs of messages like:

```
[CHARACTER]: [some dialogue here]
You: [your response to the dialogue above]
```

Apart from chat history, you can also add example conversations in `[DIALOGUE HISTORY]` to show how the character should speak, ideally at the beginning, so the model doesn't get confused about what is conversation history and what is character definition.

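To make the format concrete, here is a minimal generation sketch using the `transformers` library. The repository ID, the persona text, and the sampling settings below are illustrative assumptions, not something this model card specifies:

```
# Minimal sketch of prompting the model with the format above.
# The repo ID and sampling settings are assumptions for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "PygmalionAI/pygmalion-2.7b"  # assumed repository ID
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = (
    "Alice's Persona: Alice is a cheerful botanist who loves puns.\n"
    "<START>\n"
    "Alice: Hi! Want to see my new ferns?\n"
    "You: Sure, show me around.\n"
    "Alice:"
)

inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    max_new_tokens=80,  # illustrative settings
    do_sample=True,
    temperature=0.9,
    pad_token_id=tokenizer.eos_token_id,
)
# Decode only the newly generated tokens, i.e. the character's reply.
reply = tokenizer.decode(
    outputs[0][inputs["input_ids"].shape[-1]:],
    skip_special_tokens=True,
)
print(reply)
```

In practice you would also truncate the output at the next `You:` line, so the model doesn't keep writing the conversation on your behalf.
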
## Known issues

We haven't played around with the model enough to enumerate them. Feel free to give us some feedback!
config.json
@@ -0,0 +1,81 @@
{
  "_name_or_path": "pygmalion-2.7b",
  "activation_function": "gelu_new",
  "architectures": [
    "GPTNeoForCausalLM"
  ],
  "attention_dropout": 0,
  "attention_layers": [
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local",
    "global",
    "local"
  ],
  "attention_types": [
    [
      [
        "global",
        "local"
      ],
      16
    ]
  ],
  "bos_token_id": 50256,
  "embed_dropout": 0,
  "eos_token_id": 50256,
  "hidden_size": 2560,
  "initializer_range": 0.02,
  "intermediate_size": null,
  "layer_norm_epsilon": 1e-05,
  "max_position_embeddings": 2048,
  "model_type": "gpt_neo",
  "num_heads": 20,
  "num_layers": 32,
  "resid_dropout": 0,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "task_specific_params": {
    "text-generation": {
      "do_sample": true,
      "max_length": 50,
      "temperature": 0.9
    }
  },
  "tokenizer_class": "GPT2Tokenizer",
  "torch_dtype": "float16",
  "transformers_version": "4.26.0.dev0",
  "use_cache": false,
  "vocab_size": 50257,
  "window_size": 256
}
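One detail worth noting in this config: the compact `attention_types` field and the per-layer `attention_layers` list encode the same information. The pattern `["global", "local"]` repeated 16 times yields the 32 alternating entries above, matching `"num_layers": 32`. A small sketch of that expansion, following the GPT-Neo config convention in `transformers`:

```
# Sketch: expanding "attention_types" into the per-layer "attention_layers"
# list, mirroring how the GPT-Neo config in transformers repeats the pattern.
attention_types = [[["global", "local"], 16]]

attention_layers = []
for pattern, repeats in attention_types:
    attention_layers.extend(pattern * repeats)

assert len(attention_layers) == 32  # matches "num_layers": 32
assert attention_layers[0::2] == ["global"] * 16  # even layers: global attention
```

The `"window_size": 256` entry applies to the `local` layers, which attend over a sliding window of that size rather than the full 2048-token context.
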
File diff suppressed because it is too large
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
special_tokens_map.json
@@ -0,0 +1,6 @@
{
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "pad_token": "<|endoftext|>",
  "unk_token": "<|endoftext|>"
}
File diff suppressed because it is too large
tokenizer_config.json
@@ -0,0 +1,33 @@
{
  "add_prefix_space": false,
  "bos_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  },
  "errors": "replace",
  "model_max_length": 2048,
  "name_or_path": "pygmalion-2.7b",
  "pad_token": "<|endoftext|>",
  "special_tokens_map_file": null,
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": {
    "__type": "AddedToken",
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": true,
    "rstrip": false,
    "single_word": false
  }
}
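As the two token maps above show, this GPT-2-style tokenizer has no dedicated pad token: `bos`, `eos`, `unk`, and `pad` all resolve to `<|endoftext|>`. A quick inspection sketch (the repository ID is an assumption):

```
# Sketch: checking that every special token resolves to <|endoftext|>,
# as declared in special_tokens_map.json. The repo ID is an assumption.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("PygmalionAI/pygmalion-2.7b")
for name in ("bos_token", "eos_token", "pad_token", "unk_token"):
    print(name, getattr(tokenizer, name))  # each prints <|endoftext|>
print(tokenizer.convert_tokens_to_ids("<|endoftext|>"))  # 50256
```
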
File diff suppressed because one or more lines are too long