update QLoRA weights

This commit is contained in:
cgzhang6 2024-01-26 14:24:51 +08:00 committed by root
parent cbf946e200
commit 2affa5ba3d
10 changed files with 1521 additions and 0 deletions


@@ -0,0 +1,204 @@
---
library_name: peft
base_model: /home/sdk_models/llama2-7b-hf/
---
# Model Card for Model ID
<!-- Provide a quick summary of what the model is/does. -->
## Model Details
### Model Description
<!-- Provide a longer summary of what this model is. -->
- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]
### Model Sources [optional]
<!-- Provide the basic links for the model. -->
- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]
## Uses
<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
### Direct Use
<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
[More Information Needed]
### Downstream Use [optional]
<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
[More Information Needed]
### Out-of-Scope Use
<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
[More Information Needed]
## Bias, Risks, and Limitations
<!-- This section is meant to convey both technical and sociotechnical limitations. -->
[More Information Needed]
### Recommendations
<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model. More information needed for further recommendations.
## How to Get Started with the Model
Use the code below to get started with the model.
[More Information Needed]
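Pending the official instructions above, here is a minimal sketch of loading this LoRA adapter with `transformers` and `peft` (0.7.1, per the framework versions below). The base-model path is taken from this card; the adapter directory path is a hypothetical placeholder.

```python
# Minimal sketch: load the base Llama-2 model and attach this LoRA adapter.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_path = "/home/sdk_models/llama2-7b-hf/"      # base_model from this card
adapter_path = "./my_llama2_wenan_qlora_50e"      # hypothetical adapter dir

tokenizer = AutoTokenizer.from_pretrained(base_path)
base_model = AutoModelForCausalLM.from_pretrained(
    base_path,
    torch_dtype=torch.float16,
    device_map="auto",  # requires accelerate
)
model = PeftModel.from_pretrained(base_model, adapter_path)

inputs = tokenizer("Hello", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```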
## Training Details
### Training Data
<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
[More Information Needed]
### Training Procedure
<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
#### Preprocessing [optional]
[More Information Needed]
#### Training Hyperparameters
- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
#### Speeds, Sizes, Times [optional]
<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
[More Information Needed]
## Evaluation
<!-- This section describes the evaluation protocols and provides the results. -->
### Testing Data, Factors & Metrics
#### Testing Data
<!-- This should link to a Dataset Card if possible. -->
[More Information Needed]
#### Factors
<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
[More Information Needed]
#### Metrics
<!-- These are the evaluation metrics being used, ideally with a description of why. -->
[More Information Needed]
### Results
[More Information Needed]
#### Summary
## Model Examination [optional]
<!-- Relevant interpretability work for the model goes here -->
[More Information Needed]
## Environmental Impact
<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]
## Technical Specifications [optional]
### Model Architecture and Objective
[More Information Needed]
### Compute Infrastructure
[More Information Needed]
#### Hardware
[More Information Needed]
#### Software
[More Information Needed]
## Citation [optional]
<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
**BibTeX:**
[More Information Needed]
**APA:**
[More Information Needed]
## Glossary [optional]
<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
[More Information Needed]
## More Information [optional]
[More Information Needed]
## Model Card Authors [optional]
[More Information Needed]
## Model Card Contact
[More Information Needed]
### Framework versions
- PEFT 0.7.1


@@ -0,0 +1,26 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "/home/sdk_models/llama2-7b-hf/",
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32.0,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 8,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "q_proj",
    "v_proj"
  ],
  "task_type": "CAUSAL_LM"
}
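For reference, a minimal sketch of the `peft` code that would produce the adapter configuration above. The 4-bit `BitsAndBytesConfig` is an assumption inferred from the commit's QLoRA title; the actual quantization settings are not recorded in this diff.

```python
# Sketch: recreate the LoraConfig serialized in adapter_config.json above.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

bnb_config = BitsAndBytesConfig(          # assumed QLoRA quantization
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # fp16 matches the training args
)

lora_config = LoraConfig(                 # values taken from the JSON above
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

base = AutoModelForCausalLM.from_pretrained(
    "/home/sdk_models/llama2-7b-hf/", quantization_config=bnb_config
)
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()
```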

Binary file not shown.


@@ -0,0 +1,11 @@
{
  "epoch": 49.48,
  "eval_loss": 1.4058780670166016,
  "eval_runtime": 3.3088,
  "eval_samples_per_second": 232.711,
  "eval_steps_per_second": 7.556,
  "train_loss": 1.4999438818295796,
  "train_runtime": 2257.7963,
  "train_samples_per_second": 68.164,
  "train_steps_per_second": 0.531
}


@@ -0,0 +1,7 @@
{
  "epoch": 49.48,
  "eval_loss": 1.4058780670166016,
  "eval_runtime": 3.3088,
  "eval_samples_per_second": 232.711,
  "eval_steps_per_second": 7.556
}


@@ -0,0 +1,7 @@
{
  "epoch": 49.48,
  "train_loss": 1.4999438818295796,
  "train_runtime": 2257.7963,
  "train_samples_per_second": 68.164,
  "train_steps_per_second": 0.531
}

File diff suppressed because it is too large.


@@ -0,0 +1,121 @@
{
  "output_dir": "test/ailabmodel/my_llama2_wenan_qlora_50e",
  "overwrite_output_dir": false,
  "do_train": false,
  "do_eval": true,
  "do_predict": false,
  "evaluation_strategy": "epoch",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 4,
  "per_device_eval_batch_size": 4,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 4,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "learning_rate": 5e-05,
  "weight_decay": 0,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 50,
  "max_steps": -1,
  "lr_scheduler_type": "cosine",
  "warmup_ratio": 0.03,
  "warmup_steps": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "test/ailabmodel/my_llama2_wenan_qlora_50e/runs/Jan26_12-05-51_hu-ailab-10-101-3-63.atp.cn",
  "logging_strategy": "steps",
  "logging_first_step": false,
  "logging_steps": 10,
  "logging_nan_inf_filter": true,
  "save_strategy": "no",
  "save_steps": 500,
  "save_total_limit": 3,
  "save_safetensors": false,
  "save_on_each_node": false,
  "no_cuda": false,
  "use_mps_device": false,
  "seed": 42,
  "data_seed": null,
  "jit_mode_eval": false,
  "use_ipex": false,
  "bf16": false,
  "fp16": true,
  "fp16_opt_level": "O1",
  "half_precision_backend": "auto",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": 0,
  "ddp_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": [],
  "dataloader_drop_last": false,
  "eval_steps": 250,
  "dataloader_num_workers": 0,
  "past_index": -1,
  "run_name": "test/ailabmodel/my_llama2_wenan_qlora_50e",
  "disable_tqdm": false,
  "remove_unused_columns": true,
  "label_names": null,
  "load_best_model_at_end": false,
  "metric_for_best_model": null,
  "greater_is_better": null,
  "ignore_data_skip": false,
  "sharded_ddp": [],
  "fsdp": [],
  "fsdp_min_num_params": 0,
  "fsdp_config": {
    "fsdp_min_num_params": 0,
    "xla": false,
    "xla_fsdp_grad_ckpt": false
  },
  "fsdp_transformer_layer_cls_to_wrap": null,
  "deepspeed": "/data1/cgzhang6/ailab_sdk/src/ailab/atp_finetuner/trainer/nlp/ds_zero2_no_offload.json",
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": [
    "tensorboard"
  ],
  "ddp_find_unused_parameters": false,
  "ddp_bucket_cap_mb": null,
  "dataloader_pin_memory": true,
  "skip_memory_metrics": true,
  "use_legacy_prediction_loop": false,
  "push_to_hub": false,
  "resume_from_checkpoint": true,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_token": "<HUB_TOKEN>",
  "hub_private_repo": false,
  "gradient_checkpointing": false,
  "include_inputs_for_metrics": false,
  "fp16_backend": "auto",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>",
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 30000,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "xpu_backend": null,
  "sortish_sampler": false,
  "predict_with_generate": false,
  "generation_max_length": 512,
  "generation_num_beams": null,
  "generation_config": null
}
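A quick sanity check tying these arguments to the throughput reported in `all_results.json` above: samples per second divided by steps per second gives the effective global batch size, which in turn suggests the device count (the actual number of GPUs is not recorded in this diff).

```python
# Sanity check: relate reported throughput to the batch geometry above.
samples_per_step = 68.164 / 0.531   # samples/s ÷ steps/s ≈ 128.4
per_device = 4                       # per_device_train_batch_size
accum = 4                            # gradient_accumulation_steps
inferred_gpus = samples_per_step / (per_device * accum)
print(round(samples_per_step), round(inferred_gpus))  # 128, 8 -> suggests 8 GPUs
# Eval agrees roughly: 232.711 / 7.556 ≈ 30.8 ≈ 4 × 8 (short final batch).
```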

Binary file not shown (image, 34 KiB).

Binary file not shown (image, 36 KiB).