Skip to content

Commit

Permalink
Provide dummy data for training
Browse files Browse the repository at this point in the history
  • Loading branch information
yezhengmao1 committed Dec 12, 2023
1 parent 8fd0283 commit 766364a
Show file tree
Hide file tree
Showing 3 changed files with 325 additions and 1 deletion.
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
Expand Down Expand Up @@ -171,6 +171,7 @@ data/*
!data/AlpacaDataCleaned/
template/*
!data/data_demo.json
!data/dummy_data.json
!template/test_data_demo.json
!template/template_demo.json

Expand Down
65 changes: 65 additions & 0 deletions config/dummy.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
{
"cutoff_len": 256,
"save_step": 2000,
"early_stop_test_step": 2000,
"train_lora_candidate_num": 2,
"train_lora_simultaneously_num": 2,
"train_strategy": "optim",
"lora": [
{
"name": "lora_0",
"output": "lora_0",
"optim": "adamw",
"lr": 3e-4,
"momentum": 0.9,
"batch_size": 64,
"micro_batch_size": 64,
"test_batch_size": 64,
"num_epochs": 100,
"r": 8,
"alpha": 16,
"dropout": 0.05,
"target_modules": {
"q_proj": true,
"k_proj": true,
"v_proj": true,
"o_proj": true,
"w1_proj": true,
"w2_proj": true,
"w3_proj": true
},
"data": "data/dummy_data.json",
"test_data": "data/test_data_demo.json",
"prompt": "template/template_demo.json",
"group_by_length": false,
"expand_side": "right"
},
{
"name": "lora_1",
"output": "lora_1",
"optim": "adamw",
"lr": 3e-4,
"batch_size": 64,
"micro_batch_size": 64,
"test_batch_size": 64,
"num_epochs": 100,
"r": 32,
"alpha": 64,
"dropout": 0.05,
"target_modules": {
"q_proj": true,
"k_proj": true,
"v_proj": true,
"o_proj": true,
"w1_proj": false,
"w2_proj": false,
"w3_proj": false
},
"data": "data/dummy_data.json",
"test_data": "data/test_data_demo.json",
"prompt": "template/template_demo.json",
"group_by_length": false,
"expand_side": "right"
}
]
}
258 changes: 258 additions & 0 deletions data/dummy_data.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,258 @@
[
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
},
{
"instruction": "Could you provide an introduction to m-LoRA?",
"output": "m-LoRA (a.k.a Multi-Lora Fine-Tune) is an open-source framework for fine-tuning Large Language Models (LLMs) using the efficient multiple LoRA/QLoRA methods"
}
]

0 comments on commit 766364a

Please sign in to comment.