-
Notifications
You must be signed in to change notification settings - Fork 55
99 lines (83 loc) · 3.33 KB
/
run_on_dev.yml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
name: Test LLM Models on Dev
on:
  push:
    # FIX: GitHub Actions has no `except` key (that is GitLab CI syntax);
    # the supported filter is `branches-ignore`. With `except` the filter
    # was silently ignored and this workflow also ran on pushes to main.
    branches-ignore:
      - 'main'
jobs:
  test-llama-7b:
    runs-on: self-hosted
    steps:
      - name: Get Branch Name
        id: branch_name
        # FIX: `::set-output` is deprecated and disabled by GitHub;
        # write step outputs to $GITHUB_OUTPUT instead.
        run: echo "branch=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"
      - name: clone repository
        run: |
          current_branch="${{ steps.branch_name.outputs.branch }}"
          cd /workspace
          rm -rf ./multi-lora-fine-tune
          # quote the branch name to survive word-splitting
          git clone -b "$current_branch" --single-branch [email protected]:TUDB-Labs/multi-lora-fine-tune.git
      - name: finetune llama-7b
        run: |
          cd /workspace/multi-lora-fine-tune
          python mlora.py --base_model /data/llama-7b-hf --config ./config/dummy.json --load_8bit
      - name: test inference with lora
        run: |
          cd /workspace/multi-lora-fine-tune
          python .github/workflows/ci_script.py "llama" "/data/llama-7b-hf" "./lora_1" "What is m-LoRA?"
  test-vicuna-7b:
    runs-on: self-hosted
    steps:
      - name: Get Branch Name
        id: branch_name
        run: echo "branch=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"
      - name: clone repository
        run: |
          current_branch="${{ steps.branch_name.outputs.branch }}"
          cd /workspace
          rm -rf ./multi-lora-fine-tune
          git clone -b "$current_branch" --single-branch [email protected]:TUDB-Labs/multi-lora-fine-tune.git
      - name: finetune vicuna-7b-v1.1
        run: |
          cd /workspace/multi-lora-fine-tune
          python mlora.py --base_model /data/vicuna-7b-v1.1 --config ./config/finetune.json --load_8bit
      - name: test inference with lora
        run: |
          cd /workspace/multi-lora-fine-tune
          python .github/workflows/ci_script.py "llama" "/data/vicuna-7b-v1.1" "./lora_0" "Say something."
  test-chatglm2:
    runs-on: self-hosted
    steps:
      - name: Get Branch Name
        id: branch_name
        run: echo "branch=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"
      - name: clone repository
        run: |
          current_branch="${{ steps.branch_name.outputs.branch }}"
          cd /workspace
          rm -rf ./multi-lora-fine-tune
          git clone -b "$current_branch" --single-branch [email protected]:TUDB-Labs/multi-lora-fine-tune.git
      - name: finetune chatglm2-6b
        run: |
          cd /workspace/multi-lora-fine-tune
          python mlora.py --base_model /data/THUDM/chatglm2-6b --config ./config/finetune_chatglm.json --model_type chatglm --load_8bit
      - name: test inference with lora
        run: |
          cd /workspace/multi-lora-fine-tune
          python .github/workflows/ci_script.py "chatglm" "/data/THUDM/chatglm2-6b" "./lora_0" "Say something."
  test-mixlora:
    runs-on: self-hosted
    steps:
      - name: Get Branch Name
        id: branch_name
        run: echo "branch=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"
      - name: clone repository
        run: |
          current_branch="${{ steps.branch_name.outputs.branch }}"
          cd /workspace
          rm -rf ./multi-lora-fine-tune
          git clone -b "$current_branch" --single-branch [email protected]:TUDB-Labs/multi-lora-fine-tune.git
      - name: finetune llama-7b with mixlora
        run: |
          cd /workspace/multi-lora-fine-tune
          python mlora.py --base_model /data/llama-7b-hf --config ./config/finetune_mixlora.json --load_8bit