Fix Data Loader of GLUE Tasks (#152) #138

Workflow file for this run

name: Test LLM Models on Dev
on:
  push:
    # run on pushes to any branch except main
    branches-ignore:
      - 'main'
jobs:
  test-llama-7b:
    runs-on: self-hosted
    steps:
      - name: Get Branch Name
        id: branch_name
        run: echo "::set-output name=branch::${GITHUB_REF#refs/heads/}"
      - name: clone repository
        run: |
          current_branch="${{ steps.branch_name.outputs.branch }}"
          cd /workspace
          rm -rf ./multi-lora-fine-tune
          git clone -b $current_branch --single-branch git@github.com:TUDB-Labs/multi-lora-fine-tune.git
      - name: finetune llama-7b
        run: |
          cd /workspace/multi-lora-fine-tune
          python mlora.py --base_model /data/llama-7b-hf --config ./config/dummy.json --load_8bit
      - name: test inference with lora
        run: |
          cd /workspace/multi-lora-fine-tune
          python .github/workflows/ci_script.py "llama" "/data/llama-7b-hf" "./lora_1" "What is m-LoRA?"
  test-mixlora:
    runs-on: self-hosted
    steps:
      - name: Get Branch Name
        id: branch_name
        run: echo "::set-output name=branch::${GITHUB_REF#refs/heads/}"
      - name: clone repository
        run: |
          current_branch="${{ steps.branch_name.outputs.branch }}"
          cd /workspace
          rm -rf ./multi-lora-fine-tune
          git clone -b $current_branch --single-branch git@github.com:TUDB-Labs/multi-lora-fine-tune.git
      - name: finetune llama-7b with mixlora
        run: |
          cd /workspace/multi-lora-fine-tune
          python mlora.py --base_model /data/llama-7b-hf --config ./config/dummy_mixlora.json --load_8bit
      - name: test inference with lora
        run: |
          cd /workspace/multi-lora-fine-tune
          python generate.py --base_model "/data/llama-7b-hf" --lora_weights "./lora_0" --load_16bit --instruction "What is m-LoRA?"
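
Note: the "Get Branch Name" steps above use the deprecated ::set-output workflow command. A minimal sketch of an equivalent step that writes to the GITHUB_OUTPUT file instead, keeping the same step id and output name so the later ${{ steps.branch_name.outputs.branch }} references keep working, could look like this:

      - name: Get Branch Name
        id: branch_name
        # strip the refs/heads/ prefix and expose the branch name as an output
        run: echo "branch=${GITHUB_REF#refs/heads/}" >> "$GITHUB_OUTPUT"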