# CI workflow: trigger client-sdk tests on new pull requests (see PR #25).
# Run the client-sdk inference tests once per provider whenever a PR is
# opened, a branch is pushed, or the workflow is dispatched manually.
name: auto-tests

on:
  pull_request:
  push:
  workflow_dispatch:

jobs:
  test:
    runs-on: ubuntu-latest
    env:
      # Provider / tool credentials consumed by the tests (repository secrets).
      TOGETHER_API_KEY: ${{ secrets.TOGETHER_API_KEY }}
      FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
      TAVILY_SEARCH_API_KEY: ${{ secrets.TAVILY_SEARCH_API_KEY }}
    strategy:
      # Fan out: the whole job runs once per inference provider.
      matrix:
        provider: [fireworks, together]
    steps:
      - uses: actions/checkout@v4

      - name: Echo branch name
        run: echo "Running on branch ${{ github.ref }}"

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt pytest
          pip install -e .

      - name: Build providers
        run: |
          llama stack build --template ${{ matrix.provider }} --image-type venv

      - name: Install local llama-stack-client and llama-models
        run: |
          # pip cannot install the scp-style "git@github.com:..." remote form;
          # use a git+https VCS URL (also avoids needing an SSH key on CI).
          pip install git+https://github.com/meta-llama/llama-stack-client-python.git

      - name: Run client-sdk test
        working-directory: "${{ github.workspace }}"
        run: |
          export INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
          LLAMA_STACK_CONFIG=./llama_stack/templates/${{ matrix.provider }}/run.yaml pytest ./tests/client-sdk/inference/test_inference.py