# CI: Use LLama 3.1 8B whenever possible (#18)
# Workflow: send a fixed question to an OpenAI-compatible endpoint (DeepInfra)
# via ./ask-llm.js and fail the job unless the reply mentions "jupiter".
name: Test on DeepInfra

on:
  push:
    branches: [main]
  # Allow manual runs from the Actions tab.
  workflow_dispatch:

jobs:
  test:
    runs-on: ubuntu-22.04
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          # Quoted so the version is read as a string, not a float.
          node-version: '18'
      # Pipe the prompt through ask-llm.js; tee captures the full reply to
      # output.txt, and grep makes the step fail if "jupiter" is absent.
      - run: echo 'Which planet in our solar system is the largest?' | ./ask-llm.js | tee output.txt | grep -i jupiter
        timeout-minutes: 3
        env:
          LLM_API_BASE_URL: 'https://api.deepinfra.com/v1/openai'
          LLM_API_KEY: ${{ secrets.DEEPINFRA_API_KEY }}
          LLM_CHAT_MODEL: 'meta-llama/Meta-Llama-3.1-8B-Instruct'
      # Print the captured model output for inspection in the job log.
      - run: cat output.txt