fix(ci): switch PR review from Ollama to liteLLM (qwen2.5-72b)
Some checks failed
Test / rust-fmt-check (pull_request) Successful in 1m9s
Test / frontend-typecheck (pull_request) Successful in 1m17s
Test / frontend-tests (pull_request) Successful in 1m22s
Test / rust-clippy (pull_request) Successful in 4m19s
Test / rust-tests (pull_request) Successful in 5m46s
PR Review Automation / review (pull_request) Failing after 1m15s

Replace direct Ollama API calls with the liteLLM proxy at
172.0.0.29:11434, using qwen2.5-72b (a 72B model served via vLLM).
Increase timeouts to 300s to accommodate slower inference from the
larger model. Reuse the existing OLLAMA_API_KEY secret for liteLLM
auth.
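
For reference, the proxy exposes an OpenAI-compatible chat completions
endpoint, so the workflow's request can be smoke-tested by hand. A
minimal sketch, assuming the OLLAMA_API_KEY value is exported in the
local shell:

# Hedged sketch: manual smoke test of the liteLLM proxy the workflow calls.
# Assumes $OLLAMA_API_KEY holds the same token as the repo secret.
curl -s --max-time 300 \
  -X POST "http://172.0.0.29:11434/v1/chat/completions" \
  -H "Authorization: Bearer $OLLAMA_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"model": "qwen2.5-72b", "messages": [{"role": "user", "content": "ping"}], "stream": false}' \
  | jq -r '.choices[0].message.content'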

Also add a push-to-master trigger to test.yml so merges to master
run the full CI suite (previously only pull_request events triggered
it).
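
After this change, the trigger block at the top of test.yml (the
second file in the diff below) should read roughly:

# Sketch of the updated triggers in test.yml; see the diff below for the exact change.
on:
  push:
    branches:
      - master
  pull_request: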
Shaun Arman 2026-04-19 18:41:54 -05:00
parent 257b2fb9c5
commit d066e71eeb
2 changed files with 24 additions and 21 deletions

@@ -43,13 +43,13 @@ jobs:
           git diff origin/${{ github.base_ref }}..HEAD > /tmp/pr_diff.txt
           echo "diff_size=$(wc -l < /tmp/pr_diff.txt | tr -d ' ')" >> $GITHUB_OUTPUT
-      - name: Analyze with Ollama
+      - name: Analyze with LLM
         id: analyze
         if: steps.diff.outputs.diff_size != '0'
         shell: bash
         env:
-          OLLAMA_URL: https://ollama-ui.tftsr.com/ollama/v1
-          OLLAMA_API_KEY: ${{ secrets.OLLAMA_API_KEY }}
+          LITELLM_URL: http://172.0.0.29:11434/v1
+          LITELLM_API_KEY: ${{ secrets.OLLAMA_API_KEY }}
           PR_TITLE: ${{ github.event.pull_request.title }}
           PR_NUMBER: ${{ github.event.pull_request.number }}
         run: |
@@ -62,32 +62,32 @@ jobs:
             | grep -v -E '^[+-].*[A-Za-z0-9+/]{40,}={0,2}([^A-Za-z0-9+/=]|$)')
           PROMPT="Analyze the following code changes for correctness, security issues, and best practices. PR Title: ${PR_TITLE}\n\nDiff:\n${DIFF_CONTENT}\n\nProvide a review with: 1) Summary, 2) Bugs/errors, 3) Security issues, 4) Best practices. Give specific comments with suggested fixes."
           BODY=$(jq -cn \
-            --arg model "qwen3-coder-next:latest" \
+            --arg model "qwen2.5-72b" \
             --arg content "$PROMPT" \
             '{model: $model, messages: [{role: "user", content: $content}], stream: false}')
-          echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] PR #${PR_NUMBER} - Calling Ollama API (${#BODY} bytes)..."
-          HTTP_CODE=$(curl -s --max-time 120 --connect-timeout 30 \
-            --retry 3 --retry-delay 5 --retry-connrefused --retry-max-time 120 \
-            -o /tmp/ollama_response.json -w "%{http_code}" \
-            -X POST "$OLLAMA_URL/chat/completions" \
-            -H "Authorization: Bearer $OLLAMA_API_KEY" \
+          echo "[$(date -u +%Y-%m-%dT%H:%M:%SZ)] PR #${PR_NUMBER} - Calling liteLLM API (${#BODY} bytes)..."
+          HTTP_CODE=$(curl -s --max-time 300 --connect-timeout 30 \
+            --retry 3 --retry-delay 10 --retry-connrefused --retry-max-time 300 \
+            -o /tmp/llm_response.json -w "%{http_code}" \
+            -X POST "$LITELLM_URL/chat/completions" \
+            -H "Authorization: Bearer $LITELLM_API_KEY" \
             -H "Content-Type: application/json" \
             -d "$BODY")
           echo "HTTP status: $HTTP_CODE"
-          echo "Response file size: $(wc -c < /tmp/ollama_response.json) bytes"
+          echo "Response file size: $(wc -c < /tmp/llm_response.json) bytes"
           if [ "$HTTP_CODE" != "200" ]; then
-            echo "ERROR: Ollama returned HTTP $HTTP_CODE"
-            cat /tmp/ollama_response.json
+            echo "ERROR: liteLLM returned HTTP $HTTP_CODE"
+            cat /tmp/llm_response.json
             exit 1
           fi
-          if ! jq empty /tmp/ollama_response.json 2>/dev/null; then
-            echo "ERROR: Invalid JSON response from Ollama"
-            cat /tmp/ollama_response.json
+          if ! jq empty /tmp/llm_response.json 2>/dev/null; then
+            echo "ERROR: Invalid JSON response from liteLLM"
+            cat /tmp/llm_response.json
             exit 1
           fi
-          REVIEW=$(jq -r '.choices[0].message.content // empty' /tmp/ollama_response.json)
+          REVIEW=$(jq -r '.choices[0].message.content // empty' /tmp/llm_response.json)
           if [ -z "$REVIEW" ]; then
-            echo "ERROR: No content in Ollama response"
+            echo "ERROR: No content in liteLLM response"
             exit 1
           fi
           echo "Review length: ${#REVIEW} chars"
@@ -109,11 +109,11 @@ jobs:
           if [ -f "/tmp/pr_review.txt" ] && [ -s "/tmp/pr_review.txt" ]; then
            REVIEW_BODY=$(head -c 65536 /tmp/pr_review.txt)
            BODY=$(jq -n \
-              --arg body "🤖 Automated PR Review:\n\n${REVIEW_BODY}\n\n---\n*this is an automated review from Ollama*" \
+              --arg body "Automated PR Review (qwen2.5-72b via liteLLM):\n\n${REVIEW_BODY}\n\n---\n*automated code review*" \
              '{body: $body, event: "COMMENT"}')
           else
            BODY=$(jq -n \
-              '{body: "⚠️ Automated PR Review could not be completed — Ollama analysis failed or produced no output.", event: "COMMENT"}')
+              '{body: "Automated PR Review could not be completed - LLM analysis failed or produced no output.", event: "COMMENT"}')
           fi
           HTTP_CODE=$(curl -s --max-time 30 --connect-timeout 10 \
             -o /tmp/review_post_response.json -w "%{http_code}" \
@@ -131,4 +131,4 @@ jobs:
       - name: Cleanup
         if: always()
         shell: bash
-        run: rm -f /tmp/pr_diff.txt /tmp/ollama_response.json /tmp/pr_review.txt /tmp/review_post_response.json
+        run: rm -f /tmp/pr_diff.txt /tmp/llm_response.json /tmp/pr_review.txt /tmp/review_post_response.json

@@ -1,6 +1,9 @@
 name: Test
 on:
+  push:
+    branches:
+      - master
   pull_request:
 jobs: