KinD End-to-End #2097

Workflow file for this run

name: KinD End-to-End
on:
  pull_request:
    branches: [main]
  push:
    branches: [main]
  schedule:
    - cron: "0 4 * * *"
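
# NAME is reused below as the image name, the namespace, and the Helm release name.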
env:
  NAME: "azad-kube-proxy"
jobs:
  end-to-end:
    runs-on: ubuntu-latest
    steps:
      - name: Clone repo
        uses: actions/checkout@v3
      - name: Setup go
        uses: actions/setup-go@v4
        with:
          go-version: "^1.20"
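      # Derive the image tag: sha-<short sha> for branch builds, or the tag
      # name when the workflow runs for a tag ref.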
      - name: Prepare
        id: prep
        run: |
          VERSION=sha-${GITHUB_SHA::8}
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF/refs\/tags\//}
          fi
          echo BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ') >> $GITHUB_OUTPUT
          echo VERSION=${VERSION} >> $GITHUB_OUTPUT
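      # Cache Docker layer data between runs, keyed on the commit SHA with a
      # fallback to the most recent cache for this OS.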
      - name: Cache container layers
        uses: actions/cache@v3.3.1
        with:
          path: /tmp/.buildx-cache
          key: ${{ runner.os }}-buildx-${{ github.sha }}
          restore-keys: |
            ${{ runner.os }}-buildx-
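      # QEMU and Buildx provide the build environment; this job only builds
      # and loads an image for the runner's own architecture.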
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2.1.0
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2.5.0
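      # Create a local KinD cluster with a pinned node image; kubectl in the
      # later steps talks to this cluster.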
      - name: Set up KinD
        id: kind
        run: |
          kind create cluster --image=kindest/node:v1.25.3
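      # Build the proxy image on the runner and side-load it into the KinD
      # nodes, so no registry push is needed.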
      - name: Build and load (current arch)
        run: |
          docker buildx build --load -t ${{ env.NAME }}:${{ steps.prep.outputs.VERSION }} .
          kind load docker-image ${{ env.NAME }}:${{ steps.prep.outputs.VERSION }}
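      # Install the chart into the cluster: generate a self-signed TLS
      # certificate for the node's InternalIP, install the release, grant the
      # test service principal cluster-admin, and create a pod named "test"
      # that the later steps assert on.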
      - name: Install helm chart
        env:
          CLIENT_ID: ${{ secrets.CLIENT_ID }}
          CLIENT_SECRET: ${{ secrets.CLIENT_SECRET }}
          TENANT_ID: ${{ secrets.TENANT_ID }}
          TEST_USER_SP_OBJECT_ID: ${{ secrets.TEST_USER_SP_OBJECT_ID }}
        run: |
          set -x
          set -e
          mkdir -p tmp
          NODE_IP=$(kubectl get node -o json | jq -r '.items[0].status.addresses[] | select(.type=="InternalIP").address')
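          # Create a self-signed certificate with the node IP in the SAN and
          # store it as the "test-cert" TLS secret in the release namespace.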
          cat <<EOF > tmp/csr-config
          [req]
          distinguished_name=req
          [san]
          subjectAltName=@alt_names
          [alt_names]
          IP.1 = ${NODE_IP}
          DNS.1 = ${NODE_IP}
          EOF
          openssl req -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -out tmp/cert.crt -keyout tmp/cert.key -subj "/C=SE/ST=LOCALHOST/L=LOCALHOST/O=LOCALHOST/OU=LOCALHOST/CN=${NODE_IP}" -extensions san -config tmp/csr-config
          kubectl create namespace ${{ env.NAME }}
          kubectl --namespace ${{ env.NAME }} create secret tls test-cert --cert=tmp/cert.crt --key=tmp/cert.key
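          # Install the chart; on failure, dump the proxy pod logs before
          # failing the job.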
          set +e
          helm upgrade --wait --timeout 120s --namespace ${{ env.NAME }} --install ${{ env.NAME }} \
            --values test/test-values.yaml \
            --set secret.CLIENT_ID=${CLIENT_ID} \
            --set secret.CLIENT_SECRET=${CLIENT_SECRET} \
            --set secret.TENANT_ID=${TENANT_ID} \
            --set image.repository=${{ env.NAME }} \
            --set image.tag=${{ steps.prep.outputs.VERSION }} \
            ./charts/azad-kube-proxy
          EXIT_CODE=$?
          set -e
          if [ $EXIT_CODE -ne 0 ]; then
            kubectl --namespace ${{ env.NAME }} logs -l app.kubernetes.io/name=${{ env.NAME }}
            echo "helm install failed" 1>&2
            exit 1
          fi
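          # Grant cluster-admin to the test service principal's object ID; the
          # end-to-end tests below make requests as this identity.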
          cat <<EOF | kubectl apply -f -
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRoleBinding
          metadata:
            name: ${TEST_USER_SP_OBJECT_ID}-admin
          subjects:
            - kind: User
              name: ${TEST_USER_SP_OBJECT_ID}
              apiGroup: rbac.authorization.k8s.io
          roleRef:
            kind: ClusterRole
            name: cluster-admin
            apiGroup: rbac.authorization.k8s.io
          EOF
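          # Create a pod with the known name "test"; the end-to-end checks
          # below expect to find it in the default namespace.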
          cat <<EOF | kubectl apply -f -
          apiVersion: v1
          kind: Pod
          metadata:
            name: test
            namespace: default
          spec:
            containers:
              - image: busybox
                name: test
                command: ["sleep"]
                args: ["3000"]
          EOF
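      # End-to-end test against the proxy's REST API: log in as the test
      # service principal, fetch an access token for the proxy's resource,
      # call the pods endpoint through the NodePort, and expect the pod
      # created above ("test") in the response.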
      - name: Test azad-kube-proxy
        env:
          TENANT_ID: ${{ secrets.TENANT_ID }}
          TEST_USER_SP_CLIENT_ID: ${{ secrets.TEST_USER_SP_CLIENT_ID }}
          TEST_USER_SP_CLIENT_SECRET: ${{ secrets.TEST_USER_SP_CLIENT_SECRET }}
          TEST_USER_SP_RESOURCE: ${{ secrets.TEST_USER_SP_RESOURCE }}
        run: |
          set -e
          az login --service-principal --username ${TEST_USER_SP_CLIENT_ID} --password ${TEST_USER_SP_CLIENT_SECRET} --tenant ${TENANT_ID} --allow-no-subscriptions 1>/dev/null
          TOKEN=$(az account get-access-token --resource ${TEST_USER_SP_RESOURCE} --query accessToken --output tsv)
          NODE_IP=$(kubectl get node -o json | jq -r '.items[0].status.addresses[] | select(.type=="InternalIP").address')
          NODE_PORT=$(kubectl --namespace ${{ env.NAME }} get service ${{ env.NAME }} -o json | jq -r '.spec.ports[0].nodePort')
          RESPONSE=$(curl -s -k -H "Authorization: Bearer ${TOKEN}" https://${NODE_IP}:${NODE_PORT}/api/v1/namespaces/default/pods | jq -r '.items[0].metadata.name')
          if [[ "${RESPONSE}" != "test" ]]; then
            echo "Expected response to be test. Was: ${RESPONSE}"
            exit 1
          fi
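      # Same check through the kubectl plugin: build kubectl-azad-proxy,
      # generate a kubeconfig that points at the proxy, and verify that
      # kubectl can list the "test" pod through it.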
      - name: Test kubectl-azad-proxy
        env:
          TENANT_ID: ${{ secrets.TENANT_ID }}
          TEST_USER_SP_CLIENT_ID: ${{ secrets.TEST_USER_SP_CLIENT_ID }}
          TEST_USER_SP_CLIENT_SECRET: ${{ secrets.TEST_USER_SP_CLIENT_SECRET }}
          TEST_USER_SP_RESOURCE: ${{ secrets.TEST_USER_SP_RESOURCE }}
          EXCLUDE_ENVIRONMENT_AUTH: "true"
          EXCLUDE_MSI_AUTH: "true"
        run: |
          set -e
          az login --service-principal --username ${TEST_USER_SP_CLIENT_ID} --password ${TEST_USER_SP_CLIENT_SECRET} --tenant ${TENANT_ID} --allow-no-subscriptions 1>/dev/null
          make build-plugin
          sudo mv bin/kubectl-azad_proxy /usr/local/bin/
          NODE_IP=$(kubectl get node -o json | jq -r '.items[0].status.addresses[] | select(.type=="InternalIP").address')
          NODE_PORT=$(kubectl --namespace ${{ env.NAME }} get service ${{ env.NAME }} -o json | jq -r '.spec.ports[0].nodePort')
          kubectl azad-proxy generate --cluster-name local-test --kubeconfig ~/.kube/test --proxy-url https://${NODE_IP}:${NODE_PORT} --tls-insecure-skip-verify=true --overwrite --resource ${TEST_USER_SP_RESOURCE}
          RESPONSE=$(kubectl --kubeconfig ~/.kube/test --namespace default get pods -o json | jq -r '.items[0].metadata.name')
          if [[ "${RESPONSE}" != "test" ]]; then
            echo "Expected response to be test. Was: ${RESPONSE}"
            exit 1
          fi