# chore(deps): update ghcr.io/cozystack/cozystack/cozy-installer docker… (#96)
# Workflow file for this run
---
# CI workflow: lint, sanity/unit tests, MASTER_NODES inventory tests,
# external-IP validation tests, and a full E2E install run.
name: Test

# Run on pushes to main and on pull requests targeting main.
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.14"
- name: Install dependencies
run: pip install ansible-core ansible-lint
- name: Build and install collection
run: |
ansible-galaxy collection build
ansible-galaxy collection install cozystack-installer-*.tar.gz --force
- name: Install collection dependencies
run: ansible-galaxy collection install --requirements-file requirements.yml
- name: Run ansible-lint
run: ansible-lint
- name: Syntax check Ubuntu example
run: ansible-playbook examples/ubuntu/prepare-ubuntu.yml --syntax-check
- name: Syntax check Ubuntu sudo workaround
run: ansible-playbook examples/ubuntu/prepare-sudo.yml --syntax-check
- name: Syntax check SUSE example
run: ansible-playbook examples/suse/prepare-suse.yml --syntax-check
- name: Syntax check RHEL example
run: ansible-playbook examples/rhel/prepare-rhel.yml --syntax-check
sanity:
name: Sanity
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
with:
path: ansible_collections/cozystack/installer
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.14"
- name: Install ansible-core and unit test dependencies
run: pip install ansible-core pytest pytest-xdist pytest-forked pytest-mock
- name: Run sanity tests
working-directory: ansible_collections/cozystack/installer
run: ansible-test sanity --color
- name: Run unit tests
working-directory: ansible_collections/cozystack/installer
run: ansible-test units --color --python 3.14
master-nodes:
name: Multi-master MASTER_NODES
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.14"
- name: Install Ansible
run: pip install ansible-core
- name: Build and install collection
run: |
ansible-galaxy collection build
ansible-galaxy collection install cozystack-installer-*.tar.gz --force
- name: Test auto-detection from server group (3 nodes)
run: >-
ansible-playbook tests/test-master-nodes.yml
--inventory tests/test-multi-master-inventory.yml
- name: Test single-node auto-detection (IP host key)
run: >-
ansible-playbook tests/test-master-nodes.yml
--inventory tests/test-single-master-inventory.yml
- name: Test single-node explicit override (CI inventory)
run: >-
ansible-playbook tests/test-master-nodes.yml
--inventory tests/ci-inventory.yml
- name: Test IPv6 host keys are accepted
run: >-
ansible-playbook tests/test-master-nodes.yml
--inventory tests/test-ipv6-inventory.yml
- name: Test hostname host keys are rejected
run: |
set +e
output="$(ansible-playbook tests/test-master-nodes.yml \
--inventory tests/test-hostname-inventory.yml 2>&1)"
status=$?
set -e
if [ "$status" -eq 0 ]; then
echo "ERROR: Expected failure for hostname host keys, but playbook succeeded"
exit 1
fi
if ! grep -q "not a valid IP address in MASTER_NODES" <<< "$output"; then
echo "ERROR: Playbook failed, but not due to hostname/IP validation"
echo "$output"
exit 1
fi
echo "OK: Hostname host keys correctly rejected"
external-ips:
name: External IPs validation
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.14"
- name: Install Ansible
run: pip install ansible-core
- name: Build and install collection
run: |
ansible-galaxy collection build
ansible-galaxy collection install cozystack-installer-*.tar.gz --force
- name: Test external IPs rendering
run: >-
ansible-playbook tests/test-external-ips.yml
--inventory tests/test-external-ips-inventory.yml
- name: Test invalid IPs are rejected
run: |
set +e
output="$(ansible-playbook tests/test-external-ips.yml \
--inventory tests/test-invalid-ips-inventory.yml 2>&1)"
status=$?
set -e
if [ "$status" -eq 0 ]; then
echo "ERROR: Expected failure for invalid IPs, but playbook succeeded"
exit 1
fi
if ! grep -q "not a valid IP address in cozystack_external_ips" <<< "$output"; then
echo "ERROR: Playbook failed, but not due to IP validation"
echo "$output"
exit 1
fi
echo "OK: Invalid IPs correctly rejected"
e2e:
name: E2E
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Set up Python
uses: actions/setup-python@v6
with:
python-version: "3.14"
- name: Install Ansible
run: pip install ansible-core
- name: Build and install collection
run: |
ansible-galaxy collection build
ansible-galaxy collection install cozystack-installer-*.tar.gz --force
- name: Install collection dependencies
run: |
ansible-galaxy collection install --requirements-file requirements.yml
ansible-galaxy collection install --requirements-file tests/requirements.yml
- name: Run full pipeline
run: >-
sudo env "PATH=$PATH" "HOME=$HOME" ansible-playbook examples/ubuntu/site.yml
--inventory tests/ci-inventory.yml
- name: Verify operator is running
run: |
sudo kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml \
wait deployment/cozystack-operator \
--namespace cozy-system \
--timeout=120s \
--for=condition=Available
- name: Verify CRDs are established
run: |
sudo kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml \
wait crd/packages.cozystack.io \
--for=condition=Established \
--timeout=60s
- name: Verify Platform Package exists
run: |
sudo kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml \
get package cozystack.cozystack-platform \
--namespace cozy-system
- name: Test idempotency (second run)
run: >-
sudo env "PATH=$PATH" "HOME=$HOME" ansible-playbook examples/ubuntu/site.yml
--inventory tests/ci-inventory.yml
# Namespace adoption regression test. Reproduces the failure
# mode: the cozy-installer chart templates Namespace
# cozy-system, and a `helm install` against an existing
# namespace without helm ownership metadata fails with
# Namespace "cozy-system" exists and cannot be imported into
# the current release: invalid ownership metadata
# Strip both the helm release storage Secret (so helm forgets
# the release entirely) and the namespace's helm labels and
# annotations (so the next install sees an orphan namespace).
# This simulates a manual `kubectl create ns` or a remnant
# from a previous failed install.
- name: Simulate orphan cozy-system namespace
run: |
set -eux
KUBECONFIG=/etc/rancher/k3s/k3s.yaml
sudo kubectl --kubeconfig $KUBECONFIG \
label namespace cozy-system app.kubernetes.io/managed-by-
sudo kubectl --kubeconfig $KUBECONFIG \
annotate namespace cozy-system \
meta.helm.sh/release-name- \
meta.helm.sh/release-namespace-
sudo kubectl --kubeconfig $KUBECONFIG \
delete secret --namespace kube-system \
--selector=owner=helm,name=cozy-installer
- name: Test namespace adoption (re-install over orphan namespace)
run: >-
sudo env "PATH=$PATH" "HOME=$HOME" ansible-playbook examples/ubuntu/site.yml
--inventory tests/ci-inventory.yml
- name: Verify cozy-system namespace was re-adopted
run: |
set -eux
KUBECONFIG=/etc/rancher/k3s/k3s.yaml
managed_by="$(sudo kubectl --kubeconfig $KUBECONFIG \
get namespace cozy-system \
--output jsonpath='{.metadata.labels.app\.kubernetes\.io/managed-by}')"
release_name="$(sudo kubectl --kubeconfig $KUBECONFIG \
get namespace cozy-system \
--output jsonpath='{.metadata.annotations.meta\.helm\.sh/release-name}')"
if [ "$managed_by" != "Helm" ] || [ "$release_name" != "cozy-installer" ]; then
echo "ERROR: namespace adoption failed"
echo " managed-by label: '$managed_by' (expected 'Helm')"
echo " release-name annot.: '$release_name' (expected 'cozy-installer')"
exit 1
fi
echo "OK: cozy-system namespace adopted into helm release"
# Foreign-owner refusal: re-stamp cozy-system as owned by a
# different fake helm release and assert the role fails
# rather than silently hijacking ownership.
- name: Stamp cozy-system as owned by a different helm release
run: |
set -eux
KUBECONFIG=/etc/rancher/k3s/k3s.yaml
sudo kubectl --kubeconfig $KUBECONFIG \
annotate namespace cozy-system --overwrite \
meta.helm.sh/release-name=some-other-release \
meta.helm.sh/release-namespace=elsewhere
- name: Test that foreign-owner namespace is NOT overwritten
run: |
set +e
output="$(sudo env "PATH=$PATH" "HOME=$HOME" \
ansible-playbook examples/ubuntu/site.yml \
--inventory tests/ci-inventory.yml 2>&1)"
status=$?
set -e
if [ "$status" -eq 0 ]; then
echo "ERROR: role silently re-adopted a foreign-owned namespace."
echo "Expected refusal because cozy-system is owned by 'some-other-release'."
echo "$output" | tail -50
exit 1
fi
if ! grep -q "owned by helm release 'some-other-release'" <<< "$output"; then
echo "ERROR: role failed but not for the expected reason."
echo "$output" | tail -50
exit 1
fi
echo "OK: role correctly refused to overwrite foreign helm ownership."
- name: Restore cozy-system ownership for cleanup
run: |
set -eux
KUBECONFIG=/etc/rancher/k3s/k3s.yaml
sudo kubectl --kubeconfig $KUBECONFIG \
annotate namespace cozy-system --overwrite \
meta.helm.sh/release-name=cozy-installer \
meta.helm.sh/release-namespace=kube-system