-
Notifications
You must be signed in to change notification settings - Fork 2
328 lines (272 loc) · 11.1 KB
/
test.yml
File metadata and controls
328 lines (272 loc) · 11.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
---
# CI workflow for the cozystack.installer Ansible collection.
# Jobs: lint (ansible-lint + playbook syntax checks), sanity (ansible-test
# sanity/units), master-nodes and external-ips (inventory validation
# playbooks, including expected-failure cases), and e2e (full install on the
# runner plus idempotency and helm namespace-ownership regression tests).
name: Test
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.14"
      - name: Install dependencies
        run: pip install ansible-core ansible-lint
      - name: Build and install collection
        run: |
          ansible-galaxy collection build
          ansible-galaxy collection install cozystack-installer-*.tar.gz --force
      - name: Install collection dependencies
        run: ansible-galaxy collection install --requirements-file requirements.yml
      - name: Run ansible-lint
        run: ansible-lint
      - name: Syntax check Ubuntu example
        run: ansible-playbook examples/ubuntu/prepare-ubuntu.yml --syntax-check
      - name: Syntax check Ubuntu sudo workaround
        run: ansible-playbook examples/ubuntu/prepare-sudo.yml --syntax-check
      - name: Syntax check SUSE example
        run: ansible-playbook examples/suse/prepare-suse.yml --syntax-check
      - name: Syntax check RHEL example
        run: ansible-playbook examples/rhel/prepare-rhel.yml --syntax-check
  sanity:
    name: Sanity
    runs-on: ubuntu-latest
    steps:
      # ansible-test requires the collection to live under
      # ansible_collections/<namespace>/<name>, hence the checkout path.
      - name: Checkout
        uses: actions/checkout@v6
        with:
          path: ansible_collections/cozystack/installer
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.14"
      - name: Install ansible-core and unit test dependencies
        run: pip install ansible-core pytest pytest-xdist pytest-forked pytest-mock
      - name: Run sanity tests
        working-directory: ansible_collections/cozystack/installer
        run: ansible-test sanity --color
      - name: Run unit tests
        working-directory: ansible_collections/cozystack/installer
        run: ansible-test units --color --python 3.14
  master-nodes:
    name: Multi-master MASTER_NODES
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.14"
      - name: Install Ansible
        run: pip install ansible-core
      - name: Build and install collection
        run: |
          ansible-galaxy collection build
          ansible-galaxy collection install cozystack-installer-*.tar.gz --force
      - name: Test auto-detection from server group (3 nodes)
        run: >-
          ansible-playbook tests/test-master-nodes.yml
          --inventory tests/test-multi-master-inventory.yml
      - name: Test single-node auto-detection (IP host key)
        run: >-
          ansible-playbook tests/test-master-nodes.yml
          --inventory tests/test-single-master-inventory.yml
      - name: Test single-node explicit override (CI inventory)
        run: >-
          ansible-playbook tests/test-master-nodes.yml
          --inventory tests/ci-inventory.yml
      - name: Test IPv6 host keys are accepted
        run: >-
          ansible-playbook tests/test-master-nodes.yml
          --inventory tests/test-ipv6-inventory.yml
      # Negative test: the playbook must fail, and for the right reason
      # (hostname/IP validation), so exit status and output are both checked.
      - name: Test hostname host keys are rejected
        run: |
          set +e
          output="$(ansible-playbook tests/test-master-nodes.yml \
            --inventory tests/test-hostname-inventory.yml 2>&1)"
          status=$?
          set -e
          if [ "$status" -eq 0 ]; then
            echo "ERROR: Expected failure for hostname host keys, but playbook succeeded"
            exit 1
          fi
          if ! grep -q "not a valid IP address in MASTER_NODES" <<< "$output"; then
            echo "ERROR: Playbook failed, but not due to hostname/IP validation"
            echo "$output"
            exit 1
          fi
          echo "OK: Hostname host keys correctly rejected"
  external-ips:
    name: External IPs validation
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.14"
      - name: Install Ansible
        run: pip install ansible-core
      - name: Build and install collection
        run: |
          ansible-galaxy collection build
          ansible-galaxy collection install cozystack-installer-*.tar.gz --force
      - name: Test external IPs rendering
        run: >-
          ansible-playbook tests/test-external-ips.yml
          --inventory tests/test-external-ips-inventory.yml
      # Negative test: invalid IPs must fail validation, not merely fail.
      - name: Test invalid IPs are rejected
        run: |
          set +e
          output="$(ansible-playbook tests/test-external-ips.yml \
            --inventory tests/test-invalid-ips-inventory.yml 2>&1)"
          status=$?
          set -e
          if [ "$status" -eq 0 ]; then
            echo "ERROR: Expected failure for invalid IPs, but playbook succeeded"
            exit 1
          fi
          if ! grep -q "not a valid IP address in cozystack_external_ips" <<< "$output"; then
            echo "ERROR: Playbook failed, but not due to IP validation"
            echo "$output"
            exit 1
          fi
          echo "OK: Invalid IPs correctly rejected"
  e2e:
    name: E2E
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set up Python
        uses: actions/setup-python@v6
        with:
          python-version: "3.14"
      - name: Install Ansible
        run: pip install ansible-core
      - name: Build and install collection
        run: |
          ansible-galaxy collection build
          ansible-galaxy collection install cozystack-installer-*.tar.gz --force
      - name: Install collection dependencies
        run: |
          ansible-galaxy collection install --requirements-file requirements.yml
          ansible-galaxy collection install --requirements-file tests/requirements.yml
      # sudo is needed for k3s install; PATH/HOME are forwarded so the
      # pip-installed ansible-playbook and collections remain resolvable.
      - name: Run full pipeline
        run: >-
          sudo env "PATH=$PATH" "HOME=$HOME" ansible-playbook examples/ubuntu/site.yml
          --inventory tests/ci-inventory.yml
      - name: Verify operator is running
        run: |
          sudo kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml \
            wait deployment/cozystack-operator \
            --namespace cozy-system \
            --timeout=120s \
            --for=condition=Available
      - name: Verify CRDs are established
        run: |
          sudo kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml \
            wait crd/packages.cozystack.io \
            --for=condition=Established \
            --timeout=60s
      - name: Verify Platform Package exists
        run: |
          sudo kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml \
            get package cozystack.cozystack-platform \
            --namespace cozy-system
      - name: Test idempotency (second run)
        run: >-
          sudo env "PATH=$PATH" "HOME=$HOME" ansible-playbook examples/ubuntu/site.yml
          --inventory tests/ci-inventory.yml
      # Namespace adoption regression test. Reproduces the failure
      # mode: the cozy-installer chart templates Namespace
      # cozy-system, and a `helm install` against an existing
      # namespace without helm ownership metadata fails with
      #   Namespace "cozy-system" exists and cannot be imported into
      #   the current release: invalid ownership metadata
      # Strip both the helm release storage Secret (so helm forgets
      # the release entirely) and the namespace's helm labels and
      # annotations (so the next install sees an orphan namespace).
      # This simulates a manual `kubectl create ns` or a remnant
      # from a previous failed install.
      - name: Simulate orphan cozy-system namespace
        run: |
          set -eux
          KUBECONFIG=/etc/rancher/k3s/k3s.yaml
          sudo kubectl --kubeconfig $KUBECONFIG \
            label namespace cozy-system app.kubernetes.io/managed-by-
          sudo kubectl --kubeconfig $KUBECONFIG \
            annotate namespace cozy-system \
            meta.helm.sh/release-name- \
            meta.helm.sh/release-namespace-
          sudo kubectl --kubeconfig $KUBECONFIG \
            delete secret --namespace kube-system \
            --selector=owner=helm,name=cozy-installer
      - name: Test namespace adoption (re-install over orphan namespace)
        run: >-
          sudo env "PATH=$PATH" "HOME=$HOME" ansible-playbook examples/ubuntu/site.yml
          --inventory tests/ci-inventory.yml
      - name: Verify cozy-system namespace was re-adopted
        run: |
          set -eux
          KUBECONFIG=/etc/rancher/k3s/k3s.yaml
          managed_by="$(sudo kubectl --kubeconfig $KUBECONFIG \
            get namespace cozy-system \
            --output jsonpath='{.metadata.labels.app\.kubernetes\.io/managed-by}')"
          release_name="$(sudo kubectl --kubeconfig $KUBECONFIG \
            get namespace cozy-system \
            --output jsonpath='{.metadata.annotations.meta\.helm\.sh/release-name}')"
          if [ "$managed_by" != "Helm" ] || [ "$release_name" != "cozy-installer" ]; then
            echo "ERROR: namespace adoption failed"
            echo "  managed-by label: '$managed_by' (expected 'Helm')"
            echo "  release-name annot.: '$release_name' (expected 'cozy-installer')"
            exit 1
          fi
          echo "OK: cozy-system namespace adopted into helm release"
      # Foreign-owner refusal: re-stamp cozy-system as owned by a
      # different fake helm release and assert the role fails
      # rather than silently hijacking ownership.
      - name: Stamp cozy-system as owned by a different helm release
        run: |
          set -eux
          KUBECONFIG=/etc/rancher/k3s/k3s.yaml
          sudo kubectl --kubeconfig $KUBECONFIG \
            annotate namespace cozy-system --overwrite \
            meta.helm.sh/release-name=some-other-release \
            meta.helm.sh/release-namespace=elsewhere
      - name: Test that foreign-owner namespace is NOT overwritten
        run: |
          set +e
          output="$(sudo env "PATH=$PATH" "HOME=$HOME" \
            ansible-playbook examples/ubuntu/site.yml \
            --inventory tests/ci-inventory.yml 2>&1)"
          status=$?
          set -e
          if [ "$status" -eq 0 ]; then
            echo "ERROR: role silently re-adopted a foreign-owned namespace."
            echo "Expected refusal because cozy-system is owned by 'some-other-release'."
            echo "$output" | tail -50
            exit 1
          fi
          if ! grep -q "owned by helm release 'some-other-release'" <<< "$output"; then
            echo "ERROR: role failed but not for the expected reason."
            echo "$output" | tail -50
            exit 1
          fi
          echo "OK: role correctly refused to overwrite foreign helm ownership."
      # Undo the fake foreign ownership so any later cleanup/uninstall
      # of the real release still works.
      - name: Restore cozy-system ownership for cleanup
        run: |
          set -eux
          KUBECONFIG=/etc/rancher/k3s/k3s.yaml
          sudo kubectl --kubeconfig $KUBECONFIG \
            annotate namespace cozy-system --overwrite \
            meta.helm.sh/release-name=cozy-installer \
            meta.helm.sh/release-namespace=kube-system