Skip to content

Commit 0952e72

Browse files
committed
Add MachinesUpToDate condition handling in MachinePool controller
- Updated the updateStatus method to include the new condition
- Added unit tests
- Fixes #10059

Signed-off-by: Satyam Bhardwaj <sbhardwaj@mirantis.com>
1 parent 4d332f8 commit 0952e72

File tree

3 files changed

+107
-1
lines changed

3 files changed

+107
-1
lines changed

internal/controllers/machinepool/machinepool_controller.go

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -202,6 +202,7 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
202202
}},
203203
patch.WithOwnedConditions{Conditions: []string{
204204
clusterv1.PausedCondition,
205+
clusterv1.MachinesUpToDateCondition,
205206
}},
206207
}
207208
if reterr == nil {

internal/controllers/machinepool/machinepool_controller_status.go

Lines changed: 78 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,9 @@ package machinepool
1919
import (
2020
"context"
2121
"fmt"
22+
"time"
2223

24+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2325
"k8s.io/utils/ptr"
2426
ctrl "sigs.k8s.io/controller-runtime"
2527

@@ -41,7 +43,8 @@ func (r *Reconciler) updateStatus(ctx context.Context, s *scope) error {
4143

4244
setReplicas(s.machinePool, hasMachinePoolMachines, s.machines)
4345

44-
// TODO: in future add setting conditions here
46+
// Set MachinesUpToDate condition to surface when machines are not up-to-date with the spec.
47+
setMachinesUpToDateCondition(ctx, s.machinePool, s.machines, hasMachinePoolMachines)
4548

4649
return nil
4750
}
@@ -73,3 +76,77 @@ func setReplicas(mp *clusterv1.MachinePool, hasMachinePoolMachines bool, machine
7376
mp.Status.AvailableReplicas = ptr.To(availableReplicas)
7477
mp.Status.UpToDateReplicas = ptr.To(upToDateReplicas)
7578
}
79+
80+
// setMachinesUpToDateCondition sets the MachinesUpToDate condition on the MachinePool.
81+
func setMachinesUpToDateCondition(ctx context.Context, mp *clusterv1.MachinePool, machines []*clusterv1.Machine, hasMachinePoolMachines bool) {
82+
log := ctrl.LoggerFrom(ctx)
83+
84+
// For MachinePool providers that don't use Machine objects (like managed pools),
85+
// we derive the MachinesUpToDate condition from the infrastructure status.
86+
// If InfrastructureReady is False, machines are not up-to-date.
87+
if !hasMachinePoolMachines {
88+
// Check if infrastructure is ready by looking at the initialization status
89+
if mp.Status.Initialization.InfrastructureProvisioned == nil || !*mp.Status.Initialization.InfrastructureProvisioned {
90+
conditions.Set(mp, metav1.Condition{
91+
Type: clusterv1.MachinesUpToDateCondition,
92+
Status: metav1.ConditionFalse,
93+
Reason: clusterv1.NotUpToDateReason,
94+
Message: "Infrastructure is not yet provisioned",
95+
})
96+
return
97+
}
98+
99+
conditions.Set(mp, metav1.Condition{
100+
Type: clusterv1.MachinesUpToDateCondition,
101+
Status: metav1.ConditionTrue,
102+
Reason: clusterv1.UpToDateReason,
103+
})
104+
return
105+
}
106+
107+
// Only consider Machines that have an UpToDate condition or are older than 10s.
108+
// This is done to ensure the MachinesUpToDate condition doesn't flicker after a new Machine is created,
109+
// because it can take a bit until the UpToDate condition is set on a new Machine.
110+
filteredMachines := make([]*clusterv1.Machine, 0, len(machines))
111+
for _, machine := range machines {
112+
if conditions.Has(machine, clusterv1.MachineUpToDateCondition) || time.Since(machine.CreationTimestamp.Time) > 10*time.Second {
113+
filteredMachines = append(filteredMachines, machine)
114+
}
115+
}
116+
117+
if len(filteredMachines) == 0 {
118+
conditions.Set(mp, metav1.Condition{
119+
Type: clusterv1.MachinesUpToDateCondition,
120+
Status: metav1.ConditionTrue,
121+
Reason: clusterv1.NoReplicasReason,
122+
})
123+
return
124+
}
125+
126+
upToDateCondition, err := conditions.NewAggregateCondition(
127+
filteredMachines, clusterv1.MachineUpToDateCondition,
128+
conditions.TargetConditionType(clusterv1.MachinesUpToDateCondition),
129+
// Using a custom merge strategy to override reasons applied during merge.
130+
conditions.CustomMergeStrategy{
131+
MergeStrategy: conditions.DefaultMergeStrategy(
132+
conditions.ComputeReasonFunc(conditions.GetDefaultComputeMergeReasonFunc(
133+
clusterv1.NotUpToDateReason,
134+
clusterv1.UpToDateUnknownReason,
135+
clusterv1.UpToDateReason,
136+
)),
137+
),
138+
},
139+
)
140+
if err != nil {
141+
log.Error(err, "Failed to aggregate Machine's UpToDate conditions")
142+
conditions.Set(mp, metav1.Condition{
143+
Type: clusterv1.MachinesUpToDateCondition,
144+
Status: metav1.ConditionUnknown,
145+
Reason: clusterv1.InternalErrorReason,
146+
Message: "Please check controller logs for errors",
147+
})
148+
return
149+
}
150+
151+
conditions.Set(mp, *upToDateCondition)
152+
}

internal/controllers/machinepool/machinepool_controller_test.go

Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@ import (
3131
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
3232
"k8s.io/apimachinery/pkg/runtime"
3333
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
34+
"k8s.io/client-go/tools/record"
3435
"k8s.io/utils/ptr"
3536
ctrl "sigs.k8s.io/controller-runtime"
3637
"sigs.k8s.io/controller-runtime/pkg/cache/informertest"
@@ -45,6 +46,7 @@ import (
4546
"sigs.k8s.io/cluster-api/controllers/external"
4647
externalfake "sigs.k8s.io/cluster-api/controllers/external/fake"
4748
"sigs.k8s.io/cluster-api/util"
49+
"sigs.k8s.io/cluster-api/util/conditions"
4850
v1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
4951
"sigs.k8s.io/cluster-api/util/test/builder"
5052
)
@@ -1157,6 +1159,31 @@ func TestMachinePoolConditions(t *testing.T) {
11571159
g.Expect(infraReadyCondition.Status).To(Equal(corev1.ConditionFalse))
11581160
},
11591161
},
1162+
{
1163+
name: "MachinesUpToDate condition set when infrastructure ready",
1164+
dataSecretCreated: true,
1165+
infrastructureReady: true,
1166+
beforeFunc: func(_, _ *unstructured.Unstructured, mp *clusterv1.MachinePool, _ *corev1.NodeList) {
1167+
mp.Spec.ProviderIDList = []string{"azure://westus2/id-node-4", "aws://us-east-1/id-node-1"}
1168+
mp.Status.NodeRefs = []corev1.ObjectReference{
1169+
{Name: "node-1"},
1170+
{Name: "azure-node-4"},
1171+
}
1172+
mp.Status.Replicas = ptr.To(int32(2))
1173+
},
1174+
conditionAssertFunc: func(t *testing.T, getter v1beta1conditions.Getter) {
1175+
t.Helper()
1176+
g := NewWithT(t)
1177+
1178+
// Check that MachinesUpToDate condition is set (v1beta2 condition)
1179+
mp, ok := getter.(*clusterv1.MachinePool)
1180+
g.Expect(ok).To(BeTrue())
1181+
machinesUpToDateCondition := conditions.Get(mp, clusterv1.MachinesUpToDateCondition)
1182+
g.Expect(machinesUpToDateCondition).ToNot(BeNil())
1183+
g.Expect(machinesUpToDateCondition.Status).To(Equal(metav1.ConditionTrue))
1184+
g.Expect(machinesUpToDateCondition.Reason).To(Equal(clusterv1.UpToDateReason))
1185+
},
1186+
},
11601187
}
11611188

11621189
for _, tt := range testcases {
@@ -1187,6 +1214,7 @@ func TestMachinePoolConditions(t *testing.T) {
11871214
Client: clientFake,
11881215
APIReader: clientFake,
11891216
ClusterCache: clustercache.NewFakeClusterCache(clientFake, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
1217+
recorder: record.NewFakeRecorder(32),
11901218
externalTracker: external.ObjectTracker{
11911219
Controller: externalfake.Controller{},
11921220
Cache: &informertest.FakeInformers{},

0 commit comments

Comments
 (0)