diff --git a/go.mod b/go.mod index 7f290bc2d7b9..f79044f96b84 100644 --- a/go.mod +++ b/go.mod @@ -53,6 +53,7 @@ require ( github.com/lestrrat/go-jsschema v0.0.0-20181205002244-5c81c58ffcc3 github.com/lithammer/dedent v1.1.0 github.com/mattn/go-sqlite3 v1.14.30 + github.com/metal3-io/baremetal-operator/apis v0.11.0 github.com/metallb/frr-k8s v0.0.15 github.com/microsoftgraph/msgraph-sdk-go v1.81.0 github.com/onsi/ginkgo/v2 v2.23.3 @@ -78,7 +79,7 @@ require ( github.com/spf13/pflag v1.0.6-0.20210604193023-d5e0c0615ace github.com/spf13/viper v1.8.1 github.com/stretchr/objx v0.5.2 - github.com/stretchr/testify v1.10.0 + github.com/stretchr/testify v1.11.1 github.com/tecbiz-ch/nutanix-go-sdk v0.1.15 github.com/tidwall/gjson v1.18.0 github.com/tidwall/pretty v1.2.0 @@ -100,9 +101,9 @@ require ( gopkg.in/src-d/go-git.v4 v4.13.1 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.33.4 + k8s.io/api v0.33.5 k8s.io/apiextensions-apiserver v0.33.4 - k8s.io/apimachinery v0.33.4 + k8s.io/apimachinery v0.33.5 k8s.io/apiserver v0.33.4 k8s.io/cli-runtime v0.33.4 k8s.io/client-go v0.33.4 @@ -398,7 +399,7 @@ require ( sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29 // indirect sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 // indirect - sigs.k8s.io/controller-runtime v0.19.0 // indirect + sigs.k8s.io/controller-runtime v0.21.0 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect sigs.k8s.io/kustomize/api v0.19.0 // indirect diff --git a/go.sum b/go.sum index 8870c880280a..eef8017d99d4 100644 --- a/go.sum +++ b/go.sum @@ -707,6 +707,8 @@ 
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-sqlite3 v1.14.30 h1:bVreufq3EAIG1Quvws73du3/QgdeZ3myglJlrzSYYCY= github.com/mattn/go-sqlite3 v1.14.30/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/metal3-io/baremetal-operator/apis v0.11.0 h1:sYxjnObegWnDyz028m5Rc6gVYxbSLlvjOAqo6Iq1vOE= +github.com/metal3-io/baremetal-operator/apis v0.11.0/go.mod h1:T0v/wKeJeUfVFeTq1sihv76IZEE2Bn+hqKkh5kGMxA4= github.com/metallb/frr-k8s v0.0.15 h1:6M3UGhovX1EFoaSGjrRD7djUAx3w2I+g81FH8OVtHkM= github.com/metallb/frr-k8s v0.0.15/go.mod h1:TjrGoAf+v00hYGlI8jUdyDxY5udMAOs2GWwrvLWnA4E= github.com/microsoft/kiota-abstractions-go v1.9.3 h1:cqhbqro+VynJ7kObmo7850h3WN2SbvoyhypPn8uJ1SE= @@ -986,8 +988,8 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tecbiz-ch/nutanix-go-sdk v0.1.15 h1:ZT5I6OFGswvMceujUE10ZXPNnT5UQIW9gAX4FEFK6Ds= @@ -1592,8 +1594,8 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29 h1:qiifAaaBqV3d/EcN9dKJaJI sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.29/go.mod h1:ZFAt0qF1kR+w8nBVJK56s6CFvLrlosN1i2c+Sxb7LBk= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 h1:Fm/Yjv4nXjUtJ90uXKSKwPwaTWYuDFMhDNNOd77PlOg= sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16/go.mod h1:+kl90flu4+WCP6HBGVYbKVQR+5ztDzUNrWJz8rsnvRU= -sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= -sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM= sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= diff --git a/pkg/clioptions/clusterdiscovery/cluster.go b/pkg/clioptions/clusterdiscovery/cluster.go index b594d9859d13..abdf6f1bec28 100644 --- a/pkg/clioptions/clusterdiscovery/cluster.go +++ b/pkg/clioptions/clusterdiscovery/cluster.go @@ -30,6 +30,13 @@ import ( "github.com/openshift/origin/test/extended/util/azure" ) +// HypervisorConfig contains configuration for hypervisor-based recovery operations +type HypervisorConfig struct { + HypervisorIP string `json:"hypervisorIP"` + SSHUser string `json:"sshUser"` + PrivateKeyPath string `json:"privateKeyPath"` +} + type ClusterConfiguration struct { ProviderName string `json:"type"` @@ -76,6 +83,9 @@ type ClusterConfiguration struct { // IsNoOptionalCapabilities indicates the cluster has 
no optional capabilities enabled HasNoOptionalCapabilities bool + // HypervisorConfig contains SSH configuration for hypervisor-based recovery operations + HypervisorConfig *HypervisorConfig + // APIGroups contains the set of API groups available in the cluster APIGroups sets.Set[string] `json:"-"` // EnabledFeatureGates contains the set of enabled feature gates in the cluster diff --git a/pkg/cmd/openshift-tests/run/flags.go b/pkg/cmd/openshift-tests/run/flags.go index 106e71ce8266..618667344a00 100644 --- a/pkg/cmd/openshift-tests/run/flags.go +++ b/pkg/cmd/openshift-tests/run/flags.go @@ -1,6 +1,8 @@ package run import ( + "encoding/json" + "fmt" "os" "github.com/openshift-eng/openshift-tests-extension/pkg/extension" @@ -28,9 +30,6 @@ type RunSuiteFlags struct { ToImage string TestOptions []string - // Shared by initialization code - config *clusterdiscovery.ClusterConfiguration - genericclioptions.IOStreams } @@ -84,7 +83,7 @@ func (f *RunSuiteFlags) ToOptions(args []string, availableSuites []*testginkgo.T // shallow copy to mutate ginkgoOptions := f.GinkgoRunSuiteOptions - providerConfig, err := f.SuiteWithKubeTestInitializationPreSuite() + clusterConfig, err := f.SuiteWithKubeTestInitializationPreSuite() if err != nil { return nil, err } @@ -95,13 +94,39 @@ func (f *RunSuiteFlags) ToOptions(args []string, availableSuites []*testginkgo.T return nil, err } + // Parse hypervisor configuration if provided and set it in environment for test context + if f.GinkgoRunSuiteOptions.WithHypervisorConfigJSON != "" { + // Validate the JSON format + var hypervisorConfig clusterdiscovery.HypervisorConfig + if err := json.Unmarshal([]byte(f.GinkgoRunSuiteOptions.WithHypervisorConfigJSON), &hypervisorConfig); err != nil { + return nil, fmt.Errorf("failed to parse hypervisor configuration JSON: %v", err) + } + + // Validate required fields + if hypervisorConfig.HypervisorIP == "" { + return nil, fmt.Errorf("hypervisorIP is required in hypervisor configuration") 
+ } + if hypervisorConfig.SSHUser == "" { + return nil, fmt.Errorf("sshUser is required in hypervisor configuration") + } + if hypervisorConfig.PrivateKeyPath == "" { + return nil, fmt.Errorf("privateKeyPath is required in hypervisor configuration") + } + + // Set the hypervisor configuration in the cluster config + clusterConfig.HypervisorConfig = &hypervisorConfig + + // Also set it in environment for test context access + os.Setenv("HYPERVISOR_CONFIG", f.GinkgoRunSuiteOptions.WithHypervisorConfigJSON) + } + o := &RunSuiteOptions{ GinkgoRunSuiteOptions: ginkgoOptions, Suite: suite, Extension: internalExtension, - ClusterConfig: providerConfig, + ClusterConfig: clusterConfig, FromRepository: f.FromRepository, - CloudProviderJSON: providerConfig.ToJSONString(), + CloudProviderJSON: clusterConfig.ToJSONString(), CloseFn: closeFn, IOStreams: f.IOStreams, } diff --git a/pkg/cmd/openshift-tests/run/options.go b/pkg/cmd/openshift-tests/run/options.go index 13b32ab944a4..c5f9715a47f4 100644 --- a/pkg/cmd/openshift-tests/run/options.go +++ b/pkg/cmd/openshift-tests/run/options.go @@ -31,6 +31,11 @@ type RunSuiteOptions struct { CloseFn iooptions.CloseFunc genericclioptions.IOStreams + // HypervisorConfig contains SSH configuration for hypervisor-based recovery operations + // If set, will run recovery tests that require the hypervisor-based recovery, such as + // the node replacement test in the two_node recovery suite.
+ HypervisorConfig *clusterdiscovery.HypervisorConfig + // ClusterConfig contains cluster-specific configuration for filtering tests ClusterConfig *clusterdiscovery.ClusterConfiguration diff --git a/pkg/test/filters/cluster_state.go b/pkg/test/filters/cluster_state.go index 2372f75c4081..4e1aa80372bc 100644 --- a/pkg/test/filters/cluster_state.go +++ b/pkg/test/filters/cluster_state.go @@ -67,6 +67,10 @@ func NewClusterStateFilter(config *clusterdiscovery.ClusterConfiguration) *Clust skips = append(skips, "[Skipped:NoOptionalCapabilities]") } + if config.HypervisorConfig == nil { + skips = append(skips, "[Requires:HypervisorSSHConfig]") + } + logrus.WithField("skips", skips).Info("Generated skips for cluster state") return &ClusterStateFilter{ diff --git a/pkg/test/ginkgo/cmd_runsuite.go b/pkg/test/ginkgo/cmd_runsuite.go index 76ed19950eea..347e4623333b 100644 --- a/pkg/test/ginkgo/cmd_runsuite.go +++ b/pkg/test/ginkgo/cmd_runsuite.go @@ -96,6 +96,9 @@ type GinkgoRunSuiteOptions struct { // RetryStrategy controls retry behavior and final outcome decisions RetryStrategy RetryStrategy + + // WithHypervisorConfigJSON contains JSON configuration for hypervisor-based recovery operations + WithHypervisorConfigJSON string } func NewGinkgoRunSuiteOptions(streams genericclioptions.IOStreams) *GinkgoRunSuiteOptions { @@ -133,6 +136,7 @@ func (o *GinkgoRunSuiteOptions) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&o.ShardStrategy, "shard-strategy", o.ShardStrategy, "Which strategy to use for sharding (hash)") availableStrategies := getAvailableRetryStrategies() flags.Var(newRetryStrategyFlag(&o.RetryStrategy), "retry-strategy", fmt.Sprintf("Test retry strategy (available: %s, default: %s)", strings.Join(availableStrategies, ", "), defaultRetryStrategy)) + flags.StringVar(&o.WithHypervisorConfigJSON, "with-hypervisor-json", os.Getenv("HYPERVISOR_CONFIG"), "JSON configuration for hypervisor-based recovery operations. 
Must contain hypervisorIP, sshUser, and privateKeyPath fields.") } func (o *GinkgoRunSuiteOptions) Validate() error { diff --git a/test/extended/testdata/two_node/baremetalhost-template.yaml b/test/extended/testdata/two_node/baremetalhost-template.yaml new file mode 100644 index 000000000000..9037bfe2eddf --- /dev/null +++ b/test/extended/testdata/two_node/baremetalhost-template.yaml @@ -0,0 +1,27 @@ +apiVersion: metal3.io/v1alpha1 +kind: BareMetalHost +metadata: + finalizers: + - baremetalhost.metal3.io + labels: + installer.openshift.io/role: control-plane + name: {NAME} + namespace: openshift-machine-api +spec: + architecture: x86_64 + automatedCleaningMode: metadata + bmc: + address: redfish+https://{IP}:8000/redfish/v1/Systems/{UUID} + credentialsName: {CREDENTIALS_NAME} + disableCertificateVerification: true + bootMACAddress: {BOOT_MAC_ADDRESS} + bootMode: UEFI + customDeploy: + method: install_coreos + hardwareProfile: unknown + online: true + rootDeviceHints: + deviceName: /dev/sda + userData: + name: master-user-data-managed + namespace: openshift-machine-api \ No newline at end of file diff --git a/test/extended/testdata/two_node/machine-template.yaml b/test/extended/testdata/two_node/machine-template.yaml new file mode 100644 index 000000000000..3cc661072d26 --- /dev/null +++ b/test/extended/testdata/two_node/machine-template.yaml @@ -0,0 +1,30 @@ +apiVersion: machine.openshift.io/v1beta1 +kind: Machine +metadata: + annotations: + metal3.io/BareMetalHost: openshift-machine-api/{NODE_NAME} + finalizers: + - machine.machine.openshift.io + labels: + machine.openshift.io/cluster-api-cluster: ostest-{MACHINE_HASH} + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + name: {MACHINE_NAME} + namespace: openshift-machine-api +spec: + authoritativeAPI: MachineAPI + metadata: {} + providerSpec: + value: + apiVersion: baremetal.cluster.k8s.io/v1alpha1 + customDeploy: + method: install_coreos + 
hostSelector: {} + image: + checksum: "" + url: "" + kind: BareMetalMachineProviderSpec + metadata: + creationTimestamp: null + userData: + name: master-user-data-managed \ No newline at end of file diff --git a/test/extended/two_node/arbiter_topology.go b/test/extended/two_node/arbiter_topology.go index 0fdce9639dbe..41cbca1775ee 100644 --- a/test/extended/two_node/arbiter_topology.go +++ b/test/extended/two_node/arbiter_topology.go @@ -37,7 +37,7 @@ var expectedPods = map[string]int{ var _ = g.Describe("[sig-node][apigroup:config.openshift.io][OCPFeatureGate:HighlyAvailableArbiter] expected Master and Arbiter node counts", func() { defer g.GinkgoRecover() - oc := exutil.NewCLIWithoutNamespace("") + oc := createCLI(nonAdmin) g.BeforeEach(func() { skipIfNotTopology(oc, v1.HighlyAvailableArbiterMode) @@ -51,15 +51,11 @@ var _ = g.Describe("[sig-node][apigroup:config.openshift.io][OCPFeatureGate:High expectedMasterNodes = 2 expectedArbiterNodes = 1 ) - masterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ - LabelSelector: labelNodeRoleMaster, - }) + masterNodes, err := getNodes(oc, labelNodeRoleControlPlane) o.Expect(err).To(o.BeNil(), "Expected to retrieve Master nodes without error") o.Expect(len(masterNodes.Items)).To(o.Equal(expectedMasterNodes)) - arbiterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ - LabelSelector: labelNodeRoleArbiter, - }) + arbiterNodes, err := getNodes(oc, labelNodeRoleArbiter) o.Expect(err).To(o.BeNil(), "Expected to retrieve Arbiter nodes without error") o.Expect(len(arbiterNodes.Items)).To(o.Equal(expectedArbiterNodes)) }) @@ -68,16 +64,14 @@ var _ = g.Describe("[sig-node][apigroup:config.openshift.io][OCPFeatureGate:High var _ = g.Describe("[sig-node][apigroup:config.openshift.io][OCPFeatureGate:HighlyAvailableArbiter] required pods on the Arbiter node", func() { defer g.GinkgoRecover() - oc := exutil.NewCLIWithoutNamespace("") + oc 
:= createCLI(nonAdmin) g.BeforeEach(func() { skipIfNotTopology(oc, v1.HighlyAvailableArbiterMode) }) g.It("Should verify that the correct number of pods are running on the Arbiter node", func() { g.By("Retrieving the Arbiter node name") - nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ - LabelSelector: labelNodeRoleArbiter, - }) + nodes, err := getNodes(oc, labelNodeRoleArbiter) o.Expect(err).To(o.BeNil(), "Expected to retrieve nodes without error") o.Expect(len(nodes.Items)).To(o.Equal(1)) g.By("by comparing pod counts") @@ -154,7 +148,7 @@ var _ = g.Describe("[sig-apps][apigroup:apps.openshift.io][OCPFeatureGate:Highly ctx := context.Background() g.By("Retrieving Master nodes") masterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{ - LabelSelector: labelNodeRoleMaster, + LabelSelector: labelNodeRoleControlPlane, }) o.Expect(err).To(o.BeNil(), "Expected to retrieve Master nodes without error") o.Expect(len(masterNodes.Items)).To(o.Equal(2), "Expected to find two Master nodes") diff --git a/test/extended/two_node/common.go b/test/extended/two_node/common.go index b6752d799a87..7a0363e4e21c 100644 --- a/test/extended/two_node/common.go +++ b/test/extended/two_node/common.go @@ -1,18 +1,26 @@ package two_node import ( + "context" "fmt" v1 "github.com/openshift/api/config/v1" exutil "github.com/openshift/origin/test/extended/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" ) const ( - labelNodeRoleMaster = "node-role.kubernetes.io/master" + // Node filtering constants + allNodes = "" labelNodeRoleControlPlane = "node-role.kubernetes.io/control-plane" labelNodeRoleWorker = "node-role.kubernetes.io/worker" labelNodeRoleArbiter = "node-role.kubernetes.io/arbiter" + + // CLI privilege levels + nonAdmin = false + admin = true ) func skipIfNotTopology(oc *exutil.CLI, 
wanted v1.TopologyMode) { @@ -42,3 +50,23 @@ func isClusterOperatorDegraded(operator *v1.ClusterOperator) bool { } return false } + +// createCLI creates a new CLI instance with optional admin privileges +func createCLI(requireAdmin bool) *exutil.CLI { + if requireAdmin { + return exutil.NewCLIWithoutNamespace("").AsAdmin() + } + return exutil.NewCLIWithoutNamespace("") +} + +// getNodes returns a list of nodes, optionally filtered by role label +// When roleLabel is allNodes (""), returns all nodes +// When roleLabel is specified, filters nodes by that label +func getNodes(oc *exutil.CLI, roleLabel string) (*corev1.NodeList, error) { + if roleLabel == "" { + return oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + } + return oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ + LabelSelector: roleLabel, + }) +} diff --git a/test/extended/two_node/tna_recovery.go b/test/extended/two_node/tna_recovery.go index 7f1d3ee033e3..412f403d4aed 100644 --- a/test/extended/two_node/tna_recovery.go +++ b/test/extended/two_node/tna_recovery.go @@ -25,7 +25,7 @@ const ( var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:HighlyAvailableArbiter][Suite:openshift/two-node][Disruptive] One master node outage is handled seamlessly", func() { defer g.GinkgoRecover() - oc := exutil.NewCLIWithoutNamespace("").AsAdmin() + oc := createCLI(admin) g.BeforeEach(func() { skipIfNotTopology(oc, v1.HighlyAvailableArbiterMode) @@ -35,9 +35,7 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:High ctx := context.Background() g.By("Identifying one master node to simulate failure") - masterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{ - LabelSelector: labelNodeRoleMaster, - }) + masterNodes, err := getNodes(oc, labelNodeRoleControlPlane) o.Expect(err).To(o.BeNil()) o.Expect(masterNodes.Items).To(o.HaveLen(2)) targetNode := 
masterNodes.Items[0].Name @@ -59,10 +57,9 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:High o.Expect(err).To(o.BeNil(), "Expected etcd operator to remain healthy while one master node is NotReady") }) g.AfterEach(func() { - ctx := context.Background() g.By("Ensuring all cluster nodes are back to Ready state") - nodeList, err := oc.AdminKubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{}) + nodeList, err := getNodes(oc, allNodes) o.Expect(err).To(o.BeNil(), "Failed to list cluster nodes") for _, node := range nodeList.Items { @@ -73,18 +70,14 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:High var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:HighlyAvailableArbiter][Suite:openshift/two-node][Disruptive] Recovery when arbiter node is down and master nodes restart", func() { defer g.GinkgoRecover() - oc := exutil.NewCLIWithoutNamespace("").AsAdmin() + oc := createCLI(admin) var arbiterNodeName string g.BeforeEach(func() { skipIfNotTopology(oc, v1.HighlyAvailableArbiterMode) }) g.It("should regain quorum after arbiter down and master nodes restart", func() { - ctx := context.Background() - g.By("Getting arbiter node") - arbiterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{ - LabelSelector: labelNodeRoleArbiter, - }) + arbiterNodes, err := getNodes(oc, labelNodeRoleArbiter) o.Expect(err).To(o.BeNil()) o.Expect(arbiterNodes.Items).To(o.HaveLen(1)) arbiterNode := arbiterNodes.Items[0] @@ -99,26 +92,26 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:High waitForNodeCondition(oc, arbiterNodeName, corev1.NodeReady, corev1.ConditionUnknown, statusUnknown, 5*time.Minute) g.By("Rebooting both master nodes") - masterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(ctx, metav1.ListOptions{ - LabelSelector: labelNodeRoleMaster, - }) + masterNodes, err := getNodes(oc, labelNodeRoleControlPlane) 
o.Expect(err).To(o.BeNil()) - for _, node := range masterNodes.Items { - shutdownOrRebootNode(oc, node.Name, "openshift-etcd", "shutdown", "-r", "+1") + o.Expect(masterNodes.Items).To(o.HaveLen(2)) + + for _, masterNode := range masterNodes.Items { + shutdownOrRebootNode(oc, masterNode.Name, "openshift-etcd", "shutdown", "-r", "+1") } g.By("Waiting for master nodes to become NotReady") - for _, node := range masterNodes.Items { - waitForNodeCondition(oc, node.Name, corev1.NodeReady, corev1.ConditionFalse, statusNotReady, 10*time.Minute) + for _, masterNode := range masterNodes.Items { + waitForNodeCondition(oc, masterNode.Name, corev1.NodeReady, corev1.ConditionFalse, statusNotReady, 10*time.Minute) } g.By("Waiting for master nodes to become Ready") - for _, node := range masterNodes.Items { - waitForNodeCondition(oc, node.Name, corev1.NodeReady, corev1.ConditionTrue, statusReady, 15*time.Minute) + for _, masterNode := range masterNodes.Items { + waitForNodeCondition(oc, masterNode.Name, corev1.NodeReady, corev1.ConditionTrue, statusReady, 15*time.Minute) } g.By("Waiting for etcd quorum to be restored") - err = wait.PollUntilContextTimeout(ctx, 15*time.Second, 15*time.Minute, true, func(ctx context.Context) (bool, error) { + err = wait.PollUntilContextTimeout(context.Background(), 15*time.Second, 15*time.Minute, true, func(ctx context.Context) (bool, error) { operator, err := oc.AdminConfigClient().ConfigV1().ClusterOperators().Get(ctx, "etcd", metav1.GetOptions{}) if err != nil { return false, nil diff --git a/test/extended/two_node/tnf_node_replacement.go b/test/extended/two_node/tnf_node_replacement.go new file mode 100644 index 000000000000..81b76e2183e3 --- /dev/null +++ b/test/extended/two_node/tnf_node_replacement.go @@ -0,0 +1,1601 @@ +package two_node + +import ( + "encoding/json" + "fmt" + "math/rand" + "os" + "path/filepath" + "regexp" + "strings" + "time" + + metal3v1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1" 
+ g "github.com/onsi/ginkgo/v2" + o "github.com/onsi/gomega" + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/origin/test/extended/two_node/utils" + exutil "github.com/openshift/origin/test/extended/util" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog/v2" +) + +// Constants +const ( + backupDirName = "tnf-node-replacement-backup" + + // OpenShift namespaces + machineAPINamespace = "openshift-machine-api" + etcdNamespace = "openshift-etcd" + + // Timeouts and intervals + nodeRecoveryTimeout = 10 * time.Minute + nodeRecoveryPollInterval = 15 * time.Second + csrApprovalTimeout = 5 * time.Minute + csrApprovalPollInterval = 10 * time.Second + clusterOperatorTimeout = 5 * time.Minute + clusterOperatorPollInterval = 10 * time.Second + etcdStopWaitTime = 30 * time.Second + etcdStatusCheckTimeout = 2 * time.Minute + etcdStatusCheckPollInterval = 5 * time.Second + + // Expected counts + expectedCSRCount = 2 + + // Resource types + secretResourceType = "secret" + bmhResourceType = "bmh" + machineResourceType = "machines.machine.openshift.io" + + // Output formats + yamlOutputFormat = "yaml" + + // Node states + nodeReadyState = "Ready" + + // BMH states + bmhProvisionedState = "provisioned" + + // Cluster operator states + coDegradedState = "Degraded=True" + coProgressingState = "Progressing=True" + + // Base names for dynamic resource names + etcdPeerSecretBaseName = "etcd-peer" + etcdServingSecretBaseName = "etcd-serving" + etcdServingMetricsSecretBaseName = "etcd-serving-metrics" + tnfAuthJobBaseName = "tnf-auth-job" + tnfAfterSetupJobBaseName = "tnf-after-setup-job" + + // Virsh commands + virshProvisioningBridge = "ostestpr" + + // Additional constants for pacemaker operations + pacemakerQuorumTimeout = 5 * time.Minute + pacemakerQuorumPollInterval = 10 * time.Second +) + +// Variables + +// TNFTestConfig holds all test configuration and state +// This struct groups 
related variables to avoid global variable shadowing and improve maintainability +type TNFTestConfig struct { + HypervisorConfig utils.SSHConfig + + // Node configuration + TargetNodeName string + TargetNodeIP string + TargetVMName string + TargetMachineName string + TargetMachineHash string + TargetBMCSecretName string + TargetBMHName string + TargetNodeMAC string + SurvivingNodeName string + SurvivingNodeIP string + + // Dynamic resource names + EtcdPeerSecretName string + EtcdServingSecretName string + EtcdServingMetricsSecretName string + TNFAuthJobName string + TNFAfterSetupJobName string + + // Backup and recovery + GlobalBackupDir string + + // Test execution tracking + HasAttemptedNodeProvisioning bool + + // Known hosts file paths + HypervisorKnownHostsPath string + TargetNodeKnownHostsPath string + SurvivingNodeKnownHostsPath string +} + +// etcdMemberListResponse represents the JSON response from etcdctl member list -w json +type etcdMemberListResponse struct { + Header etcdResponseHeader `json:"header"` + Members []etcdMember `json:"members"` +} + +// etcdResponseHeader represents the header in etcd responses +type etcdResponseHeader struct { + ClusterID uint64 `json:"cluster_id"` + MemberID uint64 `json:"member_id"` + RaftTerm int `json:"raft_term"` +} + +// etcdMember represents a single etcd member +type etcdMember struct { + ID uint64 `json:"ID"` + Name string `json:"name"` + PeerURLs []string `json:"peerURLs"` + ClientURLs []string `json:"clientURLs"` + IsLearner bool `json:"isLearner"` +} + +// Global test configuration instance +var ( + oc = createCLI(admin) +) + +// Main test function +var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:DualReplica][Suite:openshift/two-node][Disruptive][Requires:HypervisorSSHConfig] TNF", func() { + var testConfig TNFTestConfig + defer g.GinkgoRecover() + + g.BeforeEach(func() { + skipIfNotTopology(oc, configv1.DualReplicaTopologyMode) + setupTestEnvironment(&testConfig, oc) + }) + + 
g.AfterEach(func() { + // Always attempt recovery if we have backup data + if testConfig.GlobalBackupDir != "" { + g.By("Attempting cluster recovery from backup") + recoverClusterFromBackup(&testConfig, oc) + } + // Clean up target node known_hosts only if it was created (after reprovisioning) + if testConfig.TargetNodeKnownHostsPath != "" { + utils.CleanupRemoteKnownHostsFile(&testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.TargetNodeKnownHostsPath) + } + utils.CleanupRemoteKnownHostsFile(&testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + utils.CleanupLocalKnownHostsFile(&testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + }) + + g.It("should recover from an in-place node replacement", func() { + + g.By("Backing up the target node's configuration") + backupDir := backupTargetNodeConfiguration(&testConfig, oc) + testConfig.GlobalBackupDir = backupDir // Store globally for recovery + defer func() { + if backupDir != "" && testConfig.GlobalBackupDir == "" { + // Only clean up if recovery didn't need it + os.RemoveAll(backupDir) + } + }() + + g.By("Destroying the target VM") + destroyVM(&testConfig) + + g.By("Manually restoring etcd quorum on the survivor") + restoreEtcdQuorumOnSurvivor(&testConfig, oc) + + g.By("Deleting OpenShift node references") + deleteNodeReferences(&testConfig, oc) + + g.By("Recreating the target VM using backed up configuration") + recreateTargetVM(&testConfig, oc, backupDir) + + g.By("Provisioning the target node with Ironic") + provisionTargetNodeWithIronic(&testConfig, oc) + + g.By("Approving certificate signing requests for the new node") + approveCSRs(oc) + + g.By("Waiting for the replacement node to appear in the cluster") + waitForNodeRecovery(&testConfig, oc) + + g.By("Restoring pacemaker cluster configuration") + restorePacemakerCluster(&testConfig, oc) + + g.By("Verifying the cluster is fully restored") + 
verifyRestoredCluster(&testConfig, oc) + + g.By("Successfully completed node replacement process") + klog.V(2).Infof("Node replacement process completed. Backup files created in: %s", backupDir) + }) +}) + +// Step functions that run all of the steps + +// findObjectByNamePattern finds an object by regex pattern matching +func findObjectByNamePattern(oc *exutil.CLI, resourceType, namespace, nodeName, suffix string) string { + // List all objects of the specified type in the namespace + objectsOutput, err := oc.AsAdmin().Run("get").Args(resourceType, "-n", namespace, "-o", "name").Output() + o.Expect(err).To(o.BeNil(), "Expected to list %s objects without error", resourceType) + + // Create regex pattern based on whether suffix is provided + var pattern string + if suffix == "" { + // For objects without suffix (like BareMetalHost): *-{nodeName} + pattern = fmt.Sprintf(`.*-%s$`, regexp.QuoteMeta(nodeName)) + } else { + // For objects with suffix (like BMC secrets): *-{nodeName}-{suffix} + pattern = fmt.Sprintf(`.*-%s-%s$`, regexp.QuoteMeta(nodeName), regexp.QuoteMeta(suffix)) + } + + regex, err := regexp.Compile(pattern) + o.Expect(err).To(o.BeNil(), "Expected to compile regex pattern without error") + + // Search through the objects + lines := strings.Split(objectsOutput, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line == "" { + continue + } + + // Extract object name by finding the last "/" and taking everything after it + // This handles both simple resource types (secret/name) and API group types (baremetalhost.metal3.io/name) + lastSlashIndex := strings.LastIndex(line, "/") + if lastSlashIndex == -1 { + continue // Skip lines without "/" + } + + objectName := line[lastSlashIndex+1:] + if regex.MatchString(objectName) { + klog.V(2).Infof("Found %s: %s", resourceType, objectName) + return objectName // Return just the name without the type prefix + } + } + + // Throw an error if no match is found + if suffix == "" { + 
o.Expect("").ToNot(o.BeEmpty(), "Expected to find %s matching pattern *-%s", resourceType, nodeName) + } else { + o.Expect("").ToNot(o.BeEmpty(), "Expected to find %s matching pattern *-%s-%s", resourceType, nodeName, suffix) + } + return "" +} + +// backupTargetNodeConfiguration backs up all necessary resources for node replacement +func backupTargetNodeConfiguration(testConfig *TNFTestConfig, oc *exutil.CLI) string { + // Create backup directory + var err error + backupDir, err := os.MkdirTemp("", backupDirName) + o.Expect(err).To(o.BeNil(), "Expected to create backup directory without error") + + // Download backup of BMC secret + bmcSecretOutput, err := oc.AsAdmin().Run("get").Args(secretResourceType, testConfig.TargetBMCSecretName, "-n", machineAPINamespace, "-o", yamlOutputFormat).Output() + o.Expect(err).To(o.BeNil(), "Expected to get BMC secret without error") + bmcSecretFile := filepath.Join(backupDir, testConfig.TargetBMCSecretName+".yaml") + err = os.WriteFile(bmcSecretFile, []byte(bmcSecretOutput), 0644) + o.Expect(err).To(o.BeNil(), "Expected to write BMC secret backup without error") + + // Download backup of BareMetalHost + bmhOutput, err := oc.AsAdmin().Run("get").Args(bmhResourceType, testConfig.TargetBMHName, "-n", machineAPINamespace, "-o", yamlOutputFormat).Output() + o.Expect(err).To(o.BeNil(), "Expected to get BareMetalHost without error") + bmhFile := filepath.Join(backupDir, testConfig.TargetBMHName+".yaml") + err = os.WriteFile(bmhFile, []byte(bmhOutput), 0644) + o.Expect(err).To(o.BeNil(), "Expected to write BareMetalHost backup without error") + + // Backup machine definition using the stored testConfig.TargetMachineName + machineOutput, err := oc.AsAdmin().Run("get").Args(machineResourceType, testConfig.TargetMachineName, "-n", machineAPINamespace, "-o", yamlOutputFormat).Output() + o.Expect(err).To(o.BeNil(), "Expected to get machine without error") + machineFile := filepath.Join(backupDir, fmt.Sprintf("%s-machine.yaml",
testConfig.TargetMachineName)) + err = os.WriteFile(machineFile, []byte(machineOutput), 0644) + o.Expect(err).To(o.BeNil(), "Expected to write machine backup without error") + + // Backup etcd secrets + etcdSecrets := []string{ + testConfig.EtcdPeerSecretName, + testConfig.EtcdServingSecretName, + testConfig.EtcdServingMetricsSecretName, + } + + for _, secretName := range etcdSecrets { + // Get the secret if it exists + secretOutput, err := oc.AsAdmin().Run("get").Args(secretResourceType, secretName, "-n", etcdNamespace, "-o", yamlOutputFormat).Output() + if err != nil { + klog.Warningf("Could not backup etcd secret %s: %v", secretName, err) + continue + } + + secretFile := filepath.Join(backupDir, secretName+".yaml") + err = os.WriteFile(secretFile, []byte(secretOutput), 0644) + o.Expect(err).To(o.BeNil(), "Expected to write etcd secret %s backup without error", secretName) + klog.V(2).Infof("Backed up etcd secret: %s", secretName) + } + + klog.V(4).Infof("About to validate testConfig.TargetVMName, current value: %s", testConfig.TargetVMName) + // Validate that testConfig.TargetVMName is set + if testConfig.TargetVMName == "" { + klog.V(2).Infof("testConfig.TargetVMName bytes: %v", []byte(testConfig.TargetVMName)) + klog.V(2).Infof("ERROR: testConfig.TargetVMName is empty! 
This should have been set in setupTestEnvironment") + klog.V(2).Infof("testConfig.TargetNodeName: %s", testConfig.TargetNodeName) + klog.V(2).Infof("testConfig.SurvivingNodeName: %s", testConfig.SurvivingNodeName) + o.Expect(testConfig.TargetVMName).ToNot(o.BeEmpty(), "Expected testConfig.TargetVMName to be set before backing up VM configuration") + } + // Get XML dump of VM using SSH to hypervisor + xmlOutput, err := utils.VirshDumpXML(testConfig.TargetVMName, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to get XML dump without error") + + xmlFile := filepath.Join(backupDir, testConfig.TargetVMName+".xml") + err = os.WriteFile(xmlFile, []byte(xmlOutput), 0644) + o.Expect(err).To(o.BeNil(), "Expected to write XML dump to file without error") + + return backupDir +} + +// destroyVM destroys the target VM using SSH to hypervisor +func destroyVM(testConfig *TNFTestConfig) { + o.Expect(testConfig.TargetVMName).ToNot(o.BeEmpty(), "Expected testConfig.TargetVMName to be set before destroying VM") + klog.V(2).Infof("Destroying VM: %s", testConfig.TargetVMName) + + // Undefine and destroy VM using SSH to hypervisor + err := utils.VirshUndefineVM(testConfig.TargetVMName, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to undefine VM without error") + + err = utils.VirshDestroyVM(testConfig.TargetVMName, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to destroy VM without error") + + klog.V(2).Infof("VM %s destroyed successfully", testConfig.TargetVMName) +} + +// deleteNodeReferences deletes OpenShift resources related to the target node +func deleteNodeReferences(testConfig *TNFTestConfig, oc *exutil.CLI) { + klog.V(2).Infof("Deleting OpenShift resources for node: %s", testConfig.TargetNodeName) + + // Delete old etcd certificates using dynamic names + _, err := 
oc.AsAdmin().Run("delete").Args(secretResourceType, testConfig.EtcdPeerSecretName, "-n", etcdNamespace).Output() + o.Expect(err).To(o.BeNil(), "Expected to delete %s secret without error", testConfig.EtcdPeerSecretName) + + _, err = oc.AsAdmin().Run("delete").Args(secretResourceType, testConfig.EtcdServingSecretName, "-n", etcdNamespace).Output() + o.Expect(err).To(o.BeNil(), "Expected to delete %s secret without error", testConfig.EtcdServingSecretName) + + _, err = oc.AsAdmin().Run("delete").Args(secretResourceType, testConfig.EtcdServingMetricsSecretName, "-n", etcdNamespace).Output() + o.Expect(err).To(o.BeNil(), "Expected to delete %s secret without error", testConfig.EtcdServingMetricsSecretName) + + // Delete BareMetalHost entry + _, err = oc.AsAdmin().Run("delete").Args(bmhResourceType, testConfig.TargetBMHName, "-n", machineAPINamespace).Output() + o.Expect(err).To(o.BeNil(), "Expected to delete BareMetalHost without error") + + // Delete machine entry using the stored testConfig.TargetMachineName + _, err = oc.AsAdmin().Run("delete").Args(machineResourceType, testConfig.TargetMachineName, "-n", machineAPINamespace).Output() + o.Expect(err).To(o.BeNil(), "Expected to delete machine without error") + + klog.V(2).Infof("OpenShift resources for node %s deleted successfully", testConfig.TargetNodeName) +} + +// restoreEtcdQuorumOnSurvivor restores etcd quorum on the surviving node +func restoreEtcdQuorumOnSurvivor(testConfig *TNFTestConfig, oc *exutil.CLI) { + klog.V(2).Infof("Restoring etcd quorum on surviving node: %s", testConfig.SurvivingNodeName) + + // Wait 30 seconds after node deletion to allow etcd to stop naturally + g.By("Waiting 30 seconds for etcd to stop naturally after node deletion") + time.Sleep(etcdStopWaitTime) + + // Check that etcd has stopped on the survivor before proceeding + g.By("Verifying that etcd has stopped on the surviving node") + err := waitForEtcdToStop(testConfig) + o.Expect(err).To(o.BeNil(), "Expected etcd to stop on 
surviving node %s within timeout", testConfig.SurvivingNodeName) + + // SSH to hypervisor, then to surviving node to run pcs debug-start + // We need to chain the SSH commands: host -> hypervisor -> surviving node + output, _, err := utils.PcsDebugRestart(testConfig.SurvivingNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + if err != nil { + o.Expect(err).To(o.BeNil(), fmt.Sprintf("Failed to restore etcd quorum on %s: %v, output: %s", testConfig.SurvivingNodeName, err, output)) + } + + // Verify that etcd has started on the survivor after debug-start + g.By("Verifying that etcd has started on the surviving node after debug-start") + err = waitForEtcdToStart(testConfig) + o.Expect(err).To(o.BeNil(), "Expected etcd to start on surviving node %s within timeout", testConfig.SurvivingNodeName) + + // Log pacemaker status to check if etcd has been started on the survivor + pcsStatusOutput, _, err := utils.PcsStatus(testConfig.SurvivingNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + if err != nil { + klog.Warningf("Failed to get pacemaker status on survivor %s: %v", testConfig.SurvivingNodeIP, err) + } else { + klog.V(4).Infof("Pacemaker status on survivor %s:\n%s", testConfig.SurvivingNodeIP, pcsStatusOutput) + } + + klog.V(2).Infof("Successfully restored etcd quorum on surviving node: %s", testConfig.SurvivingNodeName) + + // Wait for pacemaker to restore quorum before proceeding with OpenShift API operations + g.By("Waiting for pacemaker to restore quorum after VM destruction") + output, err = monitorClusterOperators(oc) + o.Expect(err).To(o.BeNil(), "Expected pacemaker to restore quorum within timeout") + klog.V(2).Infof("Cluster operators status:\n%s", output) +} + +// recreateTargetVM recreates the target VM using backed up configuration +func recreateTargetVM(testConfig *TNFTestConfig, oc *exutil.CLI, backupDir string) { + 
o.Expect(testConfig.TargetVMName).ToNot(o.BeEmpty(), "Expected testConfig.TargetVMName to be set before recreating VM") + // Read the backed up XML + xmlFile := filepath.Join(backupDir, testConfig.TargetVMName+".xml") + xmlContent, err := os.ReadFile(xmlFile) + o.Expect(err).To(o.BeNil(), "Expected to read XML backup without error") + xmlOutput := string(xmlContent) + + // Create a temporary file on the hypervisor with the XML content + // First, create the XML file on the hypervisor + createXMLCommand := fmt.Sprintf(`cat > /tmp/%s.xml <<'XML_EOF' +%s +XML_EOF`, testConfig.TargetVMName, xmlOutput) + + _, _, err = utils.ExecuteSSHCommand(createXMLCommand, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to create XML file on hypervisor without error") + + // Redefine the VM using the backed up XML + err = utils.VirshDefineVM(fmt.Sprintf("/tmp/%s.xml", testConfig.TargetVMName), &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to define VM without error") + + // Start the VM with autostart enabled + err = utils.VirshStartVM(testConfig.TargetVMName, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to start VM without error") + + err = utils.VirshAutostartVM(testConfig.TargetVMName, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to enable autostart for VM without error") + + // Clean up temporary XML file + _, _, err = utils.ExecuteSSHCommand(fmt.Sprintf("rm -f /tmp/%s.xml", testConfig.TargetVMName), &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to clean up temporary XML file without error") +} + +// provisionTargetNodeWithIronic handles the Ironic provisioning process +func provisionTargetNodeWithIronic(testConfig *TNFTestConfig, oc *exutil.CLI) { + 
o.Expect(testConfig.TargetVMName).ToNot(o.BeEmpty(), "Expected testConfig.TargetVMName to be set before provisioning with Ironic") + + // Set flag to indicate we're attempting node provisioning + testConfig.HasAttemptedNodeProvisioning = true + + recreateBMCSecret(testConfig, oc) + newUUID, newMACAddress, err := utils.GetVMNetworkInfo(testConfig.TargetVMName, virshProvisioningBridge, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to get VM network info: %v", err) + updateAndCreateBMH(testConfig, oc, newUUID, newMACAddress) + waitForBMHProvisioning(testConfig, oc) + reapplyDetachedAnnotation(testConfig, oc) + recreateMachine(testConfig, oc) +} + +// approveCSRs monitors and approves Certificate Signing Requests +func approveCSRs(oc *exutil.CLI) { + // Monitor CSRs and approve them as they appear + maxCSRWaitTime := csrApprovalTimeout + csrPollInterval := csrApprovalPollInterval + csrStartTime := time.Now() + approvedCount := 0 + targetApprovedCount := expectedCSRCount + + for time.Since(csrStartTime) < maxCSRWaitTime && approvedCount < targetApprovedCount { + // Get pending CSRs + csrOutput, err := oc.AsAdmin().Run("get").Args("csr", "-o", "json").Output() + if err == nil { + // Extract CSR names that need approval (status is empty) + pendingCSRs := []string{} + lines := strings.Split(csrOutput, "\n") + for _, line := range lines { + if strings.Contains(line, "\"name\"") && strings.Contains(line, "\"status\": {}") { + // Extract CSR name from JSON + start := strings.Index(line, "\"name\": \"") + 8 + end := strings.Index(line[start:], "\"") + start + if start > 7 && end > start { + csrName := line[start:end] + pendingCSRs = append(pendingCSRs, csrName) + } + } + } + + // Approve pending CSRs + for _, csrName := range pendingCSRs { + klog.V(2).Infof("Approving CSR: %s", csrName) + _, err = oc.AsAdmin().Run("adm").Args("certificate", "approve", csrName).Output() + if err == nil { + approvedCount++ + 
klog.V(2).Infof("Approved CSR %s (total approved: %d)", csrName, approvedCount) + } + } + } + + if approvedCount < targetApprovedCount { + klog.V(2).Infof("Waiting for more CSRs to approve... (approved: %d/%d, elapsed: %v)", approvedCount, targetApprovedCount, time.Since(csrStartTime)) + time.Sleep(csrPollInterval) + } + } + + // Verify we have approved the expected number of CSRs + o.Expect(approvedCount).To(o.BeNumerically(">=", targetApprovedCount), fmt.Sprintf("Expected to approve at least %d CSRs, but only approved %d", targetApprovedCount, approvedCount)) + klog.V(2).Infof("Successfully approved %d CSRs", approvedCount) +} + +// waitForNodeRecovery monitors for the replacement node to appear in the cluster +func waitForNodeRecovery(testConfig *TNFTestConfig, oc *exutil.CLI) { + maxWaitTime := nodeRecoveryTimeout + pollInterval := nodeRecoveryPollInterval + startTime := time.Now() + + for time.Since(startTime) < maxWaitTime { + // Check if the target node exists + _, err := oc.AsAdmin().Run("get").Args("node", testConfig.TargetNodeName).Output() + if err == nil { + klog.V(2).Infof("Replacement node %s has appeared in the cluster", testConfig.TargetNodeName) + + // Wait a bit more for the node to be fully ready + time.Sleep(30 * time.Second) + + // Verify the node is in Ready state + nodeOutput, err := oc.AsAdmin().Run("get").Args("node", testConfig.TargetNodeName, "-o", "wide").Output() + if err == nil { + klog.V(4).Infof("Node status: %s", nodeOutput) + if strings.Contains(nodeOutput, nodeReadyState) { + klog.V(2).Infof("Node %s is now Ready", testConfig.TargetNodeName) + return + } + } + } + + klog.V(2).Infof("Waiting for replacement node %s to appear... 
(elapsed: %v)", testConfig.TargetNodeName, time.Since(startTime)) + time.Sleep(pollInterval) + } + + // If we reach here, the timeout was exceeded + o.Expect(false).To(o.BeTrue(), fmt.Sprintf("Replacement node %s did not appear within %v timeout", testConfig.TargetNodeName, maxWaitTime)) +} + +// restorePacemakerCluster restores the pacemaker cluster configuration +func restorePacemakerCluster(testConfig *TNFTestConfig, oc *exutil.CLI) { + // Prepare known hosts file for the target node now that it has been reprovisioned + // The SSH key changed during reprovisioning, so we need to scan it again + klog.V(2).Infof("Preparing known_hosts for reprovisioned target node: %s", testConfig.TargetNodeIP) + targetNodeKnownHostsPath, err := utils.PrepareRemoteKnownHostsFile(testConfig.TargetNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to prepare target node known hosts file after reprovisioning without error") + testConfig.TargetNodeKnownHostsPath = targetNodeKnownHostsPath + + utils.DeleteNodeJobs(testConfig.TNFAuthJobName, testConfig.TNFAfterSetupJobName, oc) + utils.RestoreEtcdRevision(testConfig.TargetNodeName, testConfig.TargetNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.TargetNodeKnownHostsPath, oc) + utils.CycleRemovedNode(testConfig.TargetNodeName, testConfig.TargetNodeIP, testConfig.SurvivingNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) +} + +// verifyRestoredCluster verifies that the cluster is fully restored and healthy +func verifyRestoredCluster(testConfig *TNFTestConfig, oc *exutil.CLI) { + klog.V(2).Infof("Verifying cluster restoration: checking node status and cluster operators") + + // Step 1: Verify both nodes are in Ready state + g.By("Verifying both nodes are in Ready state") + + // Check target node + targetNodeOutput, err := oc.AsAdmin().Run("get").Args("node", 
testConfig.TargetNodeName, "-o", "wide").Output()
	o.Expect(err).To(o.BeNil(), "Expected to get target node %s without error", testConfig.TargetNodeName)
	o.Expect(targetNodeOutput).To(o.ContainSubstring(nodeReadyState), "Expected target node %s to be in Ready state", testConfig.TargetNodeName)
	klog.V(2).Infof("Target node %s is Ready", testConfig.TargetNodeName)

	// Check surviving node
	survivingNodeOutput, err := oc.AsAdmin().Run("get").Args("node", testConfig.SurvivingNodeName, "-o", "wide").Output()
	o.Expect(err).To(o.BeNil(), "Expected to get surviving node %s without error", testConfig.SurvivingNodeName)
	o.Expect(survivingNodeOutput).To(o.ContainSubstring(nodeReadyState), "Expected surviving node %s to be in Ready state", testConfig.SurvivingNodeName)
	klog.V(2).Infof("Surviving node %s is Ready", testConfig.SurvivingNodeName)

	// Step 2: Verify all cluster operators are available (not degraded or progressing)
	g.By("Verifying all cluster operators are available")
	coOutput, err := monitorClusterOperators(oc)
	o.Expect(err).To(o.BeNil(), "Expected all cluster operators to be available")
	klog.V(2).Infof("All cluster operators are available and healthy")

	// Log final status
	klog.V(2).Infof("Cluster verification completed successfully:")
	klog.V(2).Infof("  - Target node %s is Ready", testConfig.TargetNodeName)
	klog.V(2).Infof("  - Surviving node %s is Ready", testConfig.SurvivingNodeName)
	klog.V(2).Infof("  - All cluster operators are available")
	klog.V(2).Infof("\nFinal cluster operators status:\n%s", coOutput)
}

// monitorClusterOperators polls `oc get co` until no operator is degraded or
// progressing, returning the successful output, or the last snapshot plus an
// error once clusterOperatorTimeout is exceeded.
func monitorClusterOperators(oc *exutil.CLI) (string, error) {
	maxWaitTime := clusterOperatorTimeout
	pollInterval := clusterOperatorPollInterval
	startTime := time.Now()

	for time.Since(startTime) < maxWaitTime {
		// Fetch the current cluster-operator table.
		coOutput, err := oc.AsAdmin().Run("get").Args("co", "-o", "wide").Output()
		if err != nil {
			klog.V(2).Infof("Error getting cluster operators: %v", err)
			time.Sleep(pollInterval)
			continue
		}

		// Scan each operator row for degraded/progressing markers.
		allAvailable := true
		hasDegraded := false
		hasProgressing := false
		for _, row := range strings.Split(coOutput, "\n") {
			// Skip the header row and blank rows.
			if strings.Contains(row, "NAME") && strings.Contains(row, "VERSION") {
				continue
			}
			if strings.TrimSpace(row) == "" {
				continue
			}

			if strings.Contains(row, coDegradedState) {
				hasDegraded = true
				allAvailable = false
				klog.V(2).Infof("Found degraded operator: %s", row)
			}
			if strings.Contains(row, coProgressingState) {
				hasProgressing = true
				allAvailable = false
				klog.V(2).Infof("Found progressing operator: %s", row)
			}
		}

		// Log current status.
		klog.V(2).Infof("Cluster operators status check (elapsed: %v):", time.Since(startTime))
		klog.V(2).Infof("All available: %v, Has degraded: %v, Has progressing: %v", allAvailable, hasDegraded, hasProgressing)

		// Done once every operator is settled.
		if allAvailable {
			klog.V(2).Infof("All cluster operators are available!")
			return coOutput, nil
		}

		klog.V(4).Infof("Current cluster operators status:\n%s", coOutput)
		time.Sleep(pollInterval)
	}

	// Timeout exceeded: capture one final snapshot for debugging.
	finalCoOutput, err := oc.AsAdmin().Run("get").Args("co", "-o", "wide").Output()
	if err == nil {
		klog.V(4).Infof("Final cluster operators status after timeout:\n%s", finalCoOutput)
	}

	return finalCoOutput, fmt.Errorf("cluster operators did not become available within %v timeout", maxWaitTime)
}

// Utility functions

func getNodeMACAddress(oc *exutil.CLI, nodeName string) string {
	// Find the BareMetalHost name using regex
pattern matching + bmhName := findObjectByNamePattern(oc, bmhResourceType, machineAPINamespace, nodeName, "") + + // Get the BareMetalHost YAML to extract the MAC address + bmhOutput, err := kubectlGetResource(oc, bmhResourceType, bmhName, machineAPINamespace, yamlOutputFormat) + o.Expect(err).To(o.BeNil(), "Expected to get BareMetalHost without error") + + // Parse the YAML into a BareMetalHost object + var bmh metal3v1alpha1.BareMetalHost + decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(bmhOutput), 4096) + err = decoder.Decode(&bmh) + o.Expect(err).To(o.BeNil(), "Expected to parse BareMetalHost YAML without error") + + // Extract the MAC address from the BareMetalHost spec + macAddress := bmh.Spec.BootMACAddress + o.Expect(macAddress).ToNot(o.BeEmpty(), "Expected BareMetalHost %s to have a BootMACAddress", bmhName) + + klog.V(2).Infof("Found MAC address %s for node %s", macAddress, nodeName) + return macAddress +} + +// setupTestEnvironment validates prerequisites and gathers required information +func setupTestEnvironment(testConfig *TNFTestConfig, oc *exutil.CLI) { + // Get hypervisor configuration from test context + if !exutil.HasHypervisorConfig() { + printHypervisorConfigUsage() + o.Expect(fmt.Errorf("no hypervisor configuration available")).To(o.BeNil(), "Hypervisor configuration is required. 
See usage message above for configuration options.") + } + + config := exutil.GetHypervisorConfig() + testConfig.HypervisorConfig.IP = config.HypervisorIP + testConfig.HypervisorConfig.User = config.SSHUser + testConfig.HypervisorConfig.PrivateKeyPath = config.PrivateKey + + klog.V(2).Infof("Using hypervisor configuration from test context:") + klog.V(2).Infof(" Hypervisor IP: %s", testConfig.HypervisorConfig.IP) + klog.V(2).Infof(" SSH User: %s", testConfig.HypervisorConfig.User) + klog.V(2).Infof(" Private Key Path: %s", testConfig.HypervisorConfig.PrivateKeyPath) + + // Validate that the private key file exists + if _, err := os.Stat(testConfig.HypervisorConfig.PrivateKeyPath); os.IsNotExist(err) { + o.Expect(err).To(o.BeNil(), "Private key file does not exist at path: %s", testConfig.HypervisorConfig.PrivateKeyPath) + } + + knownHostsPath, err := utils.PrepareLocalKnownHostsFile(&testConfig.HypervisorConfig) + o.Expect(err).To(o.BeNil(), "Expected to prepare local known hosts file without error") + testConfig.HypervisorKnownHostsPath = knownHostsPath + + // Verify hypervisor connectivity and virsh availability + err = utils.VerifyHypervisorAvailability(&testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to verify hypervisor connectivity without error") + + // Set target and surviving node names dynamically (random selection) + testConfig.TargetNodeName, testConfig.SurvivingNodeName = getRandomControlPlaneNode(oc) + + // Set dynamic resource names based on target node + setDynamicResourceNames(testConfig, oc) + + // Get IP addresses for both nodes + testConfig.TargetNodeIP, testConfig.SurvivingNodeIP = getNodeIPs(oc, testConfig.TargetNodeName, testConfig.SurvivingNodeName) + + // Prepare known hosts file for the surviving node + // Note: We don't prepare the target node's known_hosts here because its SSH key will change + // after reprovisioning. 
It will be prepared in restorePacemakerCluster after the node is ready. + survivingNodeKnownHostsPath, err := utils.PrepareRemoteKnownHostsFile(testConfig.SurvivingNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + o.Expect(err).To(o.BeNil(), "Expected to prepare surviving node known hosts file without error") + testConfig.SurvivingNodeKnownHostsPath = survivingNodeKnownHostsPath + + klog.V(2).Infof("Target node for replacement: %s (IP: %s)", testConfig.TargetNodeName, testConfig.TargetNodeIP) + klog.V(2).Infof("Surviving node: %s (IP: %s)", testConfig.SurvivingNodeName, testConfig.SurvivingNodeIP) + klog.V(2).Infof("Target node MAC: %s", testConfig.TargetNodeMAC) + klog.V(2).Infof("Target VM for replacement: %s", testConfig.TargetVMName) + klog.V(2).Infof("Target machine name: %s", testConfig.TargetMachineName) + + klog.V(2).Infof("Test environment setup complete. Hypervisor IP: %s", testConfig.HypervisorConfig.IP) + klog.V(4).Infof("setupTestEnvironment completed, testConfig.TargetVMName: %s", testConfig.TargetVMName) +} + +// getRandomControlPlaneNode returns a random control plane node for replacement and the surviving node +func getRandomControlPlaneNode(oc *exutil.CLI) (string, string) { + controlPlaneNodes, err := getNodes(oc, labelNodeRoleControlPlane) + o.Expect(err).To(o.BeNil(), "Expected to get control plane nodes without error") + + // Ensure we have at least 2 control plane nodes + o.Expect(len(controlPlaneNodes.Items)).To(o.BeNumerically(">=", 2), "Expected at least 2 control plane nodes for replacement test") + + // Select a random node using the same approach as other TNF recovery tests + randomIndex := rand.Intn(len(controlPlaneNodes.Items)) + selectedNode := controlPlaneNodes.Items[randomIndex].Name + + // Validate that the selected node name is not empty + o.Expect(selectedNode).ToNot(o.BeEmpty(), "Expected selected control plane node name to not be empty") + + // Find the surviving node (the other control plane node) + 
var survivingNode string + for i, node := range controlPlaneNodes.Items { + if i != randomIndex { + survivingNode = node.Name + break + } + } + + // Validate that the surviving node name is not empty + o.Expect(survivingNode).ToNot(o.BeEmpty(), "Expected surviving control plane node name to not be empty") + + klog.V(2).Infof("Randomly selected control plane node for replacement: %s (index: %d)", selectedNode, randomIndex) + klog.V(2).Infof("Surviving control plane node: %s", survivingNode) + + return selectedNode, survivingNode +} + +// getNodeIPs retrieves the IP addresses for the target and surviving nodes +func getNodeIPs(oc *exutil.CLI, targetNodeName, survivingNodeName string) (string, string) { + // Get target node IP + targetNodeIP, err := getNodeInternalIP(oc, targetNodeName) + o.Expect(err).To(o.BeNil(), "Expected to get target node IP without error") + o.Expect(targetNodeIP).ToNot(o.BeEmpty(), "Expected target node IP to not be empty") + + // Get surviving node IP + survivingNodeIP, err := getNodeInternalIP(oc, survivingNodeName) + o.Expect(err).To(o.BeNil(), "Expected to get surviving node IP without error") + o.Expect(survivingNodeIP).ToNot(o.BeEmpty(), "Expected surviving node IP to not be empty") + + klog.V(2).Infof("Target node %s IP: %s", targetNodeName, targetNodeIP) + klog.V(2).Infof("Surviving node %s IP: %s", survivingNodeName, survivingNodeIP) + + return targetNodeIP, survivingNodeIP +} + +// getNodeInternalIP gets the internal IP address of a node +func getNodeInternalIP(oc *exutil.CLI, nodeName string) (string, error) { + // Get node details in wide format to see IP addresses + nodeOutput, err := oc.AsAdmin().Run("get").Args("node", nodeName, "-o", "wide").Output() + if err != nil { + return "", fmt.Errorf("failed to get node %s details: %v", nodeName, err) + } + + // Parse the output to extract the internal IP + lines := strings.Split(nodeOutput, "\n") + for _, line := range lines { + // Skip header line + if strings.Contains(line, "NAME") 
&& strings.Contains(line, "INTERNAL-IP") { + continue + } + + // Skip empty lines + if strings.TrimSpace(line) == "" { + continue + } + + // Split by whitespace and get the INTERNAL-IP column (usually the 6th column) + fields := strings.Fields(line) + if len(fields) >= 6 { + internalIP := fields[5] // INTERNAL-IP is typically the 6th column + // Validate that it looks like an IP address + if strings.Contains(internalIP, ".") && len(internalIP) > 7 { + return internalIP, nil + } + } + } + + return "", fmt.Errorf("could not find internal IP for node %s in output: %s", nodeName, nodeOutput) +} + +// printHypervisorConfigUsage prints a detailed usage message for hypervisor configuration +func printHypervisorConfigUsage() { + usageMessage := ` +================================================================================ +TNF Node Replacement Test - Missing Hypervisor Configuration +================================================================================ + +This test requires hypervisor SSH configuration to perform node replacement +operations. Please provide the configuration using the --with-hypervisor-json flag: + +Example: +openshift-tests run openshift/two-node --with-hypervisor-json='{ + "IP": "192.168.111.1", + "User": "root", + "privateKey": "/path/to/private/key" +}' + +Configuration Details: +- IP: IP address of the hypervisor host for SSH access +- User: Username for SSH connection (typically "root") +- privateKey: Local file path to the SSH private key + +The test will use this configuration to: +- SSH into the hypervisor to manage VMs +- Perform node replacement operations +- Recover from node failures + +Environment Variable Alternative: +You can also set the HYPERVISOR_CONFIG environment variable: +export HYPERVISOR_CONFIG='{"IP":"192.168.111.1","User":"root","privateKey":"/path/to/key"}' + +For more information, see the test documentation or contact the test team. 
+================================================================================ +` + g.GinkgoT().Logf(usageMessage) +} + +// extractMachineNameFromBMH extracts the machine name from BareMetalHost's consumerRef +func extractMachineNameFromBMH(oc *exutil.CLI, nodeName string) string { + // Find the BareMetalHost name using regex pattern matching + bmhName := findObjectByNamePattern(oc, bmhResourceType, machineAPINamespace, nodeName, "") + + // Get the BareMetalHost YAML to extract the machine name + bmhOutput, err := kubectlGetResource(oc, bmhResourceType, bmhName, machineAPINamespace, yamlOutputFormat) + o.Expect(err).To(o.BeNil(), "Expected to get BareMetalHost without error") + + // Parse the YAML into a BareMetalHost object + var bmh metal3v1alpha1.BareMetalHost + decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(bmhOutput), 4096) + err = decoder.Decode(&bmh) + o.Expect(err).To(o.BeNil(), "Expected to parse BareMetalHost YAML without error") + + // Extract the machine name from consumerRef + o.Expect(bmh.Spec.ConsumerRef).ToNot(o.BeNil(), "Expected BareMetalHost to have a consumerRef") + o.Expect(bmh.Spec.ConsumerRef.Name).ToNot(o.BeEmpty(), "Expected consumerRef to have a name") + + machineName := bmh.Spec.ConsumerRef.Name + klog.V(2).Infof("Found machine name: %s", machineName) + return machineName +} + +// kubectlGetResource is a utility function to get Kubernetes resources +func kubectlGetResource(oc *exutil.CLI, resourceType, name, namespace, outputFormat string) (string, error) { + args := []string{resourceType} + if name != "" { + args = append(args, name) + } + if namespace != "" { + args = append(args, "-n", namespace) + } + if outputFormat != "" { + args = append(args, "-o", outputFormat) + } + return oc.AsAdmin().Run("get").Args(args...).Output() +} + +// kubectlCreateResource is a utility function to create Kubernetes resources from file +func kubectlCreateResource(oc *exutil.CLI, filePath string) error { + _, err := 
oc.AsAdmin().Run("create").Args("-f", filePath).Output() + return err +} + +// recoverClusterFromBackup attempts to recover the cluster from backup if the test fails +func recoverClusterFromBackup(testConfig *TNFTestConfig, oc *exutil.CLI) { + klog.V(2).Infof("Starting cluster recovery from backup directory: %s", testConfig.GlobalBackupDir) + + defer func() { + if r := recover(); r != nil { + klog.Errorf("Recovery failed with panic: %v", r) + } + // Clean up backup directory after recovery attempt + if testConfig.GlobalBackupDir != "" { + os.RemoveAll(testConfig.GlobalBackupDir) + testConfig.GlobalBackupDir = "" + } + }() + + // Step 1: Recreate the VM from backup + klog.V(2).Infof("Step 1: Recreating VM from backup") + if err := recoverVMFromBackup(testConfig); err != nil { + klog.Errorf("Failed to recover VM: %v", err) + return + } + + // Wait for VM to start + klog.V(2).Infof("Waiting for VM to start...") + time.Sleep(3 * time.Minute) + + // Step 2: Promote etcd learner member to prevent stalling + klog.V(2).Infof("Step 2: Promoting etcd learner member to prevent stalling") + if err := promoteEtcdLearnerMember(testConfig); err != nil { + klog.Warningf("Failed to promote etcd learner member: %v", err) + // Don't return here, continue with recovery as this is not critical + } + + // Step 3: Recreate etcd secrets from backup + klog.V(2).Infof("Step 3: Recreating etcd secrets from backup") + if err := recoverEtcdSecretsFromBackup(testConfig, oc); err != nil { + klog.Errorf("Failed to recover etcd secrets: %v", err) + return + } + + // Step 4: Recreate BMH and Machine + klog.V(2).Infof("Step 4: Recreating BMH and Machine from backup") + if err := recoverBMHAndMachineFromBackup(testConfig, oc); err != nil { + klog.Errorf("Failed to recover BMH and Machine: %v", err) + return + } + + // Step 5: Re-enable stonith on the surviving node + klog.V(2).Infof("Step 5: Re-enabling stonith on the surviving node") + if err := reenableStonith(testConfig); err != nil { + 
klog.Warningf("Failed to re-enable stonith: %v", err) + // Don't return here, continue with recovery as this is not critical + } + + // Step 6: Approve CSRs only if we attempted node provisioning + if testConfig.HasAttemptedNodeProvisioning { + klog.V(2).Infof("Step 6: Approving CSRs for cluster recovery (node provisioning was attempted)") + go func() { + // Run CSR approval in background for 5 minutes + timeout := time.After(5 * time.Minute) + ticker := time.NewTicker(10 * time.Second) + defer ticker.Stop() + + for { + select { + case <-timeout: + return + case <-ticker.C: + approveAnyPendingCSRs(oc) + } + } + }() + + klog.V(2).Infof("Cluster recovery initiated with CSR approval. Monitoring for 5 minutes...") + time.Sleep(5 * time.Minute) + } else { + klog.V(2).Infof("Step 6: Skipping CSR approval (no node provisioning was attempted)") + } + + klog.V(2).Infof("Cluster recovery process completed") +} + +// recoverVMFromBackup recreates the VM from the backed up XML +func recoverVMFromBackup(testConfig *TNFTestConfig) error { + // Check if the specific VM already exists + _, err := utils.VirshVMExists(testConfig.TargetVMName, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + if err == nil { + klog.V(2).Infof("VM %s already exists, skipping recreation", testConfig.TargetVMName) + return nil + } + + o.Expect(testConfig.TargetVMName).ToNot(o.BeEmpty(), "Expected testConfig.TargetVMName to be set before recreating VM") + // Read the backed up XML + xmlFile := filepath.Join(testConfig.GlobalBackupDir, testConfig.TargetVMName+".xml") + xmlContent, err := os.ReadFile(xmlFile) + if err != nil { + return fmt.Errorf("failed to read XML backup: %v", err) + } + + // Create a temporary file on the hypervisor with the XML content + createXMLCommand := fmt.Sprintf(`cat > /tmp/%s.xml <<'XML_EOF' +%s +XML_EOF`, testConfig.TargetVMName, string(xmlContent)) + + _, _, err = utils.ExecuteSSHCommand(createXMLCommand, &testConfig.HypervisorConfig, 
testConfig.HypervisorKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to create XML file on hypervisor: %v", err) + } + + // Redefine the VM using the backed up XML + err = utils.VirshDefineVM(fmt.Sprintf("/tmp/%s.xml", testConfig.TargetVMName), &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to define VM: %v", err) + } + + // Start the VM + err = utils.VirshStartVM(testConfig.TargetVMName, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to start VM: %v", err) + } + + // Enable autostart + err = utils.VirshAutostartVM(testConfig.TargetVMName, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + if err != nil { + klog.Warningf("Failed to enable autostart for VM: %v", err) + } + + // Clean up temporary XML file + _, _, err = utils.ExecuteSSHCommand(fmt.Sprintf("rm -f /tmp/%s.xml", testConfig.TargetVMName), &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath) + if err != nil { + klog.Warningf("Failed to clean up temporary XML file: %v", err) + } + + klog.V(2).Infof("Recreated VM: %s", testConfig.TargetVMName) + return nil +} + +// retryRecoveryOperation retries a recovery operation until it succeeds or times out +// This is needed because etcd learner promotion can cause intermittent API failures +func retryRecoveryOperation(operation func() error, operationName string) error { + maxRetries := 10 + retryInterval := 30 * time.Second + timeout := 5 * time.Minute + + startTime := time.Now() + + for i := 0; i < maxRetries && time.Since(startTime) < timeout; i++ { + err := operation() + if err == nil { + klog.V(2).Infof("Recovery operation %s succeeded on attempt %d after %v", operationName, i+1, time.Since(startTime)) + return nil + } + + // Check if this is an etcd learner error that we should retry + if isEtcdLearnerError(err) { + klog.V(2).Infof("Recovery operation %s failed on attempt %d due to 
etcd learner error (will retry): %v", operationName, i+1, err) + } else { + klog.V(2).Infof("Recovery operation %s failed on attempt %d with non-retryable error: %v", operationName, i+1, err) + return err // Don't retry non-etcd learner errors + } + + if i < maxRetries-1 && time.Since(startTime) < timeout { + klog.V(2).Infof("Retrying recovery operation %s in %v...", operationName, retryInterval) + time.Sleep(retryInterval) + } + } + + return fmt.Errorf("recovery operation %s failed after %d attempts over %v", operationName, maxRetries, time.Since(startTime)) +} + +// isEtcdLearnerError checks if an error is related to etcd learner restrictions +func isEtcdLearnerError(err error) bool { + if err == nil { + return false + } + + errStr := strings.ToLower(err.Error()) + // Common etcd learner error patterns (lowercase: errStr is lower-cased above) + learnerErrorPatterns := []string{ + "rpc error: code = unavailable", + "etcdserver: too many requests", + "etcdserver: request timed out", + "context deadline exceeded", + "connection refused", + "learner", + "not a voter", + "raft: not leader", + } + + for _, pattern := range learnerErrorPatterns { + if strings.Contains(errStr, pattern) { + return true + } + } + + return false +} + +// retryOperationWithTimeout retries an operation until it succeeds or times out +func retryOperationWithTimeout(operation func() error, timeout, pollInterval time.Duration, operationName string) error { + startTime := time.Now() + + for time.Since(startTime) < timeout { + err := operation() + if err == nil { + klog.V(2).Infof("Operation %s succeeded after %v", operationName, time.Since(startTime)) + return nil + } + + klog.V(2).Infof("Operation %s failed, retrying in %v: %v", operationName, pollInterval, err) + time.Sleep(pollInterval) + } + + return fmt.Errorf("operation %s failed after %v timeout", operationName, timeout) +} + +// recoverEtcdSecretsFromBackup recreates etcd secrets from backup with retry logic +func recoverEtcdSecretsFromBackup(testConfig *TNFTestConfig, oc 
*exutil.CLI) error { + etcdSecrets := []string{ + testConfig.EtcdPeerSecretName, + testConfig.EtcdServingSecretName, + testConfig.EtcdServingMetricsSecretName, + } + + for _, secretName := range etcdSecrets { + secretFile := filepath.Join(testConfig.GlobalBackupDir, secretName+".yaml") + if _, err := os.Stat(secretFile); os.IsNotExist(err) { + klog.Warningf("Backup file for etcd secret %s not found", secretName) + continue + } + + // Check if the secret already exists + _, err := oc.AsAdmin().Run("get").Args(secretResourceType, secretName, "-n", etcdNamespace).Output() + if err == nil { + klog.V(2).Infof("Etcd secret %s already exists, skipping recreation", secretName) + continue + } + + // Retry the secret creation with etcd learner error handling + err = retryRecoveryOperation(func() error { + return kubectlCreateResource(oc, secretFile) + }, fmt.Sprintf("create etcd secret %s", secretName)) + + if err != nil { + klog.Warningf("Failed to recreate etcd secret %s after retries: %v", secretName, err) + continue + } + klog.V(2).Infof("Recreated etcd secret: %s", secretName) + } + + return nil +} + +// recoverBMHAndMachineFromBackup recreates BMH and Machine from backup with retry logic +func recoverBMHAndMachineFromBackup(testConfig *TNFTestConfig, oc *exutil.CLI) error { + + err := recreateBMCSecret(testConfig, oc) + if err != nil { + return fmt.Errorf("failed to recreate BMC secret: %v", err) + } + + // Recreate Machine with retry + machineFile := filepath.Join(testConfig.GlobalBackupDir, testConfig.TargetMachineName+"-machine.yaml") + + // Check if Machine already exists + _, err = oc.AsAdmin().Run("get").Args(machineResourceType, testConfig.TargetMachineName, "-n", machineAPINamespace).Output() + if err != nil { + // Retry Machine creation + err = retryRecoveryOperation(func() error { + return kubectlCreateResource(oc, machineFile) + }, fmt.Sprintf("create Machine %s", testConfig.TargetMachineName)) + + if err != nil { + return fmt.Errorf("failed to recreate 
Machine after retries: %v", err) + } + klog.V(2).Infof("Recreated Machine: %s", testConfig.TargetMachineName) + } else { + klog.V(2).Infof("Machine %s already exists, skipping recreation", testConfig.TargetMachineName) + } + + return nil +} + +// approveAnyPendingCSRs approves any pending CSRs found in the cluster with retry logic +func approveAnyPendingCSRs(oc *exutil.CLI) { + // Get pending CSRs with retry + var csrOutput string + err := retryRecoveryOperation(func() error { + var err error + csrOutput, err = oc.AsAdmin().Run("get").Args("csr", "-o", "json").Output() + return err + }, "get pending CSRs") + + if err != nil { + klog.Warningf("Failed to get CSRs after retries: %v", err) + return + } + + // Extract CSR names that need approval (status is empty) + pendingCSRs := []string{} + lines := strings.Split(csrOutput, "\n") + for _, line := range lines { + if strings.Contains(line, "\"name\"") && strings.Contains(line, "\"status\": {}") { + // Extract CSR name from JSON; the marker `"name": "` is 9 chars long, + // so skip 9 to land on the first character of the value + start := strings.Index(line, "\"name\": \"") + 9 + end := strings.Index(line[start:], "\"") + start + if start > 8 && end > start { + csrName := line[start:end] + pendingCSRs = append(pendingCSRs, csrName) + } + } + } + + // Approve pending CSRs with retry + for _, csrName := range pendingCSRs { + klog.V(2).Infof("Approving CSR during recovery: %s", csrName) + + err = retryRecoveryOperation(func() error { + _, err := oc.AsAdmin().Run("adm").Args("certificate", "approve", csrName).Output() + return err + }, fmt.Sprintf("approve CSR %s", csrName)) + + if err == nil { + klog.V(2).Infof("Approved CSR during recovery: %s", csrName) + } else { + klog.Warningf("Failed to approve CSR %s after retries: %v", csrName, err) + } + } +} + +// waitForPacemakerQuorum waits for pacemaker to restore quorum after node deletion +func waitForPacemakerQuorum() error { + klog.V(2).Infof("Waiting for API server to restore quorum (checking with oc status)...") + + return retryOperationWithTimeout(func() error { + // 
Check if the API server is accessible by running a simple oc command + // If we have quorum, the API server will be working. If not, the request will timeout. + _, err := oc.AsAdmin().Run("status").Args().Output() + if err != nil { + return fmt.Errorf("API server not yet accessible (no quorum): %v", err) + } + + klog.V(4).Infof("API server is accessible - quorum restored") + return nil + }, pacemakerQuorumTimeout, pacemakerQuorumPollInterval, "API server quorum restoration") +} + +// promoteEtcdLearnerMember promotes the etcd learner member to voter status +func promoteEtcdLearnerMember(testConfig *TNFTestConfig) error { + klog.V(2).Infof("Attempting to promote etcd learner member on surviving node: %s (IP: %s)", testConfig.SurvivingNodeName, testConfig.SurvivingNodeIP) + + return retryOperationWithTimeout(func() error { + // First, get the list of etcd members to find the learner + memberListCmd := `sudo podman exec -it etcd etcdctl member list -w json` + output, _, err := utils.ExecuteRemoteSSHCommand(testConfig.SurvivingNodeIP, memberListCmd, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to get etcd member list on %s: %v", testConfig.SurvivingNodeIP, err) + } + + klog.V(4).Infof("Etcd member list output: %s", output) + + // Parse the JSON output to find the learner member + learnerMemberID, learnerName, err := findLearnerMemberID(output) + if err != nil { + return fmt.Errorf("failed to find learner member ID: %v", err) + } + + if learnerMemberID == "" { + klog.V(2).Infof("No learner member found, all members are already voters") + return nil // No learner to promote, this is success + } + + klog.V(2).Infof("Found learner member: ID=%s, Name=%s", learnerMemberID, learnerName) + + // Promote the learner member + promoteCmd := fmt.Sprintf(`sudo podman exec -it etcd etcdctl member promote %s`, learnerMemberID) + promoteOutput, _, err := 
utils.ExecuteRemoteSSHCommand(testConfig.SurvivingNodeIP, promoteCmd, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to promote etcd learner member %s on %s: %v, output: %s", learnerMemberID, testConfig.SurvivingNodeIP, err, promoteOutput) + } + + klog.V(4).Infof("Successfully promoted etcd learner member %s: %s", learnerMemberID, promoteOutput) + return nil + }, 10*time.Minute, 30*time.Second, "promote etcd learner member") +} + +// findLearnerMemberID parses the etcd member list JSON output to find the learner member ID and name +func findLearnerMemberID(memberListJSON string) (string, string, error) { + // Parse the JSON output + var memberList etcdMemberListResponse + err := json.Unmarshal([]byte(memberListJSON), &memberList) + if err != nil { + return "", "", fmt.Errorf("failed to parse etcd member list JSON: %v", err) + } + + // Find learner members + for _, member := range memberList.Members { + if member.IsLearner { + // Convert member ID to hex string format (etcdctl expects hex format) + memberIDHex := fmt.Sprintf("%x", member.ID) + klog.V(2).Infof("Found learner member: ID=%s (hex: %s), Name=%s", fmt.Sprintf("%d", member.ID), memberIDHex, member.Name) + return memberIDHex, member.Name, nil + } + } + + // No learner found + klog.V(2).Infof("No learner member found in member list") + return "", "", nil +} + +// Missing functions that need to be implemented +func recreateBMCSecret(testConfig *TNFTestConfig, oc *exutil.CLI) error { + // Recreate BMC secret with retry + bmcSecretFile := filepath.Join(testConfig.GlobalBackupDir, testConfig.TargetBMCSecretName+".yaml") + + // Check if BMC secret already exists + _, err := oc.AsAdmin().Run("get").Args(secretResourceType, testConfig.TargetBMCSecretName, "-n", machineAPINamespace).Output() + if err != nil { + // Retry BMC secret creation + err = retryRecoveryOperation(func() error { + return 
kubectlCreateResource(oc, bmcSecretFile) + }, fmt.Sprintf("create BMC secret %s", testConfig.TargetBMCSecretName)) + + if err != nil { + return fmt.Errorf("failed to recreate BMC secret after retries: %v", err) + } + klog.V(2).Infof("Recreated BMC secret: %s", testConfig.TargetBMCSecretName) + } else { + klog.V(2).Infof("BMC secret %s already exists, skipping recreation", testConfig.TargetBMCSecretName) + } + + return nil +} + +func updateAndCreateBMH(testConfig *TNFTestConfig, oc *exutil.CLI, newUUID, newMACAddress string) { + klog.V(2).Infof("Creating BareMetalHost with UUID: %s, MAC: %s", newUUID, newMACAddress) + + // Read the BMH template from testdata + templatePath := filepath.Join("test", "extended", "testdata", "two_node", "baremetalhost-template.yaml") + templateContent, err := os.ReadFile(templatePath) + o.Expect(err).To(o.BeNil(), "Expected to read BMH template without error") + + // Replace placeholders with actual values + bmhContent := string(templateContent) + bmhContent = strings.ReplaceAll(bmhContent, "{NAME}", testConfig.TargetBMHName) + bmhContent = strings.ReplaceAll(bmhContent, "{IP}", testConfig.TargetNodeIP) + bmhContent = strings.ReplaceAll(bmhContent, "{UUID}", newUUID) + bmhContent = strings.ReplaceAll(bmhContent, "{CREDENTIALS_NAME}", testConfig.TargetBMCSecretName) + bmhContent = strings.ReplaceAll(bmhContent, "{BOOT_MAC_ADDRESS}", newMACAddress) + + // Create a temporary file with the updated BMH content + tmpFile, err := os.CreateTemp("", "bmh-*.yaml") + o.Expect(err).To(o.BeNil(), "Expected to create temporary BMH file without error") + defer os.Remove(tmpFile.Name()) + + _, err = tmpFile.WriteString(bmhContent) + o.Expect(err).To(o.BeNil(), "Expected to write BMH content to temporary file without error") + tmpFile.Close() + + // Create the BareMetalHost using oc create + _, err = oc.AsAdmin().Run("create").Args("-f", tmpFile.Name()).Output() + o.Expect(err).To(o.BeNil(), "Expected to create BareMetalHost without error") + + 
klog.V(2).Infof("Successfully created BareMetalHost: %s", testConfig.TargetBMHName) +} + +func waitForBMHProvisioning(testConfig *TNFTestConfig, oc *exutil.CLI) { + klog.V(2).Infof("Waiting for BareMetalHost %s to be provisioned...", testConfig.TargetBMHName) + + maxWaitTime := 15 * time.Minute + pollInterval := 30 * time.Second + startTime := time.Now() + + for time.Since(startTime) < maxWaitTime { + // Get the specific BareMetalHost in YAML format + bmhOutput, err := oc.AsAdmin().Run("get").Args(bmhResourceType, testConfig.TargetBMHName, "-n", machineAPINamespace, "-o", yamlOutputFormat).Output() + if err != nil { + klog.V(2).Infof("Error getting BareMetalHost %s: %v", testConfig.TargetBMHName, err) + time.Sleep(pollInterval) + continue + } + + // Parse the YAML into a BareMetalHost object + var bmh metal3v1alpha1.BareMetalHost + decoder := yaml.NewYAMLOrJSONDecoder(strings.NewReader(bmhOutput), 4096) + err = decoder.Decode(&bmh) + if err != nil { + klog.V(2).Infof("Error parsing BareMetalHost YAML: %v", err) + time.Sleep(pollInterval) + continue + } + + // Check the provisioning state + currentState := string(bmh.Status.Provisioning.State) + klog.V(4).Infof("BareMetalHost %s current state: %s", testConfig.TargetBMHName, currentState) + + // Check if BMH is in provisioned state + if currentState == bmhProvisionedState { + klog.V(2).Infof("BareMetalHost %s is provisioned", testConfig.TargetBMHName) + return + } + + // Log additional status information + if bmh.Status.ErrorMessage != "" { + klog.V(2).Infof("BareMetalHost %s error message: %s", testConfig.TargetBMHName, bmh.Status.ErrorMessage) + } + + klog.V(2).Infof("Waiting for BareMetalHost %s provisioning (current state: %s, elapsed: %v)", + testConfig.TargetBMHName, currentState, time.Since(startTime)) + time.Sleep(pollInterval) + } + + // If we reach here, the timeout was exceeded + o.Expect(false).To(o.BeTrue(), fmt.Sprintf("BareMetalHost %s did not reach provisioned state within %v timeout", 
testConfig.TargetBMHName, maxWaitTime)) +} + +func reapplyDetachedAnnotation(testConfig *TNFTestConfig, oc *exutil.CLI) { + klog.V(2).Infof("Applying detached annotation to BareMetalHost: %s", testConfig.TargetBMHName) + + // Apply the detached annotation to the specific BMH + _, err := oc.AsAdmin().Run("annotate").Args( + bmhResourceType, testConfig.TargetBMHName, + "-n", machineAPINamespace, + "baremetalhost.metal3.io/detached=true", + "--overwrite", + ).Output() + o.Expect(err).To(o.BeNil(), "Expected to apply detached annotation to BMH %s without error", testConfig.TargetBMHName) + + klog.V(2).Infof("Successfully applied detached annotation to BareMetalHost: %s", testConfig.TargetBMHName) +} + +func recreateMachine(testConfig *TNFTestConfig, oc *exutil.CLI) { + klog.V(2).Infof("Recreating Machine: %s", testConfig.TargetMachineName) + + // Check if the machine already exists + _, err := oc.AsAdmin().Run("get").Args(machineResourceType, testConfig.TargetMachineName, "-n", machineAPINamespace).Output() + if err == nil { + klog.V(2).Infof("Machine %s already exists, skipping recreation", testConfig.TargetMachineName) + return + } + + // Read the Machine template from testdata + templatePath := filepath.Join("test", "extended", "testdata", "two_node", "machine-template.yaml") + templateContent, err := os.ReadFile(templatePath) + o.Expect(err).To(o.BeNil(), "Expected to read Machine template without error") + + // Replace placeholders with actual values + machineContent := string(templateContent) + machineContent = strings.ReplaceAll(machineContent, "{NODE_NAME}", testConfig.TargetBMHName) + machineContent = strings.ReplaceAll(machineContent, "{MACHINE_NAME}", testConfig.TargetMachineName) + machineContent = strings.ReplaceAll(machineContent, "{MACHINE_HASH}", testConfig.TargetMachineHash) + + // Create a temporary file with the updated Machine content + tmpFile, err := os.CreateTemp("", "machine-*.yaml") + o.Expect(err).To(o.BeNil(), "Expected to create temporary 
Machine file without error") + defer os.Remove(tmpFile.Name()) + + _, err = tmpFile.WriteString(machineContent) + o.Expect(err).To(o.BeNil(), "Expected to write Machine content to temporary file without error") + tmpFile.Close() + + // Create the Machine using oc create + _, err = oc.AsAdmin().Run("create").Args("-f", tmpFile.Name()).Output() + o.Expect(err).To(o.BeNil(), "Expected to create Machine without error") + + klog.V(2).Infof("Successfully recreated Machine: %s", testConfig.TargetMachineName) +} + +func setDynamicResourceNames(testConfig *TNFTestConfig, oc *exutil.CLI) { + // Set dynamic resource names based on target node + testConfig.EtcdPeerSecretName = fmt.Sprintf("%s-%s", etcdPeerSecretBaseName, testConfig.TargetNodeName) + testConfig.EtcdServingSecretName = fmt.Sprintf("%s-%s", etcdServingSecretBaseName, testConfig.TargetNodeName) + testConfig.EtcdServingMetricsSecretName = fmt.Sprintf("%s-%s", etcdServingMetricsSecretBaseName, testConfig.TargetNodeName) + testConfig.TNFAuthJobName = fmt.Sprintf("%s-%s", tnfAuthJobBaseName, testConfig.TargetNodeName) + testConfig.TNFAfterSetupJobName = fmt.Sprintf("%s-%s", tnfAfterSetupJobBaseName, testConfig.TargetNodeName) + testConfig.TargetBMCSecretName = findObjectByNamePattern(oc, secretResourceType, machineAPINamespace, testConfig.TargetNodeName, "bmc-secret") + testConfig.TargetBMHName = findObjectByNamePattern(oc, bmhResourceType, machineAPINamespace, testConfig.TargetNodeName, "") + + // Get the MAC address of the target node from its BareMetalHost + testConfig.TargetNodeMAC = getNodeMACAddress(oc, testConfig.TargetNodeName) + klog.V(4).Infof("Found targetNodeMAC: %s for node: %s", testConfig.TargetNodeMAC, testConfig.TargetNodeName) + + // Find the corresponding VM name by matching MAC addresses + var err error + testConfig.TargetVMName, err = utils.GetVMNameByMACMatch(testConfig.TargetNodeName, testConfig.TargetNodeMAC, virshProvisioningBridge, &testConfig.HypervisorConfig, 
testConfig.HypervisorKnownHostsPath) + klog.V(4).Infof("GetVMNameByMACMatch returned: testConfig.TargetVMName=%s, err=%v", testConfig.TargetVMName, err) + o.Expect(err).To(o.BeNil(), "Expected to find VM name for node %s with MAC %s: %v", testConfig.TargetNodeName, testConfig.TargetNodeMAC, err) + + // Ensure we found a valid VM name + o.Expect(testConfig.TargetVMName).ToNot(o.BeEmpty(), "Expected to find a valid VM name for node %s with MAC %s", testConfig.TargetNodeName, testConfig.TargetNodeMAC) + + // Extract and store the machine name from the BMH consumerRef + testConfig.TargetMachineName = extractMachineNameFromBMH(oc, testConfig.TargetNodeName) + + // Extract the machine hash from the machine name + // Machine name format: {cluster}-{hash}-{role}-{index} (e.g., "ostest-abc123-master-0") + machineNameParts := strings.Split(testConfig.TargetMachineName, "-") + if len(machineNameParts) >= 4 { + testConfig.TargetMachineHash = machineNameParts[1] + klog.V(2).Infof("Extracted machine hash: %s from machine name: %s", testConfig.TargetMachineHash, testConfig.TargetMachineName) + } else { + klog.Warningf("Unable to extract machine hash from machine name: %s (unexpected format)", testConfig.TargetMachineName) + } +} + +// waitForEtcdToStop waits for etcd to stop on the surviving node +func waitForEtcdToStop(testConfig *TNFTestConfig) error { + klog.V(2).Infof("Waiting for etcd to stop on surviving node: %s", testConfig.SurvivingNodeName) + + return retryOperationWithTimeout(func() error { + // Check etcd resource status on the surviving node + output, _, err := utils.PcsResourceStatus(testConfig.SurvivingNodeName, testConfig.SurvivingNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to get etcd resource status on %s: %v, output: %s", testConfig.SurvivingNodeName, err, output) + } + + klog.V(4).Infof("Etcd resource status on %s:\n%s", 
testConfig.SurvivingNodeName, output) + + // Check if etcd is stopped (not started) on the surviving node + // We expect to see "Stopped: [ master-X ]" or no "Started:" line for the survivor + lines := strings.Split(output, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.Contains(line, "Started:") && strings.Contains(line, testConfig.SurvivingNodeName) { + return fmt.Errorf("etcd is still started on surviving node %s", testConfig.SurvivingNodeName) + } + } + + // If we get here, etcd is not started on the surviving node + klog.V(2).Infof("Etcd has stopped on surviving node: %s", testConfig.SurvivingNodeName) + return nil + }, etcdStatusCheckTimeout, etcdStatusCheckPollInterval, fmt.Sprintf("etcd stop on %s", testConfig.SurvivingNodeName)) +} + +// waitForEtcdToStart waits for etcd to start on the surviving node +func waitForEtcdToStart(testConfig *TNFTestConfig) error { + klog.V(2).Infof("Waiting for etcd to start on surviving node: %s", testConfig.SurvivingNodeName) + + return retryOperationWithTimeout(func() error { + // Check etcd resource status on the surviving node + output, _, err := utils.PcsResourceStatus(testConfig.SurvivingNodeName, testConfig.SurvivingNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to get etcd resource status on %s: %v, output: %s", testConfig.SurvivingNodeName, err, output) + } + + klog.V(4).Infof("Etcd resource status on %s:\n%s", testConfig.SurvivingNodeName, output) + + // Check if etcd is started on the surviving node + // We expect to see "Started: [ master-X ]" for the survivor + lines := strings.Split(output, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if strings.Contains(line, "Started:") && strings.Contains(line, testConfig.SurvivingNodeName) { + klog.V(2).Infof("Etcd has started on surviving node: %s", testConfig.SurvivingNodeName) + return nil + } + } 
+ + // If we get here, etcd is not started on the surviving node + // Get pacemaker journal logs to help with debugging + klog.V(2).Infof("Etcd is not started on %s, getting pacemaker journal logs for debugging", testConfig.SurvivingNodeName) + journalOutput, _, journalErr := utils.PcsJournal(25, testConfig.SurvivingNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + if journalErr != nil { + klog.Warningf("Failed to get pacemaker journal logs on %s: %v", testConfig.SurvivingNodeName, journalErr) + } else { + klog.V(4).Infof("Last 20 lines of pacemaker journal on %s:\n%s", testConfig.SurvivingNodeName, journalOutput) + } + + return fmt.Errorf("etcd is not started on surviving node %s", testConfig.SurvivingNodeName) + }, etcdStatusCheckTimeout, etcdStatusCheckPollInterval, fmt.Sprintf("etcd start on %s", testConfig.SurvivingNodeName)) +} + +// reenableStonith re-enables stonith on the surviving node +func reenableStonith(testConfig *TNFTestConfig) error { + klog.V(2).Infof("Re-enabling stonith on surviving node: %s", testConfig.SurvivingNodeName) + + // Execute the stonith enable command on the surviving node + output, _, err := utils.PcsEnableStonith(testConfig.SurvivingNodeIP, &testConfig.HypervisorConfig, testConfig.HypervisorKnownHostsPath, testConfig.SurvivingNodeKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to re-enable stonith on %s: %v, output: %s", testConfig.SurvivingNodeName, err, output) + } + + klog.V(2).Infof("Successfully re-enabled stonith on surviving node: %s", testConfig.SurvivingNodeName) + klog.V(4).Infof("Stonith enable output: %s", output) + return nil +} \ No newline at end of file diff --git a/test/extended/two_node/tnf_recovery.go b/test/extended/two_node/tnf_recovery.go index 2fe727bbd02c..8cd21405c59b 100644 --- a/test/extended/two_node/tnf_recovery.go +++ b/test/extended/two_node/tnf_recovery.go @@ -34,7 +34,7 @@ var _ = 
g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual defer g.GinkgoRecover() var ( - oc = util.NewCLIWithoutNamespace("").AsAdmin() + oc = createCLI(admin) etcdClientFactory *helpers.EtcdClientFactoryImpl peerNode, targetNode corev1.Node ) @@ -47,7 +47,7 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual return ensureEtcdOperatorHealthy(oc) }, etcdOperatorIsHealthyTimeout, pollInterval).ShouldNot(o.HaveOccurred(), "etcd cluster operator should be healthy before starting test") - nodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{}) + nodes, err := getNodes(oc, allNodes) o.Expect(err).ShouldNot(o.HaveOccurred(), "Expected to retrieve nodes without error") o.Expect(len(nodes.Items)).To(o.BeNumerically("==", 2), "Expected to find 2 Nodes only") @@ -56,6 +56,7 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual peerNode = nodes.Items[randomIndex] // Select the remaining index targetNode = nodes.Items[(randomIndex+1)%len(nodes.Items)] + g.GinkgoT().Printf("Randomly selected %s (%s) to be shut down and %s (%s) to take the lead\n", targetNode.Name, targetNode.Status.Addresses[0].Address, peerNode.Name, peerNode.Status.Addresses[0].Address) kubeClient := oc.KubeClient() etcdClientFactory = helpers.NewEtcdClientFactory(kubeClient) diff --git a/test/extended/two_node/tnf_topology.go b/test/extended/two_node/tnf_topology.go index b333dd5ae8ac..3ea56e8f60e4 100644 --- a/test/extended/two_node/tnf_topology.go +++ b/test/extended/two_node/tnf_topology.go @@ -19,7 +19,7 @@ const ensurePodmanEtcdContainerIsRunning = "podman inspect --format '{{.State.Ru var _ = g.Describe("[sig-node][apigroup:config.openshift.io][OCPFeatureGate:DualReplica] Two Node with Fencing topology", func() { defer g.GinkgoRecover() var ( - oc = exutil.NewCLIWithoutNamespace("") + oc = createCLI(nonAdmin) ) g.BeforeEach(func() { @@ -33,15 +33,11 @@ var _ = 
g.Describe("[sig-node][apigroup:config.openshift.io][OCPFeatureGate:Dual ) g.By(fmt.Sprintf("Ensuring only %d control-plane nodes in the cluster and no arbiter nodes", expectedControlPlanes)) - controlPlaneNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ - LabelSelector: labelNodeRoleControlPlane, - }) + controlPlaneNodes, err := getNodes(oc, labelNodeRoleControlPlane) o.Expect(err).ShouldNot(o.HaveOccurred(), "Expected to retrieve control-plane nodes without error") o.Expect(len(controlPlaneNodes.Items)).To(o.Equal(expectedControlPlanes), fmt.Sprintf("Expected %d Control-plane Nodes, found %d", expectedControlPlanes, len(controlPlaneNodes.Items))) - arbiterNodes, err := oc.AdminKubeClient().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{ - LabelSelector: labelNodeRoleArbiter, - }) + arbiterNodes, err := getNodes(oc, labelNodeRoleArbiter) o.Expect(err).ShouldNot(o.HaveOccurred(), "Expected to retrieve arbiter nodes without error") o.Expect(len(arbiterNodes.Items)).To(o.Equal(expectedArbiters), fmt.Sprintf("Expected %d Arbiter Nodes, found %d", expectedArbiters, len(arbiterNodes.Items))) }) @@ -88,7 +84,7 @@ var _ = g.Describe("[sig-node][apigroup:config.openshift.io][OCPFeatureGate:Dual var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:DualReplica] Two Node with Fencing", func() { defer g.GinkgoRecover() var ( - oc = exutil.NewCLIWithoutNamespace("") + oc = createCLI(nonAdmin) ) g.BeforeEach(func() { @@ -110,7 +106,6 @@ var _ = g.Describe("[sig-etcd][apigroup:config.openshift.io][OCPFeatureGate:Dual etcdContainerCount := 0 etcdctlContainerCount := 0 - for _, pod := range pods.Items { for _, container := range pod.Spec.Containers { if container.Name == "etcd" { diff --git a/test/extended/two_node/utils/hypervisor.go b/test/extended/two_node/utils/hypervisor.go new file mode 100644 index 000000000000..f6cb8d760d4a --- /dev/null +++ b/test/extended/two_node/utils/hypervisor.go 
@@ -0,0 +1,125 @@ +// Package utils provides hypervisor configuration and validation utilities for two-node cluster testing. +// +// Tests requiring hypervisor access should include the [Requires:HypervisorSSHConfig] annotation. +// +// Configuration can be provided via command-line flag or environment variable: +// +// openshift-tests run openshift/two-node --with-hypervisor-json='{ +// "IP": "192.168.111.1", +// "User": "root", +// "privateKeyPath": "/path/to/private/key" +// }' +// +// Or: +// +// export HYPERVISOR_CONFIG='{"IP":"192.168.111.1","User":"root","privateKeyPath":"/path/to/key"}' +// openshift-tests run openshift/two-node +// +// Usage example: +// +// if !exutil.HasHypervisorConfig() { +// utils.PrintHypervisorConfigUsage() +// return +// } +// config := exutil.GetHypervisorConfig() +// utils.VerifyHypervisorAvailability(&config, knownHostsPath) +package utils + +import ( + "fmt" + "strings" + + g "github.com/onsi/ginkgo/v2" + "k8s.io/klog/v2" +) + +// PrintHypervisorConfigUsage prints usage instructions for configuring hypervisor SSH access. +// Call this when HasHypervisorConfig() returns false to provide configuration guidance. +func PrintHypervisorConfigUsage() { + usageMessage := ` +================================================================================ +Two-Node Test Suite - Hypervisor Configuration Required +================================================================================ + +This test requires hypervisor SSH configuration to manage virtual machines +and perform node operations. The [Requires:HypervisorSSHConfig] annotation +indicates this requirement. + +CONFIGURATION METHODS: + +1. Command-Line Flag (recommended for interactive testing): + + openshift-tests run openshift/two-node --with-hypervisor-json='{ + "IP": "192.168.111.1", + "User": "root", + "privateKeyPath": "/path/to/private/key" + }' + +2. 
Environment Variable (recommended for CI/CD): + + export HYPERVISOR_CONFIG='{"IP":"192.168.111.1","User":"root","privateKeyPath":"/path/to/key"}' + openshift-tests run openshift/two-node + +CONFIGURATION FIELDS: + +- IP: IP address or hostname of the hypervisor +- User: SSH username (typically "root") +- privateKeyPath: Absolute path to SSH private key file + +TROUBLESHOOTING: + +If configuration fails: +1. Verify JSON syntax is valid +2. Check that the private key file exists +3. Test SSH connectivity: ssh -i <private-key-path> <user>@<hypervisor-ip> +4. Verify virsh is available: ssh <user>@<hypervisor-ip> 'virsh version' + +================================================================================ +` + g.GinkgoT().Logf(usageMessage) +} + +// VerifyHypervisorAvailability verifies SSH connectivity to the hypervisor and checks +// that virsh and libvirt are available. +func VerifyHypervisorAvailability(sshConfig *SSHConfig, knownHostsPath string) error { + klog.V(2).Infof("Verifying hypervisor connectivity to %s@%s", sshConfig.User, sshConfig.IP) + + // Test basic SSH connectivity + output, _, err := VerifyConnectivity(sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "Failed to establish SSH connection to hypervisor", + "user", sshConfig.User, + "host", sshConfig.IP, + "output", output) + klog.ErrorS(nil, "Ensure the hypervisor is accessible and SSH key is correct") + return fmt.Errorf("failed to establish SSH connection to hypervisor %s@%s: %w", sshConfig.User, sshConfig.IP, err) + } + klog.V(2).Infof("SSH connectivity verified: %s", strings.TrimSpace(output)) + + // Test virsh availability and basic functionality + output, err = VerifyVirsh(sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "virsh is not available or not working on hypervisor", + "user", sshConfig.User, + "host", sshConfig.IP, + "output", output) + klog.ErrorS(nil, "Ensure libvirt and virsh are installed on the hypervisor") + return fmt.Errorf("virsh is not available or not working on hypervisor %s@%s: %w", sshConfig.User, 
sshConfig.IP, err) + } + klog.V(2).Infof("virsh availability verified: %s", strings.TrimSpace(output)) + + // Test libvirt connection by listing VMs + output, err = VirshListAllVMs(sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "Failed to connect to libvirt on hypervisor", + "user", sshConfig.User, + "host", sshConfig.IP, + "output", output) + klog.ErrorS(nil, "Ensure libvirtd service is running and user has access") + return fmt.Errorf("failed to connect to libvirt on hypervisor %s@%s: %w", sshConfig.User, sshConfig.IP, err) + } + klog.V(2).Infof("libvirt connection verified, found VMs: %s", strings.TrimSpace(output)) + + klog.V(2).Infof("Hypervisor connectivity verification completed successfully") + return nil +} diff --git a/test/extended/two_node/utils/pacemaker.go b/test/extended/two_node/utils/pacemaker.go new file mode 100644 index 000000000000..189415db66a9 --- /dev/null +++ b/test/extended/two_node/utils/pacemaker.go @@ -0,0 +1,394 @@ +// Package utils provides Pacemaker cluster management utilities for two-node OpenShift cluster testing. +// +// This package enables management and recovery operations for Pacemaker-managed etcd clusters in +// two-node OpenShift deployments. It provides high-level functions for cluster operations, resource +// management, and disaster recovery scenarios. +// +// Background: +// +// Two-node OpenShift clusters use Pacemaker to manage etcd quorum and provide high availability. +// Pacemaker uses the PCS (Pacemaker Configuration System) command-line tool for cluster management. +// This package wraps PCS commands and provides utilities specific to two-node cluster recovery. 
+// +// Key Features: +// - Pacemaker cluster status monitoring +// - etcd resource management (start, stop, debug operations) +// - STONITH (node fencing) control +// - Cluster membership management (add/remove nodes) +// - etcd revision file restoration +// - Node job cleanup for test scenarios +// - Retry utilities for handling transient failures +// +// Error Handling: +// +// All functions in this package return errors instead of using assertions (o.Expect). +// This makes them suitable for use as library functions. Calling code should check +// and handle errors appropriately, typically using o.Expect() in test code. +// +// Common Usage Patterns: +// +// 1. Monitoring Cluster Status: +// +// status, stderr, err := PcsStatus(remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) +// resourceStatus, stderr, err := PcsResourceStatus("master-0", remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) +// journal, stderr, err := PcsJournal(remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) +// +// 2. Quorum Recovery Operations: +// +// // Disable STONITH before recovery +// _, _, err := PcsDisableStonith(remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) +// +// // Restore etcd quorum on remote node +// _, _, err := PcsDebugRestart(remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) +// +// // Re-enable STONITH after recovery +// _, _, err := PcsEnableStonith(remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) +// +// 3. 
Node Replacement Operations: +// +// // Remove old node and add replacement +// err := CycleRemovedNode(failedNodeName, failedNodeIP, runningNodeName, runningNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) +// if err != nil { +// return fmt.Errorf("failed to cycle node: %w", err) +// } +// +// // Restore etcd revision on new node +// err = RestoreEtcdRevision(nodeName, remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath, oc) +// if err != nil { +// return fmt.Errorf("failed to restore etcd revision: %w", err) +// } +// +// // Clean up old jobs +// err = DeleteNodeJobs(authJobName, afterSetupJobName, oc) +// if err != nil { +// return fmt.Errorf("failed to delete jobs: %w", err) +// } +// +// STONITH (Shoot The Other Node In The Head): +// +// STONITH is Pacemaker's node-level fencing mechanism that ensures cluster integrity by forcefully +// powering off or isolating unresponsive nodes. During recovery operations, STONITH is typically +// disabled to prevent automatic fencing, then re-enabled after the cluster is stable. +// +// Two-Node Quorum Challenge: +// +// In a two-node cluster, losing one node means losing quorum (majority). If fencing is properly enabled, +// pacemaker will restore quorum automatically by fencing the failed node and restarting the running node +// as a cluster of one. However, in the case that fencing fails, the PcsDebugRestart function can be used to +// bypass normal cluster checks and force etcd to start on the running node, restoring cluster operations +// until the failed node can be recovered or replaced. +// +// All PCS commands are executed on cluster nodes via two-hop SSH connections through a hypervisor, +// using the SSH utilities from this package. 
+package utils + +import ( + "fmt" + "time" + + exutil "github.com/openshift/origin/test/extended/util" + "k8s.io/klog/v2" +) + +// Pacemaker-related constants +const ( + superuserPrefix = "sudo" + pcsExecutable = "pcs" + noEnvVars = "" + + // PCS commands + pcsClusterNodeAdd = "cluster node add %s addr=%s --start --enable" + pcsResourceDebugStop = "resource debug-stop etcd --full" + pcsResourceDebugStartEnvVars = "OCF_RESKEY_CRM_meta_notify_start_resource='etcd'" + pcsResourceDebugStart = "resource debug-start etcd --full" + pcsDisableStonith = "property set stonith-enabled=false" + pcsEnableStonith = "property set stonith-enabled=true" + pcsClusterNodeRemove = "cluster node remove %s" + pcsResourceStatus = "resource status etcd node=%s" + pcsStatus = "status" + + etcdNamespace = "openshift-etcd" + mkdirEtcdDir = "sudo mkdir /var/lib/etcd" + chmodEtcdDir = "sudo chmod %o /var/lib/etcd" + revisionJSONTemplate = `{"clusterId":"0","raftIndex":{"https://%s:%s":0},"maxRaftIndex":0,"created":""}` + etcdDirPermissions = 0766 + etcdFilePermissions = 0644 + etcdPort = "2379" + chmodRevisionJSON = "sudo chmod %o /var/lib/etcd/revision.json" +) + +func formatPcsCommandString(command string, envVars string) string { + if envVars != "" { + return fmt.Sprintf("%s %s %s %s", superuserPrefix, envVars, pcsExecutable, command) + } + + return fmt.Sprintf("%s %s %s", superuserPrefix, pcsExecutable, command) +} + +// PcsDebugRestart restores etcd quorum on a node by performing a debug stop and start. +// This is used in single-node quorum recovery scenarios after a node failure. +// +// The function performs the following operations: +// 1. Stops the etcd resource using "pcs resource debug-stop etcd --full" +// 2. Starts the etcd resource using "pcs resource debug-start etcd --full" with notify metadata +// 3. 
Verifies the operation by checking pacemaker status +// +// This is critical for two-node clusters where losing one node would normally prevent etcd from +// achieving quorum. The debug-start bypasses normal cluster checks to force etcd to start. +// +// Parameters: +// - remoteNodeIP: IP address of the remote node to restore etcd on +// - sshConfig: SSH configuration for connecting to the hypervisor +// - localKnownHostsPath: Path to the known_hosts file for the hypervisor connection +// - remoteKnownHostsPath: Path to the known_hosts file on the hypervisor for the node connection +// +// Returns: +// - string: Command stdout +// - string: Command stderr +// - error: Any error that occurred during the restart operation +func PcsDebugRestart(remoteNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) (string, string, error) { + klog.V(2).Infof("Restoring etcd quorum on remote node: %s", remoteNodeIP) + + // SSH to hypervisor, then to remote node to run pcs debug-start + // We need to chain the SSH commands: host -> hypervisor -> remote node + output, stderr, err := ExecuteRemoteSSHCommand(remoteNodeIP, fmt.Sprintf("%s && %s", formatPcsCommandString(pcsResourceDebugStop, noEnvVars), formatPcsCommandString(pcsResourceDebugStart, pcsResourceDebugStartEnvVars)), sshConfig, localKnownHostsPath, remoteKnownHostsPath) + if err != nil { + klog.ErrorS(err, "Failed to restart etcd", "node", remoteNodeIP, "stderr", stderr) + return output, stderr, err + } + + // Log pacemaker status to check if etcd has been started on the remote node + pcsStatusOutput, stderr, err := PcsStatus(remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) + if err != nil { + klog.Warning("Failed to get pacemaker status on remote node", "node", remoteNodeIP, "error", err) + } else { + klog.V(4).Infof("Pacemaker status on remote node %s:\n%s", remoteNodeIP, pcsStatusOutput) + } + + klog.V(2).Infof("Successfully restored etcd quorum on remote node: %s", 
remoteNodeIP) + return output, stderr, nil +} + +// PcsDebugStart restores etcd quorum on a node by performing a debug start. +// This is used in single-node quorum recovery scenarios after a node failure. +// +// The function performs the following operations: +// 1. Starts the etcd resource using "pcs resource debug-start etcd --full" with notify metadata +// 2. Verifies the operation by checking pacemaker status +// +// This is critical for two-node clusters where losing one node would normally prevent etcd from +// achieving quorum. The debug-start bypasses normal cluster checks to force etcd to start. +// +// Parameters: +// - remoteNodeIP: IP address of the remote node to restore etcd on +// - sshConfig: SSH configuration for connecting to the hypervisor +// - localKnownHostsPath: Path to the known_hosts file for the hypervisor connection +// - remoteKnownHostsPath: Path to the known_hosts file on the hypervisor for the node connection +// +// Returns: +// - string: Command stdout +// - string: Command stderr +// - error: Any error that occurred during the restart operation +func PcsDebugStart(remoteNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) (string, string, error) { + klog.V(2).Infof("Restoring etcd quorum on remote node: %s", remoteNodeIP) + + // SSH to hypervisor, then to remote node to run pcs debug-start + // We need to chain the SSH commands: host -> hypervisor -> remote node + output, stderr, err := ExecuteRemoteSSHCommand(remoteNodeIP, formatPcsCommandString(pcsResourceDebugStart, pcsResourceDebugStartEnvVars), sshConfig, localKnownHostsPath, remoteKnownHostsPath) + if err != nil { + klog.ErrorS(err, "Failed to restart etcd", "node", remoteNodeIP, "stderr", stderr) + return output, stderr, err + } + + // Log pacemaker status to check if etcd has been started on the remote node + pcsStatusOutput, stderr, err := PcsStatus(remoteNodeIP, sshConfig, localKnownHostsPath, remoteKnownHostsPath) + if err != nil { + 
klog.Warning("Failed to get pacemaker status on remote node", "node", remoteNodeIP, "error", err) + } else { + klog.V(4).Infof("Pacemaker status on remote node %s:\n%s", remoteNodeIP, pcsStatusOutput) + } + + klog.V(2).Infof("Successfully restored etcd quorum on remote node: %s", remoteNodeIP) + return output, stderr, nil +} + +// PcsStatus retrieves the overall pacemaker cluster status. +// This shows the state of all cluster resources, nodes, and any failures. +func PcsStatus(remoteNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) (string, string, error) { + return ExecuteRemoteSSHCommand(remoteNodeIP, formatPcsCommandString(pcsStatus, noEnvVars), sshConfig, localKnownHostsPath, remoteKnownHostsPath) +} + +// PcsResourceStatus retrieves the status of a specific pacemaker resource (etcd) on a node. +// This is more targeted than PcsStatus and shows whether the etcd resource is started/stopped. +func PcsResourceStatus(nodeName, remoteNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) (string, string, error) { + return ExecuteRemoteSSHCommand(remoteNodeIP, formatPcsCommandString(fmt.Sprintf(pcsResourceStatus, nodeName), noEnvVars), sshConfig, localKnownHostsPath, remoteKnownHostsPath) +} + +// PcsDisableStonith disables STONITH (Shoot The Other Node In The Head) in the pacemaker cluster. +// This is typically done during maintenance or recovery operations to prevent automatic fencing. +func PcsDisableStonith(remoteNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) (string, string, error) { + return ExecuteRemoteSSHCommand(remoteNodeIP, formatPcsCommandString(pcsDisableStonith, noEnvVars), sshConfig, localKnownHostsPath, remoteKnownHostsPath) +} + +// PcsEnableStonith re-enables STONITH in the pacemaker cluster after maintenance is complete. +// STONITH provides node-level fencing to ensure cluster integrity. 
+func PcsEnableStonith(remoteNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) (string, string, error) { + return ExecuteRemoteSSHCommand(remoteNodeIP, formatPcsCommandString(pcsEnableStonith, noEnvVars), sshConfig, localKnownHostsPath, remoteKnownHostsPath) +} + +// PcsJournal retrieves the last pcsJournalTailLines lines of the pacemaker systemd journal logs. +// This is useful for debugging pacemaker behavior and troubleshooting cluster issues. +func PcsJournal(pcsJournalTailLines int, remoteNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) (string, string, error) { + return ExecuteRemoteSSHCommand(remoteNodeIP, fmt.Sprintf("sudo journalctl -u pacemaker --no-pager | grep podman-etcd | tail -n %d", pcsJournalTailLines), sshConfig, localKnownHostsPath, remoteKnownHostsPath) +} + +// RestoreEtcdRevision restores the etcd revision.json file on a replacement node and triggers etcd redeployment. +// This is a critical step in node replacement to ensure the new node can join the etcd cluster correctly. +// +// The function performs the following steps: +// 1. Creates /var/lib/etcd directory on the new node +// 2. Sets appropriate permissions on the directory (0766) +// 3. Creates revision.json with cluster metadata pointing to the new node's IP +// 4. Sets file permissions on revision.json (0644) +// 5. 
Triggers an etcd redeployment via the etcd operator using forceRedeploymentReason +// +// Parameters: +// - nodeName: Name of the replacement OpenShift node (unused but kept for clarity) +// - remoteNodeIP: IP address of the replacement node +// - sshConfig: SSH configuration for connecting to the hypervisor +// - localKnownHostsPath: Path to the known_hosts file for the hypervisor connection +// - remoteKnownHostsPath: Path to the known_hosts file on the hypervisor for the node connection +// - oc: OpenShift CLI client for patching the etcd operator +// +// Returns: +// - error: Any error encountered during revision file creation or etcd redeployment +func RestoreEtcdRevision(nodeName, remoteNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string, oc *exutil.CLI) error { + // Create the revision.json file on the new node using constants + revisionScript := fmt.Sprintf(` + %s + %s + echo '%s' | sudo tee -a /var/lib/etcd/revision.json + %s + `, mkdirEtcdDir, fmt.Sprintf(chmodEtcdDir, etcdDirPermissions), fmt.Sprintf(revisionJSONTemplate, remoteNodeIP, etcdPort), fmt.Sprintf(chmodRevisionJSON, etcdFilePermissions)) + + _, _, err := ExecuteRemoteSSHCommand(remoteNodeIP, revisionScript, sshConfig, localKnownHostsPath, remoteKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to create etcd revision.json on node %s: %w", remoteNodeIP, err) + } + + // Redeploy etcd with a force redeployment reason + forceRedeploymentReason := fmt.Sprintf("recovery-%s", time.Now().Format(time.RFC3339Nano)) + _, err = oc.AsAdmin().Run("patch").Args("etcd", "cluster", "-p", fmt.Sprintf(`{"spec": {"forceRedeploymentReason": "%s"}}`, forceRedeploymentReason), "--type=merge").Output() + if err != nil { + return fmt.Errorf("failed to trigger etcd redeployment: %w", err) + } + + klog.V(2).Infof("Successfully restored etcd revision on node %s and triggered redeployment", remoteNodeIP) + return nil +} + +// CycleRemovedNode removes and re-adds a node in the 
pacemaker cluster configuration. +// This is necessary when replacing a failed node to update the cluster membership. +// +// The function executes two pcs commands on the remote node: +// 1. "pcs cluster node remove <node-name>" - removes the old/failed node +// 2. "pcs cluster node add <node-name> addr=<node-ip> --start --enable" - adds the replacement node +// +// Parameters: +// - failedNodeName: Name of the failed node to remove and re-add (the replacement keeps the same name) +// - failedNodeIP: IP address under which the node is re-added +// - runningNodeIP: IP address of the remote node where commands are executed +// - sshConfig: SSH configuration for connecting to the hypervisor +// - localKnownHostsPath: Path to the known_hosts file for the hypervisor connection +// - remoteKnownHostsPath: Path to the known_hosts file on the hypervisor for the node connection +// +// Returns: +// - error: Any error encountered during node removal or addition +func CycleRemovedNode(failedNodeName, failedNodeIP, runningNodeIP string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) error { + // Remove and re-add the node in pacemaker using constants + pcsScript := fmt.Sprintf(`
 + %s + %s + `, + formatPcsCommandString(fmt.Sprintf(pcsClusterNodeRemove, failedNodeName), noEnvVars), + formatPcsCommandString(fmt.Sprintf(pcsClusterNodeAdd, failedNodeName, failedNodeIP), noEnvVars), + ) + + _, _, err := ExecuteRemoteSSHCommand(runningNodeIP, pcsScript, sshConfig, localKnownHostsPath, remoteKnownHostsPath) + if err != nil { + return fmt.Errorf("failed to cycle node %s in pacemaker cluster: %w", failedNodeName, err) + } + + klog.V(2).Infof("Successfully cycled node %s in pacemaker cluster", failedNodeName) + return nil +} + +// DeleteNodeJobs deletes TNF (Two Node Federation) related jobs for node authentication and setup. +// These jobs need to be cleaned up during node replacement to allow new jobs to be created. 
+// +// Parameters: +// - authJobName: Name of the TNF authentication job to delete (e.g., "tnf-auth-job-master-0") +// - afterSetupJobName: Name of the TNF after-setup job to delete (e.g., "tnf-after-setup-job-master-0") +// - oc: OpenShift CLI client for deleting the jobs +// +// Returns: +// - error: Any error encountered during job deletion +func DeleteNodeJobs(authJobName, afterSetupJobName string, oc *exutil.CLI) error { + // Delete the old tnf-auth-job using dynamic name + _, err := oc.AsAdmin().Run("delete").Args("job", authJobName, "-n", etcdNamespace).Output() + if err != nil { + return fmt.Errorf("failed to delete job %s: %w", authJobName, err) + } + klog.V(2).Infof("Deleted job %s", authJobName) + + // Delete the old tnf-after-setup-job using dynamic name + _, err = oc.AsAdmin().Run("delete").Args("job", afterSetupJobName, "-n", etcdNamespace).Output() + if err != nil { + return fmt.Errorf("failed to delete job %s: %w", afterSetupJobName, err) + } + klog.V(2).Infof("Deleted job %s", afterSetupJobName) + + return nil +} + +// RetryOperationWithTimeout retries an operation until it succeeds or times out. +// This is a general-purpose retry utility used throughout the two-node test utilities. +// +// The function polls the operation at regular intervals until either: +// - The operation succeeds (returns nil error) +// - The timeout is exceeded +// +// This is useful for operations that may fail temporarily due to cluster state transitions, +// API server unavailability, or resource propagation delays. 
+// +// Parameters: +// - operation: Function to execute that returns an error (nil on success) +// - timeout: Maximum time to wait for the operation to succeed +// - pollInterval: Time to wait between retry attempts +// - operationName: Descriptive name for logging purposes +// +// Returns: +// - error: nil if operation succeeded, timeout error if it failed within the timeout period +// +// Example: +// +// err := RetryOperationWithTimeout(func() error { +// _, err := oc.AsAdmin().Run("get").Args("node", "master-0").Output() +// return err +// }, 5*time.Minute, 10*time.Second, "get node master-0") +func RetryOperationWithTimeout(operation func() error, timeout, pollInterval time.Duration, operationName string) error { + startTime := time.Now() + + for time.Since(startTime) < timeout { + err := operation() + if err == nil { + klog.V(2).Infof("Operation %s succeeded after %v", operationName, time.Since(startTime)) + return nil + } + + klog.V(4).Infof("Operation %s failed, retrying in %v: %v", operationName, pollInterval, err) + time.Sleep(pollInterval) + } + + return fmt.Errorf("operation %s failed after %v timeout", operationName, timeout) +} diff --git a/test/extended/two_node/utils/ssh.go b/test/extended/two_node/utils/ssh.go new file mode 100644 index 000000000000..20cb0dd9e7d1 --- /dev/null +++ b/test/extended/two_node/utils/ssh.go @@ -0,0 +1,305 @@ +// Package utils provides SSH utilities for remote command execution in two-node cluster tests. +// +// Supports direct SSH connections (local → hypervisor) and two-hop connections (local → hypervisor → node). 
+// +// Usage example: +// +// // Prepare known_hosts files +// localKnownHostsPath, err := PrepareLocalKnownHostsFile(hypervisorConfig) +// remoteKnownHostsPath, err := PrepareRemoteKnownHostsFile(remoteNodeIP, hypervisorConfig, localKnownHostsPath) +// +// // Execute commands +// output, stderr, err := ExecuteSSHCommand("virsh list --all", hypervisorConfig, localKnownHostsPath) +// output, stderr, err := ExecuteRemoteSSHCommand(remoteNodeIP, "oc get nodes", hypervisorConfig, localKnownHostsPath, remoteKnownHostsPath) +// +// // Cleanup +// CleanupRemoteKnownHostsFile(hypervisorConfig, localKnownHostsPath, remoteKnownHostsPath) +// CleanupLocalKnownHostsFile(hypervisorConfig, localKnownHostsPath) +package utils + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "strings" + "time" + + "k8s.io/klog/v2" +) + +// SSHConfig contains the configuration needed to establish SSH connections to remote hosts +type SSHConfig struct { + IP string // IP address of the remote host + User string // SSH username for authentication + PrivateKeyPath string // Path to the SSH private key file +} + +// SSH-related constants +const ( + // SSH command patterns + sshStrictHostKeyChecking = "StrictHostKeyChecking=no" + userKnownHostsFile = "UserKnownHostsFile" + sshKeyscanCommand = "ssh-keyscan" + sshConnectivityTest = "echo 'SSH connectivity test successful'" + + // Startup operation timeouts and intervals + vmStartTimeout = 2 * time.Minute // Maximum time to wait for VM startup + vmStartPollInterval = 15 * time.Second // Interval between VM state checks + + // File paths + knownHostsTempPrefix = "known_hosts_" // Prefix for temporary known_hosts files + remoteInfix = "remote_" // Infix for remote known_hosts files +) + +// PrepareLocalKnownHostsFile creates a temporary known_hosts file and scans the SSH host key. +// This prevents "permanently added" warnings that cause SSH commands to fail. 
+// +// Parameters: +// - sshConfig: SSH configuration for the host to scan +// +// Returns: +// - string: Path to the created temporary known_hosts file +// - error: Any error that occurred during file creation or host key scanning +func PrepareLocalKnownHostsFile(sshConfig *SSHConfig) (string, error) { + klog.V(2).Infof("Preparing local known_hosts file for %q", sshConfig.IP) + + // Create a temporary known hosts file + tempFile, err := os.CreateTemp("", knownHostsTempPrefix+"*") + if err != nil { + klog.ErrorS(err, "Failed to create temporary known_hosts file") + return "", err + } + + knownHostsPath := tempFile.Name() + tempFile.Close() + + // Use ssh-keyscan to get the host key and add it to our known hosts file + keyscanCmd := exec.Command(sshKeyscanCommand, "-H", sshConfig.IP) + keyscanOutput, err := keyscanCmd.Output() + if err != nil { + klog.ErrorS(err, "Failed to scan host key", "host", sshConfig.IP) + return "", err + } + + // Write the host key to our known hosts file with secure permissions (0600) + err = os.WriteFile(knownHostsPath, []byte(keyscanOutput), 0600) + if err != nil { + klog.ErrorS(err, "Failed to write known_hosts file") + return "", err + } + + klog.V(2).Infof("Successfully created local known_hosts file: %q", knownHostsPath) + return knownHostsPath, nil +} + +// PrepareRemoteKnownHostsFile creates a known_hosts file on the proxy node for accessing the remote node. +// Used for two-hop SSH connections (local → proxy → remote). 
+// +// Parameters: +// - remoteNodeIP: IP address of the remote node to scan +// - proxyNodeSSHConfig: SSH configuration for the proxy node (hypervisor) +// - localKnownHostsPath: Path to the local known_hosts file for connecting to the proxy node +// +// Returns: +// - string: Path to the created remote known_hosts file on the proxy node +// - error: Any error that occurred during file creation or host key scanning +func PrepareRemoteKnownHostsFile(remoteNodeIP string, proxyNodeSSHConfig *SSHConfig, localKnownHostsPath string) (string, error) { + klog.V(2).Infof("Preparing remote known_hosts file on proxy node %q for remote node %q", proxyNodeSSHConfig.IP, remoteNodeIP) + + // Create a temporary known hosts file on the proxy node for the remote node + knownHostsPath := fmt.Sprintf("/tmp/%s%s%s", knownHostsTempPrefix, remoteInfix, remoteNodeIP) + + // Use ssh-keyscan on the proxy node to get the remote node's host key and create the file + // Capture stderr for logging instead of suppressing it + keyscanCmd := fmt.Sprintf(`ssh-keyscan -H %s`, remoteNodeIP) + keyscanOutput, stderr, err := ExecuteSSHCommand(keyscanCmd, proxyNodeSSHConfig, localKnownHostsPath) + if err != nil { + klog.ErrorS(err, "Failed to scan host key for remote node", "remoteNode", remoteNodeIP, "stderr", stderr) + return "", err + } + + // Log any warnings from ssh-keyscan + if stderr != "" { + klog.V(4).Infof("ssh-keyscan warnings for %s: %s", remoteNodeIP, stderr) + } + + // Create the known hosts file on the proxy node with secure permissions + createKnownHostsCmd := fmt.Sprintf(`echo '%s' > %s && chmod 600 %s`, strings.TrimSpace(keyscanOutput), knownHostsPath, knownHostsPath) + _, _, err = ExecuteSSHCommand(createKnownHostsCmd, proxyNodeSSHConfig, localKnownHostsPath) + if err != nil { + klog.ErrorS(err, "Failed to create known_hosts file on proxy node") + return "", err + } + + klog.V(2).Infof("Successfully created remote known_hosts file: %q", knownHostsPath) + return knownHostsPath, nil 
+} + +// ExecuteSSHCommand executes a command on a remote host via SSH. +// +// Parameters: +// - command: The command to execute on the remote host +// - sshConfig: SSH configuration for the remote host +// - knownHostsPath: Path to the known_hosts file to use for the connection +// +// Returns: +// - string: Standard output from the command +// - string: Standard error from the command +// - error: Any error that occurred (only non-zero exit codes are treated as errors) +func ExecuteSSHCommand(command string, sshConfig *SSHConfig, knownHostsPath string) (string, string, error) { + // Build the SSH command to run directly on the host + sshArgs := []string{ + "-i", sshConfig.PrivateKeyPath, + "-o", sshStrictHostKeyChecking, + "-o", fmt.Sprintf("%s=%s", userKnownHostsFile, knownHostsPath), + fmt.Sprintf("%s@%s", sshConfig.User, sshConfig.IP), + command, + } + + // Log the SSH command being executed + klog.V(4).Infof("Executing SSH command on %q: ssh %s", sshConfig.IP, strings.Join(sshArgs, " ")) + + // Execute SSH command directly on the host + cmd := exec.Command("ssh", sshArgs...) 
+ + // Capture stdout and stderr separately + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + + // Log the output for debugging (debug level) + if stdout.Len() > 0 { + klog.V(5).Infof("SSH stdout: %q", stdout.String()) + } + if stderr.Len() > 0 { + klog.V(5).Infof("SSH stderr: %q", stderr.String()) + } + + // Only treat non-zero exit codes as errors + // stderr may contain warnings or informational messages that don't indicate failure + if err != nil { + klog.ErrorS(err, "SSH command failed", "host", sshConfig.IP, "stderr", stderr.String()) + return stdout.String(), stderr.String(), fmt.Errorf("SSH command failed: %v, stderr: %q, stdout: %q", err, stderr.String(), stdout.String()) + } + + klog.V(4).Infof("SSH command completed successfully on %q", sshConfig.IP) + return stdout.String(), stderr.String(), nil +} + +// ExecuteRemoteSSHCommand executes a command on an OpenShift node via two-hop SSH (local → hypervisor → node). +// Uses 'core' user for the node connection. 
+// +// Parameters: +// - remoteNodeIP: IP address of the remote node to execute the command on +// - command: The command to execute on the remote node +// - sshConfig: SSH configuration for the proxy node (hypervisor) +// - localKnownHostsPath: Path to the local known_hosts file for connecting to the proxy node +// - remoteKnownHostsPath: Path to the remote known_hosts file on the proxy node for connecting to the remote node +// +// Returns: +// - string: Standard output from the command +// - string: Standard error from the command +// - error: Any error that occurred during command execution +func ExecuteRemoteSSHCommand(remoteNodeIP, command string, sshConfig *SSHConfig, localKnownHostsPath, remoteKnownHostsPath string) (string, string, error) { + // Build the nested SSH command that will be executed on the hypervisor to reach the node + // This creates: ssh -i key -o options -o UserKnownHostsFile= core@remoteNodeIP 'command' + nestedSSHCommand := fmt.Sprintf("ssh -o %s -o %s=%s core@%s '%s'", + sshStrictHostKeyChecking, + userKnownHostsFile, + remoteKnownHostsPath, + remoteNodeIP, + strings.ReplaceAll(command, "'", "'\\''"), // Escape single quotes in the command + ) + + // Log the full two-hop SSH command being executed + klog.V(4).Infof("Executing two-hop SSH command to node %q via hypervisor %q", remoteNodeIP, sshConfig.IP) + + // Execute the nested SSH command on the hypervisor (which will SSH to the node) + stdout, stderr, err := ExecuteSSHCommand(nestedSSHCommand, sshConfig, localKnownHostsPath) + if err != nil { + klog.ErrorS(err, "Remote SSH command to node failed", "node", remoteNodeIP, "stderr", stderr, "stdout", stdout) + } else { + klog.V(4).Infof("Successfully executed command on remote node %q", remoteNodeIP) + } + + return stdout, stderr, err +} + +// CleanupRemoteKnownHostsFile removes the temporary known_hosts file from the proxy node. +// Errors are logged but not critical. 
+//
+// Parameters:
+// - sshConfig: SSH configuration for the proxy node
+// - localKnownHostsPath: Path to the local known_hosts file for connecting to the proxy node
+// - remoteKnownHostsPath: Path to the remote known_hosts file on the proxy node to remove
+//
+// Returns:
+// - error: Any error that occurred during cleanup (logged as warning, not critical)
+func CleanupRemoteKnownHostsFile(sshConfig *SSHConfig, localKnownHostsPath string, remoteKnownHostsPath string) error {
+	// Clean up the known hosts file on the proxy node (while we still have connectivity)
+	if remoteKnownHostsPath == "" {
+		klog.V(2).Info("No remote known_hosts file to clean up")
+		return nil
+	}
+
+	klog.V(2).Infof("Cleaning up remote known_hosts file: %q", remoteKnownHostsPath)
+
+	// Clean up the known hosts file on the proxy node
+	_, _, err := ExecuteSSHCommand(fmt.Sprintf("rm -f %s", remoteKnownHostsPath), sshConfig, localKnownHostsPath)
+	if err != nil {
+		klog.Warningf("Failed to clean up remote known_hosts file: %v", err)
+		return err
+	}
+
+	klog.V(2).Info("Successfully cleaned up remote known_hosts file")
+	return nil
+}
+
+// CleanupLocalKnownHostsFile removes the temporary local known hosts file.
+// This should be called after completing operations that required the local known_hosts file.
+//
+// The function performs a non-critical cleanup operation. If the cleanup fails, it logs a warning
+// but does not fail the test, as the temporary file will eventually be cleaned up by the system.
+// +// Parameters: +// - sshConfig: SSH configuration (used for logging context) +// - knownHostsPath: Path to the local known_hosts file to remove +// +// Returns: +// - error: Any error that occurred during cleanup (logged as warning, not critical) +func CleanupLocalKnownHostsFile(sshConfig *SSHConfig, knownHostsPath string) error { + // Clean up the local known hosts file + if knownHostsPath == "" { + klog.V(2).Info("No local known_hosts file to clean up") + return nil + } + + klog.V(2).Infof("Cleaning up local known_hosts file: %q", knownHostsPath) + + err := os.Remove(knownHostsPath) + if err != nil { + klog.Warning("Failed to clean up local known_hosts file", "error", err) + return err + } + + klog.V(2).Info("Successfully cleaned up local known_hosts file") + return nil +} + +// VerifyConnectivity tests SSH connectivity to a remote host by executing a simple echo command. +// This is useful for verifying that SSH is properly configured before attempting more complex operations. +// +// Parameters: +// - sshConfig: SSH configuration for the host to test connectivity to +// - knownHostsPath: Path to the known_hosts file to use for the connection +// +// Returns: +// - string: Standard output from the connectivity test command +// - string: Standard error from the connectivity test command +// - error: Any error that occurred during the connectivity test +func VerifyConnectivity(sshConfig *SSHConfig, knownHostsPath string) (string, string, error) { + return ExecuteSSHCommand(sshConnectivityTest, sshConfig, knownHostsPath) +} diff --git a/test/extended/two_node/utils/virsh.go b/test/extended/two_node/utils/virsh.go new file mode 100644 index 000000000000..f250aa2e87db --- /dev/null +++ b/test/extended/two_node/utils/virsh.go @@ -0,0 +1,676 @@ +// Package utils provides libvirt/virsh utilities for managing virtual machines in two-node cluster testing. 
+// +// This package enables VM lifecycle management, inspection, and configuration through the virsh +// command-line tool. It supports operations on remote hypervisors via SSH, making it suitable +// for test environments where VMs are managed on a separate hypervisor host. +// +// Key Features: +// - VM lifecycle operations (define, start, stop, destroy, autostart) +// - VM inspection (list VMs, get UUID, dump XML configuration) +// - XML parsing for extracting network configuration (MAC addresses, bridges) +// - VM discovery by MAC address correlation +// - VM recreation from saved XML configurations +// - Wait utilities for VM state transitions +// +// Error Handling: +// +// All functions return errors instead of using assertions. Virsh command failures, +// XML parsing errors, and timeout conditions are returned as errors for the calling +// code to handle appropriately. +// +// Common Usage Patterns: +// +// 1. Listing and Inspecting VMs: +// +// vms, err := VirshListAllVMs(sshConfig, knownHostsPath) +// uuid, err := VirshGetVMUUID("master-0", sshConfig, knownHostsPath) +// xml, err := VirshDumpXML("master-0", sshConfig, knownHostsPath) +// +// 2. VM Lifecycle Management: +// +// err := VirshStartVM("master-0", sshConfig, knownHostsPath) +// err := WaitForVMToStart("master-0", sshConfig, knownHostsPath) +// err := VirshDestroyVM("master-0", sshConfig, knownHostsPath) +// err := VirshUndefineVM("master-0", sshConfig, knownHostsPath) +// +// 3. VM Network Configuration: +// +// mac, err := ExtractMACAddressFromXML(xmlContent, "ostestbm") +// vmName, err := GetVMNameByMACMatch("master-0", "52:54:00:12:34:56", "ostestpr", sshConfig, knownHostsPath) +// uuid, mac, err := GetVMNetworkInfo("master-0", "ostestpr", sshConfig, knownHostsPath) +// +// 4. VM Recovery Operations: +// +// err := RecreateVMFromXML("master-0", xmlContent, sshConfig, knownHostsPath) +// +// All virsh commands are executed on a remote hypervisor via SSH. 
The functions in this package +// wrap the low-level SSH utilities from this package to provide a higher-level API for VM management. +// +// Retry Utilities: +// +// Some operations like WaitForVMToStart use the RetryOperationWithTimeout utility from the +// pacemaker utilities package to handle transient failures and wait for state transitions. +// +// XML Parsing: +// +// The package includes structures for parsing libvirt domain XML, focusing on network configuration. +// The Domain, Devices, Interface, MAC, and Source types map to libvirt XML elements and enable +// programmatic extraction of VM configuration details. +package utils + +import ( + "encoding/xml" + "fmt" + "strings" + + "k8s.io/klog/v2" +) + +// Domain represents a libvirt domain (virtual machine) configuration +// It maps to the root element in libvirt XML +type Domain struct { + XMLName xml.Name `xml:"domain"` + Name string `xml:"name"` + UUID string `xml:"uuid"` + Devices Devices `xml:"devices"` +} + +// Devices contains the hardware devices attached to a VM +type Devices struct { + Interfaces []Interface `xml:"interface"` +} + +// Interface represents a network interface configuration in libvirt XML +type Interface struct { + Type string `xml:"type,attr"` + MAC MAC `xml:"mac"` + Source Source `xml:"source"` +} + +// MAC contains the MAC address of a network interface +type MAC struct { + Address string `xml:"address,attr"` +} + +// Source specifies the network source (bridge, network, etc) for an interface +type Source struct { + Bridge string `xml:"bridge,attr"` +} + +// Constants for virsh commands +const ( + virshCommand = "virsh" + virshListAllName = "list --all --name" + virshConnectionOption = "-c qemu:///system" +) + +// VerifyVirsh checks if virsh is available and working on the target host +// by executing 'virsh version' command +// +// Parameters: +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// 
Returns: +// - string: The virsh version output +// - error: Any error that occurred during the check +func VerifyVirsh(sshConfig *SSHConfig, knownHostsPath string) (string, error) { + klog.V(4).Infof("VerifyVirsh: Checking virsh availability on %s", sshConfig.IP) + output, err := VirshCommand("version", sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "VerifyVirsh failed", "host", sshConfig.IP) + } else { + klog.V(2).Infof("VerifyVirsh: Success - %s", output) + } + return output, err +} + +// VirshCommand executes a virsh command on the remote hypervisor via SSH +// +// Parameters: +// - command: The virsh command to execute (without 'virsh' prefix) +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - string: The command output +// - error: Any error that occurred during execution +func VirshCommand(command string, sshConfig *SSHConfig, knownHostsPath string) (string, error) { + fullCommand := fmt.Sprintf("%s %s %s", virshCommand, virshConnectionOption, command) + klog.V(4).Infof("VirshCommand: Executing '%s' on %s", fullCommand, sshConfig.IP) + output, _, err := ExecuteSSHCommand(fullCommand, sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "VirshCommand failed", "command", fullCommand, "host", sshConfig.IP) + } else { + klog.V(4).Infof("VirshCommand: Success - output length: %d bytes", len(output)) + } + return output, err +} + +// VirshDumpXML retrieves the XML configuration of a VM +// +// Parameters: +// - vmName: Name of the VM to dump XML for +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - string: The VM's XML configuration +// - error: Any error that occurred during retrieval +func VirshDumpXML(vmName string, sshConfig *SSHConfig, knownHostsPath string) (string, error) { + klog.V(4).Infof("VirshDumpXML: Getting XML for VM '%s'", 
vmName)
+	output, err := VirshCommand(fmt.Sprintf("dumpxml %s", vmName), sshConfig, knownHostsPath)
+	if err != nil {
+		klog.ErrorS(err, "VirshDumpXML failed", "vm", vmName)
+	} else {
+		klog.V(4).Infof("VirshDumpXML: Success for VM '%s' - XML length: %d bytes", vmName, len(output))
+	}
+	return output, err
+}
+
+// VirshListAllVMs lists all VMs (running and stopped) on the hypervisor
+//
+// Parameters:
+// - sshConfig: SSH configuration for connecting to the hypervisor
+// - knownHostsPath: Path to the known_hosts file for SSH
+//
+// Returns:
+// - string: Newline-separated list of VM names
+// - error: Any error that occurred during listing
+func VirshListAllVMs(sshConfig *SSHConfig, knownHostsPath string) (string, error) {
+	klog.V(4).Infof("VirshListAllVMs: Listing all VMs on %s", sshConfig.IP)
+	output, err := VirshCommand(virshListAllName, sshConfig, knownHostsPath)
+	if err != nil {
+		klog.ErrorS(err, "VirshListAllVMs failed", "host", sshConfig.IP)
+	} else {
+		vmCount := len(strings.Fields(output))
+		klog.V(2).Infof("VirshListAllVMs: Found %d VMs", vmCount)
+	}
+	return output, err
+}
+
+// VirshVMExists checks if a VM with the given name exists on the hypervisor
+//
+// Parameters:
+// - vmName: Name of the VM to check
+// - sshConfig: SSH configuration for connecting to the hypervisor
+// - knownHostsPath: Path to the known_hosts file for SSH
+//
+// Returns:
+// - string: Command output (empty if VM doesn't exist)
+// - error: Error if VM doesn't exist or command fails
+func VirshVMExists(vmName string, sshConfig *SSHConfig, knownHostsPath string) (string, error) {
+	klog.V(4).Infof("VirshVMExists: Checking if VM '%s' exists", vmName)
+	output, err := VirshCommand(fmt.Sprintf("%s | grep -Fxq %s", virshListAllName, vmName), sshConfig, knownHostsPath)
+	if err != nil {
+		klog.V(4).Infof("VirshVMExists: VM '%s' does not exist or grep failed - %v", vmName, err)
+	} else {
+		klog.V(2).Infof("VirshVMExists: VM '%s' exists", vmName)
+	}
+	return output, err
+}
+ +// VirshGetVMUUID retrieves the UUID of a VM +// +// Parameters: +// - vmName: Name of the VM to get UUID for +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - string: The VM's UUID (trimmed of whitespace) +// - error: Any error that occurred during retrieval +func VirshGetVMUUID(vmName string, sshConfig *SSHConfig, knownHostsPath string) (string, error) { + klog.V(4).Infof("VirshGetVMUUID: Getting UUID for VM '%s'", vmName) + output, err := VirshCommand(fmt.Sprintf("domuuid %s", vmName), sshConfig, knownHostsPath) + uuid := strings.TrimSpace(output) + if err != nil { + klog.ErrorS(err, "VirshGetVMUUID failed", "vm", vmName) + } else { + klog.V(2).Infof("VirshGetVMUUID: VM '%s' has UUID: %s", vmName, uuid) + } + return uuid, err +} + +// VirshUndefineVM undefines (removes the configuration of) a VM +// Note: This does not delete the VM's disk images, only the libvirt configuration +// +// Parameters: +// - vmName: Name of the VM to undefine +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - error: Any error that occurred during undefine operation +func VirshUndefineVM(vmName string, sshConfig *SSHConfig, knownHostsPath string) error { + klog.V(2).Infof("VirshUndefineVM: Undefining VM '%s' (including NVRAM)", vmName) + _, err := VirshCommand(fmt.Sprintf("undefine %s --nvram", vmName), sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "VirshUndefineVM failed", "vm", vmName) + } else { + klog.V(2).Infof("VirshUndefineVM: Successfully undefined VM '%s'", vmName) + } + return err +} + +// VirshDestroyVM forcefully stops (destroys) a running VM +// This is equivalent to pulling the power plug on a physical machine +// +// Parameters: +// - vmName: Name of the VM to destroy +// - sshConfig: SSH configuration for connecting to the hypervisor +// - 
knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - error: Any error that occurred during destroy operation +func VirshDestroyVM(vmName string, sshConfig *SSHConfig, knownHostsPath string) error { + klog.V(2).Infof("VirshDestroyVM: Forcefully stopping VM '%s'", vmName) + _, err := VirshCommand(fmt.Sprintf("destroy %s", vmName), sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "VirshDestroyVM failed", "vm", vmName) + } else { + klog.V(2).Infof("VirshDestroyVM: Successfully destroyed VM '%s'", vmName) + } + return err +} + +// VirshDefineVM defines (registers) a new VM from an XML configuration file +// +// Parameters: +// - xmlFilePath: Path to the XML file on the hypervisor containing VM configuration +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - error: Any error that occurred during define operation +func VirshDefineVM(xmlFilePath string, sshConfig *SSHConfig, knownHostsPath string) error { + klog.V(2).Infof("VirshDefineVM: Defining VM from XML file '%s'", xmlFilePath) + _, err := VirshCommand(fmt.Sprintf("define %s", xmlFilePath), sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "VirshDefineVM failed", "xmlFile", xmlFilePath) + } else { + klog.V(2).Infof("VirshDefineVM: Successfully defined VM from '%s'", xmlFilePath) + } + return err +} + +// VirshStartVM starts a defined VM +// +// Parameters: +// - vmName: Name of the VM to start +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - error: Any error that occurred during start operation +func VirshStartVM(vmName string, sshConfig *SSHConfig, knownHostsPath string) error { + klog.V(2).Infof("VirshStartVM: Starting VM '%s'", vmName) + _, err := VirshCommand(fmt.Sprintf("start %s", vmName), sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, 
"VirshStartVM failed", "vm", vmName) + } else { + klog.V(2).Infof("VirshStartVM: Successfully started VM '%s'", vmName) + } + return err +} + +// VirshAutostartVM enables autostart for a VM (starts automatically on hypervisor boot) +// +// Parameters: +// - vmName: Name of the VM to enable autostart for +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - error: Any error that occurred during autostart enable operation +func VirshAutostartVM(vmName string, sshConfig *SSHConfig, knownHostsPath string) error { + klog.V(2).Infof("VirshAutostartVM: Enabling autostart for VM '%s'", vmName) + _, err := VirshCommand(fmt.Sprintf("autostart %s", vmName), sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "VirshAutostartVM failed", "vm", vmName) + } else { + klog.V(2).Infof("VirshAutostartVM: Successfully enabled autostart for VM '%s'", vmName) + } + return err +} + +// ExtractIPFromVMXML attempts to extract the IP address for a VM from its XML configuration +// Note: This typically does not work as IP addresses are usually assigned dynamically by DHCP +// and are not stored in the domain XML. This function is kept for reference but may need +// to be replaced with a different IP discovery mechanism (e.g., checking DHCP leases). 
+// +// Parameters: +// - xmlContent: The VM's XML configuration as a string +// - networkName: The name of the network bridge to find the interface for +// +// Returns: +// - string: The IP address (typically empty as IPs aren't stored in XML) +// - error: Error indicating IP addresses are not in domain XML or parsing failed +func ExtractIPFromVMXML(xmlContent, networkName string) (string, error) { + klog.V(4).Infof("ExtractIPFromVMXML: Attempting to extract IP for network '%s'", networkName) + + var domain Domain + err := xml.Unmarshal([]byte(xmlContent), &domain) + if err != nil { + klog.ErrorS(err, "ExtractIPFromVMXML failed to parse domain XML") + return "", fmt.Errorf("failed to parse domain XML: %v", err) + } + + klog.V(4).Infof("ExtractIPFromVMXML: Parsed domain '%s', checking %d interfaces", domain.Name, len(domain.Devices.Interfaces)) + + // Look for the interface with the specified network + for _, iface := range domain.Devices.Interfaces { + klog.V(4).Infof("ExtractIPFromVMXML: Checking interface with bridge '%s'", iface.Source.Bridge) + if iface.Source.Bridge == networkName { + // Note: IP addresses are typically not stored in the domain XML + // They are assigned dynamically by DHCP. This function might need + // to be updated to get IP from a different source. + klog.Warningf("Found interface for network '%s', but IPs are not in domain XML", networkName) + klog.V(2).Infof("Found interface for network %s, but IP addresses are not stored in domain XML", networkName) + return "", fmt.Errorf("interface found for network %s, but IP addresses are not stored in domain XML", networkName) + } + } + + klog.Warningf("No interface found for network '%s'", networkName) + return "", fmt.Errorf("no interface found for network %s", networkName) +} + +// ExtractMACAddressFromXML extracts the MAC address for a specific network bridge from VM XML. 
+// This parses the libvirt domain XML to find the network interface attached to the specified
+// bridge and returns its MAC address.
+//
+// The function is commonly used to:
+// - Correlate VMs with OpenShift nodes by matching MAC addresses
+// - Retrieve network configuration for node replacement operations
+// - Discover VM network topology
+//
+// Parameters:
+// - xmlContent: The VM's XML configuration as a string (from virsh dumpxml)
+// - networkBridge: The name of the network bridge to find the MAC address for (e.g., "ostestbm", "ostestpr")
+//
+// Returns:
+// - string: The MAC address in standard format (e.g., "52:54:00:12:34:56")
+// - error: Error if parsing fails or no interface is found on the specified bridge
+func ExtractMACAddressFromXML(xmlContent string, networkBridge string) (string, error) {
+	klog.V(4).Infof("ExtractMACAddressFromXML: Extracting MAC for bridge '%s'", networkBridge)
+
+	var domain Domain
+	err := xml.Unmarshal([]byte(xmlContent), &domain)
+	if err != nil {
+		klog.ErrorS(err, "ExtractMACAddressFromXML failed to parse domain XML")
+		return "", fmt.Errorf("failed to parse domain XML: %v", err)
+	}
+
+	klog.V(4).Infof("ExtractMACAddressFromXML: Parsed domain '%s', checking %d interfaces", domain.Name, len(domain.Devices.Interfaces))
+
+	// Look for the interface attached to the requested bridge
+	for _, iface := range domain.Devices.Interfaces {
+		klog.V(4).Infof("ExtractMACAddressFromXML: Checking interface with bridge '%s', MAC '%s'", iface.Source.Bridge, iface.MAC.Address)
+		if iface.Source.Bridge == networkBridge {
+			klog.V(2).Infof("ExtractMACAddressFromXML: Found MAC address '%s' for bridge '%s'", iface.MAC.Address, networkBridge)
+			klog.V(2).Infof("Found %s interface with MAC: %s", networkBridge, iface.MAC.Address)
+			return iface.MAC.Address, nil
+		}
+	}
+
+	klog.ErrorS(nil, "ExtractMACAddressFromXML: No interface found for bridge", "bridge", networkBridge)
+	return "", fmt.Errorf("no %s interface found in domain XML", networkBridge)
+} + +// GetVMNameByMACMatch finds the VM name that has a specific MAC address on a given network bridge. +// This is used to correlate OpenShift nodes (identified by MAC address) with their underlying VMs. +// +// The function performs an exhaustive search by: +// 1. Listing all VMs on the hypervisor (both running and stopped) +// 2. For each VM, retrieving its XML configuration via virsh dumpxml +// 3. Parsing the XML to extract MAC addresses for interfaces on the specified bridge +// 4. Comparing the extracted MAC with the target MAC address +// 5. Returning the VM name when a match is found +// +// This is useful in node replacement scenarios where you need to find which VM corresponds +// to a specific OpenShift node based on its BareMetalHost MAC address. +// +// Parameters: +// - nodeName: Name of the OpenShift node (used for logging and error messages) +// - nodeMAC: The MAC address to search for (in format "52:54:00:xx:xx:xx") +// - networkBridge: The network bridge name to check (e.g., "ostestpr" for provisioning network) +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - string: The name of the matching VM +// - error: Error if no VM found with the specified MAC or if any operation fails +func GetVMNameByMACMatch(nodeName, nodeMAC string, networkBridge string, sshConfig *SSHConfig, knownHostsPath string) (string, error) { + klog.V(4).Infof("GetVMNameByMACMatch: Searching for VM with MAC '%s' on bridge '%s' (node: %s)", nodeMAC, networkBridge, nodeName) + + // Get list of all VMs using SSH to hypervisor + vmListOutput, err := VirshListAllVMs(sshConfig, knownHostsPath) + klog.V(4).Infof("VirshListAllVMs output: %s", vmListOutput) + if err != nil { + klog.ErrorS(err, "GetVMNameByMACMatch failed to get VM list") + return "", fmt.Errorf("failed to get VM list: %v", err) + } + + vmNames := strings.Fields(vmListOutput) + klog.V(4).Infof("GetVMNameByMACMatch: Found 
%d VMs to check: %v", len(vmNames), vmNames) + klog.V(2).Infof("Found VMs: %v", vmNames) + + // Check each VM to find the one with matching MAC address + for i, vmName := range vmNames { + if vmName == "" { + klog.V(4).Infof("GetVMNameByMACMatch: Skipping empty VM name at index %d", i) + continue + } + + klog.V(4).Infof("GetVMNameByMACMatch: Checking VM %d/%d: '%s'", i+1, len(vmNames), vmName) + + // Get VM XML configuration using SSH to hypervisor + vmXML, err := VirshDumpXML(vmName, sshConfig, knownHostsPath) + klog.V(4).Infof("Getting XML for VM: %s", vmName) + if err != nil { + klog.Warningf("Could not get XML for VM '%s', skipping - %v", vmName, err) + continue + } + + // Extract MAC address from VM XML for the ostestpr bridge + vmMAC, err := ExtractMACAddressFromXML(vmXML, networkBridge) + if err != nil { + klog.Warningf("Could not extract MAC from VM '%s', skipping - %v", vmName, err) + continue + } + + klog.V(4).Infof("GetVMNameByMACMatch: VM '%s' has MAC '%s'", vmName, vmMAC) + klog.V(2).Infof("VM %s has MAC %s", vmName, vmMAC) + klog.V(4).Infof("Comparing VM MAC %s with target MAC %s", vmMAC, nodeMAC) + + // Check if this VM's MAC matches the node's MAC + if vmMAC == nodeMAC { + klog.V(2).Infof("GetVMNameByMACMatch: Found matching VM '%s' with MAC '%s'", vmName, vmMAC) + klog.V(2).Infof("Found matching VM: %s (MAC: %s)", vmName, vmMAC) + return vmName, nil + } + } + + klog.ErrorS(nil, "GetVMNameByMACMatch: No VM found with MAC", "mac", nodeMAC, "node", nodeName) + return "", fmt.Errorf("no VM found with MAC address %s for node %s", nodeMAC, nodeName) +} + +// GetVMNetworkInfo retrieves the UUID and MAC address for a VM's network interface +// +// Parameters: +// - vmName: Name of the VM to get network info for +// - networkBridge: The network bridge name to extract MAC address from +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - string: The VM's UUID +// 
- string: The MAC address for the specified network bridge +// - error: Any error that occurred during retrieval +func GetVMNetworkInfo(vmName string, networkBridge string, sshConfig *SSHConfig, knownHostsPath string) (string, string, error) { + klog.V(4).Infof("GetVMNetworkInfo: Getting network info for VM '%s' on bridge '%s'", vmName, networkBridge) + + newUUID, err := VirshGetVMUUID(vmName, sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "GetVMNetworkInfo failed to get UUID", "vm", vmName) + return "", "", fmt.Errorf("failed to get VM UUID: %v", err) + } + + newXMLOutput, err := VirshDumpXML(vmName, sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "GetVMNetworkInfo failed to get XML", "vm", vmName) + return "", "", fmt.Errorf("failed to get VM XML: %v", err) + } + + newMACAddress, err := ExtractMACAddressFromXML(newXMLOutput, networkBridge) + if err != nil { + klog.ErrorS(err, "GetVMNetworkInfo failed to extract MAC", "vm", vmName) + return "", "", fmt.Errorf("failed to find MAC address in VM XML: %v", err) + } + + klog.V(2).Infof("GetVMNetworkInfo: Successfully retrieved info for VM '%s': UUID=%s, MAC=%s", vmName, newUUID, newMACAddress) + return newUUID, newMACAddress, nil +} + +// RecreateVMFromXML recreates a VM from its XML configuration. +// This is typically used during node replacement or disaster recovery scenarios. +// +// The function performs the following steps: +// 1. Validates VM name to prevent command injection +// 2. Checks if the VM already exists (skips recreation if it does) +// 3. Creates a temporary XML file on the hypervisor (/tmp/.xml) +// 4. Defines the VM in libvirt using the XML configuration +// 5. Starts the VM +// 6. Enables autostart so the VM starts automatically on hypervisor boot +// 7. Cleans up the temporary XML file +// +// Security: The VM name is validated to prevent shell command injection attacks. 
+// +// Parameters: +// - vmName: Name of the VM to recreate (must not contain shell metacharacters) +// - xmlContent: The complete libvirt XML configuration for the VM +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - error: Any error that occurred during recreation (nil if VM already exists) +func RecreateVMFromXML(vmName, xmlContent string, sshConfig *SSHConfig, knownHostsPath string) error { + klog.V(2).Infof("RecreateVMFromXML: Starting recreation of VM '%s'", vmName) + + // Validate VM name to prevent command injection + if strings.ContainsAny(vmName, ";&|$`\\\"'<>()[]{}!*?~") { + klog.ErrorS(nil, "RecreateVMFromXML: Invalid VM name contains shell metacharacters", "vmName", vmName) + return fmt.Errorf("invalid VM name contains shell metacharacters: %s", vmName) + } + + // Check if VM already exists using the dedicated function + _, err := VirshVMExists(vmName, sshConfig, knownHostsPath) + if err == nil { + klog.V(2).Infof("RecreateVMFromXML: VM '%s' already exists, skipping recreation", vmName) + klog.V(2).Infof("VM %s already exists, skipping recreation", vmName) + return nil + } + klog.V(4).Infof("RecreateVMFromXML: VM '%s' does not exist, proceeding with recreation", vmName) + + // Create a temporary file on the hypervisor with the XML content + createXMLCommand := fmt.Sprintf(`cat > /tmp/%s.xml <<'XML_EOF' +%s +XML_EOF`, vmName, xmlContent) + + klog.V(4).Infof("RecreateVMFromXML: Creating temporary XML file /tmp/%s.xml", vmName) + _, _, err = ExecuteSSHCommand(createXMLCommand, sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "RecreateVMFromXML failed to create XML file") + return fmt.Errorf("failed to create XML file on hypervisor: %v", err) + } + + // Redefine the VM using the backed up XML (using helper function) + klog.V(4).Infof("RecreateVMFromXML: Defining VM '%s' from XML", vmName) + err = VirshDefineVM(fmt.Sprintf("/tmp/%s.xml", 
vmName), sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "RecreateVMFromXML failed to define VM") + return fmt.Errorf("failed to define VM: %v", err) + } + + // Start the VM (using helper function) + klog.V(4).Infof("RecreateVMFromXML: Starting VM '%s'", vmName) + err = VirshStartVM(vmName, sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "RecreateVMFromXML failed to start VM") + return fmt.Errorf("failed to start VM: %v", err) + } + + // Enable autostart (using helper function) + klog.V(4).Infof("RecreateVMFromXML: Enabling autostart for VM '%s'", vmName) + err = VirshAutostartVM(vmName, sshConfig, knownHostsPath) + if err != nil { + klog.Warningf("Failed to enable autostart (non-fatal) - %v", err) + } + + // Clean up temporary XML file + klog.V(4).Infof("RecreateVMFromXML: Cleaning up temporary XML file /tmp/%s.xml", vmName) + _, _, err = ExecuteSSHCommand(fmt.Sprintf("rm -f /tmp/%s.xml", vmName), sshConfig, knownHostsPath) + if err != nil { + klog.Warningf("Failed to clean up XML file (non-fatal) - %v", err) + } + + klog.V(2).Infof("RecreateVMFromXML: Successfully recreated VM '%s'", vmName) + klog.V(2).Infof("Recreated VM: %s", vmName) + return nil +} + +// WaitForVMToStart waits for a VM to reach running state with retry logic. +// This polls the VM state periodically until it reports as "running" or the timeout is exceeded. +// +// The function performs two checks: +// 1. Verifies the VM exists in the virsh VM list +// 2. 
Checks that the VM's domain state is "running" (not just defined or paused) +// +// Parameters: +// - vmName: Name of the VM to wait for +// - sshConfig: SSH configuration for connecting to the hypervisor +// - knownHostsPath: Path to the known_hosts file for SSH +// +// Returns: +// - error: Error if VM doesn't start within timeout period (vmStartTimeout) or if any operation fails +func WaitForVMToStart(vmName string, sshConfig *SSHConfig, knownHostsPath string) error { + klog.V(2).Infof("WaitForVMToStart: Starting wait for VM '%s' to reach running state", vmName) + klog.V(2).Infof("Waiting for VM %s to start...", vmName) + + err := RetryOperationWithTimeout(func() error { + klog.V(4).Infof("WaitForVMToStart: Checking if VM '%s' is running (retry iteration)", vmName) + + // Check if VM exists using VirshVMExists helper + _, err := VirshVMExists(vmName, sshConfig, knownHostsPath) + if err != nil { + klog.V(4).Infof("WaitForVMToStart: VM '%s' not found in VM list - %v", vmName, err) + return fmt.Errorf("VM %s not yet running: %v", vmName, err) + } + + // Check if VM is actually running (not just defined) + statusOutput, err := VirshCommand(fmt.Sprintf("domstate %s", vmName), sshConfig, knownHostsPath) + if err != nil { + klog.ErrorS(err, "WaitForVMToStart failed to check VM state", "vm", vmName) + return fmt.Errorf("failed to check VM %s state: %v", vmName, err) + } + + statusOutput = strings.TrimSpace(statusOutput) + klog.V(4).Infof("WaitForVMToStart: VM '%s' current state: %s", vmName, statusOutput) + + if !strings.Contains(statusOutput, "running") { + return fmt.Errorf("VM %s is not running, current state: %s", vmName, statusOutput) + } + + klog.V(2).Infof("WaitForVMToStart: VM '%s' is confirmed running", vmName) + klog.V(2).Infof("VM %s is now running", vmName) + return nil + }, vmStartTimeout, vmStartPollInterval, fmt.Sprintf("VM %s startup", vmName)) + + if err != nil { + klog.ErrorS(err, "WaitForVMToStart timeout or error", "vm", vmName) + } else { + 
klog.V(2).Infof("WaitForVMToStart: Successfully confirmed VM '%s' is running", vmName) + } + + return err +} diff --git a/test/extended/util/test_setup.go b/test/extended/util/test_setup.go index 4a64234625f2..be05160c7dbf 100644 --- a/test/extended/util/test_setup.go +++ b/test/extended/util/test_setup.go @@ -2,6 +2,7 @@ package util import ( "context" + "encoding/json" "flag" "fmt" "os" @@ -56,6 +57,36 @@ func InitStandardFlags() { func InitTest(dryRun bool) error { InitDefaultEnvironmentVariables() + // Set hypervisor configuration in TestContext if available + hypervisorConfigJSON := os.Getenv("HYPERVISOR_CONFIG") + if hypervisorConfigJSON != "" { + // Parse and validate hypervisor configuration + var hypervisorConfig struct { + HypervisorIP string `json:"hypervisorIP"` + SSHUser string `json:"sshUser"` + PrivateKey string `json:"privateKey"` + } + if err := json.Unmarshal([]byte(hypervisorConfigJSON), &hypervisorConfig); err != nil { + return fmt.Errorf("failed to parse hypervisor configuration JSON: %v", err) + } + + // Validate required fields + if hypervisorConfig.HypervisorIP == "" { + return fmt.Errorf("hypervisorIP is required in hypervisor configuration") + } + if hypervisorConfig.SSHUser == "" { + return fmt.Errorf("sshUser is required in hypervisor configuration") + } + if hypervisorConfig.PrivateKey == "" { + return fmt.Errorf("privateKey is required in hypervisor configuration") + } + + // Store the hypervisor configuration in TestContext for tests to access + // We'll use the existing CloudConfig.ConfigFile field to store the JSON + // This is a workaround since we can't extend TestContextType directly + TestContext.CloudConfig.ConfigFile = hypervisorConfigJSON + } + TestContext.DeleteNamespace = os.Getenv("DELETE_NAMESPACE") != "false" TestContext.VerifyServiceAccount = true testfiles.AddFileSource(e2etestingmanifests.GetE2ETestingManifestsFS()) @@ -228,3 +259,31 @@ func addRoleToE2EServiceAccounts(rbacClient rbacv1client.RbacV1Interface, namesp 
FatalErr(err) } } + +// GetHypervisorConfig returns the hypervisor configuration if available +func GetHypervisorConfig() *struct { + HypervisorIP string `json:"hypervisorIP"` + SSHUser string `json:"sshUser"` + PrivateKey string `json:"privateKey"` +} { + hypervisorConfigJSON := TestContext.CloudConfig.ConfigFile + if hypervisorConfigJSON == "" { + return nil + } + + var hypervisorConfig struct { + HypervisorIP string `json:"hypervisorIP"` + SSHUser string `json:"sshUser"` + PrivateKey string `json:"privateKey"` + } + if err := json.Unmarshal([]byte(hypervisorConfigJSON), &hypervisorConfig); err != nil { + return nil + } + + return &hypervisorConfig +} + +// HasHypervisorConfig returns true if hypervisor configuration is available +func HasHypervisorConfig() bool { + return TestContext.CloudConfig.ConfigFile != "" +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/LICENSE b/vendor/github.com/metal3-io/baremetal-operator/apis/LICENSE new file mode 100644 index 000000000000..261eeb9e9f8b --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_types.go new file mode 100644 index 000000000000..266ca5e16e13 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/baremetalhost_types.go @@ -0,0 +1,1167 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "errors" + "fmt" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// NOTE: json tags are required. Any new fields you add must have +// json tags for the fields to be serialized. + +// NOTE(dhellmann): Update docs/api.md when changing these data structure. 
+ +const ( + // BareMetalHostFinalizer is the name of the finalizer added to + // hosts to block delete operations until the physical host can be + // deprovisioned. + BareMetalHostFinalizer string = "baremetalhost.metal3.io" + + // PausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue). + PausedAnnotation = "baremetalhost.metal3.io/paused" + + // DetachedAnnotation is the annotation which stops provisioner management of the host + // unlike in the paused case, the host status may be updated. + DetachedAnnotation = "baremetalhost.metal3.io/detached" + + // StatusAnnotation is the annotation that keeps a copy of the Status of BMH + // This is particularly useful when we pivot BMH. If the status + // annotation is present and status is empty, BMO will reconstruct BMH Status + // from the status annotation. + StatusAnnotation = "baremetalhost.metal3.io/status" + + // RebootAnnotationPrefix is the annotation which tells the host which mode to use + // when rebooting - hard/soft. + RebootAnnotationPrefix = "reboot.metal3.io" + + // InspectAnnotationPrefix is used to specify if automatic introspection carried out + // during registration of BMH is enabled or disabled. + InspectAnnotationPrefix = "inspect.metal3.io" + + // HardwareDetailsAnnotation provides the hardware details for the host + // in case its not already part of the host status and when introspection + // is disabled. + HardwareDetailsAnnotation = InspectAnnotationPrefix + "/hardwaredetails" + + // InspectAnnotationValueDisabled is a constant string="disabled" + // This is particularly useful to check if inspect annotation is disabled + // inspect.metal3.io=disabled. + InspectAnnotationValueDisabled = "disabled" +) + +// RootDeviceHints holds the hints for specifying the storage location +// for the root filesystem for the image. 
+type RootDeviceHints struct { + // A Linux device name like "/dev/vda", or a by-path link to it like + // "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0". The hint must match + // the actual value exactly. + DeviceName string `json:"deviceName,omitempty"` + + // A SCSI bus address like 0:0:0:0. The hint must match the actual + // value exactly. + HCTL string `json:"hctl,omitempty"` + + // A vendor-specific device identifier. The hint can be a + // substring of the actual value. + Model string `json:"model,omitempty"` + + // The name of the vendor or manufacturer of the device. The hint + // can be a substring of the actual value. + Vendor string `json:"vendor,omitempty"` + + // Device serial number. The hint must match the actual value + // exactly. + SerialNumber string `json:"serialNumber,omitempty"` + + // The minimum size of the device in Gigabytes. + // +kubebuilder:validation:Minimum=0 + MinSizeGigabytes int `json:"minSizeGigabytes,omitempty"` + + // Unique storage identifier. The hint must match the actual value + // exactly. + WWN string `json:"wwn,omitempty"` + + // Unique storage identifier with the vendor extension + // appended. The hint must match the actual value exactly. + WWNWithExtension string `json:"wwnWithExtension,omitempty"` + + // Unique vendor storage identifier. The hint must match the + // actual value exactly. + WWNVendorExtension string `json:"wwnVendorExtension,omitempty"` + + // True if the device should use spinning media, false otherwise. + Rotational *bool `json:"rotational,omitempty"` +} + +// BootMode is the boot mode of the system. +// +kubebuilder:validation:Enum=UEFI;UEFISecureBoot;legacy +type BootMode string + +// Allowed boot mode from metal3. +const ( + UEFI BootMode = "UEFI" + UEFISecureBoot BootMode = "UEFISecureBoot" + Legacy BootMode = "legacy" + DefaultBootMode BootMode = UEFI +) + +// OperationalStatus represents the state of the host. 
+type OperationalStatus string + +const ( + // OperationalStatusOK is the status value for when the host is + // configured correctly and is manageable. + OperationalStatusOK OperationalStatus = "OK" + + // OperationalStatusDiscovered is the status value for when the + // host is only partially configured, such as when the BMC + // address is known but the login credentials are not. + OperationalStatusDiscovered OperationalStatus = "discovered" + + // OperationalStatusError is the status value for when the host + // has any sort of error. + OperationalStatusError OperationalStatus = "error" + + // OperationalStatusDelayed is the status value for when the host + // deployment needs to be delayed to limit simultaneous hosts provisioning. + OperationalStatusDelayed = "delayed" + + // OperationalStatusDetached is the status value when the host is + // marked unmanaged via the detached annotation. + OperationalStatusDetached OperationalStatus = "detached" + + // OperationalStatusServicing is the status value when the host is + // undergoing servicing (e.g. checking firmware settings). + OperationalStatusServicing OperationalStatus = "servicing" +) + +// OperationalStatusAllowed represents the allowed values of OperationalStatus. +var OperationalStatusAllowed = []string{"", string(OperationalStatusOK), string(OperationalStatusDiscovered), string(OperationalStatusError), string(OperationalStatusDelayed), string(OperationalStatusDetached)} + +// ErrorType indicates the class of problem that has caused the Host resource +// to enter an error state. +type ErrorType string + +const ( + // ProvisionedRegistrationError is an error condition occurring when the controller + // is unable to re-register an already provisioned host. + ProvisionedRegistrationError ErrorType = "provisioned registration error" + // RegistrationError is an error condition occurring when the + // controller is unable to connect to the Host's baseboard management + // controller. 
+ RegistrationError ErrorType = "registration error" + // InspectionError is an error condition occurring when an attempt to + // obtain hardware details from the Host fails. + InspectionError ErrorType = "inspection error" + // PreparationError is an error condition occurring when do + // cleaning steps failed. + PreparationError ErrorType = "preparation error" + // ProvisioningError is an error condition occurring when the controller + // fails to provision or deprovision the Host. + ProvisioningError ErrorType = "provisioning error" + // PowerManagementError is an error condition occurring when the + // controller is unable to modify the power state of the Host. + PowerManagementError ErrorType = "power management error" + // DetachError is an error condition occurring when the + // controller is unable to detatch the host from the provisioner. + DetachError ErrorType = "detach error" + // ServicingError is an error condition occurring when + // service steps failed. + ServicingError ErrorType = "servicing error" +) + +// ErrorTypeAllowed represents the allowed values of ErrorType. +var ErrorTypeAllowed = []string{"", string(ProvisionedRegistrationError), string(RegistrationError), string(InspectionError), string(PreparationError), string(ProvisioningError), string(PowerManagementError)} + +// ProvisioningState defines the states the provisioner will report +// the host has having. +type ProvisioningState string + +const ( + // StateNone means the state is unknown. + StateNone ProvisioningState = "" + + // StateUnmanaged means there is insufficient information available to + // register the host. + StateUnmanaged ProvisioningState = "unmanaged" + + // StateRegistering means we are telling the backend about the host. + StateRegistering ProvisioningState = "registering" + + // StateMatchProfile used to mean we are assigning a profile. + // It no longer does anything, profile matching is done on registration. 
+ StateMatchProfile ProvisioningState = "match profile" + + // StatePreparing means we are removing existing configuration and set new configuration to the host. + StatePreparing ProvisioningState = "preparing" + + // StateReady is a deprecated name for StateAvailable. + StateReady ProvisioningState = "ready" + + // StateAvailable means the host can be consumed. + StateAvailable ProvisioningState = "available" + + // StateProvisioning means we are writing an image to the host's + // disk(s). + StateProvisioning ProvisioningState = "provisioning" + + // StateProvisioned means we have written an image to the host's + // disk(s). + StateProvisioned ProvisioningState = "provisioned" + + // StateExternallyProvisioned means something else is managing the + // image on the host. + StateExternallyProvisioned ProvisioningState = "externally provisioned" + + // StateDeprovisioning means we are removing an image from the + // host's disk(s). + StateDeprovisioning ProvisioningState = "deprovisioning" + + // StateInspecting means we are running the agent on the host to + // learn about the hardware components available there. + StateInspecting ProvisioningState = "inspecting" + + // StatePoweringOffBeforeDelete means we are in the process of + // powering off the host before it's deleted. + StatePoweringOffBeforeDelete ProvisioningState = "powering off before delete" + + // StateDeleting means we are in the process of cleaning up the host + // ready for deletion. + StateDeleting ProvisioningState = "deleting" +) + +// BMCDetails contains the information necessary to communicate with +// the bare metal controller module on host. +type BMCDetails struct { + + // Address holds the URL for accessing the controller on the network. + // The scheme part designates the driver to use with the host. + Address string `json:"address"` + + // The name of the secret containing the BMC credentials (requires + // keys "username" and "password"). 
+ CredentialsName string `json:"credentialsName"` + + // DisableCertificateVerification disables verification of server + // certificates when using HTTPS to connect to the BMC. This is + // required when the server certificate is self-signed, but is + // insecure because it allows a man-in-the-middle to intercept the + // connection. + DisableCertificateVerification bool `json:"disableCertificateVerification,omitempty"` +} + +// HardwareRAIDVolume defines the desired configuration of volume in hardware RAID. +type HardwareRAIDVolume struct { + // Size of the logical disk to be created in GiB. If unspecified or + // set be 0, the maximum capacity of disk will be used for logical + // disk. + // +kubebuilder:validation:Minimum=0 + SizeGibibytes *int `json:"sizeGibibytes,omitempty"` + + // RAID level for the logical disk. The following levels are supported: + // 0, 1, 2, 5, 6, 1+0, 5+0, 6+0 (drivers may support only some of them). + // +kubebuilder:validation:Enum="0";"1";"2";"5";"6";"1+0";"5+0";"6+0" + Level string `json:"level" required:"true"` + + // Name of the volume. Should be unique within the Node. If not + // specified, the name will be auto-generated. + // +kubebuilder:validation:MaxLength=64 + Name string `json:"name,omitempty"` + + // Select disks with only rotational (if set to true) or solid-state + // (if set to false) storage. By default, any disks can be picked. + Rotational *bool `json:"rotational,omitempty"` + + // Integer, number of physical disks to use for the logical disk. + // Defaults to minimum number of disks required for the particular RAID + // level. + // +kubebuilder:validation:Minimum=1 + NumberOfPhysicalDisks *int `json:"numberOfPhysicalDisks,omitempty"` + + // The name of the RAID controller to use. + Controller string `json:"controller,omitempty"` + + // Optional list of physical disk names to be used for the hardware RAID volumes. 
The disk names are interpreted + // by the hardware RAID controller, and the format is hardware specific. + PhysicalDisks []string `json:"physicalDisks,omitempty"` +} + +// SoftwareRAIDVolume defines the desired configuration of volume in software RAID. +type SoftwareRAIDVolume struct { + // Size of the logical disk to be created in GiB. + // If unspecified or set be 0, the maximum capacity of disk will be used for logical disk. + // +kubebuilder:validation:Minimum=0 + SizeGibibytes *int `json:"sizeGibibytes,omitempty"` + + // RAID level for the logical disk. The following levels are supported: + // 0, 1 and 1+0. + // +kubebuilder:validation:Enum="0";"1";"1+0" + Level string `json:"level" required:"true"` + + // A list of device hints, the number of items should be greater than or equal to 2. + // +kubebuilder:validation:MinItems=2 + PhysicalDisks []RootDeviceHints `json:"physicalDisks,omitempty"` +} + +// RAIDConfig contains the configuration that are required to config RAID in Bare Metal server. +type RAIDConfig struct { + // The list of logical disks for hardware RAID, if rootDeviceHints isn't used, first volume is root volume. + // You can set the value of this field to `[]` to clear all the hardware RAID configurations. + // +optional + // +nullable + HardwareRAIDVolumes []HardwareRAIDVolume `json:"hardwareRAIDVolumes"` + + // The list of logical disks for software RAID, if rootDeviceHints isn't used, first volume is root volume. + // If HardwareRAIDVolumes is set this item will be invalid. + // The number of created Software RAID devices must be 1 or 2. + // If there is only one Software RAID device, it has to be a RAID-1. + // If there are two, the first one has to be a RAID-1, while the RAID level for the second one can be 0, 1, or 1+0. + // As the first RAID device will be the deployment device, + // enforcing a RAID-1 reduces the risk of ending up with a non-booting host in case of a disk failure. + // Software RAID will always be deleted. 
+ // +kubebuilder:validation:MaxItems=2 + // +optional + // +nullable + SoftwareRAIDVolumes []SoftwareRAIDVolume `json:"softwareRAIDVolumes"` +} + +// FirmwareConfig contains the configuration that you want to configure BIOS settings in Bare metal server. +type FirmwareConfig struct { + // Supports the virtualization of platform hardware. + // +kubebuilder:validation:Enum=true;false + VirtualizationEnabled *bool `json:"virtualizationEnabled,omitempty"` + + // Allows a single physical processor core to appear as several logical processors. + // +kubebuilder:validation:Enum=true;false + SimultaneousMultithreadingEnabled *bool `json:"simultaneousMultithreadingEnabled,omitempty"` + + // SR-IOV support enables a hypervisor to create virtual instances of a PCI-express device, potentially increasing performance. + // +kubebuilder:validation:Enum=true;false + SriovEnabled *bool `json:"sriovEnabled,omitempty"` +} + +// BareMetalHostSpec defines the desired state of BareMetalHost. +type BareMetalHostSpec struct { + // Important: Run "make generate manifests" to regenerate code + // after modifying this file + + // Taints is the full, authoritative list of taints to apply to + // the corresponding Machine. This list will overwrite any + // modifications made to the Machine on an ongoing basis. + // +optional + Taints []corev1.Taint `json:"taints,omitempty"` + + // How do we connect to the BMC (Baseboard Management Controller) on + // the host? + BMC BMCDetails `json:"bmc,omitempty"` + + // RAID configuration for bare metal server. If set, the RAID settings + // will be applied before the host is provisioned. If not, the current + // settings will not be modified. Only one of the sub-fields + // hardwareRAIDVolumes and softwareRAIDVolumes can be set at the same + // time. + RAID *RAIDConfig `json:"raid,omitempty"` + + // Firmware (BIOS) configuration for bare metal server. If set, the + // requested settings will be applied before the host is provisioned. 
+ // Only some vendor drivers support this field. An alternative is to + // use HostFirmwareSettings resources that allow changing arbitrary + // values and support the generic Redfish-based drivers. + Firmware *FirmwareConfig `json:"firmware,omitempty"` + + // What is the name of the hardware profile for this host? + // Hardware profiles are deprecated and should not be used. + // Use the separate fields Architecture and RootDeviceHints instead. + // Set to "empty" to prepare for the future version of the API + // without hardware profiles. + HardwareProfile string `json:"hardwareProfile,omitempty"` + + // Provide guidance about how to choose the device for the image + // being provisioned. The default is currently to use /dev/sda as + // the root device. + RootDeviceHints *RootDeviceHints `json:"rootDeviceHints,omitempty"` + + // Select the method of initializing the hardware during boot. + // Defaults to UEFI. Legacy boot should only be used for hardware that + // does not support UEFI correctly. Set to UEFISecureBoot to turn + // secure boot on automatically after provisioning. + // +optional + BootMode BootMode `json:"bootMode,omitempty"` + + // The MAC address of the NIC used for provisioning the host. In case + // of network boot, this is the MAC address of the PXE booting + // interface. The MAC address of the BMC must never be used here! + // +kubebuilder:validation:Pattern=`[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}` + BootMACAddress string `json:"bootMACAddress,omitempty"` + + // Should the host be powered on? If the host is currently in a stable + // state (e.g. provisioned), its power state will be forced to match + // this value. + Online bool `json:"online"` + + // ConsumerRef can be used to store information about something + // that is using a host. When it is not empty, the host is + // considered "in use". The common use case is a link to a Machine + // resource when the host is used by Cluster API. 
+ ConsumerRef *corev1.ObjectReference `json:"consumerRef,omitempty"` + + // Image holds the details of the image to be provisioned. Populating + // the image will cause the host to start provisioning. + Image *Image `json:"image,omitempty"` + + // UserData holds the reference to the Secret containing the user data + // which is passed to the Config Drive and interpreted by the + // first-boot software such as cloud-init. The format of user data is + // specific to the first-boot software. + UserData *corev1.SecretReference `json:"userData,omitempty"` + + // PreprovisioningNetworkDataName is the name of the Secret in the + // local namespace containing network configuration which is passed to + // the preprovisioning image, and to the Config Drive if not overridden + // by specifying NetworkData. + PreprovisioningNetworkDataName string `json:"preprovisioningNetworkDataName,omitempty"` + + // NetworkData holds the reference to the Secret containing network + // configuration which is passed to the Config Drive and interpreted + // by the first boot software such as cloud-init. + NetworkData *corev1.SecretReference `json:"networkData,omitempty"` + + // MetaData holds the reference to the Secret containing host metadata + // which is passed to the Config Drive. By default, metadata will be + // generated for the host, so most users do not need to set this field. + MetaData *corev1.SecretReference `json:"metaData,omitempty"` + + // Description is a human-entered text used to help identify the host. + Description string `json:"description,omitempty"` + + // ExternallyProvisioned means something else has provisioned the + // image running on the host, and the operator should only manage + // the power status. This field is used for integration with already + // provisioned hosts and when pivoting hosts between clusters. If + // unsure, leave this field as false. 
+ ExternallyProvisioned bool `json:"externallyProvisioned,omitempty"`
+
+ // When set to disabled, automated cleaning will be skipped
+ // during provisioning and deprovisioning.
+ // +optional
+ // +kubebuilder:default:=metadata
+ // +kubebuilder:validation:Optional
+ AutomatedCleaningMode AutomatedCleaningMode `json:"automatedCleaningMode,omitempty"`
+
+ // A custom deploy procedure. This is an advanced feature that allows
+ // using a custom deploy step provided by a site-specific deployment
+ // ramdisk. Most users will want to use "image" instead. Setting this
+ // field triggers provisioning.
+ // +optional
+ CustomDeploy *CustomDeploy `json:"customDeploy,omitempty"`
+
+ // CPU architecture of the host, e.g. "x86_64" or "aarch64". If unset,
+ // eventually populated by inspection.
+ // +optional
+ Architecture string `json:"architecture,omitempty"`
+
+ // When set to true, power off of the node will be disabled,
+ // instead, a reboot will be used in place of power on/off
+ // +optional
+ DisablePowerOff bool `json:"disablePowerOff,omitempty"`
+}
+
+// AutomatedCleaningMode is the interface to enable/disable automated cleaning
+// +kubebuilder:validation:Enum:=metadata;disabled
+type AutomatedCleaningMode string
+
+// Allowed automated cleaning modes.
+const (
+ CleaningModeDisabled AutomatedCleaningMode = "disabled"
+ CleaningModeMetadata AutomatedCleaningMode = "metadata"
+)
+
+// ChecksumType holds the algorithm name for the checksum
+// +kubebuilder:validation:Enum=md5;sha256;sha512;auto
+type ChecksumType string
+
+const (
+ // MD5 checksum type.
+ MD5 ChecksumType = "md5"
+
+ // SHA256 checksum type.
+ SHA256 ChecksumType = "sha256"
+
+ // SHA512 checksum type.
+ SHA512 ChecksumType = "sha512"
+
+ // Automatically detect.
+ AutoChecksum ChecksumType = "auto"
+)
+
+// Image holds the details of an image either to be provisioned or that
+// has been provisioned.
+type Image struct {
+ // URL is a location of an image to deploy. 
+ URL string `json:"url"` + + // Checksum is the checksum for the image. Required for all formats + // except for "live-iso". + Checksum string `json:"checksum,omitempty"` + + // ChecksumType is the checksum algorithm for the image, e.g md5, sha256 or sha512. + // The special value "auto" can be used to detect the algorithm from the checksum. + // If missing, MD5 is used. If in doubt, use "auto". + ChecksumType ChecksumType `json:"checksumType,omitempty"` + + // Format contains the format of the image (raw, qcow2, ...). + // When set to "live-iso", an ISO 9660 image referenced by the url will + // be live-booted and not deployed to disk. + // +kubebuilder:validation:Enum=raw;qcow2;vdi;vmdk;live-iso + DiskFormat *string `json:"format,omitempty"` +} + +func (image *Image) IsLiveISO() bool { + return image != nil && image.DiskFormat != nil && *image.DiskFormat == "live-iso" +} + +// Custom deploy is a description of a customized deploy process. +type CustomDeploy struct { + // Custom deploy method name. + // This name is specific to the deploy ramdisk used. If you don't have + // a custom deploy ramdisk, you shouldn't use CustomDeploy. + Method string `json:"method"` +} + +// FIXME(dhellmann): We probably want some other module to own these +// data structures. + +// ClockSpeed is a clock speed in MHz +// +kubebuilder:validation:Format=double +type ClockSpeed float64 + +// ClockSpeed multipliers. +const ( + MegaHertz ClockSpeed = 1.0 + GigaHertz = 1000 * MegaHertz +) + +// Capacity is a disk size in Bytes. +type Capacity int64 + +// Capacity multipliers. +const ( + Byte Capacity = 1 + KibiByte = Byte * 1024 + KiloByte = Byte * 1000 + MebiByte = KibiByte * 1024 + MegaByte = KiloByte * 1000 + GibiByte = MebiByte * 1024 + GigaByte = MegaByte * 1000 + TebiByte = GibiByte * 1024 + TeraByte = GigaByte * 1000 +) + +// DiskType is a disk type, i.e. HDD, SSD, NVME. +type DiskType string + +// DiskType constants. 
+const ( + HDD DiskType = "HDD" + SSD DiskType = "SSD" + NVME DiskType = "NVME" +) + +// CPU describes one processor on the host. +type CPU struct { + Arch string `json:"arch,omitempty"` + Model string `json:"model,omitempty"` + ClockMegahertz ClockSpeed `json:"clockMegahertz,omitempty"` + Flags []string `json:"flags,omitempty"` + Count int `json:"count,omitempty"` +} + +// Storage describes one storage device (disk, SSD, etc.) on the host. +type Storage struct { + // A Linux device name of the disk, e.g. + // "/dev/disk/by-path/pci-0000:01:00.0-scsi-0:2:0:0". This will be a name + // that is stable across reboots if one is available. + Name string `json:"name,omitempty"` + + // A list of alternate Linux device names of the disk, e.g. "/dev/sda". + // Note that this list is not exhaustive, and names may not be stable + // across reboots. + AlternateNames []string `json:"alternateNames,omitempty"` + + // Whether this disk represents rotational storage. + // This field is not recommended for usage, please + // prefer using 'Type' field instead, this field + // will be deprecated eventually. + Rotational bool `json:"rotational,omitempty"` + + // Device type, one of: HDD, SSD, NVME. 
+ // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum=HDD;SSD;NVME; + Type DiskType `json:"type,omitempty"` + + // The size of the disk in Bytes + SizeBytes Capacity `json:"sizeBytes,omitempty"` + + // The name of the vendor of the device + Vendor string `json:"vendor,omitempty"` + + // Hardware model + Model string `json:"model,omitempty"` + + // The serial number of the device + SerialNumber string `json:"serialNumber,omitempty"` + + // The WWN of the device + WWN string `json:"wwn,omitempty"` + + // The WWN Vendor extension of the device + WWNVendorExtension string `json:"wwnVendorExtension,omitempty"` + + // The WWN with the extension + WWNWithExtension string `json:"wwnWithExtension,omitempty"` + + // The SCSI location of the device + HCTL string `json:"hctl,omitempty"` +} + +// VLANID is a 12-bit 802.1Q VLAN identifier +// +kubebuilder:validation:Type=integer +// +kubebuilder:validation:Minimum=0 +// +kubebuilder:validation:Maximum=4094 +type VLANID int32 + +// VLAN represents the name and ID of a VLAN. +type VLAN struct { + ID VLANID `json:"id,omitempty"` + + Name string `json:"name,omitempty"` +} + +// NIC describes one network interface on the host. +type NIC struct { + // The name of the network interface, e.g. "en0" + Name string `json:"name,omitempty"` + + // The vendor and product IDs of the NIC, e.g. "0x8086 0x1572" + Model string `json:"model,omitempty"` + + // The device MAC address + // +kubebuilder:validation:Pattern=`[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}` + MAC string `json:"mac,omitempty"` + + // The IP address of the interface. This will be an IPv4 or IPv6 address + // if one is present. If both IPv4 and IPv6 addresses are present in a + // dual-stack environment, two nics will be output, one with each IP. 
+ IP string `json:"ip,omitempty"` + + // The speed of the device in Gigabits per second + SpeedGbps int `json:"speedGbps,omitempty"` + + // The VLANs available + VLANs []VLAN `json:"vlans,omitempty"` + + // The untagged VLAN ID + //nolint:tagliatelle + VLANID VLANID `json:"vlanId,omitempty"` + + // Whether the NIC is PXE Bootable + PXE bool `json:"pxe,omitempty"` +} + +// Firmware describes the firmware on the host. +type Firmware struct { + // The BIOS for this firmware + BIOS BIOS `json:"bios,omitempty"` +} + +// BIOS describes the BIOS version on the host. +type BIOS struct { + // The release/build date for this BIOS + Date string `json:"date,omitempty"` + + // The vendor name for this BIOS + Vendor string `json:"vendor,omitempty"` + + // The version of the BIOS + Version string `json:"version,omitempty"` +} + +// HardwareDetails collects all of the information about hardware +// discovered on the host. +type HardwareDetails struct { + // System vendor information. + SystemVendor HardwareSystemVendor `json:"systemVendor,omitempty"` + // System firmware information. + Firmware Firmware `json:"firmware,omitempty"` + // The host's amount of memory in Mebibytes. + RAMMebibytes int `json:"ramMebibytes,omitempty"` + // List of network interfaces for the host. + NIC []NIC `json:"nics,omitempty"` + // List of storage (disk, SSD, etc.) available to the host. + Storage []Storage `json:"storage,omitempty"` + // Details of the CPU(s) in the system. + CPU CPU `json:"cpu,omitempty"` + Hostname string `json:"hostname,omitempty"` +} + +// HardwareSystemVendor stores details about the whole hardware system. +type HardwareSystemVendor struct { + Manufacturer string `json:"manufacturer,omitempty"` + ProductName string `json:"productName,omitempty"` + SerialNumber string `json:"serialNumber,omitempty"` +} + +// CredentialsStatus contains the reference and version of the last +// set of BMC credentials the controller was able to validate. 
+type CredentialsStatus struct { + Reference *corev1.SecretReference `json:"credentials,omitempty"` + Version string `json:"credentialsVersion,omitempty"` +} + +// RebootMode defines known variations of reboot modes. +type RebootMode string + +const ( + // RebootModeHard defined for hard reset of a host. + RebootModeHard RebootMode = "hard" + // RebootModeSoft defined for soft reset of a host. + RebootModeSoft RebootMode = "soft" +) + +// RebootAnnotationArguments defines the arguments of the RebootAnnotation type. +type RebootAnnotationArguments struct { + Mode RebootMode `json:"mode"` + Force bool `json:"force"` +} + +type DetachedDeleteAction string + +const ( + DetachedDeleteActionDelay = "delay" + DetachedDeleteActionDelete = "delete" +) + +type DetachedAnnotationArguments struct { + // DeleteAction indicates the desired delete logic when the detached annotation is present + DeleteAction DetachedDeleteAction `json:"deleteAction,omitempty"` +} + +// Match compares the saved status information with the name and +// content of a secret object. +func (cs CredentialsStatus) Match(secret corev1.Secret) bool { + switch { + case cs.Reference == nil: + return false + case cs.Reference.Name != secret.ObjectMeta.Name: + return false + case cs.Reference.Namespace != secret.ObjectMeta.Namespace: + return false + case cs.Version != secret.ObjectMeta.ResourceVersion: + return false + } + return true +} + +// OperationMetric contains metadata about an operation (inspection, +// provisioning, etc.) used for tracking metrics. +type OperationMetric struct { + // +nullable + Start metav1.Time `json:"start,omitempty"` + // +nullable + End metav1.Time `json:"end,omitempty"` +} + +// Duration returns the length of time that was spent on the +// operation. If the operation is not finished, it returns 0. 
+func (om OperationMetric) Duration() time.Duration { + if om.Start.IsZero() { + return 0 + } + return om.End.Time.Sub(om.Start.Time) +} + +// OperationHistory holds information about operations performed on a +// host. +type OperationHistory struct { + Register OperationMetric `json:"register,omitempty"` + Inspect OperationMetric `json:"inspect,omitempty"` + Provision OperationMetric `json:"provision,omitempty"` + Deprovision OperationMetric `json:"deprovision,omitempty"` +} + +// BareMetalHostStatus defines the observed state of BareMetalHost. +type BareMetalHostStatus struct { + // Important: Run "make generate manifests" to regenerate code + // after modifying this file + + // OperationalStatus holds the status of the host + // +kubebuilder:validation:Enum="";OK;discovered;error;delayed;detached;servicing + OperationalStatus OperationalStatus `json:"operationalStatus"` + + // ErrorType indicates the type of failure encountered when the + // OperationalStatus is OperationalStatusError + // +kubebuilder:validation:Enum=provisioned registration error;registration error;inspection error;preparation error;provisioning error;power management error;servicing error + ErrorType ErrorType `json:"errorType,omitempty"` + + // LastUpdated identifies when this status was last observed. + // +optional + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // The name of the profile matching the hardware details. + // Hardware profiles are deprecated and should not be relied on. + HardwareProfile string `json:"hardwareProfile,omitempty"` + + // The hardware discovered to exist on the host. + // This field will be removed in the next API version in favour of the + // separate HardwareData resource. + HardwareDetails *HardwareDetails `json:"hardware,omitempty"` + + // Information tracked by the provisioner. + Provisioning ProvisionStatus `json:"provisioning"` + + // The last credentials we were able to validate as working. 
+ GoodCredentials CredentialsStatus `json:"goodCredentials,omitempty"`
+
+ // The last credentials we sent to the provisioning backend.
+ TriedCredentials CredentialsStatus `json:"triedCredentials,omitempty"`
+
+ // The last error message reported by the provisioning subsystem.
+ ErrorMessage string `json:"errorMessage"`
+
+ // The currently detected power state of the host. This field may get
+ // briefly out of sync with the actual state of the hardware while
+ // provisioning processes are running.
+ PoweredOn bool `json:"poweredOn"`
+
+ // OperationHistory holds information about operations performed
+ // on this host.
+ OperationHistory OperationHistory `json:"operationHistory,omitempty"`
+
+ // ErrorCount records how many times the host has encountered an error since the last successful operation
+ // +kubebuilder:default:=0
+ ErrorCount int `json:"errorCount"`
+}
+
+// ProvisionStatus holds the state information for a single target.
+type ProvisionStatus struct {
+ // An indicator for what the provisioner is doing with the host.
+ State ProvisioningState `json:"state"`
+
+ // The host's ID from the underlying provisioning tool (e.g. the
+ // Ironic node UUID).
+ //nolint:tagliatelle
+ ID string `json:"ID"`
+
+ // Image holds the details of the last image successfully
+ // provisioned to the host.
+ Image Image `json:"image,omitempty"`
+
+ // The root device hints used to provision the host.
+ RootDeviceHints *RootDeviceHints `json:"rootDeviceHints,omitempty"`
+
+ // BootMode indicates the boot mode used to provision the host.
+ BootMode BootMode `json:"bootMode,omitempty"`
+
+ // The RAID configuration that has been applied.
+ RAID *RAIDConfig `json:"raid,omitempty"`
+
+ // The firmware settings that have been applied.
+ Firmware *FirmwareConfig `json:"firmware,omitempty"`
+
+ // Custom deploy procedure applied to the host. 
+ CustomDeploy *CustomDeploy `json:"customDeploy,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BareMetalHost is the Schema for the baremetalhosts API +// +k8s:openapi-gen=true +// +kubebuilder:resource:shortName=bmh;bmhost +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.operationalStatus",description="Operational status",priority=1 +// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.provisioning.state",description="Provisioning status" +// +kubebuilder:printcolumn:name="Consumer",type="string",JSONPath=".spec.consumerRef.name",description="Consumer using this host" +// +kubebuilder:printcolumn:name="BMC",type="string",JSONPath=".spec.bmc.address",description="Address of management controller",priority=1 +// +kubebuilder:printcolumn:name="Online",type="string",JSONPath=".spec.online",description="Whether the host is online or not" +// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.errorType",description="Type of the most recent error" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of BaremetalHost" +// +kubebuilder:object:root=true +type BareMetalHost struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BareMetalHostSpec `json:"spec,omitempty"` + Status BareMetalHostStatus `json:"status,omitempty"` +} + +// BootMode returns the boot method to use for the host. +func (host *BareMetalHost) BootMode() BootMode { + mode := host.Spec.BootMode + if mode == "" { + return DefaultBootMode + } + return mode +} + +// HasBMCDetails returns true if the BMC details are set. +func (host *BareMetalHost) HasBMCDetails() bool { + return host.Spec.BMC.Address != "" || host.Spec.BMC.CredentialsName != "" +} + +// NeedsHardwareProfile returns true if the profile is not set. 
+func (host *BareMetalHost) NeedsHardwareProfile() bool { + return host.Status.HardwareProfile == "" +} + +// HardwareProfile returns the hardware profile name for the host. +func (host *BareMetalHost) HardwareProfile() string { + return host.Status.HardwareProfile +} + +// SetHardwareProfile updates the hardware profile name and returns +// true when a change is made or false when no change is made. +func (host *BareMetalHost) SetHardwareProfile(name string) (dirty bool) { + if host.Status.HardwareProfile != name { + host.Status.HardwareProfile = name + dirty = true + } + return dirty +} + +// SetOperationalStatus updates the OperationalStatus field and returns +// true when a change is made or false when no change is made. +func (host *BareMetalHost) SetOperationalStatus(status OperationalStatus) bool { + if host.Status.OperationalStatus != status { + host.Status.OperationalStatus = status + return true + } + return false +} + +// OperationalStatus returns the contents of the OperationalStatus +// field. +func (host *BareMetalHost) OperationalStatus() OperationalStatus { + return host.Status.OperationalStatus +} + +// CredentialsKey returns a NamespacedName suitable for loading the +// Secret containing the credentials associated with the host. +func (host *BareMetalHost) CredentialsKey() types.NamespacedName { + return types.NamespacedName{ + Name: host.Spec.BMC.CredentialsName, + Namespace: host.ObjectMeta.Namespace, + } +} + +// NeedsHardwareInspection looks at the state of the host to determine +// if hardware inspection should be run. +func (host *BareMetalHost) NeedsHardwareInspection() bool { + if host.Spec.ExternallyProvisioned { + // Never perform inspection if we already know something is + // using the host and we didn't provision it. + return false + } + if host.WasProvisioned() { + // Never perform inspection if we have already provisioned + // this host, because we don't want to reboot it. 
+ return false + } + return host.Status.HardwareDetails == nil +} + +// NeedsProvisioning compares the settings with the provisioning +// status and returns true when more work is needed or false +// otherwise. +func (host *BareMetalHost) NeedsProvisioning() bool { + if !host.Spec.Online { + // The host is not supposed to be powered on. + return false + } + + return host.hasNewImage() || host.hasNewCustomDeploy() +} + +func (host *BareMetalHost) hasNewImage() bool { + if host.Spec.Image == nil { + // Without an image, there is nothing to provision. + return false + } + if host.Spec.Image.URL == "" { + // We have an Image struct but it is empty + return false + } + if host.Status.Provisioning.Image.URL == "" { + // We have an image set, but not provisioned. + return true + } + return false +} + +func (host *BareMetalHost) hasNewCustomDeploy() bool { + if host.Spec.CustomDeploy == nil { + return false + } + if host.Spec.CustomDeploy.Method == "" { + return false + } + if host.Status.Provisioning.CustomDeploy == nil { + return true + } + if host.Status.Provisioning.CustomDeploy.Method != host.Spec.CustomDeploy.Method { + return true + } + return false +} + +// WasProvisioned returns true when we think we have placed an image +// on the host. +func (host *BareMetalHost) WasProvisioned() bool { + if host.Spec.ExternallyProvisioned { + return false + } + if host.Status.Provisioning.Image.URL != "" { + // We have an image provisioned. + return true + } + if host.Status.Provisioning.CustomDeploy != nil { + // We have a custom deploy provisioned. + return true + } + return false +} + +// UpdateGoodCredentials modifies the GoodCredentials portion of the +// Status struct to record the details of the secret containing +// credentials known to work. 
+func (host *BareMetalHost) UpdateGoodCredentials(currentSecret corev1.Secret) { + host.Status.GoodCredentials.Version = currentSecret.ObjectMeta.ResourceVersion + host.Status.GoodCredentials.Reference = &corev1.SecretReference{ + Name: currentSecret.ObjectMeta.Name, + Namespace: currentSecret.ObjectMeta.Namespace, + } +} + +// UpdateTriedCredentials modifies the TriedCredentials portion of the +// Status struct to record the details of the secret containing +// credentials known to work. +func (host *BareMetalHost) UpdateTriedCredentials(currentSecret corev1.Secret) { + host.Status.TriedCredentials.Version = currentSecret.ObjectMeta.ResourceVersion + host.Status.TriedCredentials.Reference = &corev1.SecretReference{ + Name: currentSecret.ObjectMeta.Name, + Namespace: currentSecret.ObjectMeta.Namespace, + } +} + +// NewEvent creates a new event associated with the object and ready +// to be published to the kubernetes API. +func (host *BareMetalHost) NewEvent(reason, message string) corev1.Event { + t := metav1.Now() + return corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: reason + "-", + Namespace: host.ObjectMeta.Namespace, + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "BareMetalHost", + Namespace: host.Namespace, + Name: host.Name, + UID: host.UID, + APIVersion: GroupVersion.String(), + }, + Reason: reason, + Message: message, + Source: corev1.EventSource{ + Component: "metal3-baremetal-controller", + }, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: corev1.EventTypeNormal, + ReportingController: "metal3.io/baremetal-controller", + Related: host.Spec.ConsumerRef, + } +} + +// OperationMetricForState returns a pointer to the metric for the given +// provisioning state. 
+func (host *BareMetalHost) OperationMetricForState(operation ProvisioningState) (metric *OperationMetric) { + history := &host.Status.OperationHistory + switch operation { + case StateRegistering: + metric = &history.Register + case StateInspecting: + metric = &history.Inspect + case StateProvisioning: + metric = &history.Provision + case StateDeprovisioning: + metric = &history.Deprovision + default: + } + return +} + +var supportedChecksums = strings.Join([]string{string(AutoChecksum), string(MD5), string(SHA256), string(SHA512)}, ", ") + +// GetChecksum method returns the checksum of an image. +func (image *Image) GetChecksum() (checksum, checksumType string, err error) { + if image == nil { + return "", "", errors.New("image is not provided") + } + + if image.DiskFormat != nil && *image.DiskFormat == "live-iso" { + // Checksum is not required for live-iso + return "", "", nil + } + + // FIXME(dtantsur): Ironic supports oci:// images with an embedded checksum + if image.Checksum == "" { + // Return empty if checksum is not provided + return "", "", errors.New("checksum is required for normal images") + } + + switch image.ChecksumType { + case MD5, SHA256, SHA512: + checksumType = string(image.ChecksumType) + case "", AutoChecksum: + // No type, let Ironic detect + default: + return "", "", fmt.Errorf("unknown checksumType %s, supported are %s", image.ChecksumType, supportedChecksums) + } + + checksum = image.Checksum + return checksum, checksumType, nil +} + +// +kubebuilder:object:root=true + +// BareMetalHostList contains a list of BareMetalHost. 
+type BareMetalHostList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BareMetalHost `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BareMetalHost{}, &BareMetalHostList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/bmceventsubscription_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/bmceventsubscription_types.go new file mode 100644 index 000000000000..d01c61b328ef --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/bmceventsubscription_types.go @@ -0,0 +1,80 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + + // BMCEventSubscriptionFinalizer is the name of the finalizer added to + // subscriptions to block delete operations until the subscription is removed + // from the BMC. 
+ BMCEventSubscriptionFinalizer string = "bmceventsubscription.metal3.io" +) + +type BMCEventSubscriptionSpec struct { + // A reference to a BareMetalHost + HostName string `json:"hostName,omitempty"` + + // A webhook URL to send events to + Destination string `json:"destination,omitempty"` + + // Arbitrary user-provided context for the event + Context string `json:"context,omitempty"` + + // A secret containing HTTP headers which should be passed along to the Destination + // when making a request + HTTPHeadersRef *corev1.SecretReference `json:"httpHeadersRef,omitempty"` +} + +type BMCEventSubscriptionStatus struct { + SubscriptionID string `json:"subscriptionID,omitempty"` + Error string `json:"error,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// BMCEventSubscription is the Schema for the fast eventing API +// +k8s:openapi-gen=true +// +kubebuilder:resource:shortName=bes;bmcevent +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="Error",type="string",JSONPath=".status.error",description="The most recent error message" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of BMCEventSubscription" +// +kubebuilder:object:root=true +type BMCEventSubscription struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec BMCEventSubscriptionSpec `json:"spec,omitempty"` + Status BMCEventSubscriptionStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// BMCEventSubscriptionList contains a list of BMCEventSubscriptions. 
+type BMCEventSubscriptionList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []BMCEventSubscription `json:"items"` +} + +func init() { + SchemeBuilder.Register(&BMCEventSubscription{}, &BMCEventSubscriptionList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/dataimage_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/dataimage_types.go new file mode 100644 index 000000000000..6d0259b553e8 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/dataimage_types.go @@ -0,0 +1,79 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const DataImageFinalizer = "dataimage.metal3.io" + +// Contains the DataImage currently attached to the BMH. +type AttachedImageReference struct { + URL string `json:"url"` +} + +// Contains the count of errors and the last error message. +type DataImageError struct { + Count int `json:"count"` + Message string `json:"message"` +} + +// DataImageSpec defines the desired state of DataImage. +type DataImageSpec struct { + // Url is the address of the dataImage that we want to attach + // to a BareMetalHost + URL string `json:"url"` +} + +// DataImageStatus defines the observed state of DataImage. 
+type DataImageStatus struct { + // Time of last reconciliation + // +optional + LastReconciled *metav1.Time `json:"lastReconciled,omitempty"` + + // Currently attached DataImage + AttachedImage AttachedImageReference `json:"attachedImage,omitempty"` + + // Error count and message when attaching/detaching + Error DataImageError `json:"error,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// DataImage is the Schema for the dataimages API. +type DataImage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DataImageSpec `json:"spec,omitempty"` + Status DataImageStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// DataImageList contains a list of DataImage. +type DataImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []DataImage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&DataImage{}, &DataImageList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/doc.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/doc.go new file mode 100644 index 000000000000..b5c2caa04e81 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1alpha1 contains API Schema definitions for the metal3.io v1alpha1 API group +// +kubebuilder:object:generate=true +// +k8s:openapi-gen=true +// +groupName=metal3.io +package v1alpha1 diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/firmwareschema_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/firmwareschema_types.go new file mode 100644 index 000000000000..036e00e8eaa2 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/firmwareschema_types.go @@ -0,0 +1,183 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + "strconv" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// Additional data describing the firmware setting. +type SettingSchema struct { + + // The type of setting. + // +kubebuilder:validation:Enum=Enumeration;String;Integer;Boolean;Password + //nolint:tagliatelle + AttributeType string `json:"attribute_type,omitempty"` + + //nolint:tagliatelle + // The allowable value for an Enumeration type setting. + AllowableValues []string `json:"allowable_values,omitempty"` + + // The lowest value for an Integer type setting. + //nolint:tagliatelle + LowerBound *int `json:"lower_bound,omitempty"` + + // The highest value for an Integer type setting. 
+ //nolint:tagliatelle + UpperBound *int `json:"upper_bound,omitempty"` + + // Minimum length for a String type setting. + //nolint:tagliatelle + MinLength *int `json:"min_length,omitempty"` + + // Maximum length for a String type setting. + //nolint:tagliatelle + MaxLength *int `json:"max_length,omitempty"` + + // Whether or not this setting is read only. + //nolint:tagliatelle + ReadOnly *bool `json:"read_only,omitempty"` + + // Whether or not this setting's value is unique to this node, e.g. + // a serial number. + Unique *bool `json:"unique,omitempty"` +} + +type SchemaSettingError struct { + name string + message string +} + +func (e SchemaSettingError) Error() string { + return fmt.Sprintf("Setting %s is invalid, %s", e.name, e.message) +} + +func (schema *SettingSchema) Validate(name string, value intstr.IntOrString) error { + if schema.ReadOnly != nil && *schema.ReadOnly { + return SchemaSettingError{name: name, message: "it is ReadOnly"} + } + + if strings.Contains(name, "Password") { + return SchemaSettingError{name: name, message: "Password fields can't be set"} + } + + // Check if valid based on type + switch schema.AttributeType { + case "Enumeration": + for _, av := range schema.AllowableValues { + if value.String() == av { + return nil + } + } + return SchemaSettingError{name: name, message: "unknown enumeration value - " + value.String()} + + case "Integer": + if value.Type == intstr.String { + if _, err := strconv.Atoi(value.String()); err != nil { + return SchemaSettingError{name: name, message: fmt.Sprintf("String %s entered while integer expected", value.String())} + } + } + if schema.LowerBound != nil && value.IntValue() < *schema.LowerBound { + return SchemaSettingError{name: name, message: fmt.Sprintf("integer %d is below minimum value %d", value.IntValue(), *schema.LowerBound)} + } + if schema.UpperBound != nil && value.IntValue() > *schema.UpperBound { + return SchemaSettingError{name: name, message: fmt.Sprintf("integer %d is above maximum 
value %d", value.IntValue(), *schema.UpperBound)} + } + return nil + + case "String": + strLen := len(value.String()) + if schema.MinLength != nil && strLen < *schema.MinLength { + return SchemaSettingError{name: name, message: fmt.Sprintf("string %s length is below minimum length %d", value.String(), *schema.MinLength)} + } + if schema.MaxLength != nil && strLen > *schema.MaxLength { + return SchemaSettingError{name: name, message: fmt.Sprintf("string %s length is above maximum length %d", value.String(), *schema.MaxLength)} + } + return nil + + case "Boolean": + if value.String() == "true" || value.String() == "false" { + return nil + } + return SchemaSettingError{name: name, message: value.String() + " is not a boolean"} + + case "Password": + // Prevent sets of password types + return SchemaSettingError{name: name, message: "passwords are immutable"} + + case "": + // allow the set as BIOS registry fields may not have been available + return nil + + default: + // Unexpected attribute type + return SchemaSettingError{name: name, message: "unexpected attribute type " + schema.AttributeType} + } +} + +// FirmwareSchemaSpec defines the desired state of FirmwareSchema. +type FirmwareSchemaSpec struct { + + // The hardware vendor associated with this schema + // +optional + HardwareVendor string `json:"hardwareVendor,omitempty"` + + // The hardware model associated with this schema + // +optional + HardwareModel string `json:"hardwareModel,omitempty"` + + // Map of firmware name to schema + Schema map[string]SettingSchema `json:"schema" required:"true"` +} + +//+kubebuilder:object:root=true + +// FirmwareSchema is the Schema for the firmwareschemas API. +type FirmwareSchema struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec FirmwareSchemaSpec `json:"spec,omitempty"` +} + +// Check whether the setting's name and value is valid using the schema. 
+func (host *FirmwareSchema) ValidateSetting(name string, value intstr.IntOrString, schemas map[string]SettingSchema) error { + schema, ok := schemas[name] + if !ok { + return SchemaSettingError{name: name, message: "it is not in the associated schema"} + } + + return schema.Validate(name, value) +} + +//+kubebuilder:object:root=true + +// FirmwareSchemaList contains a list of FirmwareSchema. +type FirmwareSchemaList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []FirmwareSchema `json:"items"` +} + +func init() { + SchemeBuilder.Register(&FirmwareSchema{}, &FirmwareSchemaList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/groupversion_info.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/groupversion_info.go new file mode 100644 index 000000000000..8b39c89213b9 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/groupversion_info.go @@ -0,0 +1,35 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the metal3.io v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=metal3.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects. 
+ GroupVersion = schema.GroupVersion{Group: "metal3.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme. + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hardwaredata_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hardwaredata_types.go new file mode 100644 index 000000000000..a8cdb97b1051 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hardwaredata_types.go @@ -0,0 +1,53 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HardwareDataSpec defines the desired state of HardwareData. +type HardwareDataSpec struct { + + // The hardware discovered on the host during its inspection. + HardwareDetails *HardwareDetails `json:"hardware,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=hardwaredata,scope=Namespaced,shortName=hd +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Time duration since creation of HardwareData" + +// HardwareData is the Schema for the hardwaredata API. 
+type HardwareData struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HardwareDataSpec `json:"spec,omitempty"` +} + +//+kubebuilder:object:root=true + +// HardwareDataList contains a list of HardwareData. +type HardwareDataList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HardwareData `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HardwareData{}, &HardwareDataList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwarecomponents_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwarecomponents_types.go new file mode 100644 index 000000000000..51b406e7ee37 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwarecomponents_types.go @@ -0,0 +1,121 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FirmwareUpdate defines a firmware update specification. +type FirmwareUpdate struct { + Component string `json:"component"` + URL string `json:"url"` +} + +// FirmwareComponentStatus defines the status of a firmware component. 
+type FirmwareComponentStatus struct { + Component string `json:"component"` + InitialVersion string `json:"initialVersion"` + CurrentVersion string `json:"currentVersion,omitempty"` + LastVersionFlashed string `json:"lastVersionFlashed,omitempty"` + UpdatedAt metav1.Time `json:"updatedAt,omitempty"` +} + +type UpdatesConditionType string + +const ( + // Indicates that the updates in the Spec are different than Status. + HostFirmwareComponentsChangeDetected UpdatesConditionType = "ChangeDetected" + + // Indicates if the updates are valid and can be configured on the host. + HostFirmwareComponentsValid UpdatesConditionType = "Valid" +) + +// Firmware component constants. +const ( + // NICComponentPrefix is the prefix for NIC firmware components. + NICComponentPrefix = "nic:" +) + +// HostFirmwareComponentsSpec defines the desired state of HostFirmwareComponents. +type HostFirmwareComponentsSpec struct { + Updates []FirmwareUpdate `json:"updates"` +} + +// HostFirmwareComponentsStatus defines the observed state of HostFirmwareComponents. +type HostFirmwareComponentsStatus struct { + // Updates is the list of all firmware components that should be updated + // they are specified via name and url fields. + // +optional + Updates []FirmwareUpdate `json:"updates,omitempty"` + + // Components is the list of all available firmware components and their information. 
+ Components []FirmwareComponentStatus `json:"components,omitempty"` + + // Time that the status was last updated + // +optional + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // Track whether updates stored in the spec are valid based on the schema + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// HostFirmwareComponents is the Schema for the hostfirmwarecomponents API. +type HostFirmwareComponents struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HostFirmwareComponentsSpec `json:"spec,omitempty"` + Status HostFirmwareComponentsStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// HostFirmwareComponentsList contains a list of HostFirmwareComponents. +type HostFirmwareComponentsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HostFirmwareComponents `json:"items"` +} + +// Check whether the updates's names are valid. 
+func (host *HostFirmwareComponents) ValidateHostFirmwareComponents() error { + allowedNames := map[string]struct{}{"bmc": {}, "bios": {}} + for _, update := range host.Spec.Updates { + componentName := update.Component + if _, ok := allowedNames[componentName]; !ok && !strings.HasPrefix(componentName, NICComponentPrefix) { + return fmt.Errorf("component %s is invalid, only 'bmc', 'bios', or names starting with '%s' are allowed as update names", update.Component, NICComponentPrefix) + } + } + + return nil +} + +func init() { + SchemeBuilder.Register(&HostFirmwareComponents{}, &HostFirmwareComponentsList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwaresettings_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwaresettings_types.go new file mode 100644 index 000000000000..16f14857c77f --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostfirmwaresettings_types.go @@ -0,0 +1,101 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
+ +type SettingsMap map[string]string +type DesiredSettingsMap map[string]intstr.IntOrString + +type SchemaReference struct { + // `namespace` is the namespace of the where the schema is stored. + Namespace string `json:"namespace"` + // `name` is the reference to the schema. + Name string `json:"name"` +} + +type SettingsConditionType string + +const ( + // Indicates that the settings in the Spec are different than Status. + FirmwareSettingsChangeDetected SettingsConditionType = "ChangeDetected" + + // Indicates if the settings are valid and can be configured on the host. + FirmwareSettingsValid SettingsConditionType = "Valid" +) + +// HostFirmwareSettingsSpec defines the desired state of HostFirmwareSettings. +type HostFirmwareSettingsSpec struct { + + // Settings are the desired firmware settings stored as name/value pairs. + // +patchStrategy=merge + Settings DesiredSettingsMap `json:"settings" patchStrategy:"merge" required:"true"` +} + +// HostFirmwareSettingsStatus defines the observed state of HostFirmwareSettings. +type HostFirmwareSettingsStatus struct { + // FirmwareSchema is a reference to the Schema used to describe each + // FirmwareSetting. 
By default, this will be a Schema in the same + // Namespace as the settings but it can be overwritten in the Spec + FirmwareSchema *SchemaReference `json:"schema,omitempty"` + + // Settings are the firmware settings stored as name/value pairs + Settings SettingsMap `json:"settings" required:"true"` + + // Time that the status was last updated + // +optional + LastUpdated *metav1.Time `json:"lastUpdated,omitempty"` + + // Track whether settings stored in the spec are valid based on the schema + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:resource:shortName=hfs +//+kubebuilder:subresource:status + +// HostFirmwareSettings is the Schema for the hostfirmwaresettings API. +type HostFirmwareSettings struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HostFirmwareSettingsSpec `json:"spec,omitempty"` + Status HostFirmwareSettingsStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// HostFirmwareSettingsList contains a list of HostFirmwareSettings. 
+type HostFirmwareSettingsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HostFirmwareSettings `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HostFirmwareSettings{}, &HostFirmwareSettingsList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostupdatepolicy_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostupdatepolicy_types.go new file mode 100644 index 000000000000..7da82b4bbe79 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/hostupdatepolicy_types.go @@ -0,0 +1,67 @@ +/* +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// HostUpdatePolicy enumerates the allowed host update policies. +type UpdatePolicy string + +const ( + HostUpdatePolicyOnPreparing UpdatePolicy = "onPreparing" + HostUpdatePolicyOnReboot UpdatePolicy = "onReboot" +) + +// HostUpdatePolicySpec defines the desired state of HostUpdatePolicy. 
+type HostUpdatePolicySpec struct { + // Defines policy for changing firmware settings + // +optional + // +kubebuilder:validation:Enum="onPreparing";"onReboot" + FirmwareSettings UpdatePolicy `json:"firmwareSettings,omitempty"` + + // Defines policy for updating firmware + // +optional + // +kubebuilder:validation:Enum="onPreparing";"onReboot" + FirmwareUpdates UpdatePolicy `json:"firmwareUpdates,omitempty"` +} + +// HostUpdatePolicyStatus defines the observed state of HostUpdatePolicy. +type HostUpdatePolicyStatus struct{} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostUpdatePolicy is the Schema for the hostupdatepolicy API. +type HostUpdatePolicy struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HostUpdatePolicySpec `json:"spec,omitempty"` + Status HostUpdatePolicyStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// HostUpdatePolicyList contains a list of HostUpdatePolicy. +type HostUpdatePolicyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HostUpdatePolicy `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HostUpdatePolicy{}, &HostUpdatePolicyList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/preprovisioningimage_types.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/preprovisioningimage_types.go new file mode 100644 index 000000000000..99679187111f --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/preprovisioningimage_types.go @@ -0,0 +1,128 @@ +/* +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ImageFormat enumerates the allowed image formats +// +kubebuilder:validation:Enum=iso;initrd +type ImageFormat string + +const PreprovisioningImageFinalizer = "preprovisioningimage.metal3.io" + +const ( + ImageFormatISO ImageFormat = "iso" + ImageFormatInitRD ImageFormat = "initrd" +) + +// PreprovisioningImageSpec defines the desired state of PreprovisioningImage. +type PreprovisioningImageSpec struct { + // networkDataName is the name of a Secret in the local namespace that + // contains network data to build in to the image. + // +optional + NetworkDataName string `json:"networkDataName,omitempty"` + + // architecture is the processor architecture for which to build the image. + // +optional + Architecture string `json:"architecture,omitempty"` + + // acceptFormats is a list of acceptable image formats. + // +optional + AcceptFormats []ImageFormat `json:"acceptFormats,omitempty"` +} + +type SecretStatus struct { + Name string `json:"name,omitempty"` + Version string `json:"version,omitempty"` +} + +type ImageStatusConditionType string + +const ( + // Ready indicates that the Image is available and ready to be downloaded. + ConditionImageReady ImageStatusConditionType = "Ready" + + // Error indicates that the operator was unable to build an image. + ConditionImageError ImageStatusConditionType = "Error" +) + +// PreprovisioningImageStatus defines the observed state of PreprovisioningImage. 
+type PreprovisioningImageStatus struct { + // imageUrl is the URL from which the built image can be downloaded. + //nolint:tagliatelle + ImageUrl string `json:"imageUrl,omitempty"` //nolint:revive,stylecheck + + // kernelUrl is the URL from which the kernel of the image can be downloaded. + // Only makes sense for initrd images. + // +optional + //nolint:tagliatelle + KernelUrl string `json:"kernelUrl,omitempty"` //nolint:revive,stylecheck + + // extraKernelParams is a string with extra parameters to pass to the + // kernel when booting the image over network. Only makes sense for initrd images. + // +optional + ExtraKernelParams string `json:"extraKernelParams,omitempty"` + + // format is the type of image that is available at the download url: + // either iso or initrd. + // +optional + Format ImageFormat `json:"format,omitempty"` + + // networkData is a reference to the version of the Secret containing the + // network data used to build the image. + // +optional + NetworkData SecretStatus `json:"networkData,omitempty"` + + // architecture is the processor architecture for which the image is built + Architecture string `json:"architecture,omitempty"` + + // conditions describe the state of the built image + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:shortName=ppimg +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status",description="Whether the image is ready" +// +kubebuilder:printcolumn:name="Reason",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].reason",description="The reason for the image readiness status" +// +kubebuilder:subresource:status + +// PreprovisioningImage is the Schema for the preprovisioningimages API. 
+type PreprovisioningImage struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PreprovisioningImageSpec `json:"spec,omitempty"` + Status PreprovisioningImageStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// PreprovisioningImageList contains a list of PreprovisioningImage. +type PreprovisioningImageList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PreprovisioningImage `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PreprovisioningImage{}, &PreprovisioningImageList{}) +} diff --git a/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..cd7bf2cd93f7 --- /dev/null +++ b/vendor/github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1620 @@ +//go:build !ignore_autogenerated + +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AttachedImageReference) DeepCopyInto(out *AttachedImageReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedImageReference. +func (in *AttachedImageReference) DeepCopy() *AttachedImageReference { + if in == nil { + return nil + } + out := new(AttachedImageReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BIOS) DeepCopyInto(out *BIOS) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BIOS. +func (in *BIOS) DeepCopy() *BIOS { + if in == nil { + return nil + } + out := new(BIOS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BMCDetails) DeepCopyInto(out *BMCDetails) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BMCDetails. +func (in *BMCDetails) DeepCopy() *BMCDetails { + if in == nil { + return nil + } + out := new(BMCDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BMCEventSubscription) DeepCopyInto(out *BMCEventSubscription) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BMCEventSubscription. +func (in *BMCEventSubscription) DeepCopy() *BMCEventSubscription { + if in == nil { + return nil + } + out := new(BMCEventSubscription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *BMCEventSubscription) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BMCEventSubscriptionList) DeepCopyInto(out *BMCEventSubscriptionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BMCEventSubscription, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BMCEventSubscriptionList. +func (in *BMCEventSubscriptionList) DeepCopy() *BMCEventSubscriptionList { + if in == nil { + return nil + } + out := new(BMCEventSubscriptionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BMCEventSubscriptionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BMCEventSubscriptionSpec) DeepCopyInto(out *BMCEventSubscriptionSpec) { + *out = *in + if in.HTTPHeadersRef != nil { + in, out := &in.HTTPHeadersRef, &out.HTTPHeadersRef + *out = new(v1.SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BMCEventSubscriptionSpec. +func (in *BMCEventSubscriptionSpec) DeepCopy() *BMCEventSubscriptionSpec { + if in == nil { + return nil + } + out := new(BMCEventSubscriptionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BMCEventSubscriptionStatus) DeepCopyInto(out *BMCEventSubscriptionStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BMCEventSubscriptionStatus. +func (in *BMCEventSubscriptionStatus) DeepCopy() *BMCEventSubscriptionStatus { + if in == nil { + return nil + } + out := new(BMCEventSubscriptionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalHost) DeepCopyInto(out *BareMetalHost) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalHost. +func (in *BareMetalHost) DeepCopy() *BareMetalHost { + if in == nil { + return nil + } + out := new(BareMetalHost) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BareMetalHost) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalHostList) DeepCopyInto(out *BareMetalHostList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BareMetalHost, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalHostList. 
+func (in *BareMetalHostList) DeepCopy() *BareMetalHostList { + if in == nil { + return nil + } + out := new(BareMetalHostList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BareMetalHostList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalHostSpec) DeepCopyInto(out *BareMetalHostSpec) { + *out = *in + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.BMC = in.BMC + if in.RAID != nil { + in, out := &in.RAID, &out.RAID + *out = new(RAIDConfig) + (*in).DeepCopyInto(*out) + } + if in.Firmware != nil { + in, out := &in.Firmware, &out.Firmware + *out = new(FirmwareConfig) + (*in).DeepCopyInto(*out) + } + if in.RootDeviceHints != nil { + in, out := &in.RootDeviceHints, &out.RootDeviceHints + *out = new(RootDeviceHints) + (*in).DeepCopyInto(*out) + } + if in.ConsumerRef != nil { + in, out := &in.ConsumerRef, &out.ConsumerRef + *out = new(v1.ObjectReference) + **out = **in + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + if in.UserData != nil { + in, out := &in.UserData, &out.UserData + *out = new(v1.SecretReference) + **out = **in + } + if in.NetworkData != nil { + in, out := &in.NetworkData, &out.NetworkData + *out = new(v1.SecretReference) + **out = **in + } + if in.MetaData != nil { + in, out := &in.MetaData, &out.MetaData + *out = new(v1.SecretReference) + **out = **in + } + if in.CustomDeploy != nil { + in, out := &in.CustomDeploy, &out.CustomDeploy + *out = new(CustomDeploy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a 
new BareMetalHostSpec. +func (in *BareMetalHostSpec) DeepCopy() *BareMetalHostSpec { + if in == nil { + return nil + } + out := new(BareMetalHostSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalHostStatus) DeepCopyInto(out *BareMetalHostStatus) { + *out = *in + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.HardwareDetails != nil { + in, out := &in.HardwareDetails, &out.HardwareDetails + *out = new(HardwareDetails) + (*in).DeepCopyInto(*out) + } + in.Provisioning.DeepCopyInto(&out.Provisioning) + in.GoodCredentials.DeepCopyInto(&out.GoodCredentials) + in.TriedCredentials.DeepCopyInto(&out.TriedCredentials) + in.OperationHistory.DeepCopyInto(&out.OperationHistory) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalHostStatus. +func (in *BareMetalHostStatus) DeepCopy() *BareMetalHostStatus { + if in == nil { + return nil + } + out := new(BareMetalHostStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CPU) DeepCopyInto(out *CPU) { + *out = *in + if in.Flags != nil { + in, out := &in.Flags, &out.Flags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU. +func (in *CPU) DeepCopy() *CPU { + if in == nil { + return nil + } + out := new(CPU) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CredentialsStatus) DeepCopyInto(out *CredentialsStatus) { + *out = *in + if in.Reference != nil { + in, out := &in.Reference, &out.Reference + *out = new(v1.SecretReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CredentialsStatus. +func (in *CredentialsStatus) DeepCopy() *CredentialsStatus { + if in == nil { + return nil + } + out := new(CredentialsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDeploy) DeepCopyInto(out *CustomDeploy) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeploy. +func (in *CustomDeploy) DeepCopy() *CustomDeploy { + if in == nil { + return nil + } + out := new(CustomDeploy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImage) DeepCopyInto(out *DataImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImage. +func (in *DataImage) DeepCopy() *DataImage { + if in == nil { + return nil + } + out := new(DataImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataImageError) DeepCopyInto(out *DataImageError) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImageError. +func (in *DataImageError) DeepCopy() *DataImageError { + if in == nil { + return nil + } + out := new(DataImageError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImageList) DeepCopyInto(out *DataImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DataImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImageList. +func (in *DataImageList) DeepCopy() *DataImageList { + if in == nil { + return nil + } + out := new(DataImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DataImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataImageSpec) DeepCopyInto(out *DataImageSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImageSpec. +func (in *DataImageSpec) DeepCopy() *DataImageSpec { + if in == nil { + return nil + } + out := new(DataImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataImageStatus) DeepCopyInto(out *DataImageStatus) { + *out = *in + if in.LastReconciled != nil { + in, out := &in.LastReconciled, &out.LastReconciled + *out = (*in).DeepCopy() + } + out.AttachedImage = in.AttachedImage + out.Error = in.Error +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataImageStatus. +func (in *DataImageStatus) DeepCopy() *DataImageStatus { + if in == nil { + return nil + } + out := new(DataImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in DesiredSettingsMap) DeepCopyInto(out *DesiredSettingsMap) { + { + in := &in + *out = make(DesiredSettingsMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DesiredSettingsMap. +func (in DesiredSettingsMap) DeepCopy() DesiredSettingsMap { + if in == nil { + return nil + } + out := new(DesiredSettingsMap) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DetachedAnnotationArguments) DeepCopyInto(out *DetachedAnnotationArguments) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DetachedAnnotationArguments. +func (in *DetachedAnnotationArguments) DeepCopy() *DetachedAnnotationArguments { + if in == nil { + return nil + } + out := new(DetachedAnnotationArguments) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Firmware) DeepCopyInto(out *Firmware) { + *out = *in + out.BIOS = in.BIOS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Firmware. 
+func (in *Firmware) DeepCopy() *Firmware { + if in == nil { + return nil + } + out := new(Firmware) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirmwareComponentStatus) DeepCopyInto(out *FirmwareComponentStatus) { + *out = *in + in.UpdatedAt.DeepCopyInto(&out.UpdatedAt) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareComponentStatus. +func (in *FirmwareComponentStatus) DeepCopy() *FirmwareComponentStatus { + if in == nil { + return nil + } + out := new(FirmwareComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirmwareConfig) DeepCopyInto(out *FirmwareConfig) { + *out = *in + if in.VirtualizationEnabled != nil { + in, out := &in.VirtualizationEnabled, &out.VirtualizationEnabled + *out = new(bool) + **out = **in + } + if in.SimultaneousMultithreadingEnabled != nil { + in, out := &in.SimultaneousMultithreadingEnabled, &out.SimultaneousMultithreadingEnabled + *out = new(bool) + **out = **in + } + if in.SriovEnabled != nil { + in, out := &in.SriovEnabled, &out.SriovEnabled + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareConfig. +func (in *FirmwareConfig) DeepCopy() *FirmwareConfig { + if in == nil { + return nil + } + out := new(FirmwareConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirmwareSchema) DeepCopyInto(out *FirmwareSchema) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareSchema. +func (in *FirmwareSchema) DeepCopy() *FirmwareSchema { + if in == nil { + return nil + } + out := new(FirmwareSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FirmwareSchema) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirmwareSchemaList) DeepCopyInto(out *FirmwareSchemaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]FirmwareSchema, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareSchemaList. +func (in *FirmwareSchemaList) DeepCopy() *FirmwareSchemaList { + if in == nil { + return nil + } + out := new(FirmwareSchemaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FirmwareSchemaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FirmwareSchemaSpec) DeepCopyInto(out *FirmwareSchemaSpec) { + *out = *in + if in.Schema != nil { + in, out := &in.Schema, &out.Schema + *out = make(map[string]SettingSchema, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareSchemaSpec. +func (in *FirmwareSchemaSpec) DeepCopy() *FirmwareSchemaSpec { + if in == nil { + return nil + } + out := new(FirmwareSchemaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FirmwareUpdate) DeepCopyInto(out *FirmwareUpdate) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FirmwareUpdate. +func (in *FirmwareUpdate) DeepCopy() *FirmwareUpdate { + if in == nil { + return nil + } + out := new(FirmwareUpdate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HardwareData) DeepCopyInto(out *HardwareData) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareData. +func (in *HardwareData) DeepCopy() *HardwareData { + if in == nil { + return nil + } + out := new(HardwareData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HardwareData) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HardwareDataList) DeepCopyInto(out *HardwareDataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HardwareData, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareDataList. +func (in *HardwareDataList) DeepCopy() *HardwareDataList { + if in == nil { + return nil + } + out := new(HardwareDataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HardwareDataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HardwareDataSpec) DeepCopyInto(out *HardwareDataSpec) { + *out = *in + if in.HardwareDetails != nil { + in, out := &in.HardwareDetails, &out.HardwareDetails + *out = new(HardwareDetails) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareDataSpec. +func (in *HardwareDataSpec) DeepCopy() *HardwareDataSpec { + if in == nil { + return nil + } + out := new(HardwareDataSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HardwareDetails) DeepCopyInto(out *HardwareDetails) { + *out = *in + out.SystemVendor = in.SystemVendor + out.Firmware = in.Firmware + if in.NIC != nil { + in, out := &in.NIC, &out.NIC + *out = make([]NIC, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = make([]Storage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.CPU.DeepCopyInto(&out.CPU) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareDetails. +func (in *HardwareDetails) DeepCopy() *HardwareDetails { + if in == nil { + return nil + } + out := new(HardwareDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HardwareRAIDVolume) DeepCopyInto(out *HardwareRAIDVolume) { + *out = *in + if in.SizeGibibytes != nil { + in, out := &in.SizeGibibytes, &out.SizeGibibytes + *out = new(int) + **out = **in + } + if in.Rotational != nil { + in, out := &in.Rotational, &out.Rotational + *out = new(bool) + **out = **in + } + if in.NumberOfPhysicalDisks != nil { + in, out := &in.NumberOfPhysicalDisks, &out.NumberOfPhysicalDisks + *out = new(int) + **out = **in + } + if in.PhysicalDisks != nil { + in, out := &in.PhysicalDisks, &out.PhysicalDisks + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareRAIDVolume. +func (in *HardwareRAIDVolume) DeepCopy() *HardwareRAIDVolume { + if in == nil { + return nil + } + out := new(HardwareRAIDVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HardwareSystemVendor) DeepCopyInto(out *HardwareSystemVendor) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HardwareSystemVendor. +func (in *HardwareSystemVendor) DeepCopy() *HardwareSystemVendor { + if in == nil { + return nil + } + out := new(HardwareSystemVendor) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostFirmwareComponents) DeepCopyInto(out *HostFirmwareComponents) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareComponents. +func (in *HostFirmwareComponents) DeepCopy() *HostFirmwareComponents { + if in == nil { + return nil + } + out := new(HostFirmwareComponents) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostFirmwareComponents) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostFirmwareComponentsList) DeepCopyInto(out *HostFirmwareComponentsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HostFirmwareComponents, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareComponentsList. 
+func (in *HostFirmwareComponentsList) DeepCopy() *HostFirmwareComponentsList { + if in == nil { + return nil + } + out := new(HostFirmwareComponentsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostFirmwareComponentsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostFirmwareComponentsSpec) DeepCopyInto(out *HostFirmwareComponentsSpec) { + *out = *in + if in.Updates != nil { + in, out := &in.Updates, &out.Updates + *out = make([]FirmwareUpdate, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareComponentsSpec. +func (in *HostFirmwareComponentsSpec) DeepCopy() *HostFirmwareComponentsSpec { + if in == nil { + return nil + } + out := new(HostFirmwareComponentsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostFirmwareComponentsStatus) DeepCopyInto(out *HostFirmwareComponentsStatus) { + *out = *in + if in.Updates != nil { + in, out := &in.Updates, &out.Updates + *out = make([]FirmwareUpdate, len(*in)) + copy(*out, *in) + } + if in.Components != nil { + in, out := &in.Components, &out.Components + *out = make([]FirmwareComponentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareComponentsStatus. +func (in *HostFirmwareComponentsStatus) DeepCopy() *HostFirmwareComponentsStatus { + if in == nil { + return nil + } + out := new(HostFirmwareComponentsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostFirmwareSettings) DeepCopyInto(out *HostFirmwareSettings) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareSettings. +func (in *HostFirmwareSettings) DeepCopy() *HostFirmwareSettings { + if in == nil { + return nil + } + out := new(HostFirmwareSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *HostFirmwareSettings) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostFirmwareSettingsList) DeepCopyInto(out *HostFirmwareSettingsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HostFirmwareSettings, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareSettingsList. +func (in *HostFirmwareSettingsList) DeepCopy() *HostFirmwareSettingsList { + if in == nil { + return nil + } + out := new(HostFirmwareSettingsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostFirmwareSettingsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostFirmwareSettingsSpec) DeepCopyInto(out *HostFirmwareSettingsSpec) { + *out = *in + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(DesiredSettingsMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareSettingsSpec. +func (in *HostFirmwareSettingsSpec) DeepCopy() *HostFirmwareSettingsSpec { + if in == nil { + return nil + } + out := new(HostFirmwareSettingsSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostFirmwareSettingsStatus) DeepCopyInto(out *HostFirmwareSettingsStatus) { + *out = *in + if in.FirmwareSchema != nil { + in, out := &in.FirmwareSchema, &out.FirmwareSchema + *out = new(SchemaReference) + **out = **in + } + if in.Settings != nil { + in, out := &in.Settings, &out.Settings + *out = make(SettingsMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostFirmwareSettingsStatus. +func (in *HostFirmwareSettingsStatus) DeepCopy() *HostFirmwareSettingsStatus { + if in == nil { + return nil + } + out := new(HostFirmwareSettingsStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostUpdatePolicy) DeepCopyInto(out *HostUpdatePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostUpdatePolicy. +func (in *HostUpdatePolicy) DeepCopy() *HostUpdatePolicy { + if in == nil { + return nil + } + out := new(HostUpdatePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostUpdatePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HostUpdatePolicyList) DeepCopyInto(out *HostUpdatePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HostUpdatePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostUpdatePolicyList. +func (in *HostUpdatePolicyList) DeepCopy() *HostUpdatePolicyList { + if in == nil { + return nil + } + out := new(HostUpdatePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostUpdatePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostUpdatePolicySpec) DeepCopyInto(out *HostUpdatePolicySpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostUpdatePolicySpec. +func (in *HostUpdatePolicySpec) DeepCopy() *HostUpdatePolicySpec { + if in == nil { + return nil + } + out := new(HostUpdatePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostUpdatePolicyStatus) DeepCopyInto(out *HostUpdatePolicyStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostUpdatePolicyStatus. +func (in *HostUpdatePolicyStatus) DeepCopy() *HostUpdatePolicyStatus { + if in == nil { + return nil + } + out := new(HostUpdatePolicyStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Image) DeepCopyInto(out *Image) { + *out = *in + if in.DiskFormat != nil { + in, out := &in.DiskFormat, &out.DiskFormat + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NIC) DeepCopyInto(out *NIC) { + *out = *in + if in.VLANs != nil { + in, out := &in.VLANs, &out.VLANs + *out = make([]VLAN, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NIC. +func (in *NIC) DeepCopy() *NIC { + if in == nil { + return nil + } + out := new(NIC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperationHistory) DeepCopyInto(out *OperationHistory) { + *out = *in + in.Register.DeepCopyInto(&out.Register) + in.Inspect.DeepCopyInto(&out.Inspect) + in.Provision.DeepCopyInto(&out.Provision) + in.Deprovision.DeepCopyInto(&out.Deprovision) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationHistory. +func (in *OperationHistory) DeepCopy() *OperationHistory { + if in == nil { + return nil + } + out := new(OperationHistory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperationMetric) DeepCopyInto(out *OperationMetric) { + *out = *in + in.Start.DeepCopyInto(&out.Start) + in.End.DeepCopyInto(&out.End) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperationMetric. 
+func (in *OperationMetric) DeepCopy() *OperationMetric { + if in == nil { + return nil + } + out := new(OperationMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreprovisioningImage) DeepCopyInto(out *PreprovisioningImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreprovisioningImage. +func (in *PreprovisioningImage) DeepCopy() *PreprovisioningImage { + if in == nil { + return nil + } + out := new(PreprovisioningImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PreprovisioningImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreprovisioningImageList) DeepCopyInto(out *PreprovisioningImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PreprovisioningImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreprovisioningImageList. +func (in *PreprovisioningImageList) DeepCopy() *PreprovisioningImageList { + if in == nil { + return nil + } + out := new(PreprovisioningImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PreprovisioningImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreprovisioningImageSpec) DeepCopyInto(out *PreprovisioningImageSpec) { + *out = *in + if in.AcceptFormats != nil { + in, out := &in.AcceptFormats, &out.AcceptFormats + *out = make([]ImageFormat, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreprovisioningImageSpec. +func (in *PreprovisioningImageSpec) DeepCopy() *PreprovisioningImageSpec { + if in == nil { + return nil + } + out := new(PreprovisioningImageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreprovisioningImageStatus) DeepCopyInto(out *PreprovisioningImageStatus) { + *out = *in + out.NetworkData = in.NetworkData + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreprovisioningImageStatus. +func (in *PreprovisioningImageStatus) DeepCopy() *PreprovisioningImageStatus { + if in == nil { + return nil + } + out := new(PreprovisioningImageStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProvisionStatus) DeepCopyInto(out *ProvisionStatus) { + *out = *in + in.Image.DeepCopyInto(&out.Image) + if in.RootDeviceHints != nil { + in, out := &in.RootDeviceHints, &out.RootDeviceHints + *out = new(RootDeviceHints) + (*in).DeepCopyInto(*out) + } + if in.RAID != nil { + in, out := &in.RAID, &out.RAID + *out = new(RAIDConfig) + (*in).DeepCopyInto(*out) + } + if in.Firmware != nil { + in, out := &in.Firmware, &out.Firmware + *out = new(FirmwareConfig) + (*in).DeepCopyInto(*out) + } + if in.CustomDeploy != nil { + in, out := &in.CustomDeploy, &out.CustomDeploy + *out = new(CustomDeploy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProvisionStatus. +func (in *ProvisionStatus) DeepCopy() *ProvisionStatus { + if in == nil { + return nil + } + out := new(ProvisionStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RAIDConfig) DeepCopyInto(out *RAIDConfig) { + *out = *in + if in.HardwareRAIDVolumes != nil { + in, out := &in.HardwareRAIDVolumes, &out.HardwareRAIDVolumes + *out = make([]HardwareRAIDVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SoftwareRAIDVolumes != nil { + in, out := &in.SoftwareRAIDVolumes, &out.SoftwareRAIDVolumes + *out = make([]SoftwareRAIDVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RAIDConfig. +func (in *RAIDConfig) DeepCopy() *RAIDConfig { + if in == nil { + return nil + } + out := new(RAIDConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RebootAnnotationArguments) DeepCopyInto(out *RebootAnnotationArguments) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RebootAnnotationArguments. +func (in *RebootAnnotationArguments) DeepCopy() *RebootAnnotationArguments { + if in == nil { + return nil + } + out := new(RebootAnnotationArguments) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootDeviceHints) DeepCopyInto(out *RootDeviceHints) { + *out = *in + if in.Rotational != nil { + in, out := &in.Rotational, &out.Rotational + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootDeviceHints. +func (in *RootDeviceHints) DeepCopy() *RootDeviceHints { + if in == nil { + return nil + } + out := new(RootDeviceHints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaReference) DeepCopyInto(out *SchemaReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaReference. +func (in *SchemaReference) DeepCopy() *SchemaReference { + if in == nil { + return nil + } + out := new(SchemaReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SchemaSettingError) DeepCopyInto(out *SchemaSettingError) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SchemaSettingError. 
+func (in *SchemaSettingError) DeepCopy() *SchemaSettingError { + if in == nil { + return nil + } + out := new(SchemaSettingError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretStatus) DeepCopyInto(out *SecretStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretStatus. +func (in *SecretStatus) DeepCopy() *SecretStatus { + if in == nil { + return nil + } + out := new(SecretStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SettingSchema) DeepCopyInto(out *SettingSchema) { + *out = *in + if in.AllowableValues != nil { + in, out := &in.AllowableValues, &out.AllowableValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LowerBound != nil { + in, out := &in.LowerBound, &out.LowerBound + *out = new(int) + **out = **in + } + if in.UpperBound != nil { + in, out := &in.UpperBound, &out.UpperBound + *out = new(int) + **out = **in + } + if in.MinLength != nil { + in, out := &in.MinLength, &out.MinLength + *out = new(int) + **out = **in + } + if in.MaxLength != nil { + in, out := &in.MaxLength, &out.MaxLength + *out = new(int) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Unique != nil { + in, out := &in.Unique, &out.Unique + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingSchema. +func (in *SettingSchema) DeepCopy() *SettingSchema { + if in == nil { + return nil + } + out := new(SettingSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in SettingsMap) DeepCopyInto(out *SettingsMap) { + { + in := &in + *out = make(SettingsMap, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SettingsMap. +func (in SettingsMap) DeepCopy() SettingsMap { + if in == nil { + return nil + } + out := new(SettingsMap) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SoftwareRAIDVolume) DeepCopyInto(out *SoftwareRAIDVolume) { + *out = *in + if in.SizeGibibytes != nil { + in, out := &in.SizeGibibytes, &out.SizeGibibytes + *out = new(int) + **out = **in + } + if in.PhysicalDisks != nil { + in, out := &in.PhysicalDisks, &out.PhysicalDisks + *out = make([]RootDeviceHints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SoftwareRAIDVolume. +func (in *SoftwareRAIDVolume) DeepCopy() *SoftwareRAIDVolume { + if in == nil { + return nil + } + out := new(SoftwareRAIDVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + if in.AlternateNames != nil { + in, out := &in.AlternateNames, &out.AlternateNames + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VLAN) DeepCopyInto(out *VLAN) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VLAN. +func (in *VLAN) DeepCopy() *VLAN { + if in == nil { + return nil + } + out := new(VLAN) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 7e19eba0904d..ffb24e8e3130 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...) } // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...) 
} // Less asserts that the first element is less than the second @@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...) } // Positive asserts that the specified element is positive @@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + failMessage := fmt.Sprintf("\"%v\" is not positive", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...) } // Negative asserts that the specified element is negative @@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) 
+ failMessage := fmt.Sprintf("\"%v\" is not negative", e) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...) } func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { @@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare compareResult, isComparable := compare(e1, e2, e1Kind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { - return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) + return Fail(t, failMessage, msgAndArgs...) } return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 190634165774..c592f6ad5fb6 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". 
// // assert.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// assert.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) } +// IsNotTypef asserts that the specified objects are not of the same type. +// +// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...) +} + // IsTypef asserts that the specified objects are of the same type. +// +// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. 
+// NotEmptyf asserts that the specified object is NOT [Empty]. // // if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg return Same(t, expected, actual, append([]interface{}{msg}, args...)...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index 21629087baf7..58db928450dc 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st return ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { return Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. 
+// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in return IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. 
+// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotType(a.t, theType, object, msgAndArgs...) +} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg return NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo return NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri return NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs return NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, return Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... return Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 1d2f71824aa9..2fdf80fdd308 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) if !isComparable { - return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) + return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...) } if !containsValue(allowedComparesResults, compareResult) { diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 4e91332bb51c..de8de0cb6c43 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/ // of each stack frame leading from the current test to the assert call that // failed. 
func CallerInfo() []string { - var pc uintptr - var ok bool var file string var line int var name string + const stackFrameBufferSize = 10 + pcs := make([]uintptr, stackFrameBufferSize) + callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } + offset := 1 - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } + for { + n := runtime.Callers(offset, pcs) - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. - if name == "testing.tRunner" { + if n == 0 { break } - parts := strings.Split(file, "/") - if len(parts) > 1 { - filename := parts[len(parts)-1] - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + frames := runtime.CallersFrames(pcs[:n]) + + for { + frame, more := frames.Next() + pc = frame.PC + file = frame.File + line = frame.Line + + // This is a huge edge case, but it will panic if this is the case, see #180 + if file == "" { + break } - } - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break + f := runtime.FuncForPC(pc) + if f == nil { + break + } + name = f.Name() + + // testing.tRunner is the standard library function that calls + // tests. 
Subtests are called directly by tRunner, without going through + // the Test/Benchmark/Example function that contains the t.Run calls, so + // with subtests we should break when we hit tRunner, without adding it + // to the list of callers. + if name == "testing.tRunner" { + break + } + + parts := strings.Split(file, "/") + if len(parts) > 1 { + filename := parts[len(parts)-1] + dir := parts[len(parts)-2] + if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { + callers = append(callers, fmt.Sprintf("%s:%d", file, line)) + } + } + + // Drop the package + dotPos := strings.LastIndexByte(name, '.') + name = name[dotPos+1:] + if isTest(name, "Test") || + isTest(name, "Benchmark") || + isTest(name, "Example") { + break + } + + if !more { + break + } } + + // Next batch + offset += cap(pcs) } return callers @@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, return true } +func isType(expectedType, object interface{}) bool { + return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) +} + // IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { +// +// assert.IsType(t, &MyStruct{}, &MyStruct{}) +func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool { + if isType(expectedType, object) { + return true + } if h, ok := t.(tHelper); ok { h.Helper() } + return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...) +} - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) +// IsNotType asserts that the specified objects are not of the same type. 
+// +// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool { + if !isType(theType, object) { + return true } - - return true + if h, ok := t.(tHelper); ok { + h.Helper() + } + return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...) } // Equal asserts that two objects are equal. @@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) } return true - } // validateEqualArgs checks whether provided arguments can be safely used in the @@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b if !same { // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) + "expected: %p %#[1]v\n"+ + "actual : %p %#[2]v", + expected, actual), msgAndArgs...) } return true @@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} same, ok := samePointers(expected, actual) if !ok { - //fails when the arguments are not pointers + // fails when the arguments are not pointers return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) } if same { return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) + "Expected and actual point to the same object: %p %#[1]v", + expected), msgAndArgs...) 
} return true } @@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false, false //not both are pointers + return false, false // not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) @@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa } return true - } // EqualExportedValues asserts that the types of two objects are equal and their public @@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} } return Equal(t, expected, actual, msgAndArgs...) - } // NotNil asserts that the specified object is not nil. @@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { // isEmpty gets whether the specified object is considered empty or not. func isEmpty(object interface{}) bool { - // get nil case out of the way if object == nil { return true } - objValue := reflect.ValueOf(object) + return isEmptyValue(reflect.ValueOf(object)) +} +// isEmptyValue gets whether the specified reflect.Value is considered empty or not. +func isEmptyValue(objValue reflect.Value) bool { + if objValue.IsZero() { + return true + } + // Special cases of non-zero values that we consider empty switch objValue.Kind() { // collection types are empty when they have no element + // Note: array types are empty when they match their zero-initialized state. 
case reflect.Chan, reflect.Map, reflect.Slice: return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty + // non-nil pointers are empty if the value they point to is empty case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - // array types are empty when they match their zero-initialized state - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) + return isEmptyValue(objValue.Elem()) } + return false } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // assert.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { pass := isEmpty(object) if !pass { @@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if assert.NotEmpty(t, obj) { // assert.Equal(t, "two", obj[1]) @@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { } return pass - } // getLen tries to get the length of an object. 
@@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // False asserts that the specified value is false. @@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { } return true - } // NotEqual asserts that the specified values are NOT equal. @@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{ } return true - } // NotEqualValues asserts that two objects are not equal even when converted to the same type @@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte // return (true, false) if element was not found. // return (true, true) if element was found. func containsElement(list interface{}, element interface{}) (ok, found bool) { - listValue := reflect.ValueOf(list) listType := reflect.TypeOf(list) if listType == nil { @@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) { } } return true, false - } // Contains asserts that the specified string, list(array, slice...) or map contains the @@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo } return true - } // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the @@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) } return true - } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// assert.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) @@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok return true } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetKind := reflect.TypeOf(subset).Kind() - if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { + if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map { return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) } @@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) } subsetList := reflect.ValueOf(subset) + if subsetKind == reflect.Map { + keys := make([]interface{}, subsetList.Len()) + for idx, key := range subsetList.MapKeys() { + keys[idx] = key.Interface() + } + subsetList = reflect.ValueOf(keys) + } for i := 0; i < subsetList.Len(); i++ { element := subsetList.Index(i).Interface() ok, found := containsElement(list, element) if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) + return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...) } if !found { return true @@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { // Error asserts that a function returned an error (i.e. not `nil`). 
// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// assert.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { if err == nil { if h, ok := t.(tHelper); ok { @@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool { default: return r.MatchString(fmt.Sprint(v)) } - } // Regexp asserts that a specified regexp matches a string. @@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf } return !match - } // Zero asserts that i is the zero value for its type. @@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) } @@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) } + // Shortcut if same bytes + if actual == expected { + return true + } + if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) 
} @@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return true } - tick = ticker.C + tickC = ticker.C } } } @@ -1964,6 +2028,9 @@ type CollectT struct { errors []error } +// Helper is like [testing.T.Helper] but does nothing. +func (CollectT) Helper() {} + // Errorf collects the error. func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) @@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time var lastFinishedTickErrs []error ch := make(chan *CollectT, 1) + checkCond := func() { + collect := new(CollectT) + defer func() { + ch <- collect + }() + condition(collect) + } + timer := time.NewTimer(waitFor) defer timer.Stop() ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: for _, err := range lastFinishedTickErrs { t.Errorf("%v", err) } return Fail(t, "Condition never satisfied", msgAndArgs...) 
- case <-tick: - tick = nil - go func() { - collect := new(CollectT) - defer func() { - ch <- collect - }() - condition(collect) - }() + case <-tickC: + tickC = nil + go checkCond() case collect := <-ch: if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. lastFinishedTickErrs = collect.errors - tick = ticker.C + tickC = ticker.C } } } @@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D } ch := make(chan bool, 1) + checkCond := func() { ch <- condition() } timer := time.NewTimer(waitFor) defer timer.Stop() @@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D ticker := time.NewTicker(tick) defer ticker.Stop() - for tick := ticker.C; ; { + var tickC <-chan time.Time + + // Check the condition once first on the initial call. + go checkCond() + + for { select { case <-timer.C: return true - case <-tick: - tick = nil - go func() { ch <- condition() }() + case <-tickC: + tickC = nil + go checkCond() case v := <-ch: if v { return Fail(t, "Condition satisfied", msgAndArgs...) } - tick = ticker.C + tickC = ticker.C } } } @@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { var expectedText string if target != nil { expectedText = target.Error() + if err == nil { + return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...) 
+ } } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ "expected: %q\n"+ @@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { expectedText = target.Error() } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, false) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ "found: %q\n"+ @@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ return true } - chain := buildErrorChainString(err) + expectedType := reflect.TypeOf(target).Elem().String() + if err == nil { + return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+ + "expected: %s", expectedType), msgAndArgs...) + } + + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ - "expected: %q\n"+ - "in chain: %s", target, chain, + "expected: %s\n"+ + "in chain: %s", expectedType, chain, ), msgAndArgs...) } @@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa return true } - chain := buildErrorChainString(err) + chain := buildErrorChainString(err, true) return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ - "found: %q\n"+ - "in chain: %s", target, chain, + "found: %s\n"+ + "in chain: %s", reflect.TypeOf(target).Elem().String(), chain, ), msgAndArgs...) } -func buildErrorChainString(err error) string { +func unwrapAll(err error) (errs []error) { + errs = append(errs, err) + switch x := err.(type) { + case interface{ Unwrap() error }: + err = x.Unwrap() + if err == nil { + return + } + errs = append(errs, unwrapAll(err)...) + case interface{ Unwrap() []error }: + for _, err := range x.Unwrap() { + errs = append(errs, unwrapAll(err)...) 
+ } + } + return +} + +func buildErrorChainString(err error, withType bool) string { if err == nil { return "" } - e := errors.Unwrap(err) - chain := fmt.Sprintf("%q", err.Error()) - for e != nil { - chain += fmt.Sprintf("\n\t%q", e.Error()) - e = errors.Unwrap(e) + var chain string + errs := unwrapAll(err) + for i := range errs { + if i != 0 { + chain += "\n\t" + } + chain += fmt.Sprintf("%q", errs[i].Error()) + if withType { + chain += fmt.Sprintf(" (%T)", errs[i]) + } } return chain } diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go index 4953981d3878..a0b953aa5cf6 100644 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -1,5 +1,9 @@ // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // +// # Note +// +// All functions in this package return a bool value indicating whether the assertion has passed. +// // # Example Usage // // The following is a complete example using assert in a standard test function: diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 861ed4b7ced0..5a6bb75f2cfa 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, contains := strings.Contains(body, fmt.Sprint(str)) if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) 
} return contains @@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin contains := strings.Contains(body, fmt.Sprint(str)) if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) + Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...) } return !contains diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go index baa0cc7d7fca..5a74c4f4d5be 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -1,5 +1,4 @@ //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default -// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default // Package yaml is an implementation of YAML functions that calls a pluggable implementation. // diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go index b83c6cf64c2a..0bae80e34a30 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -1,5 +1,4 @@ //go:build !testify_yaml_fail && !testify_yaml_custom -// +build !testify_yaml_fail,!testify_yaml_custom // Package yaml is just an indirection to handle YAML deserialization. 
// diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go index e78f7dfe69a1..8041803fd2f1 100644 --- a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -1,5 +1,4 @@ //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default -// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default // Package yaml is an implementation of YAML functions that always fail. // diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go index eb5682df9789..efc89deff38a 100644 --- a/vendor/github.com/stretchr/testify/mock/mock.go +++ b/vendor/github.com/stretchr/testify/mock/mock.go @@ -208,9 +208,16 @@ func (c *Call) On(methodName string, arguments ...interface{}) *Call { return c.Parent.On(methodName, arguments...) } -// Unset removes a mock handler from being called. +// Unset removes all mock handlers that satisfy the call instance arguments from being +// called. Only supported on call instances with static input arguments. // -// test.On("func", mock.Anything).Unset() +// For example, the only handler remaining after the following would be "MyMethod(2, 2)": +// +// Mock. +// On("MyMethod", 2, 2).Return(0). +// On("MyMethod", 3, 3).Return(0). +// On("MyMethod", Anything, Anything).Return(0) +// Mock.On("MyMethod", 3, 3).Unset() func (c *Call) Unset() *Call { var unlockOnce sync.Once @@ -331,7 +338,10 @@ func (m *Mock) TestData() objx.Map { Setting expectations */ -// Test sets the test struct variable of the mock object +// Test sets the [TestingT] on which errors will be reported, otherwise errors +// will cause a panic. 
+// Test should not be called on an object that is going to be used in a +// goroutine other than the one running the test function. func (m *Mock) Test(t TestingT) { m.mutex.Lock() defer m.mutex.Unlock() @@ -494,7 +504,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen // expected call found, but it has already been called with repeatable times if call != nil { m.mutex.Unlock() - m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(\"%s\").Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + m.fail("\nassert: mock: The method has been called over %d times.\n\tEither do one more Mock.On(%#v).Return(...), or remove extra call.\n\tThis call was unexpected:\n\t\t%s\n\tat: %s", call.totalCalls, methodName, callString(methodName, arguments, true), assert.CallerInfo()) } // we have to fail here - because we don't know what to do // as the return arguments. This is because: @@ -514,7 +524,7 @@ func (m *Mock) MethodCalled(methodName string, arguments ...interface{}) Argumen assert.CallerInfo(), ) } else { - m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) + m.fail("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(%#v).Return(...) 
first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", methodName, methodName, callString(methodName, arguments, true), assert.CallerInfo()) } } @@ -661,7 +671,7 @@ func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls actualCalls++ } } - return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) of method %s does not match the actual number of calls (%d).", expectedCalls, methodName, actualCalls)) } // AssertCalled asserts that the method was called. diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index 968434724559..c8e3f94a8034 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -23,6 +23,8 @@ // // The `require` package have same global functions as in the `assert` package, // but instead of returning a boolean result they call `t.FailNow()`. +// A consequence of this is that it must be called from the goroutine running +// the test function, not from other goroutines created during the test. // // Every assertion function also takes an optional string message as the final argument, // allowing custom error messages to be appended to the message the assertion method outputs. 
diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index d8921950d7be..2d02f9bcef1b 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string t.FailNow() } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // require.Empty(t, obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". 
// // require.Emptyf(t, obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if require.Error(t, err) { -// require.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// require.Error(t, err) func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if require.Errorf(t, err, "error message %s", "formatted") { -// require.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// require.Errorf(t, err, "error message %s", "formatted") func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf t.FailNow() } +// IsNotType asserts that the specified objects are not of the same type. +// +// require.IsNotType(t, &NotMyStruct{}, &MyStruct{}) +func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotType(t, theType, object, msgAndArgs...) { + return + } + t.FailNow() +} + +// IsNotTypef asserts that the specified objects are not of the same type. 
+// +// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.IsNotTypef(t, theType, object, msg, args...) { + return + } + t.FailNow() +} + // IsType asserts that the specified objects are of the same type. +// +// require.IsType(t, &MyStruct{}, &MyStruct{}) func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs } // IsTypef asserts that the specified objects are of the same type. +// +// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted") func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str t.FailNow() } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if require.NotEmpty(t, obj) { // require.Equal(t, "two", obj[1]) @@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { t.FailNow() } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. // // if require.NotEmptyf(t, obj, "error message %s", "formatted") { // require.Equal(t, "two", obj[1]) @@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotSubset asserts that the specified list(array, slice...) 
or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"}) +// require.NotSubset(t, {"x": 1, "y": 2}, ["z"]) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i t.FailNow() } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg t.FailNow() } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. 
+// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"}) +// require.Subset(t, {"x": 1, "y": 2}, ["x"]) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte t.FailNow() } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index 1bd87304f431..e6f7e944684e 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st ElementsMatchf(a.t, listA, listB, msg, args...) } -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Empty asserts that the given value is "empty". +// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Empty(obj) +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { Empty(a.t, object, msgAndArgs...) } -// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. +// Emptyf asserts that the given value is "empty". 
+// +// [Zero values] are "empty". +// +// Arrays are "empty" if every element is the zero value of the type (stricter than "empty"). +// +// Slices, maps and channels with zero length are "empty". +// +// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty". // // a.Emptyf(obj, "error message %s", "formatted") +// +// [Zero values]: https://go.dev/ref/spec#The_zero_value func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string // Error asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Error(err) { -// assert.Equal(t, expectedError, err) -// } +// actualObj, err := SomeFunction() +// a.Error(err) func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter // Errorf asserts that a function returned an error (i.e. not `nil`). // -// actualObj, err := SomeFunction() -// if a.Errorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) -// } +// actualObj, err := SomeFunction() +// a.Errorf(err, "error message %s", "formatted") func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in IsNonIncreasingf(a.t, object, msg, args...) } +// IsNotType asserts that the specified objects are not of the same type. +// +// a.IsNotType(&NotMyStruct{}, &MyStruct{}) +func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotType(a.t, theType, object, msgAndArgs...) 
+} + +// IsNotTypef asserts that the specified objects are not of the same type. +// +// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted") +func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + IsNotTypef(a.t, theType, object, msg, args...) +} + // IsType asserts that the specified objects are of the same type. +// +// a.IsType(&MyStruct{}, &MyStruct{}) func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd } // IsTypef asserts that the specified objects are of the same type. +// +// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted") func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg NotElementsMatchf(a.t, listA, listB, msg, args...) } -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmpty asserts that the specified object is NOT [Empty]. // // if a.NotEmpty(obj) { // assert.Equal(t, "two", obj[1]) @@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { NotEmpty(a.t, object, msgAndArgs...) } -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. +// NotEmptyf asserts that the specified object is NOT [Empty]. 
// // if a.NotEmptyf(obj, "error message %s", "formatted") { // assert.Equal(t, "two", obj[1]) @@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri NotSamef(a.t, expected, actual, msg, args...) } -// NotSubset asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubset asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset({"x": 1, "y": 2}, {"z": 3}) +// a.NotSubset([1, 3, 4], {1: "one", 2: "two"}) +// a.NotSubset({"x": 1, "y": 2}, ["z"]) func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs NotSubset(a.t, list, subset, msgAndArgs...) } -// NotSubsetf asserts that the specified list(array, slice...) or map does NOT -// contain all elements given in the specified subset list(array, slice...) or -// map. +// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all +// elements given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted") func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, Samef(a.t, expected, actual, msg, args...) } -// Subset asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subset asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. // // a.Subset([1, 2, 3], [1, 2]) // a.Subset({"x": 1, "y": 2}, {"x": 1}) +// a.Subset([1, 2, 3], {1: "one", 2: "two"}) +// a.Subset({"x": 1, "y": 2}, ["x"]) func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ... Subset(a.t, list, subset, msgAndArgs...) } -// Subsetf asserts that the specified list(array, slice...) or map contains all -// elements given in the specified subset list(array, slice...) or map. +// Subsetf asserts that the list (array, slice, or map) contains all elements +// given in the subset (array, slice, or map). +// Map elements are key-value pairs unless compared with an array or slice where +// only the map key is evaluated. 
// // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted") +// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted") func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() diff --git a/vendor/modules.txt b/vendor/modules.txt index cbaf12703731..08213f08905f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1167,6 +1167,9 @@ github.com/mattn/go-isatty # github.com/mattn/go-sqlite3 v1.14.30 ## explicit; go 1.19 github.com/mattn/go-sqlite3 +# github.com/metal3-io/baremetal-operator/apis v0.11.0 +## explicit; go 1.24.0 +github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1 # github.com/metallb/frr-k8s v0.0.15 ## explicit; go 1.22.0 github.com/metallb/frr-k8s/api/v1beta1 @@ -1893,7 +1896,7 @@ github.com/stoewer/go-strcase # github.com/stretchr/objx v0.5.2 ## explicit; go 1.20 github.com/stretchr/objx -# github.com/stretchr/testify v1.10.0 +# github.com/stretchr/testify v1.11.1 ## explicit; go 1.17 github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml @@ -2670,7 +2673,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250906192346-6efb6a95323f +# k8s.io/api v0.33.5 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -2782,7 +2785,7 @@ 
k8s.io/apiextensions-apiserver/pkg/registry/customresource/tableconvertor k8s.io/apiextensions-apiserver/pkg/registry/customresourcedefinition k8s.io/apiextensions-apiserver/test/integration k8s.io/apiextensions-apiserver/test/integration/fixtures -# k8s.io/apimachinery v0.33.4 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250906192346-6efb6a95323f +# k8s.io/apimachinery v0.33.5 => github.com/openshift/kubernetes/staging/src/k8s.io/apimachinery v0.0.0-20250906192346-6efb6a95323f ## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/apitesting k8s.io/apimachinery/pkg/api/equality @@ -4668,8 +4671,8 @@ sigs.k8s.io/cloud-provider-azure/pkg/azclient/virtualnetworklinkclient # sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader v0.0.16 ## explicit; go 1.22.0 sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader -# sigs.k8s.io/controller-runtime v0.19.0 -## explicit; go 1.22.0 +# sigs.k8s.io/controller-runtime v0.21.0 +## explicit; go 1.24.0 sigs.k8s.io/controller-runtime/pkg/conversion sigs.k8s.io/controller-runtime/pkg/scheme # sigs.k8s.io/gateway-api v1.2.1