diff --git a/deploy/addons/assets.go b/deploy/addons/assets.go
index 19b3221b8bed..c4dfc661726f 100644
--- a/deploy/addons/assets.go
+++ b/deploy/addons/assets.go
@@ -44,6 +44,10 @@ var (
 	//go:embed storage-provisioner-gluster/*.tmpl
 	StorageProvisionerGlusterAssets embed.FS
 
+	// StorageProvisionerRancherAssets assets for storage-provisioner-rancher addon
+	//go:embed storage-provisioner-rancher/*.tmpl
+	StorageProvisionerRancherAssets embed.FS
+
 	// EfkAssets assets for efk addon
 	//go:embed efk/*.tmpl
 	EfkAssets embed.FS
diff --git a/deploy/addons/storage-provisioner-rancher/storage-provisioner-rancher.yaml.tmpl b/deploy/addons/storage-provisioner-rancher/storage-provisioner-rancher.yaml.tmpl
new file mode 100644
index 000000000000..e19a18092043
--- /dev/null
+++ b/deploy/addons/storage-provisioner-rancher/storage-provisioner-rancher.yaml.tmpl
@@ -0,0 +1,131 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: local-path-storage
+
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: local-path-provisioner-service-account
+  namespace: local-path-storage
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: local-path-provisioner-role
+rules:
+  - apiGroups: [ "" ]
+    resources: [ "nodes", "persistentvolumeclaims", "configmaps" ]
+    verbs: [ "get", "list", "watch" ]
+  - apiGroups: [ "" ]
+    resources: [ "endpoints", "persistentvolumes", "pods" ]
+    verbs: [ "*" ]
+  - apiGroups: [ "" ]
+    resources: [ "events" ]
+    verbs: [ "create", "patch" ]
+  - apiGroups: [ "storage.k8s.io" ]
+    resources: [ "storageclasses" ]
+    verbs: [ "get", "list", "watch" ]
+
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: local-path-provisioner-bind
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: local-path-provisioner-role
+subjects:
+  - kind: ServiceAccount
+    name: local-path-provisioner-service-account
+    namespace: local-path-storage
+
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: local-path-provisioner
+  namespace: local-path-storage
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: local-path-provisioner
+  template:
+    metadata:
+      labels:
+        app: local-path-provisioner
+    spec:
+      serviceAccountName: local-path-provisioner-service-account
+      containers:
+        - name: local-path-provisioner
+          image: {{.CustomRegistries.LocalPathProvisioner | default .ImageRepository | default .Registries.LocalPathProvisioner }}{{ .Images.LocalPathProvisioner }}
+          imagePullPolicy: IfNotPresent
+          command:
+            - local-path-provisioner
+            - --debug
+            - start
+            - --config
+            - /etc/config/config.json
+          volumeMounts:
+            - name: config-volume
+              mountPath: /etc/config/
+          env:
+            - name: POD_NAMESPACE
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.namespace
+      volumes:
+        - name: config-volume
+          configMap:
+            name: local-path-config
+
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: local-path
+provisioner: rancher.io/local-path
+volumeBindingMode: WaitForFirstConsumer
+reclaimPolicy: Delete
+
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: local-path-config
+  namespace: local-path-storage
+data:
+  config.json: |-
+    {
+      "nodePathMap":[
+        {
+          "node":"DEFAULT_PATH_FOR_NON_LISTED_NODES",
+          "paths":["/opt/local-path-provisioner"]
+        }
+      ]
+    }
+  setup: |-
+    #!/bin/sh
+    set -eu
+    mkdir -m 0777 -p "$VOL_DIR"
+  teardown: |-
+    #!/bin/sh
+    set -eu
+    rm -rf "$VOL_DIR"
+  helperPod.yaml: |-
+    apiVersion: v1
+    kind: Pod
+    metadata:
+      name: helper-pod
+    spec:
+      containers:
+        - name: helper-pod
+          image: {{.CustomRegistries.Helper | default .ImageRepository | default .Registries.Helper }}{{ .Images.Helper }}
+          imagePullPolicy: IfNotPresent
+
+
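For context on how the image lines above resolve: each `default` in the pipeline falls back to its argument when the piped value is empty, so a per-addon custom registry takes precedence over a global `--image-repository`, which takes precedence over the addon's stock registry. Below is a minimal, self-contained sketch of those semantics; it is not minikube's actual renderer, and the `default` helper, the data layout, and the trailing slash on the registry value are assumptions made for illustration:

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// default returns fallback when the piped value is empty,
		// mirroring the fall-through semantics the template relies on.
		"default": func(fallback, value string) string {
			if value == "" {
				return fallback
			}
			return value
		},
	}
	t := template.Must(template.New("img").Funcs(funcs).Parse(
		"{{.CustomRegistries.LocalPathProvisioner | default .ImageRepository | default .Registries.LocalPathProvisioner }}{{ .Images.LocalPathProvisioner }}"))

	data := map[string]interface{}{
		"CustomRegistries": map[string]string{"LocalPathProvisioner": ""},
		"ImageRepository":  "",
		// Assumed to carry a trailing slash, since the template
		// concatenates registry and image directly.
		"Registries": map[string]string{"LocalPathProvisioner": "docker.io/"},
		"Images":     map[string]string{"LocalPathProvisioner": "rancher/local-path-provisioner:v0.0.22"},
	}
	// With no custom registry and no --image-repository, this prints
	// docker.io/rancher/local-path-provisioner:v0.0.22
	if err := t.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```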
diff --git a/pkg/addons/addons_storage_classes.go b/pkg/addons/addons_storage_classes.go
index cd8b67272900..949c28228df4 100644
--- a/pkg/addons/addons_storage_classes.go
+++ b/pkg/addons/addons_storage_classes.go
@@ -39,6 +39,8 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
 	class := defaultStorageClassProvisioner
 	if name == "storage-provisioner-gluster" {
 		class = "glusterfile"
+	} else if name == "storage-provisioner-rancher" {
+		class = "local-path"
 	}
 
 	api, err := machine.NewAPIClient()
@@ -62,6 +64,10 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
 	}
 
 	if enable {
+		// Enable addon before marking it as default
+		if err = EnableOrDisableAddon(cc, name, val); err != nil {
+			return err
+		}
 		// Only StorageClass for 'name' should be marked as default
 		err = storageclass.SetDefaultStorageClass(storagev1, class)
 		if err != nil {
@@ -73,7 +79,10 @@ func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val st
 		if err != nil {
 			return errors.Wrapf(err, "Error disabling %s as the default storage class", class)
 		}
+		if err = EnableOrDisableAddon(cc, name, val); err != nil {
+			return err
+		}
 	}
 
-	return EnableOrDisableAddon(cc, name, val)
+	return nil
 }
diff --git a/pkg/addons/config.go b/pkg/addons/config.go
index 351ca0c2c880..84beac1aac6f 100644
--- a/pkg/addons/config.go
+++ b/pkg/addons/config.go
@@ -166,6 +166,11 @@ var Addons = []*Addon{
 		set:       SetBool,
 		callbacks: []setFn{enableOrDisableStorageClasses},
 	},
+	{
+		name:      "storage-provisioner-rancher",
+		set:       SetBool,
+		callbacks: []setFn{enableOrDisableStorageClasses},
+	},
 	{
 		name:      "metallb",
 		set:       SetBool,
diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go
index 7383e539e0f2..9b8f3103f672 100644
--- a/pkg/minikube/assets/addons.go
+++ b/pkg/minikube/assets/addons.go
@@ -208,6 +208,19 @@ var Addons = map[string]*Addon{
 		"GlusterfsServer":        "docker.io",
 		"GlusterfileProvisioner": "docker.io",
 	}),
+	"storage-provisioner-rancher": NewAddon([]*BinAsset{
+		MustBinAsset(addons.StorageProvisionerRancherAssets,
+			"storage-provisioner-rancher/storage-provisioner-rancher.yaml.tmpl",
+			vmpath.GuestAddonsDir,
+			"storage-provisioner-rancher.yaml",
+			"0640"),
+	}, false, "storage-provisioner-rancher", "3rd party (Rancher)", "", "", map[string]string{
+		"LocalPathProvisioner": "rancher/local-path-provisioner:v0.0.22@sha256:e34c88ae0affb1cdefbb874140d6339d4a27ec4ee420ae8199cd839997b05246",
+		"Helper":               "busybox:stable@sha256:3fbc632167424a6d997e74f52b878d7cc478225cffac6bc977eedfe51c7f4e79",
+	}, map[string]string{
+		"LocalPathProvisioner": "docker.io",
+		"Helper":               "docker.io",
+	}),
 	"efk": NewAddon([]*BinAsset{
 		MustBinAsset(addons.EfkAssets,
 			"efk/elasticsearch-rc.yaml.tmpl",
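The reordering in `enableOrDisableStorageClasses` matters because a StorageClass can only be marked default once it exists: on enable, the addon is applied first and only then is its class annotated as default; on disable, the default marker is cleared before the addon is removed. Kubernetes tracks the default class with the `storageclass.kubernetes.io/is-default-class` annotation. The sketch below, a hypothetical helper rather than minikube's `storageclass` package, shows what setting a default class boils down to:

```go
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

const defaultAnnotation = "storageclass.kubernetes.io/is-default-class"

// setDefaultStorageClass marks one class as the cluster default by
// annotating it "true" and every other class "false". Minikube's
// storageclass package is the authoritative implementation; this is
// only an illustration of the underlying mechanism.
func setDefaultStorageClass(ctx context.Context, cs kubernetes.Interface, name string) error {
	classes, err := cs.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range classes.Items {
		sc := &classes.Items[i]
		if sc.Annotations == nil {
			sc.Annotations = map[string]string{}
		}
		if sc.Name == name {
			sc.Annotations[defaultAnnotation] = "true"
		} else {
			sc.Annotations[defaultAnnotation] = "false"
		}
		if _, err := cs.StorageV1().StorageClasses().Update(ctx, sc, metav1.UpdateOptions{}); err != nil {
			return err
		}
	}
	return nil
}
```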
diff --git a/site/content/en/docs/tutorials/local_path_provisioner.md b/site/content/en/docs/tutorials/local_path_provisioner.md
new file mode 100644
index 000000000000..804d8806c1a6
--- /dev/null
+++ b/site/content/en/docs/tutorials/local_path_provisioner.md
@@ -0,0 +1,103 @@
+---
+title: "Using Local Path Provisioner"
+linkTitle: "Using Local Path Provisioner"
+weight: 1
+date: 2022-10-05
+description: >
+  Using Local Path Provisioner
+---
+
+## Overview
+
+[Local Path Provisioner](https://github.com/rancher/local-path-provisioner) provides a way for Kubernetes users to utilize the local storage on each node. It supports multi-node setups. This tutorial shows how to set up the local-path provisioner on a two-node minikube cluster.
+
+## Prerequisites
+
+- minikube version higher than v1.27.0
+- kubectl
+
+## Tutorial
+
+- Start a cluster with 2 nodes:
+
+```shell
+$ minikube start -n 2
+```
+
+- Enable the `storage-provisioner-rancher` addon:
+
+```shell
+$ minikube addons enable storage-provisioner-rancher
+```
+
+- You should now see a Pod in the `local-path-storage` namespace:
+
+```shell
+$ kubectl get pods -n local-path-storage
+NAME                                     READY   STATUS    RESTARTS   AGE
+local-path-provisioner-7f58b4649-hcbk9   1/1     Running   0          38s
+```
+
+- The `local-path` StorageClass should be marked as `default`:
+
+```shell
+$ kubectl get sc
+NAME                   PROVISIONER                RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
+local-path (default)   rancher.io/local-path      Delete          WaitForFirstConsumer   false                  107s
+standard               k8s.io/minikube-hostpath   Delete          Immediate              false                  4m27s
+```
+
+- The following YAML creates a PVC and a Pod that writes a file to the volume on the second node (minikube-m02):
+
+```yaml
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-pvc
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 64Mi
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-local-path
+spec:
+  restartPolicy: OnFailure
+  nodeSelector:
+    "kubernetes.io/hostname": "minikube-m02"
+  containers:
+    - name: busybox
+      image: busybox:stable
+      command: ["sh", "-c", "echo 'local-path-provisioner' > /test/file1"]
+      volumeMounts:
+        - name: data
+          mountPath: /test
+  volumes:
+    - name: data
+      persistentVolumeClaim:
+        claimName: test-pvc
+```
+
+```shell
+$ kubectl get pvc
+NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+test-pvc   Bound    pvc-f07e253b-fea7-433a-b0ac-1bcea3f77076   64Mi       RWO            local-path     5m19s
+```
+
+```shell
+$ kubectl get pods -o wide
+NAME              READY   STATUS      RESTARTS   AGE     IP           NODE           NOMINATED NODE   READINESS GATES
+test-local-path   0/1     Completed   0          5m19s   10.244.1.5   minikube-m02   <none>           <none>
+```
+
+- On the second node we can see the created file with the content `local-path-provisioner`:
+
+```shell
+$ minikube ssh -n minikube-m02 "cat /opt/local-path-provisioner/pvc-f07e253b-fea7-433a-b0ac-1bcea3f77076_default_test-pvc/file1"
+local-path-provisioner
+```
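Note that because the `local-path` StorageClass uses `volumeBindingMode: WaitForFirstConsumer`, the PVC in the tutorial stays `Pending` until the consuming Pod is scheduled, and the volume is then provisioned on that Pod's node. A client-go sketch of waiting for the bind (hypothetical helper, for illustration only):

```go
package sketch

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForBound polls until the PVC leaves Pending. With WaitForFirstConsumer
// binding this only happens after a Pod that mounts the claim is scheduled.
func waitForBound(cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pvc, err := cs.CoreV1().PersistentVolumeClaims(ns).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return pvc.Status.Phase == corev1.ClaimBound, nil
	})
}
```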
diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go
index 40e3fee39e05..3421c37c0452 100644
--- a/test/integration/addons_test.go
+++ b/test/integration/addons_test.go
@@ -37,6 +37,7 @@ import (
 
 	"github.com/blang/semver/v4"
 	retryablehttp "github.com/hashicorp/go-retryablehttp"
+	core "k8s.io/api/core/v1"
 	"k8s.io/minikube/pkg/kapi"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/detect"
@@ -78,7 +79,7 @@ func TestAddons(t *testing.T) {
 	// so we override that here to let minikube auto-detect appropriate cgroup driver
 	os.Setenv(constants.MinikubeForceSystemdEnv, "")
 
-	args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner", "--addons=inspektor-gadget"}, StartArgs()...)
+	args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner", "--addons=inspektor-gadget", "--addons=storage-provisioner-rancher"}, StartArgs()...)
 	if !NoneDriver() { // none driver does not support ingress
 		args = append(args, "--addons=ingress", "--addons=ingress-dns")
 	}
@@ -111,6 +112,7 @@ func TestAddons(t *testing.T) {
 			{"CSI", validateCSIDriverAndSnapshots},
 			{"Headlamp", validateHeadlampAddon},
 			{"CloudSpanner", validateCloudSpannerAddon},
+			{"LocalPath", validateLocalPathAddon},
 		}
 		for _, tc := range tests {
 			tc := tc
@@ -837,3 +839,64 @@ func validateCloudSpannerAddon(ctx context.Context, t *testing.T, profile string
 		t.Errorf("failed to disable cloud-spanner addon: args %q : %v", rr.Command(), err)
 	}
 }
+
+// validateLocalPathAddon tests the functionality of the storage-provisioner-rancher addon
+func validateLocalPathAddon(ctx context.Context, t *testing.T, profile string) {
+	if NoneDriver() {
+		t.Skipf("skip local-path test on none driver")
+	}
+
+	// Create a test PVC
+	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "storage-provisioner-rancher", "pvc.yaml")))
+	if err != nil {
+		t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Command(), err)
+	}
+
+	// Deploy a simple pod with PVC
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "storage-provisioner-rancher", "pod.yaml")))
+	if err != nil {
+		t.Fatalf("kubectl apply pod.yaml failed: args %q: %v", rr.Command(), err)
+	}
+	if err := PVCWait(ctx, t, profile, "default", "test-pvc", Minutes(5)); err != nil {
+		t.Fatalf("failed waiting for PVC test-pvc: %v", err)
+	}
+	if _, err := PodWait(ctx, t, profile, "default", "run=test-local-path", Minutes(3)); err != nil {
+		t.Fatalf("failed waiting for test-local-path pod: %v", err)
+	}
+
+	// Get info about PVC
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "pvc", "test-pvc", "-o=json"))
+	if err != nil {
+		t.Fatalf("kubectl get pvc with %s failed: %v", rr.Command(), err)
+	}
+	pvc := core.PersistentVolumeClaim{}
+	if err := json.NewDecoder(bytes.NewReader(rr.Stdout.Bytes())).Decode(&pvc); err != nil {
+		t.Fatalf("failed decoding json to pvc: %v", err)
+	}
+
+	// Verify the file written by the test pod on the node's local-path volume
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("cat /opt/local-path-provisioner/%s_default_test-pvc/file1", pvc.Spec.VolumeName)))
+	if err != nil {
+		t.Fatalf("ssh error: %v", err)
+	}
+
+	got := rr.Stdout.String()
+	want := "local-path-provisioner"
+	if !strings.Contains(got, want) {
+		t.Fatalf("%v stdout = %q, want %q", rr.Command(), got, want)
+	}
+
+	// Cleanup
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "test-local-path"))
+	if err != nil {
+		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "test-pvc"))
+	if err != nil {
+		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "storage-provisioner-rancher", "--alsologtostderr", "-v=1"))
+	if err != nil {
+		t.Errorf("failed to disable storage-provisioner-rancher addon: args %q: %v", rr.Command(), err)
+	}
+}
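The hard-coded path in the ssh check above follows local-path-provisioner's on-disk layout: volumes are created under the configured base path as `<pvName>_<namespace>_<pvcName>`. A small helper, hypothetical and shown only to make that format explicit, could compute it from the decoded PVC:

```go
package sketch

import (
	"fmt"

	core "k8s.io/api/core/v1"
)

// volumePath reconstructs the host directory that local-path-provisioner
// creates for a bound PVC, matching the string the integration test cats
// over ssh. The base path comes from the addon's config.json.
func volumePath(pvc *core.PersistentVolumeClaim) string {
	return fmt.Sprintf("/opt/local-path-provisioner/%s_%s_%s",
		pvc.Spec.VolumeName, pvc.Namespace, pvc.Name)
}
```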
diff --git a/test/integration/testdata/storage-provisioner-rancher/pod.yaml b/test/integration/testdata/storage-provisioner-rancher/pod.yaml
new file mode 100644
index 000000000000..0930c37be549
--- /dev/null
+++ b/test/integration/testdata/storage-provisioner-rancher/pod.yaml
@@ -0,0 +1,19 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: test-local-path
+  labels:
+    run: test-local-path
+spec:
+  restartPolicy: OnFailure
+  containers:
+    - name: busybox
+      image: busybox:stable
+      command: ["sh", "-c", "echo 'local-path-provisioner' > /test/file1"]
+      volumeMounts:
+        - name: data
+          mountPath: /test
+  volumes:
+    - name: data
+      persistentVolumeClaim:
+        claimName: test-pvc
diff --git a/test/integration/testdata/storage-provisioner-rancher/pvc.yaml b/test/integration/testdata/storage-provisioner-rancher/pvc.yaml
new file mode 100644
index 000000000000..d99a27189d71
--- /dev/null
+++ b/test/integration/testdata/storage-provisioner-rancher/pvc.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: test-pvc
+spec:
+  storageClassName: local-path
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 64Mi
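For illustration, the same PVC fixture expressed against the Kubernetes Go API, assuming the vintage of k8s.io/api vendored here (where PVC resources use `corev1.ResourceRequirements`); this is a sketch, not part of the change:

```go
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// testPVC mirrors testdata/storage-provisioner-rancher/pvc.yaml, e.g. if the
// integration test were ever to create its fixtures programmatically.
func testPVC() *corev1.PersistentVolumeClaim {
	class := "local-path"
	return &corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "test-pvc"},
		Spec: corev1.PersistentVolumeClaimSpec{
			StorageClassName: &class,
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("64Mi"),
				},
			},
		},
	}
}
```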