*
* ==> Audit <==
*
|---------|------|---------|------|---------|------------|----------|
| Command | Args | Profile | User | Version | Start Time | End Time |
|---------|------|---------|------|---------|------------|----------|
| start | --profile marina --driver docker --container-runtime containerd --cni calico --embed-certs | marina | tadeu | v1.31.2 | 19 Sep 23 18:56 -03 | 19 Sep 23 18:58 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 19:01 -03 | 19 Sep 23 19:02 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 20:25 -03 | 19 Sep 23 20:25 -03 |
| start | --profile marina --driver docker --container-runtime containerd --cni calico --embed-certs --docker-opt=add-host=hub.marina.estaleiro=172.30.0.11 --network marinanet | marina | tadeu | v1.31.2 | 19 Sep 23 20:25 -03 | 19 Sep 23 20:26 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 20:27 -03 | 19 Sep 23 20:27 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --docker-opt=add-host=hub.marina.estaleiro=172.30.0.11 --network=marinanet | marina | tadeu | v1.31.2 | 19 Sep 23 20:27 -03 | 19 Sep 23 20:28 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 20:28 -03 | 19 Sep 23 20:28 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --docker-opt=add-host=hub.marina.estaleiro=172.31.0.10 --network=marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:29 -03 | 19 Sep 23 20:30 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:30 -03 | 19 Sep 23 20:30 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 20:31 -03 | 19 Sep 23 20:31 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=whatif | marina | tadeu | v1.31.2 | 19 Sep 23 20:41 -03 | 19 Sep 23 20:42 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:42 -03 | 19 Sep 23 20:43 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 20:44 -03 | 19 Sep 23 20:44 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=whatif --extra-config kubeadm.pod-network-cidr=172.16.0.0/16 --docker-opt=--add-host=hub1.marina.estaleiro=172.17.12.13 | marina | tadeu | v1.31.2 | 19 Sep 23 20:48 -03 | 19 Sep 23 20:49 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:49 -03 | 19 Sep 23 20:50 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 20:50 -03 | 19 Sep 23 20:50 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=whatif --extra-config kubeadm.pod-network-cidr=172.16.0.0/16 --docker-opt=--add-host=hub1.marina.estaleiro=172.17.12.13 | marina | tadeu | v1.31.2 | 19 Sep 23 20:50 -03 | 19 Sep 23 20:51 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:51 -03 | 19 Sep 23 20:51 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:53 -03 | 19 Sep 23 20:54 -03 |
| delete | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:54 -03 | 19 Sep 23 20:54 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet --extra-config kubeadm.pod-network-cidr=172.16.0.0/16 | marina | tadeu | v1.31.2 | 19 Sep 23 20:55 -03 | 19 Sep 23 20:55 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:55 -03 | 19 Sep 23 20:56 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 20:56 -03 | 19 Sep 23 20:56 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 21:02 -03 | 19 Sep 23 21:03 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 21:03 -03 | 19 Sep 23 21:03 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | marina | tadeu | v1.31.2 | 19 Sep 23 21:04 -03 | 19 Sep 23 21:05 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 21:07 -03 | 19 Sep 23 21:10 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 21:10 -03 | 19 Sep 23 21:10 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | marina | tadeu | v1.31.2 | 19 Sep 23 21:10 -03 | 19 Sep 23 21:11 -03 |
| ssh | -p marina | marina | tadeu | v1.31.2 | 19 Sep 23 21:11 -03 | 19 Sep 23 21:12 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 21:29 -03 | 19 Sep 23 21:29 -03 |
| start | --profile=k8s-marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network= | k8s-marina | tadeu | v1.31.2 | 19 Sep 23 21:30 -03 | 19 Sep 23 21:30 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 19 Sep 23 21:32 -03 | 19 Sep 23 21:32 -03 |
| delete | k8s-marina delete | k8s-marina | tadeu | v1.31.2 | 19 Sep 23 21:32 -03 | 19 Sep 23 21:32 -03 |
| start | --profile=k8s-marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | k8s-marina | tadeu | v1.31.2 | 19 Sep 23 21:33 -03 | 19 Sep 23 21:33 -03 |
| delete | k8s-marina delete | k8s-marina | tadeu | v1.31.2 | 20 Sep 23 09:52 -03 | 20 Sep 23 09:52 -03 |
| start | --profile=k8s-marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | k8s-marina | tadeu | v1.31.2 | 20 Sep 23 09:52 -03 | 20 Sep 23 09:52 -03 |
| ssh | | minikube | tadeu | v1.31.2 | 20 Sep 23 10:06 -03 | |
| ssh | -p marina | marina | tadeu | v1.31.2 | 20 Sep 23 10:06 -03 | |
| ssh | -p k8s-marina | k8s-marina | tadeu | v1.31.2 | 20 Sep 23 10:07 -03 | 20 Sep 23 10:09 -03 |
| delete | k8s-marina delete | k8s-marina | tadeu | v1.31.2 | 20 Sep 23 10:11 -03 | 20 Sep 23 10:11 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=kubemarina | marina | tadeu | v1.31.2 | 20 Sep 23 10:11 -03 | 20 Sep 23 10:12 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 20 Sep 23 10:12 -03 | 20 Sep 23 10:12 -03 |
| start | --profile=heeeeeey --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=kubemarina | heeeeeey | tadeu | v1.31.2 | 20 Sep 23 10:12 -03 | 20 Sep 23 10:13 -03 |
| delete | heeeeeeey delete | heeeeeeey | tadeu | v1.31.2 | 20 Sep 23 10:13 -03 | 20 Sep 23 10:13 -03 |
| delete | heeeeeey delete | heeeeeey | tadeu | v1.31.2 | 20 Sep 23 10:13 -03 | 20 Sep 23 10:13 -03 |
| start | --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | minikube | tadeu | v1.31.2 | 20 Sep 23 10:19 -03 | 20 Sep 23 10:19 -03 |
| delete | | minikube | tadeu | v1.31.2 | 20 Sep 23 10:20 -03 | 20 Sep 23 10:20 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | marina | tadeu | v1.31.2 | 20 Sep 23 10:20 -03 | 20 Sep 23 10:20 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet --alsologtostderr | marina | tadeu | v1.31.2 | 20 Sep 23 10:21 -03 | 20 Sep 23 10:21 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 20 Sep 23 10:29 -03 | 20 Sep 23 10:30 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | marina | tadeu | v0.0.0-unset | 20 Sep 23 10:41 -03 | 20 Sep 23 10:45 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 20 Sep 23 10:47 -03 | 20 Sep 23 10:47 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | marina | tadeu | v0.0.0-unset | 20 Sep 23 10:48 -03 | 20 Sep 23 11:03 -03 |
| delete | marina delete | marina | tadeu | v1.31.2 | 20 Sep 23 11:03 -03 | 20 Sep 23 11:03 -03 |
| start | --profile=marina --driver=docker --container-runtime=containerd --cni=calico --embed-certs --network=marinanet | marina | tadeu | v1.31.2 | 20 Sep 23 11:03 -03 | 20 Sep 23 11:04 -03 |
|---------|------|---------|------|---------|------------|----------|
*
* ==> Last Start <==
*
Log file created at: 2023/09/20 11:03:55
Running on machine: tadeu-desktop
Binary: Built with gc go1.20.7 for linux/amd64
Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg
I0920 11:03:55.896790 3401853 out.go:296] Setting OutFile to fd 1 ...
I0920 11:03:55.896863 3401853 out.go:348] isatty.IsTerminal(1) = true
I0920 11:03:55.896864 3401853 out.go:309] Setting ErrFile to fd 2...
I0920 11:03:55.896867 3401853 out.go:348] isatty.IsTerminal(2) = true I0920 11:03:55.896998 3401853 root.go:338] Updating PATH: /home/tadeu/.minikube/bin I0920 11:03:55.897339 3401853 out.go:303] Setting JSON to false I0920 11:03:55.899849 3401853 start.go:128] hostinfo: {"hostname":"tadeu-desktop","uptime":670894,"bootTime":1694547742,"procs":540,"os":"linux","platform":"arch","platformFamily":"arch","platformVersion":"\"23.0.2\"","kernelVersion":"6.4.14-1-MANJARO","kernelArch":"x86_64","virtualizationSystem":"kvm","virtualizationRole":"host","hostId":"662472c9-6d5a-40f1-9598-e943265a0677"} I0920 11:03:55.899879 3401853 start.go:138] virtualization: kvm host I0920 11:03:55.903582 3401853 out.go:177] ๐Ÿ˜„ [marina] minikube v1.31.2 on Arch "23.0.2" I0920 11:03:55.911396 3401853 notify.go:220] Checking for updates... I0920 11:03:55.913914 3401853 driver.go:373] Setting default libvirt URI to qemu:///system I0920 11:03:55.930882 3401853 docker.go:121] docker version: linux-24.0.5: I0920 11:03:55.930968 3401853 cli_runner.go:164] Run: docker system info --format "{{json .}}" I0920 11:03:55.967916 3401853 info.go:266] docker info: {ID:BH47:Q7TN:ABM6:NYTZ:6QJM:YTHI:QLEG:2GYD:M2ZR:C2OI:AJGW:ATBJ Containers:13 ContainersRunning:9 ContainersPaused:0 ContainersStopped:4 Images:14 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy true] [Native Overlay Diff false] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:93 OomKillDisable:false NGoroutines:91 SystemTime:2023-09-20 11:03:55.961608639 -0300 -03 LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.4.14-1-MANJARO OperatingSystem:Manjaro Linux OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:12 MemTotal:20872761344 GenericResources: DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:tadeu-desktop Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fe457eb99ac0e27b3ce638175ef8e68a7d2bc373.m Expected:fe457eb99ac0e27b3ce638175ef8e68a7d2bc373.m} RuncCommit:{ID: Expected:} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/lib/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:0.11.2] map[Name:compose Path:/usr/lib/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. 
Version:2.20.3]] Warnings:}} I0920 11:03:55.967983 3401853 docker.go:294] overlay module found I0920 11:03:55.971333 3401853 out.go:177] โœจ Using the docker driver based on user configuration I0920 11:03:55.977932 3401853 start.go:298] selected driver: docker I0920 11:03:55.977937 3401853 start.go:902] validating driver "docker" against I0920 11:03:55.977945 3401853 start.go:913] status for docker: {Installed:true Healthy:true Running:false NeedsImprovement:false Error: Reason: Fix: Doc: Version:} I0920 11:03:55.978062 3401853 cli_runner.go:164] Run: docker system info --format "{{json .}}" I0920 11:03:56.013830 3401853 info.go:266] docker info: {ID:BH47:Q7TN:ABM6:NYTZ:6QJM:YTHI:QLEG:2GYD:M2ZR:C2OI:AJGW:ATBJ Containers:13 ContainersRunning:9 ContainersPaused:0 ContainersStopped:4 Images:14 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Using metacopy true] [Native Overlay Diff false] [userxattr false]] SystemStatus: Plugins:{Volume:[local] Network:[bridge host ipvlan macvlan null overlay] Authorization: Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:true KernelMemory:false KernelMemoryTCP:false CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true PidsLimit:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6Tables:true Debug:false NFd:93 OomKillDisable:false NGoroutines:91 SystemTime:2023-09-20 11:03:56.008106555 -0300 -03 LoggingDriver:json-file CgroupDriver:systemd NEventsListener:0 KernelVersion:6.4.14-1-MANJARO OperatingSystem:Manjaro Linux OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:{AllowNondistributableArtifactsCIDRs:[] AllowNondistributableArtifactsHostnames:[] InsecureRegistryCIDRs:[127.0.0.0/8] IndexConfigs:{DockerIo:{Name:docker.io Mirrors:[] Secure:true Official:true}} Mirrors:[]} NCPU:12 MemTotal:20872761344 GenericResources: DockerRootDir:/var/lib/docker HTTPProxy: HTTPSProxy: NoProxy: Name:tadeu-desktop Labels:[] ExperimentalBuild:false ServerVersion:24.0.5 ClusterStore: ClusterAdvertise: Runtimes:{Runc:{Path:runc}} DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:fe457eb99ac0e27b3ce638175ef8e68a7d2bc373.m Expected:fe457eb99ac0e27b3ce638175ef8e68a7d2bc373.m} RuncCommit:{ID: Expected:} InitCommit:{ID:de40ad0 Expected:de40ad0} SecurityOptions:[name=seccomp,profile=builtin name=cgroupns] ProductLicense: Warnings: ServerErrors:[] ClientInfo:{Debug:false Plugins:[map[Name:buildx Path:/usr/lib/docker/cli-plugins/docker-buildx SchemaVersion:0.1.0 ShortDescription:Docker Buildx Vendor:Docker Inc. Version:0.11.2] map[Name:compose Path:/usr/lib/docker/cli-plugins/docker-compose SchemaVersion:0.1.0 ShortDescription:Docker Compose Vendor:Docker Inc. 
Version:2.20.3]] Warnings:}} I0920 11:03:56.013937 3401853 start_flags.go:305] no existing cluster config was found, will generate one from the flags I0920 11:03:56.014854 3401853 start_flags.go:382] Using suggested 4900MB memory alloc based on sys=19905MB, container=19905MB I0920 11:03:56.015221 3401853 start_flags.go:901] Wait components to verify : map[apiserver:true system_pods:true] I0920 11:03:56.018769 3401853 out.go:177] ๐Ÿ“Œ Using Docker driver with root privileges I0920 11:03:56.022163 3401853 cni.go:84] Creating CNI manager for "calico" I0920 11:03:56.022172 3401853 start_flags.go:314] Found "Calico" CNI - setting NetworkPlugin=cni I0920 11:03:56.022185 3401853 start_flags.go:319] config: {Name:marina KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4900 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.4 ClusterName:marina Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:calico NodeIP: NodePort:8443 NodeName:} Nodes:[] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network:marinanet Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/tadeu:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0} I0920 11:03:56.026107 3401853 out.go:177] ๐Ÿ‘ Starting control plane node marina in cluster marina I0920 11:03:56.032739 3401853 cache.go:122] Beginning downloading kic base image for docker with containerd I0920 11:03:56.036715 3401853 out.go:177] ๐Ÿšœ Pulling base image ... 
I0920 11:03:56.043318 3401853 preload.go:132] Checking if preload exists for k8s version v1.27.4 and runtime containerd I0920 11:03:56.043358 3401853 preload.go:148] Found local preload: /home/tadeu/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.4-containerd-overlay2-amd64.tar.lz4 I0920 11:03:56.043364 3401853 cache.go:57] Caching tarball of preloaded images I0920 11:03:56.043388 3401853 image.go:79] Checking for gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon I0920 11:03:56.043423 3401853 preload.go:174] Found /home/tadeu/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.4-containerd-overlay2-amd64.tar.lz4 in cache, skipping download I0920 11:03:56.043431 3401853 cache.go:60] Finished verifying existence of preloaded tar for v1.27.4 on containerd I0920 11:03:56.043748 3401853 profile.go:148] Saving config to /home/tadeu/.minikube/profiles/marina/config.json ... I0920 11:03:56.043767 3401853 lock.go:35] WriteFile acquiring /home/tadeu/.minikube/profiles/marina/config.json: {Name:mk245e2bbe0011a5f500a6331153986c019bc51c Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:03:56.065816 3401853 image.go:83] Found gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 in local docker daemon, skipping pull I0920 11:03:56.065827 3401853 cache.go:145] gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 exists in daemon, skipping load I0920 11:03:56.065837 3401853 cache.go:195] Successfully downloaded all kic artifacts I0920 11:03:56.065857 3401853 start.go:365] acquiring machines lock for marina: {Name:mk2bad5564e1c8a8676dd4705bf9fc65e1b75a44 Clock:{} Delay:500ms Timeout:10m0s Cancel:} I0920 11:03:56.065896 3401853 start.go:369] acquired machines lock for "marina" in 28.103ยตs I0920 11:03:56.065908 3401853 start.go:93] Provisioning new machine with config: &{Name:marina KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4900 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.4 ClusterName:marina Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:calico NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP: Port:8443 KubernetesVersion:v1.27.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] 
ListenAddress: Network:marinanet Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/tadeu:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0} &{Name: IP: Port:8443 KubernetesVersion:v1.27.4 ContainerRuntime:containerd ControlPlane:true Worker:true} I0920 11:03:56.065961 3401853 start.go:125] createHost starting for "" (driver="docker") I0920 11:03:56.069540 3401853 out.go:204] ๐Ÿ”ฅ Creating docker container (CPUs=2, Memory=4900MB) ... I0920 11:03:56.069966 3401853 start.go:159] libmachine.API.Create for "marina" (driver="docker") I0920 11:03:56.069976 3401853 client.go:168] LocalClient.Create starting I0920 11:03:56.070013 3401853 main.go:141] libmachine: Reading certificate data from /home/tadeu/.minikube/certs/ca.pem I0920 11:03:56.070143 3401853 main.go:141] libmachine: Decoding PEM data... I0920 11:03:56.070154 3401853 main.go:141] libmachine: Parsing certificate... I0920 11:03:56.070222 3401853 main.go:141] libmachine: Reading certificate data from /home/tadeu/.minikube/certs/cert.pem I0920 11:03:56.070306 3401853 main.go:141] libmachine: Decoding PEM data... I0920 11:03:56.070314 3401853 main.go:141] libmachine: Parsing certificate... I0920 11:03:56.070633 3401853 cli_runner.go:164] Run: docker network inspect marinanet --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" I0920 11:03:56.080038 3401853 network_create.go:76] Found existing network {name:marinanet subnet:0xc00126a900 gateway:[0 0 0 0 0 0 0 0 0 0 255 255 172 30 0 1] mtu:0} I0920 11:03:56.080057 3401853 kic.go:117] calculated static IP "172.30.0.2" for the "marina" container I0920 11:03:56.080132 3401853 cli_runner.go:164] Run: docker ps -a --format {{.Names}} I0920 11:03:56.092343 3401853 cli_runner.go:164] Run: docker volume create marina --label name.minikube.sigs.k8s.io=marina --label created_by.minikube.sigs.k8s.io=true I0920 11:03:56.102673 3401853 oci.go:103] Successfully created a docker volume marina I0920 11:03:56.102729 3401853 cli_runner.go:164] Run: docker run --rm --name marina-preload-sidecar --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=marina --entrypoint /usr/bin/test -v marina:/var gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -d /var/lib I0920 11:03:56.855716 3401853 oci.go:107] Successfully prepared a docker volume marina I0920 11:03:56.855739 3401853 preload.go:132] Checking if preload exists for k8s version v1.27.4 and runtime containerd I0920 11:03:56.855753 3401853 kic.go:190] Starting extracting preloaded images to volume ... 
I0920 11:03:56.855854 3401853 cli_runner.go:164] Run: docker run --rm --entrypoint /usr/bin/tar -v /home/tadeu/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.4-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v marina:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir I0920 11:04:24.611857 3401853 cli_runner.go:217] Completed: docker run --rm --entrypoint /usr/bin/tar -v /home/tadeu/.minikube/cache/preloaded-tarball/preloaded-images-k8s-v18-v1.27.4-containerd-overlay2-amd64.tar.lz4:/preloaded.tar:ro -v marina:/extractDir gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 -I lz4 -xf /preloaded.tar -C /extractDir: (27.755958288s) I0920 11:04:24.611872 3401853 kic.go:199] duration metric: took 27.756116 seconds to extract preloaded images to volume W0920 11:04:24.611939 3401853 cgroups_linux.go:77] Your kernel does not support swap limit capabilities or the cgroup is not mounted. W0920 11:04:24.611961 3401853 oci.go:240] Your kernel does not support CPU cfs period/quota or the cgroup is not mounted. I0920 11:04:24.612010 3401853 cli_runner.go:164] Run: docker info --format "'{{json .SecurityOptions}}'" I0920 11:04:24.653386 3401853 cli_runner.go:164] Run: docker run -d -t --privileged --security-opt seccomp=unconfined --tmpfs /tmp --tmpfs /run -v /lib/modules:/lib/modules:ro --hostname marina --name marina --label created_by.minikube.sigs.k8s.io=true --label name.minikube.sigs.k8s.io=marina --label role.minikube.sigs.k8s.io= --label mode.minikube.sigs.k8s.io=marina --network marinanet --ip 172.30.0.2 --volume marina:/var --security-opt apparmor=unconfined --memory=4900mb -e container=docker --expose 8443 --publish=127.0.0.1::8443 --publish=127.0.0.1::22 --publish=127.0.0.1::2376 --publish=127.0.0.1::5000 --publish=127.0.0.1::32443 gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 I0920 11:04:24.996004 3401853 cli_runner.go:164] Run: docker container inspect marina --format={{.State.Running}} I0920 11:04:25.006600 3401853 cli_runner.go:164] Run: docker container inspect marina --format={{.State.Status}} I0920 11:04:25.017284 3401853 cli_runner.go:164] Run: docker exec marina stat /var/lib/dpkg/alternatives/iptables I0920 11:04:25.062659 3401853 oci.go:144] the created container "marina" has a running status. I0920 11:04:25.062676 3401853 kic.go:221] Creating ssh key for kic: /home/tadeu/.minikube/machines/marina/id_rsa... I0920 11:04:25.134408 3401853 kic_runner.go:191] docker (temp): /home/tadeu/.minikube/machines/marina/id_rsa.pub --> /home/docker/.ssh/authorized_keys (381 bytes) I0920 11:04:25.149536 3401853 cli_runner.go:164] Run: docker container inspect marina --format={{.State.Status}} I0920 11:04:25.159871 3401853 kic_runner.go:93] Run: chown docker:docker /home/docker/.ssh/authorized_keys I0920 11:04:25.159878 3401853 kic_runner.go:114] Args: [docker exec --privileged marina chown docker:docker /home/docker/.ssh/authorized_keys] I0920 11:04:25.224086 3401853 cli_runner.go:164] Run: docker container inspect marina --format={{.State.Status}} I0920 11:04:25.235181 3401853 machine.go:88] provisioning docker machine ... 
I0920 11:04:25.235194 3401853 ubuntu.go:169] provisioning hostname "marina" I0920 11:04:25.235249 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:25.244812 3401853 main.go:141] libmachine: Using SSH client type: native I0920 11:04:25.245183 3401853 main.go:141] libmachine: &{{{ 0 [] [] []} docker [0x80f160] 0x812200 [] 0s} 127.0.0.1 32867 } I0920 11:04:25.245191 3401853 main.go:141] libmachine: About to run SSH command: sudo hostname marina && echo "marina" | sudo tee /etc/hostname I0920 11:04:25.245623 3401853 main.go:141] libmachine: Error dialing TCP: ssh: handshake failed: read tcp 127.0.0.1:59902->127.0.0.1:32867: read: connection reset by peer I0920 11:04:28.383957 3401853 main.go:141] libmachine: SSH cmd err, output: : marina I0920 11:04:28.384034 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:28.394714 3401853 main.go:141] libmachine: Using SSH client type: native I0920 11:04:28.395043 3401853 main.go:141] libmachine: &{{{ 0 [] [] []} docker [0x80f160] 0x812200 [] 0s} 127.0.0.1 32867 } I0920 11:04:28.395051 3401853 main.go:141] libmachine: About to run SSH command: if ! grep -xq '.*\smarina' /etc/hosts; then if grep -xq '127.0.1.1\s.*' /etc/hosts; then sudo sed -i 's/^127.0.1.1\s.*/127.0.1.1 marina/g' /etc/hosts; else echo '127.0.1.1 marina' | sudo tee -a /etc/hosts; fi fi I0920 11:04:28.508245 3401853 main.go:141] libmachine: SSH cmd err, output: : I0920 11:04:28.508258 3401853 ubuntu.go:175] set auth options {CertDir:/home/tadeu/.minikube CaCertPath:/home/tadeu/.minikube/certs/ca.pem CaPrivateKeyPath:/home/tadeu/.minikube/certs/ca-key.pem CaCertRemotePath:/etc/docker/ca.pem ServerCertPath:/home/tadeu/.minikube/machines/server.pem ServerKeyPath:/home/tadeu/.minikube/machines/server-key.pem ClientKeyPath:/home/tadeu/.minikube/certs/key.pem ServerCertRemotePath:/etc/docker/server.pem ServerKeyRemotePath:/etc/docker/server-key.pem ClientCertPath:/home/tadeu/.minikube/certs/cert.pem ServerCertSANs:[] StorePath:/home/tadeu/.minikube} I0920 11:04:28.508295 3401853 ubuntu.go:177] setting up certificates I0920 11:04:28.508302 3401853 provision.go:83] configureAuth start I0920 11:04:28.508375 3401853 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" marina I0920 11:04:28.519017 3401853 provision.go:138] copyHostCerts I0920 11:04:28.519045 3401853 exec_runner.go:144] found /home/tadeu/.minikube/ca.pem, removing ... I0920 11:04:28.519048 3401853 exec_runner.go:203] rm: /home/tadeu/.minikube/ca.pem I0920 11:04:28.519292 3401853 exec_runner.go:151] cp: /home/tadeu/.minikube/certs/ca.pem --> /home/tadeu/.minikube/ca.pem (1074 bytes) I0920 11:04:28.519368 3401853 exec_runner.go:144] found /home/tadeu/.minikube/cert.pem, removing ... I0920 11:04:28.519371 3401853 exec_runner.go:203] rm: /home/tadeu/.minikube/cert.pem I0920 11:04:28.519400 3401853 exec_runner.go:151] cp: /home/tadeu/.minikube/certs/cert.pem --> /home/tadeu/.minikube/cert.pem (1119 bytes) I0920 11:04:28.519455 3401853 exec_runner.go:144] found /home/tadeu/.minikube/key.pem, removing ... 
I0920 11:04:28.519457 3401853 exec_runner.go:203] rm: /home/tadeu/.minikube/key.pem I0920 11:04:28.519481 3401853 exec_runner.go:151] cp: /home/tadeu/.minikube/certs/key.pem --> /home/tadeu/.minikube/key.pem (1675 bytes) I0920 11:04:28.519630 3401853 provision.go:112] generating server cert: /home/tadeu/.minikube/machines/server.pem ca-key=/home/tadeu/.minikube/certs/ca.pem private-key=/home/tadeu/.minikube/certs/ca-key.pem org=tadeu.marina san=[172.30.0.2 127.0.0.1 localhost 127.0.0.1 minikube marina] I0920 11:04:28.688116 3401853 provision.go:172] copyRemoteCerts I0920 11:04:28.688166 3401853 ssh_runner.go:195] Run: sudo mkdir -p /etc/docker /etc/docker /etc/docker I0920 11:04:28.688205 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:28.699678 3401853 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32867 SSHKeyPath:/home/tadeu/.minikube/machines/marina/id_rsa Username:docker} I0920 11:04:28.783218 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/certs/ca.pem --> /etc/docker/ca.pem (1074 bytes) I0920 11:04:28.804450 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/machines/server.pem --> /etc/docker/server.pem (1192 bytes) I0920 11:04:28.825164 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/machines/server-key.pem --> /etc/docker/server-key.pem (1671 bytes) I0920 11:04:28.844414 3401853 provision.go:86] duration metric: configureAuth took 336.104701ms I0920 11:04:28.844423 3401853 ubuntu.go:193] setting minikube options for container-runtime I0920 11:04:28.844530 3401853 config.go:182] Loaded profile config "marina": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.27.4 I0920 11:04:28.844534 3401853 machine.go:91] provisioned docker machine in 3.609347369s I0920 11:04:28.844537 3401853 client.go:171] LocalClient.Create took 32.77455912s I0920 11:04:28.844546 3401853 start.go:167] duration metric: libmachine.API.Create for "marina" took 32.774579508s I0920 11:04:28.844550 3401853 start.go:300] post-start starting for "marina" (driver="docker") I0920 11:04:28.844554 3401853 start.go:329] creating required directories: [/etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs] I0920 11:04:28.844602 3401853 ssh_runner.go:195] Run: sudo mkdir -p /etc/kubernetes/addons /etc/kubernetes/manifests /var/tmp/minikube /var/lib/minikube /var/lib/minikube/certs /var/lib/minikube/images /var/lib/minikube/binaries /tmp/gvisor /usr/share/ca-certificates /etc/ssl/certs I0920 11:04:28.844640 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:28.856201 3401853 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32867 SSHKeyPath:/home/tadeu/.minikube/machines/marina/id_rsa Username:docker} I0920 11:04:28.943750 3401853 ssh_runner.go:195] Run: cat /etc/os-release I0920 11:04:28.946503 3401853 main.go:141] libmachine: Couldn't set key VERSION_CODENAME, no corresponding struct field found I0920 11:04:28.946518 3401853 main.go:141] libmachine: Couldn't set key PRIVACY_POLICY_URL, no corresponding struct field found I0920 11:04:28.946523 3401853 main.go:141] libmachine: Couldn't set key UBUNTU_CODENAME, no corresponding struct field found I0920 11:04:28.946528 3401853 info.go:137] Remote host: Ubuntu 22.04.2 LTS I0920 11:04:28.946533 
3401853 filesync.go:126] Scanning /home/tadeu/.minikube/addons for local assets ... I0920 11:04:28.947104 3401853 filesync.go:126] Scanning /home/tadeu/.minikube/files for local assets ... I0920 11:04:28.947303 3401853 start.go:303] post-start completed in 102.749254ms I0920 11:04:28.947561 3401853 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" marina I0920 11:04:28.958303 3401853 profile.go:148] Saving config to /home/tadeu/.minikube/profiles/marina/config.json ... I0920 11:04:28.958538 3401853 ssh_runner.go:195] Run: sh -c "df -h /var | awk 'NR==2{print $5}'" I0920 11:04:28.958580 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:28.968862 3401853 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32867 SSHKeyPath:/home/tadeu/.minikube/machines/marina/id_rsa Username:docker} I0920 11:04:29.050373 3401853 ssh_runner.go:195] Run: sh -c "df -BG /var | awk 'NR==2{print $4}'" I0920 11:04:29.053569 3401853 start.go:128] duration metric: createHost completed in 32.987600617s I0920 11:04:29.053578 3401853 start.go:83] releasing machines lock for "marina", held for 32.987676619s I0920 11:04:29.053641 3401853 cli_runner.go:164] Run: docker container inspect -f "{{range .NetworkSettings.Networks}}{{.IPAddress}},{{.GlobalIPv6Address}}{{end}}" marina I0920 11:04:29.064913 3401853 ssh_runner.go:195] Run: cat /version.json I0920 11:04:29.064958 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:29.065016 3401853 ssh_runner.go:195] Run: curl -sS -m 2 https://registry.k8s.io/ I0920 11:04:29.065081 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:29.075003 3401853 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32867 SSHKeyPath:/home/tadeu/.minikube/machines/marina/id_rsa Username:docker} I0920 11:04:29.075842 3401853 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32867 SSHKeyPath:/home/tadeu/.minikube/machines/marina/id_rsa Username:docker} I0920 11:04:29.703639 3401853 ssh_runner.go:195] Run: systemctl --version I0920 11:04:29.711513 3401853 ssh_runner.go:195] Run: sh -c "stat /etc/cni/net.d/*loopback.conf*" I0920 11:04:29.714610 3401853 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f -name *loopback.conf* -not -name *.mk_disabled -exec sh -c "grep -q loopback {} && ( grep -q name {} || sudo sed -i '/"type": "loopback"/i \ \ \ \ "name": "loopback",' {} ) && sudo sed -i 's|"cniVersion": ".*"|"cniVersion": "1.0.0"|g' {}" ; I0920 11:04:29.736508 3401853 cni.go:230] loopback cni configuration patched: "/etc/cni/net.d/*loopback.conf*" found I0920 11:04:29.736562 3401853 ssh_runner.go:195] Run: sudo find /etc/cni/net.d -maxdepth 1 -type f ( ( -name *bridge* -or -name *podman* ) -and -not -name *.mk_disabled ) -printf "%!p(MISSING), " -exec sh -c "sudo mv {} {}.mk_disabled" ; I0920 11:04:29.756245 3401853 cni.go:262] disabled [/etc/cni/net.d/87-podman-bridge.conflist, /etc/cni/net.d/100-crio-bridge.conf] bridge cni config(s) I0920 11:04:29.756252 3401853 start.go:466] detecting cgroup driver to use... 
I0920 11:04:29.756274 3401853 detect.go:199] detected "systemd" cgroup driver on host os I0920 11:04:29.756320 3401853 ssh_runner.go:195] Run: sudo systemctl stop -f crio I0920 11:04:29.767204 3401853 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service crio I0920 11:04:29.775572 3401853 docker.go:196] disabling cri-docker service (if available) ... I0920 11:04:29.775750 3401853 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.socket I0920 11:04:29.786251 3401853 ssh_runner.go:195] Run: sudo systemctl stop -f cri-docker.service I0920 11:04:29.796220 3401853 ssh_runner.go:195] Run: sudo systemctl disable cri-docker.socket I0920 11:04:29.867790 3401853 ssh_runner.go:195] Run: sudo systemctl mask cri-docker.service I0920 11:04:29.936439 3401853 docker.go:212] disabling docker service ... I0920 11:04:29.936499 3401853 ssh_runner.go:195] Run: sudo systemctl stop -f docker.socket I0920 11:04:29.951634 3401853 ssh_runner.go:195] Run: sudo systemctl stop -f docker.service I0920 11:04:29.961120 3401853 ssh_runner.go:195] Run: sudo systemctl disable docker.socket I0920 11:04:30.014310 3401853 ssh_runner.go:195] Run: sudo systemctl mask docker.service I0920 11:04:30.073940 3401853 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service docker I0920 11:04:30.083146 3401853 ssh_runner.go:195] Run: /bin/bash -c "sudo mkdir -p /etc && printf %!s(MISSING) "runtime-endpoint: unix:///run/containerd/containerd.sock " | sudo tee /etc/crictl.yaml" I0920 11:04:30.096194 3401853 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)sandbox_image = .*$|\1sandbox_image = "registry.k8s.io/pause:3.9"|' /etc/containerd/config.toml" I0920 11:04:30.104922 3401853 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)restrict_oom_score_adj = .*$|\1restrict_oom_score_adj = false|' /etc/containerd/config.toml" I0920 11:04:30.113247 3401853 containerd.go:145] configuring containerd to use "systemd" as cgroup driver... 
I0920 11:04:30.113299 3401853 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)SystemdCgroup = .*$|\1SystemdCgroup = true|g' /etc/containerd/config.toml" I0920 11:04:30.122077 3401853 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runtime.v1.linux"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml" I0920 11:04:30.130299 3401853 ssh_runner.go:195] Run: sh -c "sudo sed -i '/systemd_cgroup/d' /etc/containerd/config.toml" I0920 11:04:30.138685 3401853 ssh_runner.go:195] Run: sh -c "sudo sed -i 's|"io.containerd.runc.v1"|"io.containerd.runc.v2"|g' /etc/containerd/config.toml" I0920 11:04:30.146611 3401853 ssh_runner.go:195] Run: sh -c "sudo rm -rf /etc/cni/net.mk" I0920 11:04:30.154858 3401853 ssh_runner.go:195] Run: sh -c "sudo sed -i -r 's|^( *)conf_dir = .*$|\1conf_dir = "/etc/cni/net.d"|g' /etc/containerd/config.toml" I0920 11:04:30.163168 3401853 ssh_runner.go:195] Run: sudo sysctl net.bridge.bridge-nf-call-iptables I0920 11:04:30.170437 3401853 ssh_runner.go:195] Run: sudo sh -c "echo 1 > /proc/sys/net/ipv4/ip_forward" I0920 11:04:30.176754 3401853 ssh_runner.go:195] Run: sudo systemctl daemon-reload I0920 11:04:30.225889 3401853 ssh_runner.go:195] Run: sudo systemctl restart containerd I0920 11:04:30.288358 3401853 start.go:513] Will wait 60s for socket path /run/containerd/containerd.sock I0920 11:04:30.288422 3401853 ssh_runner.go:195] Run: stat /run/containerd/containerd.sock I0920 11:04:30.291305 3401853 start.go:534] Will wait 60s for crictl version I0920 11:04:30.291355 3401853 ssh_runner.go:195] Run: which crictl I0920 11:04:30.294101 3401853 ssh_runner.go:195] Run: sudo /usr/bin/crictl version I0920 11:04:30.468444 3401853 start.go:550] Version: 0.1.0 RuntimeName: containerd RuntimeVersion: 1.6.21 RuntimeApiVersion: v1 I0920 11:04:30.468565 3401853 ssh_runner.go:195] Run: containerd --version I0920 11:04:30.484315 3401853 ssh_runner.go:195] Run: containerd --version I0920 11:04:30.508646 3401853 out.go:177] ๐Ÿ“ฆ Preparing Kubernetes v1.27.4 on containerd 1.6.21 ... I0920 11:04:30.515336 3401853 cli_runner.go:164] Run: docker network inspect marina --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" W0920 11:04:30.527385 3401853 cli_runner.go:211] docker network inspect marina --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}" returned with exit code 1 I0920 11:04:30.527476 3401853 network_create.go:281] running [docker network inspect marina] to gather additional debugging logs... 
I0920 11:04:30.527488 3401853 cli_runner.go:164] Run: docker network inspect marina W0920 11:04:30.539023 3401853 cli_runner.go:211] docker network inspect marina returned with exit code 1 I0920 11:04:30.539037 3401853 network_create.go:284] error running [docker network inspect marina]: docker network inspect marina: exit status 1 stdout: [] stderr: Error response from daemon: network marina not found I0920 11:04:30.539045 3401853 network_create.go:286] output of [docker network inspect marina]: -- stdout -- [] -- /stdout -- ** stderr ** Error response from daemon: network marina not found ** /stderr ** E0920 11:04:30.539071 3401853 start.go:131] Unable to get host IP: network inspect: docker network inspect marina --format "{"Name": "{{.Name}}","Driver": "{{.Driver}}","Subnet": "{{range .IPAM.Config}}{{.Subnet}}{{end}}","Gateway": "{{range .IPAM.Config}}{{.Gateway}}{{end}}","MTU": {{if (index .Options "com.docker.network.driver.mtu")}}{{(index .Options "com.docker.network.driver.mtu")}}{{else}}0{{end}}, "ContainerIPs": [{{range $k,$v := .Containers }}"{{$v.IPv4Address}}",{{end}}]}": exit status 1 stdout: stderr: Error response from daemon: network marina not found I0920 11:04:30.539161 3401853 preload.go:132] Checking if preload exists for k8s version v1.27.4 and runtime containerd I0920 11:04:30.539215 3401853 ssh_runner.go:195] Run: sudo crictl images --output json I0920 11:04:30.567507 3401853 containerd.go:604] all images are preloaded for containerd runtime. I0920 11:04:30.567515 3401853 containerd.go:518] Images already preloaded, skipping extraction I0920 11:04:30.567575 3401853 ssh_runner.go:195] Run: sudo crictl images --output json I0920 11:04:30.591518 3401853 containerd.go:604] all images are preloaded for containerd runtime. I0920 11:04:30.591526 3401853 cache_images.go:84] Images are preloaded, skipping loading I0920 11:04:30.591583 3401853 ssh_runner.go:195] Run: sudo crictl info I0920 11:04:30.616698 3401853 cni.go:84] Creating CNI manager for "calico" I0920 11:04:30.616706 3401853 kubeadm.go:87] Using pod CIDR: 10.244.0.0/16 I0920 11:04:30.616716 3401853 kubeadm.go:176] kubeadm options: {CertDir:/var/lib/minikube/certs ServiceCIDR:10.96.0.0/12 PodSubnet:10.244.0.0/16 AdvertiseAddress:172.30.0.2 APIServerPort:8443 KubernetesVersion:v1.27.4 EtcdDataDir:/var/lib/minikube/etcd EtcdExtraArgs:map[] ClusterName:marina NodeName:marina DNSDomain:cluster.local CRISocket:/run/containerd/containerd.sock ImageRepository: ComponentOptions:[{Component:apiServer ExtraArgs:map[enable-admission-plugins:NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota] Pairs:map[certSANs:["127.0.0.1", "localhost", "172.30.0.2"]]} {Component:controllerManager ExtraArgs:map[allocate-node-cidrs:true leader-elect:false] Pairs:map[]} {Component:scheduler ExtraArgs:map[leader-elect:false] Pairs:map[]}] FeatureArgs:map[] NodeIP:172.30.0.2 CgroupDriver:systemd ClientCAFile:/var/lib/minikube/certs/ca.crt StaticPodPath:/etc/kubernetes/manifests ControlPlaneAddress:control-plane.minikube.internal KubeProxyOptions:map[] ResolvConfSearchRegression:false KubeletConfigOpts:map[hairpinMode:hairpin-veth runtimeRequestTimeout:15m] PrependCriSocketUnix:true} I0920 11:04:30.616787 3401853 kubeadm.go:181] kubeadm config: apiVersion: kubeadm.k8s.io/v1beta3 kind: InitConfiguration localAPIEndpoint: advertiseAddress: 172.30.0.2 bindPort: 8443 bootstrapTokens: - groups: - 
system:bootstrappers:kubeadm:default-node-token ttl: 24h0m0s usages: - signing - authentication nodeRegistration: criSocket: unix:///run/containerd/containerd.sock name: "marina" kubeletExtraArgs: node-ip: 172.30.0.2 taints: [] --- apiVersion: kubeadm.k8s.io/v1beta3 kind: ClusterConfiguration apiServer: certSANs: ["127.0.0.1", "localhost", "172.30.0.2"] extraArgs: enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" controllerManager: extraArgs: allocate-node-cidrs: "true" leader-elect: "false" scheduler: extraArgs: leader-elect: "false" certificatesDir: /var/lib/minikube/certs clusterName: mk controlPlaneEndpoint: control-plane.minikube.internal:8443 etcd: local: dataDir: /var/lib/minikube/etcd extraArgs: proxy-refresh-interval: "70000" kubernetesVersion: v1.27.4 networking: dnsDomain: cluster.local podSubnet: "10.244.0.0/16" serviceSubnet: 10.96.0.0/12 --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration authentication: x509: clientCAFile: /var/lib/minikube/certs/ca.crt cgroupDriver: systemd hairpinMode: hairpin-veth runtimeRequestTimeout: 15m clusterDomain: "cluster.local" # disable disk resource management by default imageGCHighThresholdPercent: 100 evictionHard: nodefs.available: "0%!"(MISSING) nodefs.inodesFree: "0%!"(MISSING) imagefs.available: "0%!"(MISSING) failSwapOn: false staticPodPath: /etc/kubernetes/manifests --- apiVersion: kubeproxy.config.k8s.io/v1alpha1 kind: KubeProxyConfiguration clusterCIDR: "10.244.0.0/16" metricsBindAddress: 0.0.0.0:10249 conntrack: maxPerCore: 0 # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established" tcpEstablishedTimeout: 0s # Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close" tcpCloseWaitTimeout: 0s I0920 11:04:30.616825 3401853 kubeadm.go:976] kubelet [Unit] Wants=containerd.service [Service] ExecStart= ExecStart=/var/lib/minikube/binaries/v1.27.4/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --config=/var/lib/kubelet/config.yaml --container-runtime-endpoint=unix:///run/containerd/containerd.sock --hostname-override=marina --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=172.30.0.2 [Install] config: {KubernetesVersion:v1.27.4 ClusterName:marina Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:calico NodeIP: NodePort:8443 NodeName:} I0920 11:04:30.616877 3401853 ssh_runner.go:195] Run: sudo ls /var/lib/minikube/binaries/v1.27.4 I0920 11:04:30.625974 3401853 binaries.go:44] Found k8s binaries, skipping transfer I0920 11:04:30.626037 3401853 ssh_runner.go:195] Run: sudo mkdir -p /etc/systemd/system/kubelet.service.d /lib/systemd/system /var/tmp/minikube I0920 11:04:30.634394 3401853 ssh_runner.go:362] scp memory --> /etc/systemd/system/kubelet.service.d/10-kubeadm.conf (376 bytes) I0920 11:04:30.649097 3401853 ssh_runner.go:362] scp memory --> /lib/systemd/system/kubelet.service (352 bytes) I0920 11:04:30.664998 3401853 ssh_runner.go:362] scp memory --> /var/tmp/minikube/kubeadm.yaml.new (2088 bytes) I0920 11:04:30.680392 3401853 ssh_runner.go:195] Run: grep 172.30.0.2 control-plane.minikube.internal$ /etc/hosts I0920 
11:04:30.683086 3401853 ssh_runner.go:195] Run: /bin/bash -c "{ grep -v $'\tcontrol-plane.minikube.internal$' "/etc/hosts"; echo "172.30.0.2 control-plane.minikube.internal"; } > /tmp/h.$$; sudo cp /tmp/h.$$ "/etc/hosts"" I0920 11:04:30.692378 3401853 certs.go:56] Setting up /home/tadeu/.minikube/profiles/marina for IP: 172.30.0.2 I0920 11:04:30.692390 3401853 certs.go:190] acquiring lock for shared ca certs: {Name:mkd6ef9b31408b66a753afffb0e6707a3dca898b Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:30.692768 3401853 certs.go:199] skipping minikubeCA CA generation: /home/tadeu/.minikube/ca.key I0920 11:04:30.692871 3401853 certs.go:199] skipping proxyClientCA CA generation: /home/tadeu/.minikube/proxy-client-ca.key I0920 11:04:30.692905 3401853 certs.go:319] generating minikube-user signed cert: /home/tadeu/.minikube/profiles/marina/client.key I0920 11:04:30.692914 3401853 crypto.go:68] Generating cert /home/tadeu/.minikube/profiles/marina/client.crt with IP's: [] I0920 11:04:30.788486 3401853 crypto.go:156] Writing cert to /home/tadeu/.minikube/profiles/marina/client.crt ... I0920 11:04:30.788497 3401853 lock.go:35] WriteFile acquiring /home/tadeu/.minikube/profiles/marina/client.crt: {Name:mk83cfb39e50883f17386f3df96204cf3da5005c Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:30.788615 3401853 crypto.go:164] Writing key to /home/tadeu/.minikube/profiles/marina/client.key ... I0920 11:04:30.788618 3401853 lock.go:35] WriteFile acquiring /home/tadeu/.minikube/profiles/marina/client.key: {Name:mkf69ef9d0b681ecdf20e84e50ade097b683ca0c Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:30.788674 3401853 certs.go:319] generating minikube signed cert: /home/tadeu/.minikube/profiles/marina/apiserver.key.03f9d0a8 I0920 11:04:30.788682 3401853 crypto.go:68] Generating cert /home/tadeu/.minikube/profiles/marina/apiserver.crt.03f9d0a8 with IP's: [172.30.0.2 10.96.0.1 127.0.0.1 10.0.0.1] I0920 11:04:30.900837 3401853 crypto.go:156] Writing cert to /home/tadeu/.minikube/profiles/marina/apiserver.crt.03f9d0a8 ... I0920 11:04:30.901824 3401853 lock.go:35] WriteFile acquiring /home/tadeu/.minikube/profiles/marina/apiserver.crt.03f9d0a8: {Name:mkee4099681bbc73b35ebdf9edaaee7d93fbf4b9 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:30.901945 3401853 crypto.go:164] Writing key to /home/tadeu/.minikube/profiles/marina/apiserver.key.03f9d0a8 ... I0920 11:04:30.901950 3401853 lock.go:35] WriteFile acquiring /home/tadeu/.minikube/profiles/marina/apiserver.key.03f9d0a8: {Name:mke39d9ccbff771c6a1d67eef75d3306b1681667 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:30.902006 3401853 certs.go:337] copying /home/tadeu/.minikube/profiles/marina/apiserver.crt.03f9d0a8 -> /home/tadeu/.minikube/profiles/marina/apiserver.crt I0920 11:04:30.902101 3401853 certs.go:341] copying /home/tadeu/.minikube/profiles/marina/apiserver.key.03f9d0a8 -> /home/tadeu/.minikube/profiles/marina/apiserver.key I0920 11:04:30.902168 3401853 certs.go:319] generating aggregator signed cert: /home/tadeu/.minikube/profiles/marina/proxy-client.key I0920 11:04:30.902182 3401853 crypto.go:68] Generating cert /home/tadeu/.minikube/profiles/marina/proxy-client.crt with IP's: [] I0920 11:04:30.955048 3401853 crypto.go:156] Writing cert to /home/tadeu/.minikube/profiles/marina/proxy-client.crt ... 
I0920 11:04:30.955059 3401853 lock.go:35] WriteFile acquiring /home/tadeu/.minikube/profiles/marina/proxy-client.crt: {Name:mkdfde2d4dc6904602626b2bf7667b5a561ea30b Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:30.955173 3401853 crypto.go:164] Writing key to /home/tadeu/.minikube/profiles/marina/proxy-client.key ... I0920 11:04:30.955177 3401853 lock.go:35] WriteFile acquiring /home/tadeu/.minikube/profiles/marina/proxy-client.key: {Name:mk397b7e52f716f75883d86390cc1655a07af28e Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:30.955429 3401853 certs.go:437] found cert: /home/tadeu/.minikube/certs/home/tadeu/.minikube/certs/_.marina.estaleiro.pem (1570 bytes) I0920 11:04:30.955547 3401853 certs.go:437] found cert: /home/tadeu/.minikube/certs/home/tadeu/.minikube/certs/ca-estaleiro.pem (1822 bytes) I0920 11:04:30.955655 3401853 certs.go:437] found cert: /home/tadeu/.minikube/certs/home/tadeu/.minikube/certs/ca-key.pem (1675 bytes) I0920 11:04:30.955671 3401853 certs.go:437] found cert: /home/tadeu/.minikube/certs/home/tadeu/.minikube/certs/ca.pem (1074 bytes) I0920 11:04:30.955686 3401853 certs.go:437] found cert: /home/tadeu/.minikube/certs/home/tadeu/.minikube/certs/cert.pem (1119 bytes) I0920 11:04:30.955700 3401853 certs.go:437] found cert: /home/tadeu/.minikube/certs/home/tadeu/.minikube/certs/hub.marina.estaleiro.pem (1554 bytes) I0920 11:04:30.955808 3401853 certs.go:437] found cert: /home/tadeu/.minikube/certs/home/tadeu/.minikube/certs/key.pem (1675 bytes) I0920 11:04:30.955824 3401853 certs.go:437] found cert: /home/tadeu/.minikube/certs/home/tadeu/.minikube/certs/serpro-intra-ssl.pem (4736 bytes) I0920 11:04:30.956275 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/profiles/marina/apiserver.crt --> /var/lib/minikube/certs/apiserver.crt (1399 bytes) I0920 11:04:30.977639 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/profiles/marina/apiserver.key --> /var/lib/minikube/certs/apiserver.key (1679 bytes) I0920 11:04:30.997013 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/profiles/marina/proxy-client.crt --> /var/lib/minikube/certs/proxy-client.crt (1147 bytes) I0920 11:04:31.016387 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/profiles/marina/proxy-client.key --> /var/lib/minikube/certs/proxy-client.key (1675 bytes) I0920 11:04:31.036289 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/ca.crt --> /var/lib/minikube/certs/ca.crt (1111 bytes) I0920 11:04:31.055972 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/ca.key --> /var/lib/minikube/certs/ca.key (1679 bytes) I0920 11:04:31.075394 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/proxy-client-ca.crt --> /var/lib/minikube/certs/proxy-client-ca.crt (1119 bytes) I0920 11:04:31.094191 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/proxy-client-ca.key --> /var/lib/minikube/certs/proxy-client-ca.key (1675 bytes) I0920 11:04:31.113244 3401853 ssh_runner.go:195] Run: stat -c "%!s(MISSING) %!y(MISSING)" /usr/share/ca-certificates/serpro-intra-ssl.pem I0920 11:04:31.116316 3401853 ssh_runner.go:352] existence check for /usr/share/ca-certificates/serpro-intra-ssl.pem: stat -c "%!s(MISSING) %!y(MISSING)" /usr/share/ca-certificates/serpro-intra-ssl.pem: Process exited with status 1 stdout: stderr: stat: cannot statx '/usr/share/ca-certificates/serpro-intra-ssl.pem': No such file or directory I0920 11:04:31.116324 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/certs/serpro-intra-ssl.pem --> /usr/share/ca-certificates/serpro-intra-ssl.pem (4736 bytes) I0920 11:04:31.134876 3401853 
ssh_runner.go:362] scp /home/tadeu/.minikube/ca.crt --> /usr/share/ca-certificates/minikubeCA.pem (1111 bytes) I0920 11:04:31.153089 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/certs/_.marina.estaleiro.pem --> /usr/share/ca-certificates/_.marina.estaleiro.pem (1570 bytes) I0920 11:04:31.171282 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/certs/ca-estaleiro.pem --> /usr/share/ca-certificates/ca-estaleiro.pem (1822 bytes) I0920 11:04:31.189309 3401853 ssh_runner.go:362] scp /home/tadeu/.minikube/certs/hub.marina.estaleiro.pem --> /usr/share/ca-certificates/hub.marina.estaleiro.pem (1554 bytes) I0920 11:04:31.208192 3401853 ssh_runner.go:362] scp memory --> /var/lib/minikube/kubeconfig (738 bytes) I0920 11:04:31.221212 3401853 ssh_runner.go:195] Run: openssl version I0920 11:04:31.229286 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/serpro-intra-ssl.pem && ln -fs /usr/share/ca-certificates/serpro-intra-ssl.pem /etc/ssl/certs/serpro-intra-ssl.pem" I0920 11:04:31.238016 3401853 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/serpro-intra-ssl.pem I0920 11:04:31.240786 3401853 certs.go:480] hashing: -rw-r--r-- 1 root root 4736 Sep 20 00:10 /usr/share/ca-certificates/serpro-intra-ssl.pem I0920 11:04:31.240842 3401853 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/serpro-intra-ssl.pem I0920 11:04:31.246399 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/fdfb85bb.0 || ln -fs /etc/ssl/certs/serpro-intra-ssl.pem /etc/ssl/certs/fdfb85bb.0" I0920 11:04:31.253831 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/minikubeCA.pem && ln -fs /usr/share/ca-certificates/minikubeCA.pem /etc/ssl/certs/minikubeCA.pem" I0920 11:04:31.262263 3401853 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/minikubeCA.pem I0920 11:04:31.264526 3401853 certs.go:480] hashing: -rw-r--r-- 1 root root 1111 Sep 19 21:58 /usr/share/ca-certificates/minikubeCA.pem I0920 11:04:31.264565 3401853 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/minikubeCA.pem I0920 11:04:31.268726 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/b5213941.0 || ln -fs /etc/ssl/certs/minikubeCA.pem /etc/ssl/certs/b5213941.0" I0920 11:04:31.275695 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/_.marina.estaleiro.pem && ln -fs /usr/share/ca-certificates/_.marina.estaleiro.pem /etc/ssl/certs/_.marina.estaleiro.pem" I0920 11:04:31.282476 3401853 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/_.marina.estaleiro.pem I0920 11:04:31.284820 3401853 certs.go:480] hashing: -rw-r--r-- 1 root root 1570 Sep 20 00:04 /usr/share/ca-certificates/_.marina.estaleiro.pem I0920 11:04:31.284856 3401853 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/_.marina.estaleiro.pem I0920 11:04:31.290124 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/2726fa68.0 || ln -fs /etc/ssl/certs/_.marina.estaleiro.pem /etc/ssl/certs/2726fa68.0" I0920 11:04:31.297030 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/ca-estaleiro.pem && ln -fs /usr/share/ca-certificates/ca-estaleiro.pem /etc/ssl/certs/ca-estaleiro.pem" I0920 11:04:31.304816 3401853 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/ca-estaleiro.pem I0920 11:04:31.307684 3401853 certs.go:480] hashing: -rw-r--r-- 1 root root 1822 Sep 20 00:04 
/usr/share/ca-certificates/ca-estaleiro.pem I0920 11:04:31.307732 3401853 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/ca-estaleiro.pem I0920 11:04:31.312089 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/34a06d10.0 || ln -fs /etc/ssl/certs/ca-estaleiro.pem /etc/ssl/certs/34a06d10.0" I0920 11:04:31.319903 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -s /usr/share/ca-certificates/hub.marina.estaleiro.pem && ln -fs /usr/share/ca-certificates/hub.marina.estaleiro.pem /etc/ssl/certs/hub.marina.estaleiro.pem" I0920 11:04:31.327381 3401853 ssh_runner.go:195] Run: ls -la /usr/share/ca-certificates/hub.marina.estaleiro.pem I0920 11:04:31.329967 3401853 certs.go:480] hashing: -rw-r--r-- 1 root root 1554 Sep 20 00:04 /usr/share/ca-certificates/hub.marina.estaleiro.pem I0920 11:04:31.330005 3401853 ssh_runner.go:195] Run: openssl x509 -hash -noout -in /usr/share/ca-certificates/hub.marina.estaleiro.pem I0920 11:04:31.334500 3401853 ssh_runner.go:195] Run: sudo /bin/bash -c "test -L /etc/ssl/certs/72cd8ba8.0 || ln -fs /etc/ssl/certs/hub.marina.estaleiro.pem /etc/ssl/certs/72cd8ba8.0" I0920 11:04:31.342068 3401853 ssh_runner.go:195] Run: ls /var/lib/minikube/certs/etcd I0920 11:04:31.344611 3401853 certs.go:353] certs directory doesn't exist, likely first start: ls /var/lib/minikube/certs/etcd: Process exited with status 2 stdout: stderr: ls: cannot access '/var/lib/minikube/certs/etcd': No such file or directory I0920 11:04:31.344641 3401853 kubeadm.go:404] StartCluster: {Name:marina KeepContext:false EmbedCerts:true MinikubeISO: KicBaseImage:gcr.io/k8s-minikube/kicbase:v0.0.40@sha256:8cadf23777709e43eca447c47a45f5a4635615129267ce025193040ec92a1631 Memory:4900 CPUs:2 DiskSize:20000 VMDriver: Driver:docker HyperkitVpnKitSock: HyperkitVSockPorts:[] DockerEnv:[] ContainerVolumeMounts:[] InsecureRegistry:[] RegistryMirror:[] HostOnlyCIDR:192.168.59.1/24 HypervVirtualSwitch: HypervUseExternalSwitch:false HypervExternalAdapter: KVMNetwork:default KVMQemuURI:qemu:///system KVMGPU:false KVMHidden:false KVMNUMACount:1 APIServerPort:0 DockerOpt:[] DisableDriverMounts:false NFSShare:[] NFSSharesRoot:/nfsshares UUID: NoVTXCheck:false DNSProxy:false HostDNSResolver:true HostOnlyNicType:virtio NatNicType:virtio SSHIPAddress: SSHUser:root SSHKey: SSHPort:22 KubernetesConfig:{KubernetesVersion:v1.27.4 ClusterName:marina Namespace:default APIServerName:minikubeCA APIServerNames:[] APIServerIPs:[] DNSDomain:cluster.local ContainerRuntime:containerd CRISocket: NetworkPlugin:cni FeatureGates: ServiceCIDR:10.96.0.0/12 ImageRepository: LoadBalancerStartIP: LoadBalancerEndIP: CustomIngressCert: RegistryAliases: ExtraOptions:[] ShouldLoadCachedImages:true EnableDefaultCNI:false CNI:calico NodeIP: NodePort:8443 NodeName:} Nodes:[{Name: IP:172.30.0.2 Port:8443 KubernetesVersion:v1.27.4 ContainerRuntime:containerd ControlPlane:true Worker:true}] Addons:map[] CustomAddonImages:map[] CustomAddonRegistries:map[] VerifyComponents:map[apiserver:true system_pods:true] StartHostTimeout:6m0s ScheduledStop: ExposedPorts:[] ListenAddress: Network:marinanet Subnet: MultiNodeRequested:false ExtraDisks:0 CertExpiration:26280h0m0s Mount:false MountString:/home/tadeu:/minikube-host Mount9PVersion:9p2000.L MountGID:docker MountIP: MountMSize:262144 MountOptions:[] MountPort:0 MountType:9p MountUID:docker BinaryMirror: DisableOptimizations:false DisableMetrics:false CustomQemuFirmwarePath: SocketVMnetClientPath: SocketVMnetPath: StaticIP: SSHAuthSock: SSHAgentPID:0} I0920 
11:04:31.344691 3401853 cri.go:54] listing CRI containers in root /run/containerd/runc/k8s.io: {State:paused Name: Namespaces:[kube-system]} I0920 11:04:31.344731 3401853 ssh_runner.go:195] Run: sudo -s eval "crictl ps -a --quiet --label io.kubernetes.pod.namespace=kube-system" I0920 11:04:31.367896 3401853 cri.go:89] found id: "" I0920 11:04:31.367945 3401853 ssh_runner.go:195] Run: sudo ls /var/lib/kubelet/kubeadm-flags.env /var/lib/kubelet/config.yaml /var/lib/minikube/etcd I0920 11:04:31.375233 3401853 ssh_runner.go:195] Run: sudo cp /var/tmp/minikube/kubeadm.yaml.new /var/tmp/minikube/kubeadm.yaml I0920 11:04:31.382714 3401853 kubeadm.go:226] ignoring SystemVerification for kubeadm because of docker driver I0920 11:04:31.382752 3401853 ssh_runner.go:195] Run: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf I0920 11:04:31.389568 3401853 kubeadm.go:152] config check failed, skipping stale config cleanup: sudo ls -la /etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf: Process exited with status 2 stdout: stderr: ls: cannot access '/etc/kubernetes/admin.conf': No such file or directory ls: cannot access '/etc/kubernetes/kubelet.conf': No such file or directory ls: cannot access '/etc/kubernetes/controller-manager.conf': No such file or directory ls: cannot access '/etc/kubernetes/scheduler.conf': No such file or directory I0920 11:04:31.389586 3401853 ssh_runner.go:286] Start: /bin/bash -c "sudo env PATH="/var/lib/minikube/binaries/v1.27.4:$PATH" kubeadm init --config /var/tmp/minikube/kubeadm.yaml --ignore-preflight-errors=DirAvailable--etc-kubernetes-manifests,DirAvailable--var-lib-minikube,DirAvailable--var-lib-minikube-etcd,FileAvailable--etc-kubernetes-manifests-kube-scheduler.yaml,FileAvailable--etc-kubernetes-manifests-kube-apiserver.yaml,FileAvailable--etc-kubernetes-manifests-kube-controller-manager.yaml,FileAvailable--etc-kubernetes-manifests-etcd.yaml,Port-10250,Swap,NumCPU,Mem,SystemVerification,FileContent--proc-sys-net-bridge-bridge-nf-call-iptables" I0920 11:04:31.613724 3401853 kubeadm.go:322] [WARNING Swap]: swap is enabled; production deployments should disable swap unless testing the NodeSwap feature gate of the kubelet I0920 11:04:31.699022 3401853 kubeadm.go:322] [WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service' I0920 11:04:38.737333 3401853 kubeadm.go:322] [init] Using Kubernetes version: v1.27.4 I0920 11:04:38.737363 3401853 kubeadm.go:322] [preflight] Running pre-flight checks I0920 11:04:38.737451 3401853 kubeadm.go:322] [preflight] Pulling images required for setting up a Kubernetes cluster I0920 11:04:38.737626 3401853 kubeadm.go:322] [preflight] This might take a minute or two, depending on the speed of your internet connection I0920 11:04:38.737739 3401853 kubeadm.go:322] [preflight] You can also perform this action in beforehand using 'kubeadm config images pull' I0920 11:04:38.737800 3401853 kubeadm.go:322] [certs] Using certificateDir folder "/var/lib/minikube/certs" I0920 11:04:38.741994 3401853 out.go:204] โ–ช Generating certificates and keys ... 
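kubeadm's preflight above warns that swap is enabled and that the kubelet unit is not enabled inside the node; minikube deliberately tolerates both (note the --ignore-preflight-errors list in the invocation), so the warnings are cosmetic here. If you want to silence them anyway, a sketch assuming the `marina` profile used throughout this log:

    # Optional clean-up for the two [WARNING] lines above
    sudo swapoff -a                                            # on the host, removes the Swap warning
    minikube ssh -p marina -- sudo systemctl enable kubelet    # inside the node, removes the Service-Kubelet warning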
I0920 11:04:38.742053 3401853 kubeadm.go:322] [certs] Using existing ca certificate authority I0920 11:04:38.742099 3401853 kubeadm.go:322] [certs] Using existing apiserver certificate and key on disk I0920 11:04:38.742160 3401853 kubeadm.go:322] [certs] Generating "apiserver-kubelet-client" certificate and key I0920 11:04:38.742204 3401853 kubeadm.go:322] [certs] Generating "front-proxy-ca" certificate and key I0920 11:04:38.742253 3401853 kubeadm.go:322] [certs] Generating "front-proxy-client" certificate and key I0920 11:04:38.742301 3401853 kubeadm.go:322] [certs] Generating "etcd/ca" certificate and key I0920 11:04:38.742343 3401853 kubeadm.go:322] [certs] Generating "etcd/server" certificate and key I0920 11:04:38.742426 3401853 kubeadm.go:322] [certs] etcd/server serving cert is signed for DNS names [localhost marina] and IPs [172.30.0.2 127.0.0.1 ::1] I0920 11:04:38.742464 3401853 kubeadm.go:322] [certs] Generating "etcd/peer" certificate and key I0920 11:04:38.742547 3401853 kubeadm.go:322] [certs] etcd/peer serving cert is signed for DNS names [localhost marina] and IPs [172.30.0.2 127.0.0.1 ::1] I0920 11:04:38.742587 3401853 kubeadm.go:322] [certs] Generating "etcd/healthcheck-client" certificate and key I0920 11:04:38.742626 3401853 kubeadm.go:322] [certs] Generating "apiserver-etcd-client" certificate and key I0920 11:04:38.742651 3401853 kubeadm.go:322] [certs] Generating "sa" key and public key I0920 11:04:38.742684 3401853 kubeadm.go:322] [kubeconfig] Using kubeconfig folder "/etc/kubernetes" I0920 11:04:38.742713 3401853 kubeadm.go:322] [kubeconfig] Writing "admin.conf" kubeconfig file I0920 11:04:38.742747 3401853 kubeadm.go:322] [kubeconfig] Writing "kubelet.conf" kubeconfig file I0920 11:04:38.742784 3401853 kubeadm.go:322] [kubeconfig] Writing "controller-manager.conf" kubeconfig file I0920 11:04:38.742816 3401853 kubeadm.go:322] [kubeconfig] Writing "scheduler.conf" kubeconfig file I0920 11:04:38.742877 3401853 kubeadm.go:322] [kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env" I0920 11:04:38.742929 3401853 kubeadm.go:322] [kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml" I0920 11:04:38.742952 3401853 kubeadm.go:322] [kubelet-start] Starting the kubelet I0920 11:04:38.742991 3401853 kubeadm.go:322] [control-plane] Using manifest folder "/etc/kubernetes/manifests" I0920 11:04:38.746440 3401853 out.go:204] โ–ช Booting up control plane ... I0920 11:04:38.746529 3401853 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-apiserver" I0920 11:04:38.746587 3401853 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-controller-manager" I0920 11:04:38.746640 3401853 kubeadm.go:322] [control-plane] Creating static Pod manifest for "kube-scheduler" I0920 11:04:38.746690 3401853 kubeadm.go:322] [etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests" I0920 11:04:38.746782 3401853 kubeadm.go:322] [wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". 
This can take up to 4m0s I0920 11:04:38.746826 3401853 kubeadm.go:322] [apiclient] All control plane components are healthy after 4.004499 seconds I0920 11:04:38.746900 3401853 kubeadm.go:322] [upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace I0920 11:04:38.746991 3401853 kubeadm.go:322] [kubelet] Creating a ConfigMap "kubelet-config" in namespace kube-system with the configuration for the kubelets in the cluster I0920 11:04:38.747033 3401853 kubeadm.go:322] [upload-certs] Skipping phase. Please see --upload-certs I0920 11:04:38.747168 3401853 kubeadm.go:322] [mark-control-plane] Marking the node marina as control-plane by adding the labels: [node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers] I0920 11:04:38.747206 3401853 kubeadm.go:322] [bootstrap-token] Using token: kzy3yz.rv2i2yy3nsunnwvr I0920 11:04:38.750530 3401853 out.go:204] โ–ช Configuring RBAC rules ... I0920 11:04:38.750638 3401853 kubeadm.go:322] [bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles I0920 11:04:38.750716 3401853 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to get nodes I0920 11:04:38.750853 3401853 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials I0920 11:04:38.750974 3401853 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token I0920 11:04:38.751086 3401853 kubeadm.go:322] [bootstrap-token] Configured RBAC rules to allow certificate rotation for all node client certificates in the cluster I0920 11:04:38.751185 3401853 kubeadm.go:322] [bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace I0920 11:04:38.751299 3401853 kubeadm.go:322] [kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key I0920 11:04:38.751339 3401853 kubeadm.go:322] [addons] Applied essential addon: CoreDNS I0920 11:04:38.751388 3401853 kubeadm.go:322] [addons] Applied essential addon: kube-proxy I0920 11:04:38.751392 3401853 kubeadm.go:322] I0920 11:04:38.751455 3401853 kubeadm.go:322] Your Kubernetes control-plane has initialized successfully! I0920 11:04:38.751459 3401853 kubeadm.go:322] I0920 11:04:38.751530 3401853 kubeadm.go:322] To start using your cluster, you need to run the following as a regular user: I0920 11:04:38.751533 3401853 kubeadm.go:322] I0920 11:04:38.751557 3401853 kubeadm.go:322] mkdir -p $HOME/.kube I0920 11:04:38.751615 3401853 kubeadm.go:322] sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config I0920 11:04:38.751669 3401853 kubeadm.go:322] sudo chown $(id -u):$(id -g) $HOME/.kube/config I0920 11:04:38.751672 3401853 kubeadm.go:322] I0920 11:04:38.751726 3401853 kubeadm.go:322] Alternatively, if you are the root user, you can run: I0920 11:04:38.751729 3401853 kubeadm.go:322] I0920 11:04:38.751799 3401853 kubeadm.go:322] export KUBECONFIG=/etc/kubernetes/admin.conf I0920 11:04:38.751802 3401853 kubeadm.go:322] I0920 11:04:38.751898 3401853 kubeadm.go:322] You should now deploy a pod network to the cluster. 
I0920 11:04:38.751978 3401853 kubeadm.go:322] Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at: I0920 11:04:38.752047 3401853 kubeadm.go:322] https://kubernetes.io/docs/concepts/cluster-administration/addons/ I0920 11:04:38.752051 3401853 kubeadm.go:322] I0920 11:04:38.752137 3401853 kubeadm.go:322] You can now join any number of control-plane nodes by copying certificate authorities I0920 11:04:38.752213 3401853 kubeadm.go:322] and service account keys on each node and then running the following as root: I0920 11:04:38.752217 3401853 kubeadm.go:322] I0920 11:04:38.752295 3401853 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token kzy3yz.rv2i2yy3nsunnwvr \ I0920 11:04:38.752417 3401853 kubeadm.go:322] --discovery-token-ca-cert-hash sha256:f9176da3366b64f18fc98dff80f500e49ec310515955e4953eca1957af8932ae \ I0920 11:04:38.752436 3401853 kubeadm.go:322] --control-plane I0920 11:04:38.752441 3401853 kubeadm.go:322] I0920 11:04:38.752524 3401853 kubeadm.go:322] Then you can join any number of worker nodes by running the following on each as root: I0920 11:04:38.752527 3401853 kubeadm.go:322] I0920 11:04:38.752611 3401853 kubeadm.go:322] kubeadm join control-plane.minikube.internal:8443 --token kzy3yz.rv2i2yy3nsunnwvr \ I0920 11:04:38.752682 3401853 kubeadm.go:322] --discovery-token-ca-cert-hash sha256:f9176da3366b64f18fc98dff80f500e49ec310515955e4953eca1957af8932ae I0920 11:04:38.752687 3401853 cni.go:84] Creating CNI manager for "calico" I0920 11:04:38.757188 3401853 out.go:177] ๐Ÿ”— Configuring Calico (Container Networking Interface) ... I0920 11:04:38.760773 3401853 cni.go:182] applying CNI manifest using /var/lib/minikube/binaries/v1.27.4/kubectl ... I0920 11:04:38.760781 3401853 ssh_runner.go:362] scp memory --> /var/tmp/minikube/cni.yaml (244810 bytes) I0920 11:04:38.784763 3401853 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.4/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml I0920 11:04:39.789959 3401853 ssh_runner.go:235] Completed: sudo /var/lib/minikube/binaries/v1.27.4/kubectl apply --kubeconfig=/var/lib/minikube/kubeconfig -f /var/tmp/minikube/cni.yaml: (1.005181079s) I0920 11:04:39.789983 3401853 ssh_runner.go:195] Run: /bin/bash -c "cat /proc/$(pgrep kube-apiserver)/oom_adj" I0920 11:04:39.790044 3401853 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.4/kubectl label nodes minikube.k8s.io/version=v1.31.2 minikube.k8s.io/commit=fd7ecd9c4599bef9f04c0986c4a0187f98a4396e minikube.k8s.io/name=marina minikube.k8s.io/updated_at=2023_09_20T11_04_39_0700 minikube.k8s.io/primary=true --all --overwrite --kubeconfig=/var/lib/minikube/kubeconfig I0920 11:04:39.790045 3401853 ssh_runner.go:195] Run: sudo /var/lib/minikube/binaries/v1.27.4/kubectl create clusterrolebinding minikube-rbac --clusterrole=cluster-admin --serviceaccount=kube-system:default --kubeconfig=/var/lib/minikube/kubeconfig I0920 11:04:39.843632 3401853 ops.go:34] apiserver oom_adj: -16 I0920 11:04:39.846063 3401853 kubeadm.go:1081] duration metric: took 56.068002ms to wait for elevateKubeSystemPrivileges. 
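At this point the Calico manifest has been applied and the node has been labelled and granted the minikube-rbac cluster-admin binding. Both results can be verified from the host with plain kubectl against the `marina` context this run writes (a sketch, not part of the log):

    kubectl --context marina -n kube-system get pods -l k8s-app=calico-node
    kubectl --context marina get node marina --show-labels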
I0920 11:04:39.846072 3401853 kubeadm.go:406] StartCluster complete in 8.501433962s I0920 11:04:39.846084 3401853 settings.go:142] acquiring lock: {Name:mk216cb50db6133e403ab2a2a03f1d8509a46e25 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:39.846134 3401853 settings.go:150] Updating kubeconfig: /home/tadeu/.kube/config I0920 11:04:39.847189 3401853 lock.go:35] WriteFile acquiring /home/tadeu/.kube/config: {Name:mke1eee092303e3613183065ef1ded7dcebd9892 Clock:{} Delay:500ms Timeout:1m0s Cancel:} I0920 11:04:39.847358 3401853 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.27.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml" I0920 11:04:39.847487 3401853 addons.go:499] enable addons start: toEnable=map[ambassador:false auto-pause:false cloud-spanner:false csi-hostpath-driver:false dashboard:false default-storageclass:true efk:false freshpod:false gcp-auth:false gvisor:false headlamp:false helm-tiller:false inaccel:false ingress:false ingress-dns:false inspektor-gadget:false istio:false istio-provisioner:false kong:false kubevirt:false logviewer:false metallb:false metrics-server:false nvidia-driver-installer:false nvidia-gpu-device-plugin:false olm:false pod-security-policy:false portainer:false registry:false registry-aliases:false registry-creds:false storage-provisioner:true storage-provisioner-gluster:false volumesnapshots:false] I0920 11:04:39.847531 3401853 addons.go:69] Setting storage-provisioner=true in profile "marina" I0920 11:04:39.847539 3401853 addons.go:231] Setting addon storage-provisioner=true in "marina" I0920 11:04:39.847549 3401853 config.go:182] Loaded profile config "marina": Driver=docker, ContainerRuntime=containerd, KubernetesVersion=v1.27.4 I0920 11:04:39.847564 3401853 addons.go:69] Setting default-storageclass=true in profile "marina" I0920 11:04:39.847571 3401853 host.go:66] Checking if "marina" exists ... I0920 11:04:39.847576 3401853 addons_storage_classes.go:33] enableOrDisableStorageClasses default-storageclass=true on "marina" I0920 11:04:39.847826 3401853 cli_runner.go:164] Run: docker container inspect marina --format={{.State.Status}} I0920 11:04:39.847859 3401853 cli_runner.go:164] Run: docker container inspect marina --format={{.State.Status}} I0920 11:04:39.864049 3401853 out.go:177] โ–ช Using image gcr.io/k8s-minikube/storage-provisioner:v5 I0920 11:04:39.863051 3401853 kapi.go:248] "coredns" deployment in "kube-system" namespace and "marina" context rescaled to 1 replicas I0920 11:04:39.864219 3401853 addons.go:231] Setting addon default-storageclass=true in "marina" I0920 11:04:39.867544 3401853 host.go:66] Checking if "marina" exists ... I0920 11:04:39.867555 3401853 start.go:223] Will wait 6m0s for node &{Name: IP:172.30.0.2 Port:8443 KubernetesVersion:v1.27.4 ContainerRuntime:containerd ControlPlane:true Worker:true} I0920 11:04:39.870937 3401853 out.go:177] ๐Ÿ”Ž Verifying Kubernetes components... 
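Only two addons are being enabled here, storage-provisioner and default-storageclass. A quick way to confirm what they left behind (a sketch; -p marina selects the profile from this log):

    minikube addons list -p marina
    kubectl --context marina get storageclass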
I0920 11:04:39.867606 3401853 addons.go:423] installing /etc/kubernetes/addons/storage-provisioner.yaml I0920 11:04:39.867812 3401853 cli_runner.go:164] Run: docker container inspect marina --format={{.State.Status}} I0920 11:04:39.883565 3401853 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storage-provisioner.yaml (2676 bytes) I0920 11:04:39.883615 3401853 ssh_runner.go:195] Run: sudo systemctl is-active --quiet service kubelet I0920 11:04:39.883639 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:39.895239 3401853 ssh_runner.go:195] Run: /bin/bash -c "sudo /var/lib/minikube/binaries/v1.27.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig -n kube-system get configmap coredns -o yaml | sed -e '/^ forward . \/etc\/resolv.conf.*/i \ hosts {\n host.minikube.internal\n fallthrough\n }' -e '/^ errors *$/i \ log' | sudo /var/lib/minikube/binaries/v1.27.4/kubectl --kubeconfig=/var/lib/minikube/kubeconfig replace -f -" I0920 11:04:39.896130 3401853 api_server.go:52] waiting for apiserver process to appear ... I0920 11:04:39.896190 3401853 ssh_runner.go:195] Run: sudo pgrep -xnf kube-apiserver.*minikube.* I0920 11:04:39.896196 3401853 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32867 SSHKeyPath:/home/tadeu/.minikube/machines/marina/id_rsa Username:docker} I0920 11:04:39.896494 3401853 addons.go:423] installing /etc/kubernetes/addons/storageclass.yaml I0920 11:04:39.896500 3401853 ssh_runner.go:362] scp memory --> /etc/kubernetes/addons/storageclass.yaml (271 bytes) I0920 11:04:39.896552 3401853 cli_runner.go:164] Run: docker container inspect -f "'{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}'" marina I0920 11:04:39.909473 3401853 sshutil.go:53] new ssh client: &{IP:127.0.0.1 Port:32867 SSHKeyPath:/home/tadeu/.minikube/machines/marina/id_rsa Username:docker} I0920 11:04:39.995772 3401853 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.4/kubectl apply -f /etc/kubernetes/addons/storage-provisioner.yaml I0920 11:04:40.003821 3401853 ssh_runner.go:195] Run: sudo KUBECONFIG=/var/lib/minikube/kubeconfig /var/lib/minikube/binaries/v1.27.4/kubectl apply -f /etc/kubernetes/addons/storageclass.yaml I0920 11:04:40.123792 3401853 start.go:901] {"host.minikube.internal": } host record injected into CoreDNS's ConfigMap I0920 11:04:40.123833 3401853 api_server.go:72] duration metric: took 256.249982ms to wait for apiserver process to appear ... I0920 11:04:40.123843 3401853 api_server.go:88] waiting for apiserver healthz status ... I0920 11:04:40.123868 3401853 api_server.go:253] Checking apiserver healthz at https://172.30.0.2:8443/healthz ... I0920 11:04:40.127925 3401853 api_server.go:279] https://172.30.0.2:8443/healthz returned 200: ok I0920 11:04:40.128812 3401853 api_server.go:141] control plane version: v1.27.4 I0920 11:04:40.128820 3401853 api_server.go:131] duration metric: took 4.973543ms to wait for apiserver health ... I0920 11:04:40.128825 3401853 system_pods.go:43] waiting for kube-system pods to appear ... 
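The sed pipeline above injects a hosts block for host.minikube.internal into the CoreDNS Corefile, and the health loop that follows polls the apiserver's /healthz endpoint. Both can be reproduced by hand (a sketch using the same context and endpoint shown in the log):

    kubectl --context marina -n kube-system get configmap coredns -o yaml   # Corefile should now contain host.minikube.internal
    kubectl --context marina get --raw /healthz                             # same probe as https://172.30.0.2:8443/healthz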
I0920 11:04:40.133545 3401853 system_pods.go:59] 4 kube-system pods found I0920 11:04:40.133558 3401853 system_pods.go:61] "etcd-marina" [6dff442a-e83b-4969-8f40-b602057118a5] Running / Ready:ContainersNotReady (containers with unready status: [etcd]) / ContainersReady:ContainersNotReady (containers with unready status: [etcd]) I0920 11:04:40.133564 3401853 system_pods.go:61] "kube-apiserver-marina" [444536de-4129-45d5-978b-8da0936c5002] Running / Ready:ContainersNotReady (containers with unready status: [kube-apiserver]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-apiserver]) I0920 11:04:40.133569 3401853 system_pods.go:61] "kube-controller-manager-marina" [5ad55ff7-21e2-44bb-bf03-0fa330dfae33] Running / Ready:ContainersNotReady (containers with unready status: [kube-controller-manager]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-controller-manager]) I0920 11:04:40.133573 3401853 system_pods.go:61] "kube-scheduler-marina" [55572a2a-cfae-49fb-bb82-1e5d0b322043] Running / Ready:ContainersNotReady (containers with unready status: [kube-scheduler]) / ContainersReady:ContainersNotReady (containers with unready status: [kube-scheduler]) I0920 11:04:40.133577 3401853 system_pods.go:74] duration metric: took 4.748723ms to wait for pod list to return data ... I0920 11:04:40.133582 3401853 kubeadm.go:581] duration metric: took 266.001824ms to wait for : map[apiserver:true system_pods:true] ... I0920 11:04:40.133590 3401853 node_conditions.go:102] verifying NodePressure condition ... I0920 11:04:40.135263 3401853 node_conditions.go:122] node storage ephemeral capacity is 114790500Ki I0920 11:04:40.135271 3401853 node_conditions.go:123] node cpu capacity is 12 I0920 11:04:40.135279 3401853 node_conditions.go:105] duration metric: took 1.686725ms to run NodePressure ... I0920 11:04:40.135287 3401853 start.go:228] waiting for startup goroutines ... I0920 11:04:40.300164 3401853 out.go:177] ๐ŸŒŸ Enabled addons: storage-provisioner, default-storageclass I0920 11:04:40.303575 3401853 addons.go:502] enable addons completed in 456.089479ms: enabled=[storage-provisioner default-storageclass] I0920 11:04:40.303596 3401853 start.go:233] waiting for cluster config update ... I0920 11:04:40.303604 3401853 start.go:242] writing updated cluster config ... I0920 11:04:40.303888 3401853 ssh_runner.go:195] Run: rm -f paused I0920 11:04:40.426237 3401853 start.go:600] kubectl: 1.28.1, cluster: 1.27.4 (minor skew: 1) I0920 11:04:40.429787 3401853 out.go:177] ๐Ÿ„ Done! 
kubectl is now configured to use "marina" cluster and "default" namespace by default
*
* ==> container status <==
*
CONTAINER       IMAGE           CREATED              STATE     NAME                      ATTEMPT   POD ID          POD
dd5d86b0c4284   ead0a4a53df89   4 seconds ago        Running   coredns                   0         e53872b66fe0b   coredns-5d78c9869d-bzf9j
34b3f36c348dd   6e38f40d628db   14 seconds ago       Running   storage-provisioner       1         32afe5ecc1392   storage-provisioner
5b6839520ba19   8065b798a4d67   14 seconds ago       Running   calico-node               0         e52985580ac68   calico-node-66kqz
00d2ed9c624e5   8065b798a4d67   15 seconds ago       Exited    mount-bpffs               0         e52985580ac68   calico-node-66kqz
39dc5617f0c90   9dee260ef7f59   43 seconds ago       Exited    install-cni               0         e52985580ac68   calico-node-66kqz
de1d2bbe03022   9dee260ef7f59   44 seconds ago       Exited    upgrade-ipam              0         e52985580ac68   calico-node-66kqz
69b158ef7340a   6e38f40d628db   54 seconds ago       Exited    storage-provisioner       0         32afe5ecc1392   storage-provisioner
694b329798fda   6848d7eda0341   54 seconds ago       Running   kube-proxy                0         95cb732d3a15a   kube-proxy-zxknw
89c61e0d46fc0   e7972205b6614   About a minute ago   Running   kube-apiserver            0         9a93800e4b342   kube-apiserver-marina
914636ad99ab3   98ef2570f3cde   About a minute ago   Running   kube-scheduler            0         6ee40ee68af66   kube-scheduler-marina
456300849f817   86b6af7dd652c   About a minute ago   Running   etcd                      0         f50ab4cc46540   etcd-marina
8894f6b2581be   f466468864b7a   About a minute ago   Running   kube-controller-manager   0         87f1b31de0f4c   kube-controller-manager-marina
*
* ==> containerd <==
*
Sep 20 14:05:44 marina containerd[680]: 2023-09-20 14:05:44.436 [INFO][3022] k8s.go 489: Wrote updated endpoint to datastore ContainerID="e53872b66fe0b8ea98e2b5857a6982c08c07ace7dffdca471aa64bfd602b9cd4" Namespace="kube-system" Pod="coredns-5d78c9869d-bzf9j" WorkloadEndpoint="marina-k8s-coredns--5d78c9869d--bzf9j-eth0"
Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.451120763Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1
Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.451175025Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1
Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.451183891Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..."
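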
runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1 Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.451297093Z" level=info msg="starting signal loop" namespace=k8s.io path=/run/containerd/io.containerd.runtime.v2.task/k8s.io/e53872b66fe0b8ea98e2b5857a6982c08c07ace7dffdca471aa64bfd602b9cd4 pid=3141 runtime=io.containerd.runc.v2 Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.523866955Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:coredns-5d78c9869d-bzf9j,Uid:12a12c87-2dd1-4db5-8a7d-80239632737d,Namespace:kube-system,Attempt:1,} returns sandbox id \"e53872b66fe0b8ea98e2b5857a6982c08c07ace7dffdca471aa64bfd602b9cd4\"" Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.525549181Z" level=info msg="CreateContainer within sandbox \"e53872b66fe0b8ea98e2b5857a6982c08c07ace7dffdca471aa64bfd602b9cd4\" for container &ContainerMetadata{Name:coredns,Attempt:0,}" Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.543333162Z" level=info msg="CreateContainer within sandbox \"e53872b66fe0b8ea98e2b5857a6982c08c07ace7dffdca471aa64bfd602b9cd4\" for &ContainerMetadata{Name:coredns,Attempt:0,} returns container id \"dd5d86b0c42849f1f3e06f3b3c9d96274d42c7330c0849c9afa5f88afd346e00\"" Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.543592878Z" level=info msg="StartContainer for \"dd5d86b0c42849f1f3e06f3b3c9d96274d42c7330c0849c9afa5f88afd346e00\"" Sep 20 14:05:44 marina containerd[680]: time="2023-09-20T14:05:44.575555452Z" level=info msg="StartContainer for \"dd5d86b0c42849f1f3e06f3b3c9d96274d42c7330c0849c9afa5f88afd346e00\" returns successfully" Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.584719298Z" level=info msg="StopPodSandbox for \"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\"" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.610 [INFO][3532] k8s.go 576: Cleaning up netns ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.611 [INFO][3532] dataplane_linux.go 524: Deleting workload's device in netns. ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" iface="eth0" netns="/var/run/netns/cni-757f50a9-9d86-83d1-1b8d-96facb435a62" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.611 [INFO][3532] dataplane_linux.go 535: Entered netns, deleting veth. ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" iface="eth0" netns="/var/run/netns/cni-757f50a9-9d86-83d1-1b8d-96facb435a62" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.611 [INFO][3532] dataplane_linux.go 562: Workload's veth was already gone. Nothing to do. 
ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" iface="eth0" netns="/var/run/netns/cni-757f50a9-9d86-83d1-1b8d-96facb435a62" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.611 [INFO][3532] k8s.go 583: Releasing IP address(es) ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.611 [INFO][3532] utils.go 195: Calico CNI releasing IP address ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.625 [INFO][3547] ipam_plugin.go 415: Releasing address using handleID ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" HandleID="k8s-pod-network.6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Workload="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.625 [INFO][3547] ipam_plugin.go 356: About to acquire host-wide IPAM lock. Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.625 [INFO][3547] ipam_plugin.go 371: Acquired host-wide IPAM lock. Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.629 [WARNING][3547] ipam_plugin.go 432: Asked to release address but it doesn't exist. Ignoring ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" HandleID="k8s-pod-network.6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Workload="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.629 [INFO][3547] ipam_plugin.go 443: Releasing address using workloadID ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" HandleID="k8s-pod-network.6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Workload="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.629 [INFO][3547] ipam_plugin.go 377: Released host-wide IPAM lock. Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.631 [INFO][3532] k8s.go 589: Teardown processing complete. 
ContainerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.633781302Z" level=info msg="TearDown network for sandbox \"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\" successfully" Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.633808393Z" level=info msg="StopPodSandbox for \"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\" returns successfully" Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.634242104Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:calico-kube-controllers-85578c44bf-p876v,Uid:5de66571-f84d-466b-ba58-603d5e8b6688,Namespace:kube-system,Attempt:1,}" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.670 [INFO][3561] plugin.go 327: Calico CNI found existing endpoint: &{{WorkloadEndpoint projectcalico.org/v3} {marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0 calico-kube-controllers-85578c44bf- kube-system 5de66571-f84d-466b-ba58-603d5e8b6688 576 0 2023-09-20 14:04:54 +0000 UTC map[k8s-app:calico-kube-controllers pod-template-hash:85578c44bf projectcalico.org/namespace:kube-system projectcalico.org/orchestrator:k8s projectcalico.org/serviceaccount:calico-kube-controllers] map[] [] [] []} {k8s marina calico-kube-controllers-85578c44bf-p876v eth0 calico-kube-controllers [] [] [kns.kube-system ksa.kube-system.calico-kube-controllers] cali8b6397d290c [] []}} ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Namespace="kube-system" Pod="calico-kube-controllers-85578c44bf-p876v" WorkloadEndpoint="marina-k8s-calico--kube--controllers--85578c44bf--p876v-" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.670 [INFO][3561] k8s.go 74: Extracted identifiers for CmdAddK8s ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Namespace="kube-system" Pod="calico-kube-controllers-85578c44bf-p876v" WorkloadEndpoint="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.686 [INFO][3575] ipam_plugin.go 228: Calico CNI IPAM request count IPv4=1 IPv6=0 ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" HandleID="k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Workload="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.692 [INFO][3575] ipam_plugin.go 268: Auto assigning IP ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" HandleID="k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Workload="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" assignArgs=ipam.AutoAssignArgs{Num4:1, Num6:0, HandleID:(*string)(0xc000702970), Attrs:map[string]string{"namespace":"kube-system", "node":"marina", "pod":"calico-kube-controllers-85578c44bf-p876v", "timestamp":"2023-09-20 14:05:45.686651751 +0000 UTC"}, Hostname:"marina", IPv4Pools:[]net.IPNet{}, IPv6Pools:[]net.IPNet{}, MaxBlocksPerHost:0, HostReservedAttrIPv4s:(*ipam.HostReservedAttr)(nil), HostReservedAttrIPv6s:(*ipam.HostReservedAttr)(nil), IntendedUse:"Workload"} Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.692 [INFO][3575] ipam_plugin.go 356: About to acquire host-wide IPAM lock. Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.692 [INFO][3575] ipam_plugin.go 371: Acquired host-wide IPAM lock. 
Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.692 [INFO][3575] ipam.go 107: Auto-assign 1 ipv4, 0 ipv6 addrs for host 'marina' Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.692 [INFO][3575] ipam.go 660: Looking up existing affinities for host handle="k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.694 [INFO][3575] ipam.go 372: Looking up existing affinities for host host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.696 [INFO][3575] ipam.go 489: Trying affinity for 10.244.168.64/26 host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.697 [INFO][3575] ipam.go 155: Attempting to load block cidr=10.244.168.64/26 host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.698 [INFO][3575] ipam.go 232: Affinity is confirmed and block has been loaded cidr=10.244.168.64/26 host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.698 [INFO][3575] ipam.go 1180: Attempting to assign 1 addresses from block block=10.244.168.64/26 handle="k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.698 [INFO][3575] ipam.go 1682: Creating new handle: k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.701 [INFO][3575] ipam.go 1203: Writing block in order to claim IPs block=10.244.168.64/26 handle="k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.707 [INFO][3575] ipam.go 1216: Successfully claimed IPs: [10.244.168.66/26] block=10.244.168.64/26 handle="k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.707 [INFO][3575] ipam.go 847: Auto-assigned 1 out of 1 IPv4s: [10.244.168.66/26] handle="k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" host="marina" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.707 [INFO][3575] ipam_plugin.go 377: Released host-wide IPAM lock. 
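The [3575] IPAM entries above follow Calico's normal assignment path: take the host-wide IPAM lock, confirm the node's block affinity (10.244.168.64/26), claim one address from that block (10.244.168.66), then release the lock. With the Kubernetes datastore this state lives in Calico CRDs, so it can be inspected afterwards with something like the sketch below (the resource names are Calico's own CRDs; the ipamhandles keys also appear in the etcd section further down):

    kubectl --context marina get ipamblocks.crd.projectcalico.org
    kubectl --context marina get ipamhandles.crd.projectcalico.org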
Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.707 [INFO][3575] ipam_plugin.go 286: Calico CNI IPAM assigned addresses IPv4=[10.244.168.66/26] IPv6=[] ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" HandleID="k8s-pod-network.8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Workload="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.710 [INFO][3561] k8s.go 383: Populated endpoint ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Namespace="kube-system" Pod="calico-kube-controllers-85578c44bf-p876v" WorkloadEndpoint="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" endpoint=&v3.WorkloadEndpoint{TypeMeta:v1.TypeMeta{Kind:"WorkloadEndpoint", APIVersion:"projectcalico.org/v3"}, ObjectMeta:v1.ObjectMeta{Name:"marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0", GenerateName:"calico-kube-controllers-85578c44bf-", Namespace:"kube-system", SelfLink:"", UID:"5de66571-f84d-466b-ba58-603d5e8b6688", ResourceVersion:"576", Generation:0, CreationTimestamp:time.Date(2023, time.September, 20, 14, 4, 54, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"calico-kube-controllers", "pod-template-hash":"85578c44bf", "projectcalico.org/namespace":"kube-system", "projectcalico.org/orchestrator":"k8s", "projectcalico.org/serviceaccount":"calico-kube-controllers"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v3.WorkloadEndpointSpec{Orchestrator:"k8s", Workload:"", Node:"marina", ContainerID:"", Pod:"calico-kube-controllers-85578c44bf-p876v", Endpoint:"eth0", ServiceAccountName:"calico-kube-controllers", IPNetworks:[]string{"10.244.168.66/32"}, IPNATs:[]v3.IPNAT(nil), IPv4Gateway:"", IPv6Gateway:"", Profiles:[]string{"kns.kube-system", "ksa.kube-system.calico-kube-controllers"}, InterfaceName:"cali8b6397d290c", MAC:"", Ports:[]v3.WorkloadEndpointPort(nil), AllowSpoofedSourcePrefixes:[]string(nil)}} Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.710 [INFO][3561] k8s.go 384: Calico CNI using IPs: [10.244.168.66/32] ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Namespace="kube-system" Pod="calico-kube-controllers-85578c44bf-p876v" WorkloadEndpoint="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.710 [INFO][3561] dataplane_linux.go 68: Setting the host side veth name to cali8b6397d290c ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Namespace="kube-system" Pod="calico-kube-controllers-85578c44bf-p876v" WorkloadEndpoint="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.711 [INFO][3561] dataplane_linux.go 473: Disabling IPv4 forwarding ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Namespace="kube-system" Pod="calico-kube-controllers-85578c44bf-p876v" WorkloadEndpoint="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.745 [INFO][3561] k8s.go 411: Added Mac, interface name, and active container ID to endpoint ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Namespace="kube-system" 
Pod="calico-kube-controllers-85578c44bf-p876v" WorkloadEndpoint="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" endpoint=&v3.WorkloadEndpoint{TypeMeta:v1.TypeMeta{Kind:"WorkloadEndpoint", APIVersion:"projectcalico.org/v3"}, ObjectMeta:v1.ObjectMeta{Name:"marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0", GenerateName:"calico-kube-controllers-85578c44bf-", Namespace:"kube-system", SelfLink:"", UID:"5de66571-f84d-466b-ba58-603d5e8b6688", ResourceVersion:"576", Generation:0, CreationTimestamp:time.Date(2023, time.September, 20, 14, 4, 54, 0, time.Local), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"k8s-app":"calico-kube-controllers", "pod-template-hash":"85578c44bf", "projectcalico.org/namespace":"kube-system", "projectcalico.org/orchestrator":"k8s", "projectcalico.org/serviceaccount":"calico-kube-controllers"}, Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v3.WorkloadEndpointSpec{Orchestrator:"k8s", Workload:"", Node:"marina", ContainerID:"8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc", Pod:"calico-kube-controllers-85578c44bf-p876v", Endpoint:"eth0", ServiceAccountName:"calico-kube-controllers", IPNetworks:[]string{"10.244.168.66/32"}, IPNATs:[]v3.IPNAT(nil), IPv4Gateway:"", IPv6Gateway:"", Profiles:[]string{"kns.kube-system", "ksa.kube-system.calico-kube-controllers"}, InterfaceName:"cali8b6397d290c", MAC:"b2:1a:82:ca:2a:9d", Ports:[]v3.WorkloadEndpointPort(nil), AllowSpoofedSourcePrefixes:[]string(nil)}} Sep 20 14:05:45 marina containerd[680]: 2023-09-20 14:05:45.755 [INFO][3561] k8s.go 489: Wrote updated endpoint to datastore ContainerID="8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc" Namespace="kube-system" Pod="calico-kube-controllers-85578c44bf-p876v" WorkloadEndpoint="marina-k8s-calico--kube--controllers--85578c44bf--p876v-eth0" Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.786775060Z" level=info msg="loading plugin \"io.containerd.event.v1.publisher\"..." runtime=io.containerd.runc.v2 type=io.containerd.event.v1 Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.786816457Z" level=info msg="loading plugin \"io.containerd.internal.v1.shutdown\"..." runtime=io.containerd.runc.v2 type=io.containerd.internal.v1 Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.786829862Z" level=info msg="loading plugin \"io.containerd.ttrpc.v1.task\"..." 
runtime=io.containerd.runc.v2 type=io.containerd.ttrpc.v1
Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.787029295Z" level=info msg="starting signal loop" namespace=k8s.io path=/run/containerd/io.containerd.runtime.v2.task/k8s.io/8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc pid=3623 runtime=io.containerd.runc.v2
Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.849014725Z" level=info msg="RunPodSandbox for &PodSandboxMetadata{Name:calico-kube-controllers-85578c44bf-p876v,Uid:5de66571-f84d-466b-ba58-603d5e8b6688,Namespace:kube-system,Attempt:1,} returns sandbox id \"8185c100d4709d58d84ab24c0c88c8aa8faa52c537c95acf6cda22c4a4778ccc\""
Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.849851661Z" level=info msg="PullImage \"docker.io/calico/kube-controllers:v3.26.1\""
Sep 20 14:05:45 marina containerd[680]: time="2023-09-20T14:05:45.852304360Z" level=error msg="failed to decode hosts.toml" error="invalid `host` tree"
Sep 20 14:05:47 marina containerd[680]: time="2023-09-20T14:05:47.014840372Z" level=error msg="failed to decode hosts.toml" error="invalid `host` tree"
*
* ==> coredns [dd5d86b0c42849f1f3e06f3b3c9d96274d42c7330c0849c9afa5f88afd346e00] <==
*
.:53
[INFO] plugin/reload: Running configuration SHA512 = b69cf0c8e938111fb046045ef5ed9a23341ad3db13dfbef74baee330cc03dc3bfdcdd95cbb2df69ce4eed7a0c8a70ef9ed4e0226368346980b3b0be75428cc6d
CoreDNS-1.10.1
linux/amd64, go1.20, 055b2c3
[INFO] 127.0.0.1:43706 - 39021 "HINFO IN 7959795810624893823.6858087051100379327. udp 57 false 512" NXDOMAIN qr,rd,ra 132 0.127622483s
*
* ==> describe nodes <==
*
Name:               marina
Roles:              control-plane
Labels:             beta.kubernetes.io/arch=amd64
                    beta.kubernetes.io/os=linux
                    kubernetes.io/arch=amd64
                    kubernetes.io/hostname=marina
                    kubernetes.io/os=linux
                    minikube.k8s.io/commit=fd7ecd9c4599bef9f04c0986c4a0187f98a4396e
                    minikube.k8s.io/name=marina
                    minikube.k8s.io/primary=true
                    minikube.k8s.io/updated_at=2023_09_20T11_04_39_0700
                    minikube.k8s.io/version=v1.31.2
                    node-role.kubernetes.io/control-plane=
                    node.kubernetes.io/exclude-from-external-load-balancers=
Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: unix:///run/containerd/containerd.sock
                    node.alpha.kubernetes.io/ttl: 0
                    projectcalico.org/IPv4Address: 172.30.0.2/16
                    projectcalico.org/IPv4IPIPTunnelAddr: 10.244.168.64
                    volumes.kubernetes.io/controller-managed-attach-detach: true
CreationTimestamp:  Wed, 20 Sep 2023 14:04:35 +0000
Taints:             <none>
Unschedulable:      false
Lease:
  HolderIdentity:  marina
  AcquireTime:     <unset>
  RenewTime:       Wed, 20 Sep 2023 14:05:43 +0000
Conditions:
  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message
  ----                 ------  -----------------                 ------------------                ------                       -------
  NetworkUnavailable   False   Wed, 20 Sep 2023 14:05:34 +0000   Wed, 20 Sep 2023 14:05:34 +0000   CalicoIsUp                   Calico is running on this node
  MemoryPressure       False   Wed, 20 Sep 2023 14:05:39 +0000   Wed, 20 Sep 2023 14:04:35 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available
  DiskPressure         False   Wed, 20 Sep 2023 14:05:39 +0000   Wed, 20 Sep 2023 14:04:35 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure
  PIDPressure          False   Wed, 20 Sep 2023 14:05:39 +0000   Wed, 20 Sep 2023 14:04:35 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available
  Ready                True    Wed, 20 Sep 2023 14:05:39 +0000   Wed, 20 Sep 2023 14:04:35 +0000   KubeletReady                 kubelet is posting ready status
Addresses:
  InternalIP:  172.30.0.2
  Hostname:    marina
Capacity:
  cpu:                12
  ephemeral-storage:  114790500Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             20383556Ki
  pods:               110
Allocatable:
  cpu:                12
  ephemeral-storage:  114790500Ki
  hugepages-1Gi:      0
  hugepages-2Mi:      0
  memory:             20383556Ki
  pods:               110
System Info:
  Machine ID:                 bf517ad2ed8a458ea05f903239441104
  System UUID:                e33b7d61-b660-4318-ba09-b1c9aaf25199
  Boot ID:                    a9246b52-8add-4a0a-9661-a4038ed86d7d
  Kernel Version:             6.4.14-1-MANJARO
  OS Image:                   Ubuntu 22.04.2 LTS
  Operating System:           linux
  Architecture:               amd64
  Container Runtime Version:  containerd://1.6.21
  Kubelet Version:            v1.27.4
  Kube-Proxy Version:         v1.27.4
PodCIDR:                      10.244.0.0/24
PodCIDRs:                     10.244.0.0/24
Non-terminated Pods:          (9 in total)
  Namespace    Name                                      CPU Requests  CPU Limits  Memory Requests  Memory Limits  Age
  ---------    ----                                      ------------  ----------  ---------------  -------------  ---
  kube-system  calico-kube-controllers-85578c44bf-p876v  0 (0%)        0 (0%)      0 (0%)           0 (0%)         55s
  kube-system  calico-node-66kqz                         250m (2%)     0 (0%)      0 (0%)           0 (0%)         55s
  kube-system  coredns-5d78c9869d-bzf9j                  100m (0%)     0 (0%)      70Mi (0%)        170Mi (0%)     55s
  kube-system  etcd-marina                               100m (0%)     0 (0%)      100Mi (0%)       0 (0%)         71s
  kube-system  kube-apiserver-marina                     250m (2%)     0 (0%)      0 (0%)           0 (0%)         72s
  kube-system  kube-controller-manager-marina            200m (1%)     0 (0%)      0 (0%)           0 (0%)         71s
  kube-system  kube-proxy-zxknw                          0 (0%)        0 (0%)      0 (0%)           0 (0%)         55s
  kube-system  kube-scheduler-marina                     100m (0%)     0 (0%)      0 (0%)           0 (0%)         71s
  kube-system  storage-provisioner                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         69s
Allocated resources:
  (Total limits may be over 100 percent, i.e., overcommitted.)
  Resource           Requests    Limits
  --------           --------    ------
  cpu                1 (8%)      0 (0%)
  memory             170Mi (0%)  170Mi (0%)
  ephemeral-storage  0 (0%)      0 (0%)
  hugepages-1Gi      0 (0%)      0 (0%)
  hugepages-2Mi      0 (0%)      0 (0%)
Events:
  Type    Reason                   Age                From             Message
  ----    ------                   ---                ----             -------
  Normal  Starting                 54s                kube-proxy
  Normal  Starting                 76s                kubelet          Starting kubelet.
  Normal  NodeHasSufficientMemory  76s (x8 over 76s)  kubelet          Node marina status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    76s (x8 over 76s)  kubelet          Node marina status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     76s (x7 over 76s)  kubelet          Node marina status is now: NodeHasSufficientPID
  Normal  NodeAllocatableEnforced  76s                kubelet          Updated Node Allocatable limit across pods
  Normal  Starting                 71s                kubelet          Starting kubelet.
  Normal  NodeAllocatableEnforced  71s                kubelet          Updated Node Allocatable limit across pods
  Normal  NodeHasSufficientMemory  71s                kubelet          Node marina status is now: NodeHasSufficientMemory
  Normal  NodeHasNoDiskPressure    71s                kubelet          Node marina status is now: NodeHasNoDiskPressure
  Normal  NodeHasSufficientPID     71s                kubelet          Node marina status is now: NodeHasSufficientPID
  Normal  RegisteredNode           55s                node-controller  Node marina event: Registered Node marina in Controller
*
* ==> dmesg <==
*
[ +0.003412] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
[ +0.000004] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.
[ +0.002803] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [ +0.000004] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [ +32.639282] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [Sep13 19:24] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [ +0.000006] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [Sep15 13:09] overlayfs: lowerdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [ +1.428878] overlayfs: lowerdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [Sep15 13:10] overlayfs: lowerdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [Sep15 15:04] CIFS: No dialect specified on mount. Default has changed to a more secure dialect, SMB2.1 or later (e.g. SMB3.1.1), from CIFS (SMB1). To use the less secure SMB1 dialect to access old servers which do not support SMB3.1.1 (or even SMB3 or SMB2.1) specify vers=1.0 on mount. [Sep15 15:11] logitech-hidpp-device 0003:046D:4023.0003: hidpp20_batterylevel_get_battery_capacity: received protocol error 0x07 [Sep15 20:02] overlayfs: upperdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [ +0.000005] overlayfs: workdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. [Sep19 16:35] CIFS: VFS: \\192.168.15.12 has not responded in 180 seconds. Reconnecting... [Sep20 13:26] overlayfs: lowerdir is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior. 
[Sep20 13:32] I/O error, dev sr0, sector 1267256 op 0x0:(READ) flags 0x80700 phys_seg 1 prio class 2
[ +0.067607] I/O error, dev sr0, sector 1267256 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000005] Buffer I/O error on dev sr0, logical block 158407, async page read
[ +0.060102] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x80700 phys_seg 1 prio class 2
[ +0.040078] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000007] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.066592] I/O error, dev sr0, sector 8 op 0x0:(READ) flags 0x80700 phys_seg 1 prio class 2
[ +0.080012] I/O error, dev sr0, sector 8 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000007] Buffer I/O error on dev sr0, logical block 1, async page read
[ +0.096640] I/O error, dev sr0, sector 8 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000006] Buffer I/O error on dev sr0, logical block 1, async page read
[ +0.066742] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000006] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.066491] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000006] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.056703] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000005] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.060040] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.046749] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.318518] Buffer I/O error on dev sr0, logical block 0, async page read
[ +3.981598] scsi_io_completion_action: 63 callbacks suppressed
[ +0.000019] blk_print_req_error: 63 callbacks suppressed
[ +0.000001] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.059925] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.069837] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000005] buffer_io_error: 57 callbacks suppressed
[ +0.000001] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.046643] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000006] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.053662] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000006] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.053122] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000005] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.086719] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000006] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.053168] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000006] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.050915] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000005] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.052690] I/O error, dev sr0, sector 0 op 0x0:(READ) flags 0x0 phys_seg 1 prio class 2
[ +0.000006] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.063158] Buffer I/O error on dev sr0, logical block 0, async page read
[ +0.040169] Buffer I/O error on dev sr0, logical block 0, async page read
*
* ==> etcd
[456300849f817f02255369ced5bc2ad21159248bf1c64d6ab4674aa6cbbb897c] <== * {"level":"info","ts":"2023-09-20T14:05:31.656Z","caller":"traceutil/trace.go:171","msg":"trace[179283142] range","detail":"{range_begin:/registry/crd.projectcalico.org/ipamhandles/; range_end:/registry/crd.projectcalico.org/ipamhandles0; response_count:0; response_revision:517; }","duration":"4.381241783s","start":"2023-09-20T14:05:27.275Z","end":"2023-09-20T14:05:31.656Z","steps":["trace[179283142] 'agreement among raft nodes before linearized reading' (duration: 4.378881037s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:27.275Z","time spent":"4.38126143s","remote":"127.0.0.1:46620","response type":"/etcdserverpb.KV/Range","request count":0,"request size":95,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/ipamhandles/\" range_end:\"/registry/crd.projectcalico.org/ipamhandles0\" limit:500 "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"4.381567542s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/ipamhandles/\" range_end:\"/registry/crd.projectcalico.org/ipamhandles0\" count_only:true ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[747403361] range","detail":"{range_begin:/registry/crd.projectcalico.org/ipamhandles/; range_end:/registry/crd.projectcalico.org/ipamhandles0; response_count:0; response_revision:517; }","duration":"4.381583232s","start":"2023-09-20T14:05:27.275Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[747403361] 'agreement among raft nodes before linearized reading' (duration: 4.379105947s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:27.275Z","time spent":"4.381600304s","remote":"127.0.0.1:46620","response type":"/etcdserverpb.KV/Range","request count":0,"request size":94,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/ipamhandles/\" range_end:\"/registry/crd.projectcalico.org/ipamhandles0\" count_only:true "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"4.381699339s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/ipamhandles/\" range_end:\"/registry/crd.projectcalico.org/ipamhandles0\" limit:10000 ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[797218262] range","detail":"{range_begin:/registry/crd.projectcalico.org/ipamhandles/; range_end:/registry/crd.projectcalico.org/ipamhandles0; response_count:0; response_revision:517; }","duration":"4.381712474s","start":"2023-09-20T14:05:27.275Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[797218262] 'agreement among raft nodes before linearized reading' (duration: 4.379160109s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:27.275Z","time spent":"4.381729085s","remote":"127.0.0.1:46620","response type":"/etcdserverpb.KV/Range","request count":0,"request size":95,"response 
count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/ipamhandles/\" range_end:\"/registry/crd.projectcalico.org/ipamhandles0\" limit:10000 "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"4.439874148s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/felixconfigurations/\" range_end:\"/registry/crd.projectcalico.org/felixconfigurations0\" limit:500 ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[1143508009] range","detail":"{range_begin:/registry/crd.projectcalico.org/felixconfigurations/; range_end:/registry/crd.projectcalico.org/felixconfigurations0; response_count:0; response_revision:517; }","duration":"4.439894917s","start":"2023-09-20T14:05:27.217Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[1143508009] 'agreement among raft nodes before linearized reading' (duration: 4.43726132s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:27.217Z","time spent":"4.4399127s","remote":"127.0.0.1:46536","response type":"/etcdserverpb.KV/Range","request count":0,"request size":111,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/felixconfigurations/\" range_end:\"/registry/crd.projectcalico.org/felixconfigurations0\" limit:500 "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"4.447522198s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/ipreservations/\" range_end:\"/registry/crd.projectcalico.org/ipreservations0\" limit:500 ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[2119784110] range","detail":"{range_begin:/registry/crd.projectcalico.org/ipreservations/; range_end:/registry/crd.projectcalico.org/ipreservations0; response_count:0; response_revision:517; }","duration":"4.447540992s","start":"2023-09-20T14:05:27.209Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[2119784110] 'agreement among raft nodes before linearized reading' (duration: 4.444836582s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:27.209Z","time spent":"4.447564146s","remote":"127.0.0.1:49940","response type":"/etcdserverpb.KV/Range","request count":0,"request size":101,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/ipreservations/\" range_end:\"/registry/crd.projectcalico.org/ipreservations0\" limit:500 "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"7.043221891s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/bgpfilters/\" range_end:\"/registry/crd.projectcalico.org/bgpfilters0\" count_only:true ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[969667375] range","detail":"{range_begin:/registry/crd.projectcalico.org/bgpfilters/; range_end:/registry/crd.projectcalico.org/bgpfilters0; response_count:0; 
response_revision:517; }","duration":"7.043240536s","start":"2023-09-20T14:05:24.614Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[969667375] 'agreement among raft nodes before linearized reading' (duration: 7.040385074s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:24.614Z","time spent":"7.04327496s","remote":"127.0.0.1:49870","response type":"/etcdserverpb.KV/Range","request count":0,"request size":92,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/bgpfilters/\" range_end:\"/registry/crd.projectcalico.org/bgpfilters0\" count_only:true "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"7.027199999s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/globalnetworkpolicies/\" range_end:\"/registry/crd.projectcalico.org/globalnetworkpolicies0\" count_only:true ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[1694243093] range","detail":"{range_begin:/registry/crd.projectcalico.org/globalnetworkpolicies/; range_end:/registry/crd.projectcalico.org/globalnetworkpolicies0; response_count:0; response_revision:517; }","duration":"7.027221629s","start":"2023-09-20T14:05:24.630Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[1694243093] 'agreement among raft nodes before linearized reading' (duration: 7.024216527s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:24.630Z","time spent":"7.027246926s","remote":"127.0.0.1:49872","response type":"/etcdserverpb.KV/Range","request count":0,"request size":114,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/globalnetworkpolicies/\" range_end:\"/registry/crd.projectcalico.org/globalnetworkpolicies0\" count_only:true "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"7.043121002s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/bgpfilters/\" range_end:\"/registry/crd.projectcalico.org/bgpfilters0\" limit:500 ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[1637366136] range","detail":"{range_begin:/registry/crd.projectcalico.org/bgpfilters/; range_end:/registry/crd.projectcalico.org/bgpfilters0; response_count:0; response_revision:517; }","duration":"7.04314137s","start":"2023-09-20T14:05:24.614Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[1637366136] 'agreement among raft nodes before linearized reading' (duration: 7.04003577s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:24.614Z","time spent":"7.04316806s","remote":"127.0.0.1:49870","response type":"/etcdserverpb.KV/Range","request count":0,"request size":93,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/bgpfilters/\" range_end:\"/registry/crd.projectcalico.org/bgpfilters0\" limit:500 "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request 
took too long","took":"7.36489823s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/networkpolicies/\" range_end:\"/registry/crd.projectcalico.org/networkpolicies0\" limit:500 ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[1790083016] range","detail":"{range_begin:/registry/crd.projectcalico.org/networkpolicies/; range_end:/registry/crd.projectcalico.org/networkpolicies0; response_count:0; response_revision:517; }","duration":"7.364916745s","start":"2023-09-20T14:05:24.292Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[1790083016] 'agreement among raft nodes before linearized reading' (duration: 7.361720085s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:24.292Z","time spent":"7.364933536s","remote":"127.0.0.1:49864","response type":"/etcdserverpb.KV/Range","request count":0,"request size":103,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/networkpolicies/\" range_end:\"/registry/crd.projectcalico.org/networkpolicies0\" limit:500 "} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"7.04362713s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/bgpfilters/\" range_end:\"/registry/crd.projectcalico.org/bgpfilters0\" limit:10000 ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.657Z","caller":"traceutil/trace.go:171","msg":"trace[78333930] range","detail":"{range_begin:/registry/crd.projectcalico.org/bgpfilters/; range_end:/registry/crd.projectcalico.org/bgpfilters0; response_count:0; response_revision:517; }","duration":"7.043653769s","start":"2023-09-20T14:05:24.614Z","end":"2023-09-20T14:05:31.657Z","steps":["trace[78333930] 'agreement among raft nodes before linearized reading' (duration: 7.040367471s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.657Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:24.614Z","time spent":"7.04369184s","remote":"127.0.0.1:49870","response type":"/etcdserverpb.KV/Range","request count":0,"request size":93,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/bgpfilters/\" range_end:\"/registry/crd.projectcalico.org/bgpfilters0\" limit:10000 "} {"level":"warn","ts":"2023-09-20T14:05:31.658Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"7.365377086s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/networkpolicies/\" range_end:\"/registry/crd.projectcalico.org/networkpolicies0\" limit:10000 ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.658Z","caller":"traceutil/trace.go:171","msg":"trace[998253477] range","detail":"{range_begin:/registry/crd.projectcalico.org/networkpolicies/; range_end:/registry/crd.projectcalico.org/networkpolicies0; response_count:0; response_revision:517; }","duration":"7.365399077s","start":"2023-09-20T14:05:24.292Z","end":"2023-09-20T14:05:31.658Z","steps":["trace[998253477] 'agreement among raft nodes before linearized reading' (duration: 7.361976554s)"],"step_count":1} 
{"level":"warn","ts":"2023-09-20T14:05:31.658Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:24.292Z","time spent":"7.365421228s","remote":"127.0.0.1:49864","response type":"/etcdserverpb.KV/Range","request count":0,"request size":103,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/networkpolicies/\" range_end:\"/registry/crd.projectcalico.org/networkpolicies0\" limit:10000 "} {"level":"warn","ts":"2023-09-20T14:05:31.658Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"4.420746196s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/globalnetworksets/\" range_end:\"/registry/crd.projectcalico.org/globalnetworksets0\" count_only:true ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.658Z","caller":"traceutil/trace.go:171","msg":"trace[1039980213] range","detail":"{range_begin:/registry/crd.projectcalico.org/globalnetworksets/; range_end:/registry/crd.projectcalico.org/globalnetworksets0; response_count:0; response_revision:517; }","duration":"4.420776292s","start":"2023-09-20T14:05:27.238Z","end":"2023-09-20T14:05:31.658Z","steps":["trace[1039980213] 'agreement among raft nodes before linearized reading' (duration: 4.416440801s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.658Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:27.238Z","time spent":"4.420802712s","remote":"127.0.0.1:46574","response type":"/etcdserverpb.KV/Range","request count":0,"request size":106,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/globalnetworksets/\" range_end:\"/registry/crd.projectcalico.org/globalnetworksets0\" count_only:true "} {"level":"warn","ts":"2023-09-20T14:05:31.659Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"4.441987382s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/crd.projectcalico.org/felixconfigurations/\" range_end:\"/registry/crd.projectcalico.org/felixconfigurations0\" count_only:true ","response":"range_response_count:0 size:5"} {"level":"info","ts":"2023-09-20T14:05:31.659Z","caller":"traceutil/trace.go:171","msg":"trace[1969574833] range","detail":"{range_begin:/registry/crd.projectcalico.org/felixconfigurations/; range_end:/registry/crd.projectcalico.org/felixconfigurations0; response_count:0; response_revision:517; }","duration":"4.442008211s","start":"2023-09-20T14:05:27.217Z","end":"2023-09-20T14:05:31.659Z","steps":["trace[1969574833] 'agreement among raft nodes before linearized reading' (duration: 4.437491501s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:31.659Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:27.217Z","time spent":"4.442036063s","remote":"127.0.0.1:46536","response type":"/etcdserverpb.KV/Range","request count":0,"request size":110,"response count":0,"response size":29,"request content":"key:\"/registry/crd.projectcalico.org/felixconfigurations/\" range_end:\"/registry/crd.projectcalico.org/felixconfigurations0\" count_only:true "} {"level":"warn","ts":"2023-09-20T14:05:33.674Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"876.470955ms","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"} 
{"level":"info","ts":"2023-09-20T14:05:33.674Z","caller":"traceutil/trace.go:171","msg":"trace[1683361300] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:523; }","duration":"876.546417ms","start":"2023-09-20T14:05:32.798Z","end":"2023-09-20T14:05:33.674Z","steps":["trace[1683361300] 'range keys from in-memory index tree' (duration: 876.400043ms)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:33.674Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:32.798Z","time spent":"876.605738ms","remote":"127.0.0.1:55330","response type":"/etcdserverpb.KV/Range","request count":0,"request size":18,"response count":0,"response size":29,"request content":"key:\"/registry/health\" "} {"level":"info","ts":"2023-09-20T14:05:33.675Z","caller":"traceutil/trace.go:171","msg":"trace[1540133221] transaction","detail":"{read_only:false; response_revision:524; number_of_response:1; }","duration":"527.333307ms","start":"2023-09-20T14:05:33.147Z","end":"2023-09-20T14:05:33.675Z","steps":["trace[1540133221] 'process raft request' (duration: 498.319468ms)","trace[1540133221] 'compare' (duration: 28.481332ms)"],"step_count":2} {"level":"warn","ts":"2023-09-20T14:05:33.675Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:33.147Z","time spent":"527.402736ms","remote":"127.0.0.1:59798","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":512,"response count":0,"response size":40,"request content":"compare: success:> failure: >"} {"level":"warn","ts":"2023-09-20T14:05:37.297Z","caller":"etcdserver/v3_server.go:840","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":600538626327759633,"retry-timeout":"500ms"} {"level":"warn","ts":"2023-09-20T14:05:37.798Z","caller":"etcdserver/v3_server.go:840","msg":"waiting for ReadIndex response took too long, retrying","sent-request-id":600538626327759633,"retry-timeout":"500ms"} {"level":"warn","ts":"2023-09-20T14:05:37.982Z","caller":"wal/wal.go:805","msg":"slow fdatasync","took":"1.23399229s","expected-duration":"1s"} {"level":"info","ts":"2023-09-20T14:05:37.982Z","caller":"traceutil/trace.go:171","msg":"trace[2047958161] linearizableReadLoop","detail":"{readStateIndex:574; appliedIndex:573; }","duration":"1.186112788s","start":"2023-09-20T14:05:36.796Z","end":"2023-09-20T14:05:37.982Z","steps":["trace[2047958161] 'read index received' (duration: 1.186015256s)","trace[2047958161] 'applied index is now lower than readState.Index' (duration: 97.011ยตs)"],"step_count":2} {"level":"info","ts":"2023-09-20T14:05:37.982Z","caller":"traceutil/trace.go:171","msg":"trace[269850878] transaction","detail":"{read_only:false; response_revision:552; number_of_response:1; }","duration":"1.234231878s","start":"2023-09-20T14:05:36.748Z","end":"2023-09-20T14:05:37.982Z","steps":["trace[269850878] 'process raft request' (duration: 1.234126931s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:37.983Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:36.748Z","time spent":"1.234281551s","remote":"127.0.0.1:59602","response type":"/etcdserverpb.KV/Txn","request count":1,"request size":956,"response count":0,"response size":40,"request content":"compare: success:> failure:<>"} {"level":"warn","ts":"2023-09-20T14:05:37.983Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.186223956s","expected-duration":"100ms","prefix":"read-only range 
","request":"key:\"/registry/health\" ","response":"range_response_count:0 size:5"} {"level":"warn","ts":"2023-09-20T14:05:37.983Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.019145433s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/namespaces/default\" ","response":"range_response_count:1 size:339"} {"level":"info","ts":"2023-09-20T14:05:37.983Z","caller":"traceutil/trace.go:171","msg":"trace[1910382275] range","detail":"{range_begin:/registry/health; range_end:; response_count:0; response_revision:552; }","duration":"1.186253261s","start":"2023-09-20T14:05:36.796Z","end":"2023-09-20T14:05:37.983Z","steps":["trace[1910382275] 'agreement among raft nodes before linearized reading' (duration: 1.186164174s)"],"step_count":1} {"level":"info","ts":"2023-09-20T14:05:37.983Z","caller":"traceutil/trace.go:171","msg":"trace[341616209] range","detail":"{range_begin:/registry/namespaces/default; range_end:; response_count:1; response_revision:552; }","duration":"1.019164288s","start":"2023-09-20T14:05:36.963Z","end":"2023-09-20T14:05:37.983Z","steps":["trace[341616209] 'agreement among raft nodes before linearized reading' (duration: 1.019107792s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:37.983Z","caller":"etcdserver/util.go:170","msg":"apply request took too long","took":"1.170741282s","expected-duration":"100ms","prefix":"read-only range ","request":"key:\"/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath\" ","response":"range_response_count:1 size:598"} {"level":"warn","ts":"2023-09-20T14:05:37.983Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:36.796Z","time spent":"1.186288206s","remote":"127.0.0.1:55330","response type":"/etcdserverpb.KV/Range","request count":0,"request size":18,"response count":0,"response size":29,"request content":"key:\"/registry/health\" "} {"level":"warn","ts":"2023-09-20T14:05:37.983Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:36.963Z","time spent":"1.019194314s","remote":"127.0.0.1:59674","response type":"/etcdserverpb.KV/Range","request count":0,"request size":30,"response count":1,"response size":363,"request content":"key:\"/registry/namespaces/default\" "} {"level":"info","ts":"2023-09-20T14:05:37.983Z","caller":"traceutil/trace.go:171","msg":"trace[1634866594] range","detail":"{range_begin:/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath; range_end:; response_count:1; response_revision:552; }","duration":"1.170772811s","start":"2023-09-20T14:05:36.812Z","end":"2023-09-20T14:05:37.983Z","steps":["trace[1634866594] 'agreement among raft nodes before linearized reading' (duration: 1.170676641s)"],"step_count":1} {"level":"warn","ts":"2023-09-20T14:05:37.983Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:36.812Z","time spent":"1.170805713s","remote":"127.0.0.1:59678","response type":"/etcdserverpb.KV/Range","request count":0,"request size":67,"response count":1,"response size":622,"request content":"key:\"/registry/services/endpoints/kube-system/k8s.io-minikube-hostpath\" "} {"level":"warn","ts":"2023-09-20T14:05:44.088Z","caller":"v3rpc/interceptor.go:197","msg":"request stats","start time":"2023-09-20T14:05:43.587Z","time spent":"501.752976ms","remote":"127.0.0.1:59602","response type":"/etcdserverpb.Lease/LeaseGrant","request count":-1,"request size":-1,"response count":-1,"response size":-1,"request 
content":""} {"level":"info","ts":"2023-09-20T14:05:44.090Z","caller":"traceutil/trace.go:171","msg":"trace[1110042460] transaction","detail":"{read_only:false; response_revision:559; number_of_response:1; }","duration":"249.692697ms","start":"2023-09-20T14:05:43.840Z","end":"2023-09-20T14:05:44.090Z","steps":["trace[1110042460] 'process raft request' (duration: 249.556122ms)"],"step_count":1} * * ==> kernel <== * 14:05:49 up 7 days, 18:23, 0 users, load average: 3.57, 1.84, 1.79 Linux marina 6.4.14-1-MANJARO #1 SMP PREEMPT_DYNAMIC Sat Sep 2 22:56:16 UTC 2023 x86_64 x86_64 x86_64 GNU/Linux PRETTY_NAME="Ubuntu 22.04.2 LTS" * * ==> kube-apiserver [89c61e0d46fc0d7e3c8c033ab3b519fb3f704d84d87be9392c72a01ac68f4830] <== * I0920 14:05:31.659265 1 trace.go:219] Trace[435963208]: "List(recursive=true) etcd3" audit-id:,key:/crd.projectcalico.org/caliconodestatuses,resourceVersion:,resourceVersionMatch:,limit:10000,continue: (20-Sep-2023 14:05:27.244) (total time: 4414ms): Trace[435963208]: [4.414306017s] [4.414306017s] END I0920 14:05:31.659313 1 trace.go:219] Trace[157683204]: "List(recursive=true) etcd3" audit-id:,key:/crd.projectcalico.org/kubecontrollersconfigurations,resourceVersion:,resourceVersionMatch:,limit:10000,continue: (20-Sep-2023 14:05:27.224) (total time: 4434ms): Trace[157683204]: [4.434916595s] [4.434916595s] END I0920 14:05:31.659527 1 trace.go:219] Trace[1336066515]: "List(recursive=true) etcd3" audit-id:,key:/crd.projectcalico.org/globalnetworkpolicies,resourceVersion:,resourceVersionMatch:,limit:10000,continue: (20-Sep-2023 14:05:24.630) (total time: 7029ms): Trace[1336066515]: [7.02932283s] [7.02932283s] END I0920 14:05:31.659698 1 trace.go:219] Trace[295210924]: "List(recursive=true) etcd3" audit-id:,key:/crd.projectcalico.org/networksets,resourceVersion:,resourceVersionMatch:,limit:10000,continue: (20-Sep-2023 14:05:24.285) (total time: 7374ms): Trace[295210924]: [7.374607293s] [7.374607293s] END I0920 14:05:31.659721 1 trace.go:219] Trace[294210129]: "List" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json,audit-id:e7b91999-a1f5-4d2b-b1cf-601c0206ecff,client:172.30.0.2,protocol:HTTP/2.0,resource:globalnetworkpolicies,scope:cluster,url:/apis/crd.projectcalico.org/v1/globalnetworkpolicies,user-agent:kube-controller-manager/v1.27.4 (linux/amd64) kubernetes/fa3d799/metadata-informers,verb:LIST (20-Sep-2023 14:05:24.630) (total time: 7029ms): Trace[294210129]: ["List(recursive=true) etcd3" audit-id:e7b91999-a1f5-4d2b-b1cf-601c0206ecff,key:/crd.projectcalico.org/globalnetworkpolicies,resourceVersion:0,resourceVersionMatch:,limit:500,continue: 7029ms (14:05:24.630)] Trace[294210129]: [7.029108199s] [7.029108199s] END I0920 14:05:31.659827 1 trace.go:219] Trace[1949813973]: "List" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json,audit-id:47ffe859-a30d-4d9d-bd42-439cdc694858,client:172.30.0.2,protocol:HTTP/2.0,resource:kubecontrollersconfigurations,scope:cluster,url:/apis/crd.projectcalico.org/v1/kubecontrollersconfigurations,user-agent:kube-controller-manager/v1.27.4 (linux/amd64) kubernetes/fa3d799/metadata-informers,verb:LIST (20-Sep-2023 14:05:27.224) (total time: 4435ms): Trace[1949813973]: ["List(recursive=true) etcd3" 
audit-id:47ffe859-a30d-4d9d-bd42-439cdc694858,key:/crd.projectcalico.org/kubecontrollersconfigurations,resourceVersion:0,resourceVersionMatch:,limit:500,continue: 4435ms (14:05:27.224)] Trace[1949813973]: [4.435126818s] [4.435126818s] END I0920 14:05:31.659907 1 trace.go:219] Trace[22378092]: "List(recursive=true) etcd3" audit-id:,key:/crd.projectcalico.org/felixconfigurations,resourceVersion:,resourceVersionMatch:,limit:10000,continue: (20-Sep-2023 14:05:27.216) (total time: 4442ms): Trace[22378092]: [4.442936998s] [4.442936998s] END I0920 14:05:31.660005 1 trace.go:219] Trace[1949121998]: "List" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json,audit-id:65f90be3-5a02-42ba-ae7d-2594333712ba,client:172.30.0.2,protocol:HTTP/2.0,resource:felixconfigurations,scope:cluster,url:/apis/crd.projectcalico.org/v1/felixconfigurations,user-agent:kube-controller-manager/v1.27.4 (linux/amd64) kubernetes/fa3d799/metadata-informers,verb:LIST (20-Sep-2023 14:05:27.217) (total time: 4442ms): Trace[1949121998]: ["List(recursive=true) etcd3" audit-id:65f90be3-5a02-42ba-ae7d-2594333712ba,key:/crd.projectcalico.org/felixconfigurations,resourceVersion:0,resourceVersionMatch:,limit:500,continue: 4442ms (14:05:27.217)] Trace[1949121998]: [4.442738858s] [4.442738858s] END I0920 14:05:31.660027 1 trace.go:219] Trace[1511072514]: "List" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json,audit-id:35e6addf-d1d1-4764-9b8b-f669b9390a2d,client:172.30.0.2,protocol:HTTP/2.0,resource:bgpconfigurations,scope:cluster,url:/apis/crd.projectcalico.org/v1/bgpconfigurations,user-agent:kube-controller-manager/v1.27.4 (linux/amd64) kubernetes/fa3d799/metadata-informers,verb:LIST (20-Sep-2023 14:05:27.259) (total time: 4400ms): Trace[1511072514]: ["List(recursive=true) etcd3" audit-id:35e6addf-d1d1-4764-9b8b-f669b9390a2d,key:/crd.projectcalico.org/bgpconfigurations,resourceVersion:0,resourceVersionMatch:,limit:500,continue: 4400ms (14:05:27.259)] Trace[1511072514]: [4.400311358s] [4.400311358s] END I0920 14:05:31.660102 1 trace.go:219] Trace[1844378850]: "List" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json,audit-id:7394fb2c-d83c-4ef0-9940-cdbd6e3a4b1e,client:172.30.0.2,protocol:HTTP/2.0,resource:globalnetworksets,scope:cluster,url:/apis/crd.projectcalico.org/v1/globalnetworksets,user-agent:kube-controller-manager/v1.27.4 (linux/amd64) kubernetes/fa3d799/metadata-informers,verb:LIST (20-Sep-2023 14:05:27.238) (total time: 4421ms): Trace[1844378850]: ["List(recursive=true) etcd3" audit-id:7394fb2c-d83c-4ef0-9940-cdbd6e3a4b1e,key:/crd.projectcalico.org/globalnetworksets,resourceVersion:0,resourceVersionMatch:,limit:500,continue: 4421ms (14:05:27.238)] Trace[1844378850]: [4.421817931s] [4.421817931s] END I0920 14:05:31.660125 1 trace.go:219] Trace[922235045]: "List(recursive=true) etcd3" audit-id:,key:/crd.projectcalico.org/bgpfilters,resourceVersion:,resourceVersionMatch:,limit:10000,continue: (20-Sep-2023 14:05:24.613) (total time: 7046ms): Trace[922235045]: [7.046173984s] [7.046173984s] END I0920 14:05:31.660130 1 trace.go:219] Trace[964806533]: "List" 
accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json,audit-id:d5ce0de9-6052-4125-98df-74256077b455,client:172.30.0.2,protocol:HTTP/2.0,resource:blockaffinities,scope:cluster,url:/apis/crd.projectcalico.org/v1/blockaffinities,user-agent:kube-controller-manager/v1.27.4 (linux/amd64) kubernetes/fa3d799/metadata-informers,verb:LIST (20-Sep-2023 14:05:27.266) (total time: 4393ms): Trace[964806533]: ["List(recursive=true) etcd3" audit-id:d5ce0de9-6052-4125-98df-74256077b455,key:/crd.projectcalico.org/blockaffinities,resourceVersion:0,resourceVersionMatch:,limit:500,continue: 4393ms (14:05:27.267)] Trace[964806533]: [4.393114955s] [4.393114955s] END I0920 14:05:31.660150 1 trace.go:219] Trace[241387446]: "List" accept:application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json,audit-id:0e6931c7-e1dd-415d-80b0-21d0ba2f8fde,client:172.30.0.2,protocol:HTTP/2.0,resource:clusterinformations,scope:cluster,url:/apis/crd.projectcalico.org/v1/clusterinformations,user-agent:kube-controller-manager/v1.27.4 (linux/amd64) kubernetes/fa3d799/metadata-informers,verb:LIST (20-Sep-2023 14:05:24.643) (total time: 7016ms): Trace[241387446]: ["List(recursive=true) etcd3" audit-id:0e6931c7-e1dd-415d-80b0-21d0ba2f8fde,key:/crd.projectcalico.org/clusterinformations,resourceVersion:0,resourceVersionMatch:,limit:500,continue: 7016ms (14:05:24.643)] Trace[241387446]: [7.016727958s] [7.016727958s] END I0920 14:05:31.660180 1 trace.go:219] Trace[249936340]: "List(recursive=true) etcd3" audit-id:,key:/crd.projectcalico.org/networkpolicies,resourceVersion:,resourceVersionMatch:,limit:10000,continue: (20-Sep-2023 14:05:24.292) (total time: 7367ms): Trace[249936340]: [7.367657202s] [7.367657202s] END I0920 14:05:31.660225 1 trace.go:219] Trace[1719654241]: "List(recursive=true) etcd3" audit-id:,key:/crd.projectcalico.org/blockaffinities,resourceVersion:,resourceVersionMatch:,limit:10000,continue: (20-Sep-2023 14:05:27.266) (total time: 4393ms): Trace[1719654241]: [4.393515355s] [4.393515355s] END I0920 14:05:31.669596 1 trace.go:219] Trace[1398234113]: "Patch" accept:application/vnd.kubernetes.protobuf,application/json,audit-id:1fb7dedc-62ee-4e09-a9c0-f265bb2e5e4b,client:172.30.0.2,protocol:HTTP/2.0,resource:events,scope:resource,url:/api/v1/namespaces/kube-system/events/calico-kube-controllers-85578c44bf-p876v.1786a06f4d036355,user-agent:kubelet/v1.27.4 (linux/amd64) kubernetes/fa3d799,verb:PATCH (20-Sep-2023 14:05:29.764) (total time: 1904ms): Trace[1398234113]: ["GuaranteedUpdate etcd3" audit-id:1fb7dedc-62ee-4e09-a9c0-f265bb2e5e4b,key:/events/kube-system/calico-kube-controllers-85578c44bf-p876v.1786a06f4d036355,type:*core.Event,resource:events 1904ms (14:05:29.764) Trace[1398234113]: ---"initial value restored" 1895ms (14:05:31.660)] Trace[1398234113]: [1.904670719s] [1.904670719s] END I0920 14:05:33.675974 1 trace.go:219] Trace[61798578]: "Update" accept:application/vnd.kubernetes.protobuf,application/json,audit-id:594b4fff-6584-4d2d-87a0-92daa8575883,client:172.30.0.2,protocol:HTTP/2.0,resource:leases,scope:resource,url:/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/marina,user-agent:kubelet/v1.27.4 (linux/amd64) kubernetes/fa3d799,verb:PUT (20-Sep-2023 14:05:33.146) (total time: 529ms): Trace[61798578]: ["GuaranteedUpdate etcd3" 
audit-id:594b4fff-6584-4d2d-87a0-92daa8575883,key:/leases/kube-node-lease/marina,type:*coordination.Lease,resource:leases.coordination.k8s.io 528ms (14:05:33.146) Trace[61798578]: ---"Txn call completed" 528ms (14:05:33.675)] Trace[61798578]: [529.054556ms] [529.054556ms] END I0920 14:05:37.983496 1 trace.go:219] Trace[1038184308]: "Get" accept:application/vnd.kubernetes.protobuf, */*,audit-id:0c859617-654b-4fc0-9df8-12f9862d56b8,client:127.0.0.1,protocol:HTTP/2.0,resource:namespaces,scope:resource,url:/api/v1/namespaces/default,user-agent:kube-apiserver/v1.27.4 (linux/amd64) kubernetes/fa3d799,verb:GET (20-Sep-2023 14:05:36.963) (total time: 1019ms): Trace[1038184308]: ---"About to write a response" 1019ms (14:05:37.983) Trace[1038184308]: [1.01980672s] [1.01980672s] END I0920 14:05:37.983541 1 trace.go:219] Trace[1953608151]: "Create" accept:application/vnd.kubernetes.protobuf,application/json,audit-id:6606f0d1-3673-4fe7-b7f9-6c977ed02d9d,client:172.30.0.2,protocol:HTTP/2.0,resource:events,scope:resource,url:/api/v1/namespaces/kube-system/events,user-agent:kubelet/v1.27.4 (linux/amd64) kubernetes/fa3d799,verb:POST (20-Sep-2023 14:05:36.748) (total time: 1235ms): Trace[1953608151]: ["Create etcd3" audit-id:6606f0d1-3673-4fe7-b7f9-6c977ed02d9d,key:/events/kube-system/calico-node-66kqz.1786a0759c6b9102,type:*core.Event,resource:events 1235ms (14:05:36.748) Trace[1953608151]: ---"Txn call succeeded" 1234ms (14:05:37.983)] Trace[1953608151]: [1.235319943s] [1.235319943s] END I0920 14:05:37.983496 1 trace.go:219] Trace[1869476121]: "Get" accept:application/json, */*,audit-id:3365f076-4e19-4b40-a0fe-1ea44112d181,client:172.30.0.2,protocol:HTTP/2.0,resource:endpoints,scope:resource,url:/api/v1/namespaces/kube-system/endpoints/k8s.io-minikube-hostpath,user-agent:storage-provisioner/v0.0.0 (linux/amd64) kubernetes/$Format,verb:GET (20-Sep-2023 14:05:36.812) (total time: 1171ms): Trace[1869476121]: ---"About to write a response" 1171ms (14:05:37.983) Trace[1869476121]: [1.17140852s] [1.17140852s] END I0920 14:05:44.092359 1 trace.go:219] Trace[1368107124]: "Patch" accept:application/vnd.kubernetes.protobuf,application/json,audit-id:8b809d07-59ee-443f-8568-897b9935042c,client:172.30.0.2,protocol:HTTP/2.0,resource:events,scope:resource,url:/api/v1/namespaces/kube-system/events/coredns-5d78c9869d-bzf9j.1786a06f4d0a34b5,user-agent:kubelet/v1.27.4 (linux/amd64) kubernetes/fa3d799,verb:PATCH (20-Sep-2023 14:05:43.585) (total time: 506ms): Trace[1368107124]: ["GuaranteedUpdate etcd3" audit-id:8b809d07-59ee-443f-8568-897b9935042c,key:/events/kube-system/coredns-5d78c9869d-bzf9j.1786a06f4d0a34b5,type:*core.Event,resource:events 506ms (14:05:43.585) Trace[1368107124]: ---"Transaction prepared" 502ms (14:05:44.089)] Trace[1368107124]: ---"Object stored in database" 505ms (14:05:44.092) Trace[1368107124]: [506.636694ms] [506.636694ms] END * * ==> kube-controller-manager [8894f6b2581be76ff207d03821b97edacf49e48396d1ca97ed788758b42bf5e9] <== * I0920 14:04:54.008116 1 shared_informer.go:318] Caches are synced for PVC protection I0920 14:04:54.018243 1 shared_informer.go:318] Caches are synced for taint I0920 14:04:54.018314 1 node_lifecycle_controller.go:1223] "Initializing eviction metric for zone" zone="" I0920 14:04:54.018381 1 taint_manager.go:206] "Starting NoExecuteTaintManager" I0920 14:04:54.018422 1 taint_manager.go:211] "Sending events to api server" I0920 14:04:54.018448 1 event.go:307] "Event occurred" object="marina" fieldPath="" kind="Node" apiVersion="v1" type="Normal" reason="RegisteredNode" 
message="Node marina event: Registered Node marina in Controller" I0920 14:04:54.018468 1 node_lifecycle_controller.go:875] "Missing timestamp for Node. Assuming now as a timestamp" node="marina" I0920 14:04:54.018522 1 node_lifecycle_controller.go:1069] "Controller detected that zone is now in new state" zone="" newState=Normal I0920 14:04:54.019896 1 shared_informer.go:318] Caches are synced for ReplicationController I0920 14:04:54.020314 1 shared_informer.go:318] Caches are synced for endpoint_slice_mirroring I0920 14:04:54.020328 1 shared_informer.go:318] Caches are synced for daemon sets I0920 14:04:54.020318 1 shared_informer.go:318] Caches are synced for bootstrap_signer I0920 14:04:54.020409 1 shared_informer.go:318] Caches are synced for endpoint I0920 14:04:54.021582 1 shared_informer.go:318] Caches are synced for PV protection I0920 14:04:54.033857 1 shared_informer.go:318] Caches are synced for ClusterRoleAggregator I0920 14:04:54.038334 1 event.go:307] "Event occurred" object="kube-system/kube-proxy" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: kube-proxy-zxknw" I0920 14:04:54.040791 1 event.go:307] "Event occurred" object="kube-system/calico-node" fieldPath="" kind="DaemonSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: calico-node-66kqz" I0920 14:04:54.062928 1 shared_informer.go:318] Caches are synced for ephemeral I0920 14:04:54.069477 1 shared_informer.go:318] Caches are synced for node I0920 14:04:54.069491 1 shared_informer.go:318] Caches are synced for cronjob I0920 14:04:54.069492 1 shared_informer.go:318] Caches are synced for stateful set I0920 14:04:54.069514 1 range_allocator.go:174] "Sending events to api server" I0920 14:04:54.069530 1 range_allocator.go:178] "Starting range CIDR allocator" I0920 14:04:54.069534 1 shared_informer.go:311] Waiting for caches to sync for cidrallocator I0920 14:04:54.069539 1 shared_informer.go:318] Caches are synced for cidrallocator I0920 14:04:54.070193 1 shared_informer.go:318] Caches are synced for crt configmap I0920 14:04:54.071280 1 shared_informer.go:318] Caches are synced for GC I0920 14:04:54.071296 1 shared_informer.go:318] Caches are synced for job I0920 14:04:54.071302 1 shared_informer.go:318] Caches are synced for certificate-csrsigning-kubelet-serving I0920 14:04:54.071306 1 shared_informer.go:318] Caches are synced for TTL after finished I0920 14:04:54.071302 1 shared_informer.go:318] Caches are synced for certificate-csrsigning-kubelet-client I0920 14:04:54.071315 1 shared_informer.go:318] Caches are synced for persistent volume I0920 14:04:54.072436 1 shared_informer.go:318] Caches are synced for ReplicaSet I0920 14:04:54.072442 1 shared_informer.go:318] Caches are synced for certificate-csrsigning-legacy-unknown I0920 14:04:54.072465 1 shared_informer.go:318] Caches are synced for certificate-csrsigning-kube-apiserver-client I0920 14:04:54.074144 1 range_allocator.go:380] "Set node PodCIDR" node="marina" podCIDRs=[10.244.0.0/24] I0920 14:04:54.076180 1 shared_informer.go:318] Caches are synced for TTL I0920 14:04:54.078805 1 shared_informer.go:318] Caches are synced for expand I0920 14:04:54.082672 1 shared_informer.go:318] Caches are synced for attach detach I0920 14:04:54.089324 1 shared_informer.go:318] Caches are synced for endpoint_slice I0920 14:04:54.095923 1 shared_informer.go:318] Caches are synced for disruption I0920 14:04:54.118360 1 shared_informer.go:318] Caches are synced for HPA I0920 
14:04:54.120535 1 shared_informer.go:318] Caches are synced for deployment I0920 14:04:54.129368 1 event.go:307] "Event occurred" object="kube-system/calico-kube-controllers" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set calico-kube-controllers-85578c44bf to 1" I0920 14:04:54.132131 1 event.go:307] "Event occurred" object="kube-system/coredns" fieldPath="" kind="Deployment" apiVersion="apps/v1" type="Normal" reason="ScalingReplicaSet" message="Scaled up replica set coredns-5d78c9869d to 1" I0920 14:04:54.147221 1 event.go:307] "Event occurred" object="kube-system/calico-kube-controllers-85578c44bf" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: calico-kube-controllers-85578c44bf-p876v" I0920 14:04:54.147234 1 event.go:307] "Event occurred" object="kube-system/coredns-5d78c9869d" fieldPath="" kind="ReplicaSet" apiVersion="apps/v1" type="Normal" reason="SuccessfulCreate" message="Created pod: coredns-5d78c9869d-bzf9j" I0920 14:04:54.183937 1 shared_informer.go:318] Caches are synced for resource quota I0920 14:04:54.221272 1 shared_informer.go:318] Caches are synced for service account I0920 14:04:54.224846 1 shared_informer.go:318] Caches are synced for resource quota I0920 14:04:54.274269 1 shared_informer.go:318] Caches are synced for namespace I0920 14:04:54.598143 1 shared_informer.go:318] Caches are synced for garbage collector I0920 14:04:54.669371 1 shared_informer.go:318] Caches are synced for garbage collector I0920 14:04:54.669391 1 garbagecollector.go:166] "All resource monitors have synced. Proceeding to collect garbage" I0920 14:05:24.187264 1 resource_quota_monitor.go:223] "QuotaMonitor created object count evaluator" resource="networkpolicies.crd.projectcalico.org" I0920 14:05:24.187309 1 resource_quota_monitor.go:223] "QuotaMonitor created object count evaluator" resource="networksets.crd.projectcalico.org" I0920 14:05:24.187357 1 shared_informer.go:311] Waiting for caches to sync for resource quota I0920 14:05:24.604898 1 shared_informer.go:311] Waiting for caches to sync for garbage collector I0920 14:05:31.687658 1 shared_informer.go:318] Caches are synced for resource quota I0920 14:05:31.705967 1 shared_informer.go:318] Caches are synced for garbage collector * * ==> kube-proxy [694b329798fda2eb7d7d4cb1f0543f83226c7b6eedfa0700dccc1d55f9c5e9c9] <== * I0920 14:04:54.853078 1 node.go:141] Successfully retrieved node IP: 172.30.0.2 I0920 14:04:54.853306 1 server_others.go:110] "Detected node IP" address="172.30.0.2" I0920 14:04:54.853319 1 server_others.go:554] "Using iptables proxy" I0920 14:04:54.870308 1 server_others.go:192] "Using iptables Proxier" I0920 14:04:54.870334 1 server_others.go:199] "kube-proxy running in dual-stack mode" ipFamily=IPv4 I0920 14:04:54.870344 1 server_others.go:200] "Creating dualStackProxier for iptables" I0920 14:04:54.870363 1 server_others.go:484] "Detect-local-mode set to ClusterCIDR, but no IPv6 cluster CIDR defined, defaulting to no-op detect-local for IPv6" I0920 14:04:54.872064 1 proxier.go:253] "Setting route_localnet=1 to allow node-ports on localhost; to change this either disable iptables.localhostNodePorts (--iptables-localhost-nodeports) or set nodePortAddresses (--nodeport-addresses) to filter loopback addresses" I0920 14:04:54.873140 1 server.go:658] "Version info" version="v1.27.4" I0920 14:04:54.873149 1 server.go:660] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" I0920 
14:04:54.874684 1 config.go:188] "Starting service config controller" I0920 14:04:54.874738 1 config.go:97] "Starting endpoint slice config controller" I0920 14:04:54.874818 1 config.go:315] "Starting node config controller" I0920 14:04:54.875905 1 shared_informer.go:311] Waiting for caches to sync for node config I0920 14:04:54.875909 1 shared_informer.go:311] Waiting for caches to sync for service config I0920 14:04:54.875905 1 shared_informer.go:311] Waiting for caches to sync for endpoint slice config I0920 14:04:54.976964 1 shared_informer.go:318] Caches are synced for endpoint slice config I0920 14:04:54.976973 1 shared_informer.go:318] Caches are synced for node config I0920 14:04:54.976977 1 shared_informer.go:318] Caches are synced for service config I0920 14:05:30.167767 1 trace.go:219] Trace[1443493947]: "iptables ChainExists" (20-Sep-2023 14:05:24.875) (total time: 5292ms): Trace[1443493947]: [5.292090179s] [5.292090179s] END I0920 14:05:30.260762 1 trace.go:219] Trace[1851698009]: "iptables ChainExists" (20-Sep-2023 14:05:24.975) (total time: 5284ms): Trace[1851698009]: [5.284779582s] [5.284779582s] END * * ==> kube-scheduler [914636ad99ab3e19e64882f6e8ee1d923a4b382d5a98da4bf723575cdbe15d4f] <== * I0920 14:04:34.752966 1 serving.go:348] Generated self-signed cert in-memory W0920 14:04:35.565837 1 requestheader_controller.go:193] Unable to get configmap/extension-apiserver-authentication in kube-system. Usually fixed by 'kubectl create rolebinding -n kube-system ROLEBINDING_NAME --role=extension-apiserver-authentication-reader --serviceaccount=YOUR_NS:YOUR_SA' W0920 14:04:35.565991 1 authentication.go:368] Error looking up in-cluster authentication configuration: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot get resource "configmaps" in API group "" in the namespace "kube-system" W0920 14:04:35.566061 1 authentication.go:369] Continuing without authentication configuration. This may treat all requests as anonymous. 
W0920 14:04:35.566111 1 authentication.go:370] To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false I0920 14:04:35.581012 1 server.go:154] "Starting Kubernetes Scheduler" version="v1.27.4" I0920 14:04:35.581026 1 server.go:156] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" I0920 14:04:35.584176 1 configmap_cafile_content.go:202] "Starting controller" name="client-ca::kube-system::extension-apiserver-authentication::client-ca-file" I0920 14:04:35.584193 1 shared_informer.go:311] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file I0920 14:04:35.584638 1 secure_serving.go:210] Serving securely on 127.0.0.1:10259 I0920 14:04:35.584671 1 tlsconfig.go:240] "Starting DynamicServingCertificateController" W0920 14:04:35.587225 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope E0920 14:04:35.587263 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StatefulSet: failed to list *v1.StatefulSet: statefulsets.apps is forbidden: User "system:kube-scheduler" cannot list resource "statefulsets" in API group "apps" at the cluster scope W0920 14:04:35.587388 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope E0920 14:04:35.587425 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSINode: failed to list *v1.CSINode: csinodes.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csinodes" in API group "storage.k8s.io" at the cluster scope W0920 14:04:35.587633 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope E0920 14:04:35.587653 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope W0920 14:04:35.588135 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope W0920 14:04:35.588141 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope E0920 14:04:35.588149 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.StorageClass: failed to list *v1.StorageClass: storageclasses.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "storageclasses" in API group "storage.k8s.io" at the cluster scope W0920 14:04:35.588148 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: 
replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope E0920 14:04:35.588154 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope W0920 14:04:35.588163 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope E0920 14:04:35.588165 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope W0920 14:04:35.588135 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope E0920 14:04:35.588198 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Service: failed to list *v1.Service: services is forbidden: User "system:kube-scheduler" cannot list resource "services" in API group "" at the cluster scope E0920 14:04:35.588178 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PodDisruptionBudget: failed to list *v1.PodDisruptionBudget: poddisruptionbudgets.policy is forbidden: User "system:kube-scheduler" cannot list resource "poddisruptionbudgets" in API group "policy" at the cluster scope W0920 14:04:35.588233 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope E0920 14:04:35.588258 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope W0920 14:04:35.588257 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope W0920 14:04:35.588244 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope W0920 14:04:35.588261 1 reflector.go:533] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" E0920 14:04:35.588271 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource 
"persistentvolumes" in API group "" at the cluster scope E0920 14:04:35.588272 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIStorageCapacity: failed to list *v1.CSIStorageCapacity: csistoragecapacities.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csistoragecapacities" in API group "storage.k8s.io" at the cluster scope W0920 14:04:35.588281 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope E0920 14:04:35.588282 1 reflector.go:148] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" E0920 14:04:35.588294 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope W0920 14:04:35.588309 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope W0920 14:04:35.588318 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope E0920 14:04:35.588323 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope E0920 14:04:35.588328 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Pod: failed to list *v1.Pod: pods is forbidden: User "system:kube-scheduler" cannot list resource "pods" in API group "" at the cluster scope W0920 14:04:36.521433 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope E0920 14:04:36.521455 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolumeClaim: failed to list *v1.PersistentVolumeClaim: persistentvolumeclaims is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumeclaims" in API group "" at the cluster scope W0920 14:04:36.552377 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope E0920 14:04:36.552402 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Node: failed to list *v1.Node: nodes is forbidden: User "system:kube-scheduler" cannot list resource "nodes" in API group "" at the cluster scope W0920 14:04:36.573169 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.CSIDriver: 
csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope E0920 14:04:36.573184 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: csidrivers.storage.k8s.io is forbidden: User "system:kube-scheduler" cannot list resource "csidrivers" in API group "storage.k8s.io" at the cluster scope W0920 14:04:36.585821 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope E0920 14:04:36.585848 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicationController: failed to list *v1.ReplicationController: replicationcontrollers is forbidden: User "system:kube-scheduler" cannot list resource "replicationcontrollers" in API group "" at the cluster scope W0920 14:04:36.586901 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope E0920 14:04:36.586926 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.ReplicaSet: failed to list *v1.ReplicaSet: replicasets.apps is forbidden: User "system:kube-scheduler" cannot list resource "replicasets" in API group "apps" at the cluster scope W0920 14:04:36.690064 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope E0920 14:04:36.690104 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.PersistentVolume: failed to list *v1.PersistentVolume: persistentvolumes is forbidden: User "system:kube-scheduler" cannot list resource "persistentvolumes" in API group "" at the cluster scope W0920 14:04:36.712883 1 reflector.go:533] vendor/k8s.io/client-go/informers/factory.go:150: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope E0920 14:04:36.712907 1 reflector.go:148] vendor/k8s.io/client-go/informers/factory.go:150: Failed to watch *v1.Namespace: failed to list *v1.Namespace: namespaces is forbidden: User "system:kube-scheduler" cannot list resource "namespaces" in API group "" at the cluster scope W0920 14:04:36.769189 1 reflector.go:533] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" E0920 14:04:36.769221 1 reflector.go:148] pkg/server/dynamiccertificates/configmap_cafile_content.go:206: Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps "extension-apiserver-authentication" is forbidden: User "system:kube-scheduler" cannot list resource "configmaps" in API group "" in the namespace "kube-system" I0920 14:04:38.585304 1 shared_informer.go:318] Caches are synced for client-ca::kube-system::extension-apiserver-authentication::client-ca-file * * ==> kubelet <== * Sep 20 14:04:54 
marina kubelet[1529]: I0920 14:04:54.234366 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/e8f217b1-c59c-41b0-94ba-2a1f96353be0-lib-modules\") pod \"kube-proxy-zxknw\" (UID: \"e8f217b1-c59c-41b0-94ba-2a1f96353be0\") " pod="kube-system/kube-proxy-zxknw" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234433 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h77mp\" (UniqueName: \"kubernetes.io/projected/e8f217b1-c59c-41b0-94ba-2a1f96353be0-kube-api-access-h77mp\") pod \"kube-proxy-zxknw\" (UID: \"e8f217b1-c59c-41b0-94ba-2a1f96353be0\") " pod="kube-system/kube-proxy-zxknw" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234502 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"policysync\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-policysync\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234584 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sys-fs\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-sys-fs\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234642 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bpffs\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-bpffs\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234718 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-np4dk\" (UniqueName: \"kubernetes.io/projected/dbe1e1f4-0e72-4b65-b401-54265740f3da-kube-api-access-np4dk\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234754 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-calico\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-var-lib-calico\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234803 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lib-modules\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-lib-modules\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234844 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-calico\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-var-run-calico\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234896 1529 reconciler_common.go:258] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-xtables-lock\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.234944 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-local-net-dir\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-host-local-net-dir\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.235005 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"xtables-lock\" (UniqueName: \"kubernetes.io/host-path/e8f217b1-c59c-41b0-94ba-2a1f96353be0-xtables-lock\") pod \"kube-proxy-zxknw\" (UID: \"e8f217b1-c59c-41b0-94ba-2a1f96353be0\") " pod="kube-system/kube-proxy-zxknw" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.235059 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-bin-dir\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-cni-bin-dir\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.235083 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nodeproc\" (UniqueName: \"kubernetes.io/host-path/dbe1e1f4-0e72-4b65-b401-54265740f3da-nodeproc\") pod \"calico-node-66kqz\" (UID: \"dbe1e1f4-0e72-4b65-b401-54265740f3da\") " pod="kube-system/calico-node-66kqz" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.235181 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-proxy\" (UniqueName: \"kubernetes.io/configmap/e8f217b1-c59c-41b0-94ba-2a1f96353be0-kube-proxy\") pod \"kube-proxy-zxknw\" (UID: \"e8f217b1-c59c-41b0-94ba-2a1f96353be0\") " pod="kube-system/kube-proxy-zxknw" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.336346 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/12a12c87-2dd1-4db5-8a7d-80239632737d-config-volume\") pod \"coredns-5d78c9869d-bzf9j\" (UID: \"12a12c87-2dd1-4db5-8a7d-80239632737d\") " pod="kube-system/coredns-5d78c9869d-bzf9j" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.336508 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xmztc\" (UniqueName: \"kubernetes.io/projected/12a12c87-2dd1-4db5-8a7d-80239632737d-kube-api-access-xmztc\") pod \"coredns-5d78c9869d-bzf9j\" (UID: \"12a12c87-2dd1-4db5-8a7d-80239632737d\") " pod="kube-system/coredns-5d78c9869d-bzf9j" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.336536 1529 reconciler_common.go:258] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q6g2s\" (UniqueName: \"kubernetes.io/projected/5de66571-f84d-466b-ba58-603d5e8b6688-kube-api-access-q6g2s\") pod \"calico-kube-controllers-85578c44bf-p876v\" (UID: \"5de66571-f84d-466b-ba58-603d5e8b6688\") " pod="kube-system/calico-kube-controllers-85578c44bf-p876v" Sep 20 14:04:54 marina kubelet[1529]: E0920 14:04:54.572076 1529 remote_runtime.go:176] "RunPodSandbox from 
runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"5215d28378cd49e9b910e6632e9aa5d7d235529ec6d6df9b803c0bf036b4ae4f\": failed to find network info for sandbox \"5215d28378cd49e9b910e6632e9aa5d7d235529ec6d6df9b803c0bf036b4ae4f\"" Sep 20 14:04:54 marina kubelet[1529]: E0920 14:04:54.572133 1529 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"5215d28378cd49e9b910e6632e9aa5d7d235529ec6d6df9b803c0bf036b4ae4f\": failed to find network info for sandbox \"5215d28378cd49e9b910e6632e9aa5d7d235529ec6d6df9b803c0bf036b4ae4f\"" pod="kube-system/calico-kube-controllers-85578c44bf-p876v" Sep 20 14:04:54 marina kubelet[1529]: E0920 14:04:54.572158 1529 kuberuntime_manager.go:1122] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"5215d28378cd49e9b910e6632e9aa5d7d235529ec6d6df9b803c0bf036b4ae4f\": failed to find network info for sandbox \"5215d28378cd49e9b910e6632e9aa5d7d235529ec6d6df9b803c0bf036b4ae4f\"" pod="kube-system/calico-kube-controllers-85578c44bf-p876v" Sep 20 14:04:54 marina kubelet[1529]: E0920 14:04:54.572214 1529 pod_workers.go:1294] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"calico-kube-controllers-85578c44bf-p876v_kube-system(5de66571-f84d-466b-ba58-603d5e8b6688)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"calico-kube-controllers-85578c44bf-p876v_kube-system(5de66571-f84d-466b-ba58-603d5e8b6688)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"5215d28378cd49e9b910e6632e9aa5d7d235529ec6d6df9b803c0bf036b4ae4f\\\": failed to find network info for sandbox \\\"5215d28378cd49e9b910e6632e9aa5d7d235529ec6d6df9b803c0bf036b4ae4f\\\"\"" pod="kube-system/calico-kube-controllers-85578c44bf-p876v" podUID=5de66571-f84d-466b-ba58-603d5e8b6688 Sep 20 14:04:54 marina kubelet[1529]: E0920 14:04:54.573951 1529 remote_runtime.go:176] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"0d5919f3c12550fb5054909b01329152792769d89dd5dd7727d119767959d63e\": failed to find network info for sandbox \"0d5919f3c12550fb5054909b01329152792769d89dd5dd7727d119767959d63e\"" Sep 20 14:04:54 marina kubelet[1529]: E0920 14:04:54.573982 1529 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"0d5919f3c12550fb5054909b01329152792769d89dd5dd7727d119767959d63e\": failed to find network info for sandbox \"0d5919f3c12550fb5054909b01329152792769d89dd5dd7727d119767959d63e\"" pod="kube-system/coredns-5d78c9869d-bzf9j" Sep 20 14:04:54 marina kubelet[1529]: E0920 14:04:54.574001 1529 kuberuntime_manager.go:1122] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"0d5919f3c12550fb5054909b01329152792769d89dd5dd7727d119767959d63e\": failed to find network info for sandbox \"0d5919f3c12550fb5054909b01329152792769d89dd5dd7727d119767959d63e\"" pod="kube-system/coredns-5d78c9869d-bzf9j" Sep 20 14:04:54 marina kubelet[1529]: E0920 14:04:54.574039 1529 pod_workers.go:1294] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-5d78c9869d-bzf9j_kube-system(12a12c87-2dd1-4db5-8a7d-80239632737d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-5d78c9869d-bzf9j_kube-system(12a12c87-2dd1-4db5-8a7d-80239632737d)\\\": 
rpc error: code = Unknown desc = failed to setup network for sandbox \\\"0d5919f3c12550fb5054909b01329152792769d89dd5dd7727d119767959d63e\\\": failed to find network info for sandbox \\\"0d5919f3c12550fb5054909b01329152792769d89dd5dd7727d119767959d63e\\\"\"" pod="kube-system/coredns-5d78c9869d-bzf9j" podUID=12a12c87-2dd1-4db5-8a7d-80239632737d Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.620341 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/storage-provisioner" podStartSLOduration=14.620314572 podCreationTimestamp="2023-09-20 14:04:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-09-20 14:04:54.619331343 +0000 UTC m=+16.099964662" watchObservedRunningTime="2023-09-20 14:04:54.620314572 +0000 UTC m=+16.100947901" Sep 20 14:04:54 marina kubelet[1529]: I0920 14:04:54.628105 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/kube-proxy-zxknw" podStartSLOduration=0.628082603 podCreationTimestamp="2023-09-20 14:04:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-09-20 14:04:54.627911333 +0000 UTC m=+16.108544652" watchObservedRunningTime="2023-09-20 14:04:54.628082603 +0000 UTC m=+16.108715932" Sep 20 14:04:59 marina kubelet[1529]: I0920 14:04:59.016909 1529 kuberuntime_manager.go:1460] "Updating runtime config through cri with podcidr" CIDR="10.244.0.0/24" Sep 20 14:04:59 marina kubelet[1529]: I0920 14:04:59.017453 1529 kubelet_network.go:61] "Updating Pod CIDR" originalPodCIDR="" newPodCIDR="10.244.0.0/24" Sep 20 14:05:08 marina kubelet[1529]: E0920 14:05:08.678642 1529 remote_runtime.go:176] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\": plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" Sep 20 14:05:08 marina kubelet[1529]: E0920 14:05:08.678669 1529 remote_runtime.go:176] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\": plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" Sep 20 14:05:08 marina kubelet[1529]: E0920 14:05:08.678692 1529 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\": plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-5d78c9869d-bzf9j" Sep 20 14:05:08 marina kubelet[1529]: E0920 14:05:08.678694 1529 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\": plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" 
pod="kube-system/calico-kube-controllers-85578c44bf-p876v" Sep 20 14:05:08 marina kubelet[1529]: E0920 14:05:08.678714 1529 kuberuntime_manager.go:1122] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\": plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/coredns-5d78c9869d-bzf9j" Sep 20 14:05:08 marina kubelet[1529]: E0920 14:05:08.678714 1529 kuberuntime_manager.go:1122] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to setup network for sandbox \"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\": plugin type=\"calico\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" pod="kube-system/calico-kube-controllers-85578c44bf-p876v" Sep 20 14:05:08 marina kubelet[1529]: E0920 14:05:08.678767 1529 pod_workers.go:1294] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"calico-kube-controllers-85578c44bf-p876v_kube-system(5de66571-f84d-466b-ba58-603d5e8b6688)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"calico-kube-controllers-85578c44bf-p876v_kube-system(5de66571-f84d-466b-ba58-603d5e8b6688)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\\\": plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-85578c44bf-p876v" podUID=5de66571-f84d-466b-ba58-603d5e8b6688 Sep 20 14:05:08 marina kubelet[1529]: E0920 14:05:08.678798 1529 pod_workers.go:1294] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"coredns-5d78c9869d-bzf9j_kube-system(12a12c87-2dd1-4db5-8a7d-80239632737d)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"coredns-5d78c9869d-bzf9j_kube-system(12a12c87-2dd1-4db5-8a7d-80239632737d)\\\": rpc error: code = Unknown desc = failed to setup network for sandbox \\\"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\\\": plugin type=\\\"calico\\\" failed (add): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-5d78c9869d-bzf9j" podUID=12a12c87-2dd1-4db5-8a7d-80239632737d Sep 20 14:05:09 marina kubelet[1529]: I0920 14:05:09.645202 1529 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Sep 20 14:05:09 marina kubelet[1529]: I0920 14:05:09.645743 1529 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c" Sep 20 14:05:09 marina kubelet[1529]: E0920 14:05:09.729231 1529 remote_runtime.go:205] "StopPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to destroy network for sandbox \"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\": plugin type=\"calico\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running 
and has mounted /var/lib/calico/" podSandboxID="2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c" Sep 20 14:05:09 marina kubelet[1529]: E0920 14:05:09.729272 1529 kuberuntime_manager.go:1312] "Failed to stop sandbox" podSandboxID={Type:containerd ID:2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c} Sep 20 14:05:09 marina kubelet[1529]: E0920 14:05:09.729292 1529 remote_runtime.go:205] "StopPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to destroy network for sandbox \"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\": plugin type=\"calico\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" podSandboxID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Sep 20 14:05:09 marina kubelet[1529]: E0920 14:05:09.729311 1529 kuberuntime_manager.go:1312] "Failed to stop sandbox" podSandboxID={Type:containerd ID:6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d} Sep 20 14:05:09 marina kubelet[1529]: E0920 14:05:09.729336 1529 kuberuntime_manager.go:1038] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"5de66571-f84d-466b-ba58-603d5e8b6688\" with KillPodSandboxError: \"rpc error: code = Unknown desc = failed to destroy network for sandbox \\\"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\\\": plugin type=\\\"calico\\\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" Sep 20 14:05:09 marina kubelet[1529]: E0920 14:05:09.729296 1529 kuberuntime_manager.go:1038] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"12a12c87-2dd1-4db5-8a7d-80239632737d\" with KillPodSandboxError: \"rpc error: code = Unknown desc = failed to destroy network for sandbox \\\"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\\\": plugin type=\\\"calico\\\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" Sep 20 14:05:09 marina kubelet[1529]: E0920 14:05:09.729359 1529 pod_workers.go:1294] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"5de66571-f84d-466b-ba58-603d5e8b6688\" with KillPodSandboxError: \"rpc error: code = Unknown desc = failed to destroy network for sandbox \\\"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\\\": plugin type=\\\"calico\\\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-85578c44bf-p876v" podUID=5de66571-f84d-466b-ba58-603d5e8b6688 Sep 20 14:05:09 marina kubelet[1529]: E0920 14:05:09.729376 1529 pod_workers.go:1294] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"12a12c87-2dd1-4db5-8a7d-80239632737d\" with KillPodSandboxError: \"rpc error: code = Unknown desc = failed to destroy network for sandbox \\\"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\\\": plugin type=\\\"calico\\\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-5d78c9869d-bzf9j" podUID=12a12c87-2dd1-4db5-8a7d-80239632737d Sep 20 14:05:29 marina kubelet[1529]: 
E0920 14:05:29.763929 1529 event.go:280] Server rejected event '&v1.Event{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"coredns-5d78c9869d-bzf9j.1786a06f4d0a34b5", GenerateName:"", Namespace:"kube-system", SelfLink:"", UID:"", ResourceVersion:"511", Generation:0, CreationTimestamp:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), DeletionTimestamp:, DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string(nil), Annotations:map[string]string(nil), OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ManagedFields:[]v1.ManagedFieldsEntry(nil)}, InvolvedObject:v1.ObjectReference{Kind:"Pod", Namespace:"kube-system", Name:"coredns-5d78c9869d-bzf9j", UID:"12a12c87-2dd1-4db5-8a7d-80239632737d", APIVersion:"v1", ResourceVersion:"457", FieldPath:""}, Reason:"SandboxChanged", Message:"Pod sandbox changed, it will be killed and re-created.", Source:v1.EventSource{Component:"kubelet", Host:"marina"}, FirstTimestamp:time.Date(2023, time.September, 20, 14, 5, 9, 0, time.Local), LastTimestamp:time.Date(2023, time.September, 20, 14, 5, 21, 584632510, time.Local), Count:2, Type:"Normal", EventTime:time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC), Series:(*v1.EventSeries)(nil), Action:"", Related:(*v1.ObjectReference)(nil), ReportingController:"", ReportingInstance:""}': 'etcdserver: request timed out' (will not retry!) Sep 20 14:05:31 marina kubelet[1529]: E0920 14:05:31.671955 1529 remote_runtime.go:205] "StopPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to destroy network for sandbox \"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\": plugin type=\"calico\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" podSandboxID="6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d" Sep 20 14:05:31 marina kubelet[1529]: E0920 14:05:31.671977 1529 remote_runtime.go:205] "StopPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to destroy network for sandbox \"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\": plugin type=\"calico\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/" podSandboxID="2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c" Sep 20 14:05:31 marina kubelet[1529]: E0920 14:05:31.671989 1529 kuberuntime_manager.go:1312] "Failed to stop sandbox" podSandboxID={Type:containerd ID:6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d} Sep 20 14:05:31 marina kubelet[1529]: E0920 14:05:31.671998 1529 kuberuntime_manager.go:1312] "Failed to stop sandbox" podSandboxID={Type:containerd ID:2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c} Sep 20 14:05:31 marina kubelet[1529]: E0920 14:05:31.672033 1529 kuberuntime_manager.go:1038] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"12a12c87-2dd1-4db5-8a7d-80239632737d\" with KillPodSandboxError: \"rpc error: code = Unknown desc = failed to destroy network for sandbox \\\"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\\\": plugin type=\\\"calico\\\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" Sep 20 14:05:31 marina kubelet[1529]: E0920 14:05:31.672035 1529 
kuberuntime_manager.go:1038] "killPodWithSyncResult failed" err="failed to \"KillPodSandbox\" for \"5de66571-f84d-466b-ba58-603d5e8b6688\" with KillPodSandboxError: \"rpc error: code = Unknown desc = failed to destroy network for sandbox \\\"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\\\": plugin type=\\\"calico\\\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" Sep 20 14:05:31 marina kubelet[1529]: E0920 14:05:31.672067 1529 pod_workers.go:1294] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"12a12c87-2dd1-4db5-8a7d-80239632737d\" with KillPodSandboxError: \"rpc error: code = Unknown desc = failed to destroy network for sandbox \\\"2d9f15bf0e44880c8b4e685626b74b593376d46e686778bed4ef3c81ac02015c\\\": plugin type=\\\"calico\\\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/coredns-5d78c9869d-bzf9j" podUID=12a12c87-2dd1-4db5-8a7d-80239632737d Sep 20 14:05:31 marina kubelet[1529]: E0920 14:05:31.672070 1529 pod_workers.go:1294] "Error syncing pod, skipping" err="failed to \"KillPodSandbox\" for \"5de66571-f84d-466b-ba58-603d5e8b6688\" with KillPodSandboxError: \"rpc error: code = Unknown desc = failed to destroy network for sandbox \\\"6de572466feff2309f57c55703228ca6ca97fb29fc801a73ec3344e56beaf84d\\\": plugin type=\\\"calico\\\" failed (delete): stat /var/lib/calico/nodename: no such file or directory: check that the calico/node container is running and has mounted /var/lib/calico/\"" pod="kube-system/calico-kube-controllers-85578c44bf-p876v" podUID=5de66571-f84d-466b-ba58-603d5e8b6688 Sep 20 14:05:34 marina kubelet[1529]: I0920 14:05:34.679825 1529 scope.go:115] "RemoveContainer" containerID="69b158ef7340ae092836caccac423f894387c1a02a7874c7993cf92570f30f30" Sep 20 14:05:35 marina kubelet[1529]: I0920 14:05:35.691791 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/calico-node-66kqz" podStartSLOduration=4.077572713 podCreationTimestamp="2023-09-20 14:04:54 +0000 UTC" firstStartedPulling="2023-09-20 14:04:54.47639177 +0000 UTC m=+15.957025089" lastFinishedPulling="2023-09-20 14:05:32.09055887 +0000 UTC m=+53.571192199" observedRunningTime="2023-09-20 14:05:35.691569414 +0000 UTC m=+57.172202733" watchObservedRunningTime="2023-09-20 14:05:35.691739823 +0000 UTC m=+57.172373142" Sep 20 14:05:44 marina kubelet[1529]: I0920 14:05:44.705920 1529 pod_startup_latency_tracker.go:102] "Observed pod startup duration" pod="kube-system/coredns-5d78c9869d-bzf9j" podStartSLOduration=50.705882895 podCreationTimestamp="2023-09-20 14:04:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2023-09-20 14:05:44.705522811 +0000 UTC m=+66.186156130" watchObservedRunningTime="2023-09-20 14:05:44.705882895 +0000 UTC m=+66.186516244" * * ==> storage-provisioner [34b3f36c348dd588e3b18456cc2636809711ddafcf0bac51dc7bd3a9612d2522] <== * I0920 14:05:34.784913 1 storage_provisioner.go:116] Initializing the minikube storage provisioner... I0920 14:05:34.798235 1 storage_provisioner.go:141] Storage provisioner initialized, now starting service! I0920 14:05:34.798662 1 leaderelection.go:243] attempting to acquire leader lease kube-system/k8s.io-minikube-hostpath... 
I0920 14:05:34.806881 1 leaderelection.go:253] successfully acquired lease kube-system/k8s.io-minikube-hostpath
I0920 14:05:34.807068 1 controller.go:835] Starting provisioner controller k8s.io/minikube-hostpath_marina_cbea3b77-650a-4ef0-a1e6-8851eabefba1!
I0920 14:05:34.807071 1 event.go:282] Event(v1.ObjectReference{Kind:"Endpoints", Namespace:"kube-system", Name:"k8s.io-minikube-hostpath", UID:"a50c38ca-d59e-4b68-ae9b-23868a0a8905", APIVersion:"v1", ResourceVersion:"535", FieldPath:""}): type: 'Normal' reason: 'LeaderElection' marina_cbea3b77-650a-4ef0-a1e6-8851eabefba1 became leader
I0920 14:05:34.908279 1 controller.go:884] Started provisioner controller k8s.io/minikube-hostpath_marina_cbea3b77-650a-4ef0-a1e6-8851eabefba1!
*
* ==> storage-provisioner [69b158ef7340ae092836caccac423f894387c1a02a7874c7993cf92570f30f30] <==
*
I0920 14:04:54.740347 1 storage_provisioner.go:116] Initializing the minikube storage provisioner...
F0920 14:05:24.751813 1 main.go:39] error getting server version: Get "https://10.96.0.1:443/version?timeout=32s": dial tcp 10.96.0.1:443: i/o timeout
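Reading the tail of these logs: the kube-scheduler's "forbidden" warnings stop once its client-ca cache syncs at 14:04:38, so they look like the usual RBAC startup race rather than a real permissions problem. The persistent failures are the CNI ones: every sandbox error ends in "stat /var/lib/calico/nodename: no such file or directory", the standard symptom of the calico/node pod not yet being Ready on the node, and the first storage-provisioner container (69b158ef...) dies with "dial tcp 10.96.0.1:443: i/o timeout" most likely for the same reason, while its replacement (34b3f36c...) comes up cleanly once calico-node finishes pulling at 14:05:32. The commands below are a suggested checklist (not part of the original session) for confirming whether this was only a startup race or a lasting CNI failure; they assume the "marina" profile from the audit above and the stock Calico/CoreDNS labels (k8s-app=calico-node, k8s-app=kube-dns).

# Point kubectl at the profile the logs came from (minikube names the context after the profile).
kubectl config use-context marina

# Is calico-node Running and Ready? The "nodename: no such file or directory" errors
# should only persist while this pod is not ready.
kubectl -n kube-system get pods -l k8s-app=calico-node -o wide

# Check that the file the CNI plugin complains about now exists inside the minikube node.
minikube ssh -p marina "ls -l /var/lib/calico/nodename"

# If coredns or calico-kube-controllers stay stuck after calico-node is Ready,
# recent events show whether sandbox creation keeps failing.
kubectl -n kube-system describe pod -l k8s-app=kube-dns
kubectl -n kube-system get events --sort-by=.lastTimestamp | tail -n 20

If calico-node itself never becomes Ready, its container logs (for example kubectl -n kube-system logs ds/calico-node, assuming the stock DaemonSet name) are the next place to look; everything captured above is consistent with a transient ordering problem during minikube start rather than a misconfiguration.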